Mirror of https://github.com/explosion/spaCy.git (synced 2025-01-12 10:16:27 +03:00)
Remove MDX imports
This commit is contained in:
parent 888a1f4c60
commit 84db6ea20f
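This commit strips the per-file `import ... from '...'` statements from the website's MDX pages while leaving the component usages (`<Architecture101 />`, `<Infobox>`, `<Accordion>`, and so on) in place. Dropping the imports only works if those components are supplied to every MDX page globally rather than imported file by file. As a rough sketch of how such a global mapping can look (an assumption for illustration, not necessarily the spaCy website's actual setup), using `MDXProvider` from `@mdx-js/react`:

```jsx
// Hypothetical sketch: register shared components once so MDX pages can use
// them without per-file imports. The component paths below are taken from the
// imports removed in this commit; the layout component name is made up.
import { MDXProvider } from '@mdx-js/react'
import Infobox from 'components/infobox'
import Accordion from 'components/accordion'
import Tag from 'components/tag'
import Button from 'components/button'
import { Iframe } from 'components/embed'

// Every key in this object becomes usable directly in MDX, e.g. <Infobox>...</Infobox>.
const components = { Infobox, Accordion, Tag, Button, Iframe }

export default function DocsLayout({ children }) {
  return <MDXProvider components={components}>{children}</MDXProvider>
}
```

The hunks below show the affected region of each page; the lines being removed are the leading `import` statements.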
@@ -3,6 +3,4 @@ title: Library Architecture
next: /api/architectures
---
import Architecture101 from 'usage/101/_architecture.mdx'
<Architecture101 />
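For reference, after this change the top of the first file touched here (the Library Architecture page) would presumably read as follows; the frontmatter is reconstructed from the hunk header and context above:

```mdx
---
title: Library Architecture
next: /api/architectures
---

<Architecture101 />
```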
@@ -16,8 +16,6 @@ menu:
> For more details on how to use trained pipelines with spaCy, see the
> [usage guide](/usage/models).
import QuickstartModels from 'widgets/quickstart-models.js'
<QuickstartModels id="quickstart" />
## Package naming conventions {id="conventions"}
@@ -44,8 +44,6 @@ enough, JSX components can be used.
## Logo {id="logo",source="website/src/images/logo.svg"}
import { Logos } from 'widgets/styleguide'
If you would like to use the spaCy logo on your site, please get in touch and
ask us first. However, if you want to show support and tell others that your
project is using spaCy, you can grab one of our
@@ -55,8 +53,6 @@ project is using spaCy, you can grab one of our
## Colors {id="colors"}
import { Colors, Patterns } from 'widgets/styleguide'
<Colors />
### Patterns
@@ -65,8 +61,6 @@ import { Colors, Patterns } from 'widgets/styleguide'
## Typography {id="typography"}
import { H1, H2, H3, H4, H5, Label } from 'components/typography'
> #### Markdown
>
> ```markdown_
@@ -147,8 +141,6 @@ Special link styles are used depending on the link URL.
### Abbreviations {id="abbr"}
import { Abbr } from 'components/typography'
> #### JSX
>
> ```jsx
@@ -161,8 +153,6 @@ abbreviation.
### Tags {id="tags"}
import Tag from 'components/tag'
> ```jsx
> <Tag>method</Tag>
> <Tag variant="version">4</Tag>
@@ -186,8 +176,6 @@ installed.
### Buttons {id="buttons"}
import Button from 'components/button'
> ```jsx
> <Button to="#" variant="primary">Primary small</Button>
> <Button to="#" variant="secondary">Secondary small</Button>
@@ -477,8 +465,6 @@ https://github.com/explosion/spaCy/tree/master/spacy/language.py
### Infobox {id="infobox"}
import Infobox from 'components/infobox'
> #### JSX
>
> ```jsx
@@ -515,8 +501,6 @@ blocks.
### Accordion {id="accordion"}
import Accordion from 'components/accordion'
> #### JSX
>
> ```jsx
@@ -32,8 +32,6 @@ for ent in doc.ents:
Using spaCy's built-in [displaCy visualizer](/usage/visualizers), here's what
our example sentence and its named entities look like:
import { Iframe } from 'components/embed'
<Iframe
  title="displaCy visualization of entities"
  src="/images/displacy-ent1.html"
@@ -35,8 +35,6 @@ the [config](/usage/training#config):
pipeline = ["tok2vec", "tagger", "parser", "ner"]
```
import Accordion from 'components/accordion.js'
<Accordion title="Does the order of pipeline components matter?" id="pipeline-components-order">
The statistical components like the tagger or parser are typically independent
@@ -57,8 +57,6 @@ for token in doc:
Using spaCy's built-in [displaCy visualizer](/usage/visualizers), here's what
our example sentence and its dependencies look like:
import { Iframe } from 'components/embed'
<Iframe
  title="displaCy visualization of dependencies and entities"
  src="/images/displacy-long.html"
@@ -1,5 +1,3 @@
import Infobox from 'components/infobox'
Similarity is determined by comparing **word vectors** or "word embeddings",
multi-dimensional meaning representations of a word. Word vectors can be
generated using an algorithm like
@@ -1,6 +1,3 @@
import { Help } from 'components/typography'
import Link from 'components/link'
<figure>
| Pipeline | Parser | Tagger | NER |
@@ -18,8 +18,6 @@ understanding systems.
### Feature overview {id="comparison-features"}
import Features from 'widgets/features.js'
<Features />
### When should I use spaCy? {id="comparison-usage"}
@@ -69,8 +67,6 @@ pipeline, which is less accurate but much cheaper to run.
> gold-standard segmentation and tokenization, from a pretty specific type of
> text (articles from a single newspaper, 1984-1989).
import Benchmarks from 'usage/_benchmarks-models.mdx'
<Benchmarks />
<figure>
@@ -16,8 +16,6 @@ menu:
> website to [**v2.spacy.io**](https://v2.spacy.io/docs). To see what's changed
> and how to migrate, see the [v3.0 guide](/usage/v3).
import QuickstartInstall from 'widgets/quickstart-install.js'
<QuickstartInstall id="quickstart" />
## Installation instructions {id="installation"}
@@ -449,6 +447,4 @@ either of these, clone your repository again.
## Changelog {id="changelog"}
import Changelog from 'widgets/changelog.js'
<Changelog />
@@ -28,8 +28,6 @@ annotations.
## Part-of-speech tagging {id="pos-tagging",model="tagger, parser"}
import PosDeps101 from 'usage/101/_pos-deps.mdx'
<PosDeps101 />
<Infobox title="Part-of-speech tag scheme" emoji="📖">
@@ -538,8 +536,6 @@ with new examples.
### Named Entity Recognition 101 {id="named-entities-101"}
import NER101 from 'usage/101/_named-entities.mdx'
<NER101 />
### Accessing entity annotations and labels {id="accessing-ner"}
@@ -789,8 +785,6 @@ during tokenization. This is kind of a core principle of spaCy's `Doc` object:
</Infobox>
import Tokenization101 from 'usage/101/_tokenization.mdx'
<Tokenization101 />
<Accordion title="Algorithm details: How spaCy's tokenizer works" id="how-tokenizer-works" spaced>
@@ -1872,8 +1866,6 @@ initialized before training. See the
## Word vectors and semantic similarity {id="vectors-similarity"}
import Vectors101 from 'usage/101/_vectors-similarity.mdx'
<Vectors101 />
### Adding word vectors {id="adding-vectors"}
@@ -2002,8 +1994,6 @@ for word, vector in vector_data.items():
## Language Data {id="language-data"}
import LanguageData101 from 'usage/101/_language-data.mdx'
<LanguageData101 />
### Creating a custom language subclass {id="language-subclass"}
@@ -23,8 +23,6 @@ located anywhere on your file system.
## Quickstart {hidden="true"}
import QuickstartModels from 'widgets/quickstart-models.js'
<QuickstartModels
  title="Quickstart"
  id="quickstart"
@@ -70,8 +68,6 @@ contribute to development. Also see the
[training documentation](/usage/training) for how to train your own pipelines on
your data.
import Languages from 'widgets/languages.js'
<Languages />
### Multi-language support {id="multi-language",version="2"}
@@ -12,8 +12,6 @@ menu:
- ['Plugins & Wrappers', 'plugins']
---
import Pipelines101 from 'usage/101/_pipelines.mdx'
<Pipelines101 />
## Processing text {id="processing"}
@@ -10,8 +10,6 @@ menu:
## Basics {id="basics",hidden="true"}
import Serialization101 from 'usage/101/_serialization.mdx'
<Serialization101 />
### Serializing the pipeline {id="pipeline"}
@@ -195,8 +195,6 @@ text with spaCy.
### Tokenization {id="annotations-token"}
import Tokenization101 from 'usage/101/_tokenization.mdx'
<Tokenization101 />
<Infobox title="Tokenization rules" emoji="📖">
@@ -211,8 +209,6 @@ language-specific data**, see the usage guides on
### Part-of-speech tags and dependencies {id="annotations-pos-deps",model="parser"}
import PosDeps101 from 'usage/101/_pos-deps.mdx'
<PosDeps101 />
<Infobox title="Part-of-speech tagging and morphology" emoji="📖">
@@ -226,8 +222,6 @@ how to **navigate and use the parse tree** effectively, see the usage guides on
### Named Entities {id="annotations-ner",model="ner"}
import NER101 from 'usage/101/_named-entities.mdx'
<NER101 />
<Infobox title="Named Entity Recognition" emoji="📖">
@@ -242,8 +236,6 @@ of a model, see the usage guides on
### Word vectors and similarity {id="vectors-similarity",model="vectors"}
import Vectors101 from 'usage/101/_vectors-similarity.mdx'
<Vectors101 />
<Infobox title="Word vectors" emoji="📖">
@@ -256,8 +248,6 @@ To learn more about word vectors, how to **customize them** and how to load
## Pipelines {id="pipelines"}
import Pipelines101 from 'usage/101/_pipelines.mdx'
<Pipelines101 />
<Infobox title="Processing pipelines" emoji="📖">
@@ -270,8 +260,6 @@ guide on [language processing pipelines](/usage/processing-pipelines).
## Architecture {id="architecture"}
import Architecture101 from 'usage/101/_architecture.mdx'
<Architecture101 />
## Vocab, hashes and lexemes {id="vocab"}
@@ -388,8 +376,6 @@ it.
## Serialization {id="serialization"}
import Serialization101 from 'usage/101/_serialization.mdx'
<Serialization101 />
<Infobox title="Saving and loading" emoji="📖">
@@ -401,8 +387,6 @@ guide on [saving and loading](/usage/saving-loading#models).
## Training {id="training"}
import Training101 from 'usage/101/_training.mdx'
<Training101 />
<Infobox title="Training pipelines and models" emoji="📖">
@@ -480,8 +464,6 @@ for trainable components.
## Language data {id="language-data"}
import LanguageData101 from 'usage/101/_language-data.mdx'
<LanguageData101 />
## Community & FAQ {id="community-faq"}
@@ -17,8 +17,6 @@ menu:
## Introduction to training {id="basics",hidden="true"}
import Training101 from 'usage/101/_training.mdx'
<Training101 />
<Infobox title="Tip: Try the Prodigy annotation tool">
@@ -69,8 +67,6 @@ config.
> requirements and settings as CLI arguments.
> 2. Run [`train`](/api/cli#train) with the exported config and data.
import QuickstartTraining from 'widgets/quickstart-training.js'
<QuickstartTraining />
After you've saved the starter config to a file `base_config.cfg`, you can use
@@ -88,8 +88,6 @@ giving you access to thousands of pretrained models for your pipelines.
![Pipeline components listening to shared embedding component](/images/tok2vec-listener.svg)
import Benchmarks from 'usage/_benchmarks-models.mdx'
<Benchmarks />
#### New trained transformer-based pipelines {id="features-transformers-pipelines"}