mirror of
https://github.com/explosion/spaCy.git
synced 2024-12-26 18:06:29 +03:00
* Add links in reference.rst
This commit is contained in:
parent
9fa46743c0
commit
a32c6ff930
|
@ -2,43 +2,85 @@
|
|||
Reference
|
||||
=========
|
||||
|
||||
spaCy is a suite of natural language processing tools, arranged into
|
||||
a pipeline. It is substantially more opinionated than most similar libraries,
|
||||
which often give users the choice of multiple models that compute the same annotation.
|
||||
spaCy's philosophy is to just have one --- the best one. Our perspective is that
|
||||
the redundant options are really only useful to researchers, who need to replicate
|
||||
some prior work exactly.
|
||||
|
||||
Being opinionated allows us to keep the library small, fast, and up-to-date.
|
||||
Below is a summary table showing every class in spaCy, where it is implemented,
|
||||
the basic usage, and a link to its documentation.
|
||||
|
||||
+-------------+--------------------------+---------------------------+------------------------------+------------------+
|
||||
| Class name | Implemented in | Getting | Using | Documentation |
|
||||
+-------------+--------------------------+---------------------------+------------------------------+------------------+
|
||||
| English | spacy/en/__init__.py | nlp = English() | tokens = nlp(u'Some text') | processing.rst |
|
||||
+-------------+--------------------------+---------------------------+------------------------------+------------------+
|
||||
| Doc | spacy/doc.pyx | doc = nlp(text) | token = doc[10] | accessing/doc.rst|
|
||||
+-------------+--------------------------+---------------------------+------------------------------+------------------+
|
||||
| Token | spacy/token.pyx | token = doc[10] | token.head.repvec | accessing.rst |
|
||||
+-------------+--------------------------+---------------------------+------------------------------+------------------+
|
||||
| Span | spacy/span.pyx | sent = list(doc.sents)[0] | token = sent[0] | accessing.rst |
|
||||
+-------------+--------------------------+---------------------------+------------------------------+------------------+
|
||||
| Vocab | spacy/vocab.pyx | nlp.vocab | nlp.vocab[u'word'] | lookup/vocab.rst |
|
||||
+-------------+--------------------------+---------------------------+------------------------------+------------------+
|
||||
| StringStore | spacy/strings.pyx | nlp.vocab.strings | nlp.vocab.strings[u'word'] | lookup/token.rst |
|
||||
+-------------+--------------------------+---------------------------+------------------------------+------------------+
|
||||
| Tokenizer | spacy/tokenizer.pyx | nlp.tokenizer | tokens = nlp.tokenizer(text) | processing.rst |
|
||||
+-------------+--------------------------+---------------------------+------------------------------+------------------+
|
||||
| EnPosTagger | spacy/en/pos.pyx | nlp.tagger | nlp.tagger(tokens) | processing.rst |
|
||||
+-------------+--------------------------+---------------------------+------------------------------+------------------+
|
||||
| Parser | spacy/syntax/parser.pyx | nlp.parser | nlp.parser(tokens) | processing.rst |
|
||||
+-------------+--------------------------+---------------------------+------------------------------+------------------+
|
||||
| Lexeme | spacy/lexeme.pyx | lex = nlp.vocab[u'word'] | lex.repvec | lookup.rst |
|
||||
+-------------+--------------------------+---------------------------+------------------------------+------------------+
|
||||
| Lemmatizer | spacy/en/lemmatizer.py | | | misc.rst |
|
||||
+-------------+--------------------------+---------------------------+------------------------------+------------------+
|
||||
| GoldParse | spacy/gold.pyx | | | misc.rst |
|
||||
+-------------+--------------------------+---------------------------+------------------------------+------------------+
|
||||
| Scorer | spacy/scorer.py | | | |
|
||||
+-------------+--------------------------+---------------------------+------------------------------+------------------+
|
||||
|
||||
+----------------+--------------------------------+--------------------------+
|
||||
| Class name | Implementation | Usage |
|
||||
+================+================================+==========================+
|
||||
| `English`_ | `spacy/en/__init__.py`_ | doc = English() |
|
||||
+----------------+--------------------------------+--------------------------+
|
||||
| `Doc`_ | `spacy/doc.pyx`_ | doc = nlp(text) |
|
||||
+----------------+--------------------------------+--------------------------+
|
||||
| `Token`_ | `spacy/token.pyx`_ | token = doc[10] |
|
||||
+----------------+--------------------------------+--------------------------+
|
||||
| `Span`_ | `spacy/span.pyx`_ | sent = doc.sents.next() |
|
||||
+----------------+--------------------------------+--------------------------+
|
||||
| `Vocab`_ | `spacy/vocab.pyx`_ | nlp.vocab |
|
||||
+----------------+--------------------------------+--------------------------+
|
||||
| `StringStore`_ | `spacy/strings.pyx`_ | nlp.vocab.strings |
|
||||
+----------------+--------------------------------+--------------------------+
|
||||
| `Tokenizer`_ | `spacy/tokenizer.pyx`_ | nlp.tokenizer |
|
||||
+----------------+--------------------------------+--------------------------+
|
||||
| `EnPosTagger`_ | `spacy/en/pos.pyx`_ | nlp.tagger |
|
||||
+----------------+--------------------------------+--------------------------+
|
||||
| `Parser`_ | `spacy/syntax/parser.pyx`_ | nlp.parser |
|
||||
+----------------+--------------------------------+--------------------------+
|
||||
| `Lexeme`_ | `spacy/lexeme.pyx`_ | lex = nlp.vocab[u'word'] |
|
||||
+----------------+--------------------------------+--------------------------+
|
||||
| `GoldParse`_ | `spacy/gold.pyx`_ | |
|
||||
+----------------+--------------------------------+--------------------------+
|
||||
| `Scorer`_ | `spacy/scorer.py`_ | |
|
||||
+----------------+--------------------------------+--------------------------+
|
||||
|
||||
|
||||
.. _English: processing.html
|
||||
|
||||
.. _Doc: using/doc.html
|
||||
|
||||
.. _Token: using/token.html
|
||||
|
||||
.. _Span: using/span.html
|
||||
|
||||
.. _Vocab: lookup.html
|
||||
|
||||
.. _StringStore: lookup.html
|
||||
|
||||
.. _Tokenizer: processing.html
|
||||
|
||||
.. _EnPosTagger: processing.html
|
||||
|
||||
.. _Parser: processing.html
|
||||
|
||||
.. _Lexeme: lookup.html
|
||||
|
||||
.. _Scorer: misc.html
|
||||
|
||||
.. _GoldParse: misc.html
|
||||
|
||||
|
||||
.. _spacy/en/__init__.py: https://github.com/honnibal/spaCy/tree/master/spacy/en/__init__.py
|
||||
|
||||
.. _spacy/doc.pyx: https://github.com/honnibal/spaCy/tree/master/spacy/tokens.pyx
|
||||
|
||||
.. _spacy/token.pyx: https://github.com/honnibal/spaCy/tree/master/spacy/tokens.pyx
|
||||
|
||||
.. _spacy/span.pyx: https://github.com/honnibal/spaCy/tree/master/spacy/spans.pyx
|
||||
|
||||
.. _spacy/vocab.pyx: https://github.com/honnibal/spaCy/tree/master/spacy/vocab.pyx
|
||||
|
||||
.. _spacy/strings.pyx: https://github.com/honnibal/spaCy/tree/master/spacy/strings.pyx
|
||||
|
||||
.. _spacy/tokenizer.pyx: https://github.com/honnibal/spaCy/tree/master/spacy/tokenizer.pyx
|
||||
|
||||
.. _spacy/en/pos.pyx: https://github.com/honnibal/spaCy/tree/master/spacy/en/pos.pyx
|
||||
|
||||
.. _spacy/syntax/parser.pyx: https://github.com/honnibal/spaCy/tree/master/spacy/syntax/parser.pyx
|
||||
|
||||
.. _spacy/lexeme.pyx: https://github.com/honnibal/spaCy/tree/master/spacy/lexeme.pyx
|
||||
|
||||
.. _spacy/gold.pyx: https://github.com/honnibal/spaCy/tree/master/spacy/gold.pyx
|
||||
|
||||
.. _spacy/scorer.py: https://github.com/honnibal/spaCy/tree/master/spacy/scorer.py
|
||||
|
||||
|
|
Loading…
Reference in New Issue
Block a user