Merge branch 'develop' of https://github.com/explosion/spaCy into develop

Matthew Honnibal 2017-05-31 07:21:14 -05:00
commit 8dfb9546f0
21 changed files with 362 additions and 235 deletions

View File

@ -3,66 +3,26 @@ import json
import pathlib
import random
import spacy
from spacy.pipeline import EntityRecognizer
from spacy.gold import GoldParse
from spacy.tagger import Tagger
try:
unicode
except:
unicode = str
import spacy.lang.en
from spacy.gold import GoldParse, biluo_tags_from_offsets
def train_ner(nlp, train_data, entity_types):
# Add new words to vocab.
for raw_text, _ in train_data:
doc = nlp.make_doc(raw_text)
for word in doc:
_ = nlp.vocab[word.orth]
# Train NER.
ner = EntityRecognizer(nlp.vocab, entity_types=entity_types)
for itn in range(5):
random.shuffle(train_data)
for raw_text, entity_offsets in train_data:
doc = nlp.make_doc(raw_text)
gold = GoldParse(doc, entities=entity_offsets)
ner.update(doc, gold)
return ner
def save_model(ner, model_dir):
model_dir = pathlib.Path(model_dir)
if not model_dir.exists():
model_dir.mkdir()
assert model_dir.is_dir()
with (model_dir / 'config.json').open('wb') as file_:
data = json.dumps(ner.cfg)
if isinstance(data, unicode):
data = data.encode('utf8')
file_.write(data)
ner.model.dump(str(model_dir / 'model'))
if not (model_dir / 'vocab').exists():
(model_dir / 'vocab').mkdir()
ner.vocab.dump(str(model_dir / 'vocab' / 'lexemes.bin'))
with (model_dir / 'vocab' / 'strings.json').open('w', encoding='utf8') as file_:
ner.vocab.strings.dump(file_)
def reformat_train_data(tokenizer, examples):
"""Reformat data to match JSON format"""
output = []
for i, (text, entity_offsets) in enumerate(examples):
doc = tokenizer(text)
ner_tags = biluo_tags_from_offsets(tokenizer(text), entity_offsets)
words = [w.text for w in doc]
tags = ['-'] * len(doc)
heads = [0] * len(doc)
deps = [''] * len(doc)
sentence = (range(len(doc)), words, tags, heads, deps, ner_tags)
output.append((text, [(sentence, [])]))
return output
def main(model_dir=None):
nlp = spacy.load('en', parser=False, entity=False, add_vectors=False)
# v1.1.2 onwards
if nlp.tagger is None:
print('---- WARNING ----')
print('Data directory not found')
print('please run: `python -m spacy.en.download --force all` for better performance')
print('Using feature templates for tagging')
print('-----------------')
nlp.tagger = Tagger(nlp.vocab, features=Tagger.feature_templates)
train_data = [
(
'Who is Shaka Khan?',
@ -74,23 +34,35 @@ def main(model_dir=None):
(len('I like London and '), len('I like London and Berlin'), 'LOC')]
)
]
ner = train_ner(nlp, train_data, ['PERSON', 'LOC'])
doc = nlp.make_doc('Who is Shaka Khan?')
nlp.tagger(doc)
ner(doc)
for word in doc:
print(word.text, word.orth, word.lower, word.tag_, word.ent_type_, word.ent_iob)
if model_dir is not None:
save_model(ner, model_dir)
nlp = spacy.lang.en.English(pipeline=['tensorizer', 'ner'])
get_data = lambda: reformat_train_data(nlp.tokenizer, train_data)
optimizer = nlp.begin_training(get_data)
for itn in range(100):
random.shuffle(train_data)
losses = {}
for raw_text, entity_offsets in train_data:
doc = nlp.make_doc(raw_text)
gold = GoldParse(doc, entities=entity_offsets)
nlp.update(
[doc], # Batch of Doc objects
[gold], # Batch of GoldParse objects
drop=0.5, # Dropout -- make it harder to memorise data
sgd=optimizer, # Callable to update weights
losses=losses)
print(losses)
print("Save to", model_dir)
nlp.to_disk(model_dir)
print("Load from", model_dir)
nlp = spacy.lang.en.English(pipeline=['tensorizer', 'ner'])
nlp.from_disk(model_dir)
for raw_text, _ in train_data:
doc = nlp(raw_text)
for word in doc:
print(word.text, word.ent_type_, word.ent_iob_)
if __name__ == '__main__':
main('ner')
import plac
plac.call(main)
# Who "" 2
# is "" 2
# Shaka "" PERSON 3

View File

@ -1,18 +1,17 @@
# coding: utf8
from __future__ import unicode_literals
from ..punctuation import TOKENIZER_INFIXES
from ..char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, CURRENCY
from ..char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES
from ..char_classes import QUOTES, UNITS, ALPHA, ALPHA_LOWER, ALPHA_UPPER
LIST_ICONS = [r'[\p{So}--[°]]']
_currency = r'\$|¢|£|€|¥|฿'
_quotes = QUOTES.replace("'", '')
_prefixes = ([r'\+'] + LIST_PUNCT + LIST_ELLIPSES + LIST_QUOTES + LIST_ICONS)
_prefixes = ([r'\+'] + LIST_PUNCT + LIST_ELLIPSES + LIST_QUOTES)
_suffixes = (LIST_PUNCT + LIST_ELLIPSES + LIST_QUOTES +
_suffixes = (LIST_PUNCT + LIST_ELLIPSES + LIST_QUOTES + LIST_ICONS +
[r'(?<=[0-9])\+',
r'(?<=°[FfCcKk])\.',
r'(?<=[0-9])(?:{})'.format(_currency),
@ -20,8 +19,7 @@ _suffixes = (LIST_PUNCT + LIST_ELLIPSES + LIST_QUOTES +
r'(?<=[{}{}{}(?:{})])\.'.format(ALPHA_LOWER, r'%²\-\)\]\+', QUOTES, _currency),
r'(?<=[{})])-e'.format(ALPHA_LOWER)])
_infixes = (LIST_ELLIPSES +
_infixes = (LIST_ELLIPSES + LIST_ICONS +
[r'(?<=[{}])\.(?=[{}])'.format(ALPHA_LOWER, ALPHA_UPPER),
r'(?<=[{a}]),(?=[{a}])'.format(a=ALPHA),
r'(?<=[{a}"])[:<>=](?=[{a}])'.format(a=ALPHA),
@ -29,7 +27,6 @@ _infixes = (LIST_ELLIPSES +
r'(?<=[{a}]),(?=[{a}])'.format(a=ALPHA),
r'(?<=[{a}])([{q}\)\]\(\[])(?=[\-{a}])'.format(a=ALPHA, q=_quotes)])
TOKENIZER_PREFIXES = _prefixes
TOKENIZER_SUFFIXES = _suffixes
TOKENIZER_INFIXES = _infixes

View File

@ -96,6 +96,13 @@ class BaseDefaults(object):
factories = {
'make_doc': create_tokenizer,
'tensorizer': lambda nlp, **cfg: [TokenVectorEncoder(nlp.vocab, **cfg)],
'tagger': lambda nlp, **cfg: [NeuralTagger(nlp.vocab, **cfg)],
'parser': lambda nlp, **cfg: [
NeuralDependencyParser(nlp.vocab, **cfg),
nonproj.deprojectivize],
'ner': lambda nlp, **cfg: [NeuralEntityRecognizer(nlp.vocab, **cfg)],
# Temporary compatibility -- delete after pivot
'token_vectors': lambda nlp, **cfg: [TokenVectorEncoder(nlp.vocab, **cfg)],
'tags': lambda nlp, **cfg: [NeuralTagger(nlp.vocab, **cfg)],
'dependencies': lambda nlp, **cfg: [
@ -358,37 +365,35 @@ class Language(object):
for doc in docs:
yield doc
def to_disk(self, path, disable=[]):
def to_disk(self, path, disable=tuple()):
"""Save the current state to a directory. If a model is loaded, this
will include the model.
path (unicode or Path): A path to a directory, which will be created if
it doesn't exist. Paths may be either strings or `Path`-like objects.
disable (list): Nameds of pipeline components to disable and prevent
disable (list): Names of pipeline components to disable and prevent
from being saved.
EXAMPLE:
>>> nlp.to_disk('/path/to/models')
"""
path = util.ensure_path(path)
with path.open('wb') as file_:
file_.write(self.to_bytes(disable))
#serializers = {
# 'vocab': lambda p: self.vocab.to_disk(p),
# 'tokenizer': lambda p: self.tokenizer.to_disk(p, vocab=False),
# 'meta.json': lambda p: ujson.dump(p.open('w'), self.meta)
#}
#for proc in self.pipeline:
# if not hasattr(proc, 'name'):
# continue
# if proc.name in disable:
# continue
# if not hasattr(proc, 'to_disk'):
# continue
# serializers[proc.name] = lambda p: proc.to_disk(p, vocab=False)
#util.to_disk(serializers, path)
serializers = OrderedDict((
('vocab', lambda p: self.vocab.to_disk(p)),
('tokenizer', lambda p: self.tokenizer.to_disk(p, vocab=False)),
('meta.json', lambda p: p.open('w').write(json_dumps(self.meta)))
))
for proc in self.pipeline:
if not hasattr(proc, 'name'):
continue
if proc.name in disable:
continue
if not hasattr(proc, 'to_disk'):
continue
serializers[proc.name] = lambda p, proc=proc: proc.to_disk(p, vocab=False)
util.to_disk(path, serializers, {p: False for p in disable})
def from_disk(self, path, disable=[]):
def from_disk(self, path, disable=tuple()):
"""Loads state from a directory. Modifies the object in place and
returns it. If the saved `Language` object contains a model, the
model will be loaded.
@ -403,24 +408,21 @@ class Language(object):
>>> nlp = Language().from_disk('/path/to/models')
"""
path = util.ensure_path(path)
with path.open('rb') as file_:
bytes_data = file_.read()
return self.from_bytes(bytes_data, disable)
#deserializers = {
# 'vocab': lambda p: self.vocab.from_disk(p),
# 'tokenizer': lambda p: self.tokenizer.from_disk(p, vocab=False),
# 'meta.json': lambda p: ujson.dump(p.open('w'), self.meta)
#}
#for proc in self.pipeline:
# if not hasattr(proc, 'name'):
# continue
# if proc.name in disable:
# continue
# if not hasattr(proc, 'to_disk'):
# continue
# deserializers[proc.name] = lambda p: proc.from_disk(p, vocab=False)
#util.from_disk(deserializers, path)
#return self
deserializers = OrderedDict((
('vocab', lambda p: self.vocab.from_disk(p)),
('tokenizer', lambda p: self.tokenizer.from_disk(p, vocab=False)),
('meta.json', lambda p: p.open('w').write(json_dumps(self.meta)))
))
for proc in self.pipeline:
if not hasattr(proc, 'name'):
continue
if proc.name in disable:
continue
if not hasattr(proc, 'to_disk'):
continue
deserializers[proc.name] = lambda p, proc=proc: proc.from_disk(p, vocab=False)
util.from_disk(path, deserializers, {p: False for p in disable})
return self
def to_bytes(self, disable=[]):
"""Serialize the current state to a binary string.

View File

@ -41,7 +41,7 @@ from .parts_of_speech import X
class TokenVectorEncoder(object):
"""Assign position-sensitive vectors to tokens, using a CNN or RNN."""
name = 'tok2vec'
name = 'tensorizer'
@classmethod
def Model(cls, width=128, embed_size=7500, **cfg):
@ -176,17 +176,19 @@ class TokenVectorEncoder(object):
return self
def to_disk(self, path, **exclude):
serialize = {
'model': lambda p: p.open('w').write(util.model_to_bytes(self.model)),
'vocab': lambda p: self.vocab.to_disk(p)
}
serialize = OrderedDict((
('model', lambda p: p.open('wb').write(util.model_to_bytes(self.model))),
('vocab', lambda p: self.vocab.to_disk(p))
))
util.to_disk(path, serialize, exclude)
def from_disk(self, path, **exclude):
deserialize = {
'model': lambda p: util.model_from_bytes(self.model, p.open('rb').read()),
'vocab': lambda p: self.vocab.from_disk(p)
}
if self.model is True:
self.model = self.Model()
deserialize = OrderedDict((
('model', lambda p: util.model_from_bytes(self.model, p.open('rb').read())),
('vocab', lambda p: self.vocab.from_disk(p))
))
util.from_disk(path, deserialize, exclude)
return self
@ -315,7 +317,7 @@ class NeuralTagger(object):
def to_disk(self, path, **exclude):
serialize = {
'model': lambda p: p.open('w').write(util.model_to_bytes(self.model)),
'model': lambda p: p.open('wb').write(util.model_to_bytes(self.model)),
'vocab': lambda p: self.vocab.to_disk(p)
}
util.to_disk(path, serialize, exclude)
@ -420,7 +422,7 @@ cdef class NeuralDependencyParser(NeuralParser):
cdef class NeuralEntityRecognizer(NeuralParser):
name = 'entity'
name = 'ner'
TransitionSystem = BiluoPushDown
nr_feature = 6

View File

@ -16,6 +16,7 @@ from .symbols import NAMES as SYMBOLS_BY_INT
from .typedefs cimport hash_t
from . import util
from .compat import json_dumps
cpdef hash_t hash_string(unicode string) except 0:
@ -201,7 +202,7 @@ cdef class StringStore:
path = util.ensure_path(path)
strings = list(self)
with path.open('w') as file_:
ujson.dump(strings, file_)
file_.write(json_dumps(strings))
def from_disk(self, path):
"""Loads state from a directory. Modifies the object in place and

View File

@ -44,6 +44,7 @@ from .. import util
from ..util import get_async, get_cuda_stream
from .._ml import zero_init, PrecomputableAffine, PrecomputableMaxouts
from .._ml import Tok2Vec, doc2feats, rebatch
from ..compat import json_dumps
from . import _parse_features
from ._parse_features cimport CONTEXT_SIZE
@ -633,11 +634,13 @@ cdef class Parser:
def to_disk(self, path, **exclude):
serializers = {
'model': lambda p: p.open('wb').write(
util.model_to_bytes(self.model)),
'lower_model': lambda p: p.open('wb').write(
util.model_to_bytes(self.model[0])),
'upper_model': lambda p: p.open('wb').write(
util.model_to_bytes(self.model[1])),
'vocab': lambda p: self.vocab.to_disk(p),
'moves': lambda p: self.moves.to_disk(p, strings=False),
'cfg': lambda p: ujson.dumps(p.open('w'), self.cfg)
'cfg': lambda p: p.open('w').write(json_dumps(self.cfg))
}
util.to_disk(path, serializers, exclude)
@ -645,7 +648,7 @@ cdef class Parser:
deserializers = {
'vocab': lambda p: self.vocab.from_disk(p),
'moves': lambda p: self.moves.from_disk(p, strings=False),
'cfg': lambda p: self.cfg.update(ujson.load((path/'cfg.json').open())),
'cfg': lambda p: self.cfg.update(ujson.load(p.open())),
'model': lambda p: None
}
util.from_disk(path, deserializers, exclude)
@ -653,7 +656,14 @@ cdef class Parser:
path = util.ensure_path(path)
if self.model is True:
self.model, cfg = self.Model(**self.cfg)
util.model_from_disk(self.model, path / 'model')
else:
cfg = {}
with (path / 'lower_model').open('rb') as file_:
bytes_data = file_.read()
util.model_from_bytes(self.model[0], bytes_data)
with (path / 'upper_model').open('rb') as file_:
bytes_data = file_.read()
util.model_from_bytes(self.model[1], bytes_data)
self.cfg.update(cfg)
return self

View File

@ -157,22 +157,13 @@ cdef class TransitionSystem:
return 1
def to_disk(self, path, **exclude):
actions = list(self.move_names)
deserializers = {
'actions': lambda p: ujson.dump(p.open('w'), actions),
'strings': lambda p: self.strings.to_disk(p)
}
util.to_disk(path, deserializers, exclude)
with path.open('wb') as file_:
file_.write(self.to_bytes(**exclude))
def from_disk(self, path, **exclude):
actions = []
deserializers = {
'strings': lambda p: self.strings.from_disk(p),
'actions': lambda p: actions.extend(ujson.load(p.open()))
}
util.from_disk(path, deserializers, exclude)
for move, label in actions:
self.add_action(move, label)
with path.open('rb') as file_:
byte_data = file_.read()
self.from_bytes(byte_data, **exclude)
return self
def to_bytes(self, **exclude):

View File

@ -13,21 +13,32 @@ Tests for spaCy modules and classes live in their own directories of the same na
2. [Dos and don'ts](#dos-and-donts)
3. [Parameters](#parameters)
4. [Fixtures](#fixtures)
5. [Helpers and utilities](#helpers-and-utilities)
6. [Contributing to the tests](#contributing-to-the-tests)
5. [Testing models](#testing-models)
6. [Helpers and utilities](#helpers-and-utilities)
7. [Contributing to the tests](#contributing-to-the-tests)
## Running the tests
To show print statements, run the tests with `py.test -s`. To abort after the
first failure, run them with `py.test -x`.
```bash
py.test spacy # run basic tests
py.test spacy --models # run basic and model tests
py.test spacy --slow # run basic and slow tests
py.test spacy --models --slow # run all tests
py.test spacy # run basic tests
py.test spacy --models --en # run basic and English model tests
py.test spacy --models --all # run basic and all model tests
py.test spacy --slow # run basic and slow tests
py.test spacy --models --all --slow # run all tests
```
To show print statements, run the tests with `py.test -s`. To abort after the first failure, run them with `py.test -x`.
You can also run tests in a specific file or directory, or even only one
specific test:
```bash
py.test spacy/tests/tokenizer # run all tests in directory
py.test spacy/tests/tokenizer/test_exceptions.py # run all tests in file
py.test spacy/tests/tokenizer/test_exceptions.py::test_tokenizer_handles_emoji # run specific test
```
## Dos and don'ts
@ -83,14 +94,9 @@ These are the main fixtures that are currently available:
| Fixture | Description |
| --- | --- |
| `tokenizer` | Creates **all available** language tokenizers and runs the test for **each of them**. |
| `en_tokenizer` | Creates an English `Tokenizer` object. |
| `de_tokenizer` | Creates a German `Tokenizer` object. |
| `hu_tokenizer` | Creates a Hungarian `Tokenizer` object. |
| `en_vocab` | Creates an English `Vocab` object. |
| `en_entityrecognizer` | Creates an English `EntityRecognizer` object. |
| `lemmatizer` | Creates a `Lemmatizer` object from the installed language data (`None` if no data is found).
| `EN` | Creates an instance of `English`. Only use for tests that require the models. |
| `DE` | Creates an instance of `German`. Only use for tests that require the models. |
| `en_tokenizer`, `de_tokenizer`, ... | Creates an English, German etc. tokenizer. |
| `en_vocab`, `en_entityrecognizer`, ... | Creates an instance of the English `Vocab`, `EntityRecognizer` object etc. |
| `EN`, `DE`, ... | Creates a language class with a loaded model. For more info, see [Testing models](#testing-models). |
| `text_file` | Creates an instance of `StringIO` to simulate reading from and writing to files. |
| `text_file_b` | Creates an instance of `ByteIO` to simulate reading from and writing to files. |
@ -103,6 +109,48 @@ def test_module_do_something(en_tokenizer):
If all tests in a file require a specific configuration, or use the same complex example, it can be helpful to create a separate fixture. This fixture should be added at the top of each file. Make sure to use descriptive names for these fixtures and don't override any of the global fixtures listed above. **From looking at a test, it should immediately be clear which fixtures are used, and where they are coming from.**
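For illustration, here is a minimal sketch of such a file-level fixture (the fixture name, example text and test are hypothetical, not taken from the suite):

```python
import pytest


@pytest.fixture
def complex_text():
    # shared example text used by several tests in this file
    return "The U.K. startup was valued at $1.2 billion in mid-2016."


def test_tokenizer_splits_complex_text(en_tokenizer, complex_text):
    tokens = en_tokenizer(complex_text)
    assert len(tokens) > 1
    assert tokens[0].text == "The"
```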
## Testing models
Models should only be loaded and tested **if absolutely necessary**, for example if you're specifically testing a model's performance, or if your test is related to model loading. If you only need an annotated `Doc`, you should use the `get_doc()` helper function to create it manually instead.
To specify which language models a test is related to, set the language ID as an argument of `@pytest.mark.models`. This allows you to later run the tests with `--models --en`. You can then use the `EN` [fixture](#fixtures) to get a language
class with a loaded model.
```python
@pytest.mark.models('en')
def test_english_model(EN):
doc = EN(u'This is a test')
```
> ⚠️ **Important note:** In order to test models, they need to be installed as a package. The [conftest.py](conftest.py) includes a list of all available models, mapped to their IDs, e.g. `en`. Unless otherwise specified, each model that's installed in your environment will be imported and tested. If you don't have a model installed, **the test will be skipped**.
Under the hood, `pytest.importorskip` is used to import a model package and skip the test if the package is not installed. The `EN` fixture, for example, gets all
available models for `en`, [parametrizes](#parameters) them to run the test for *each of them*, and uses `load_test_model()` to import the model and run the test, or skip it if the model is not installed.
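As a rough sketch of that mechanism (the model names and fixture body are illustrative, not the actual `conftest.py`):

```python
import pytest

from .util import load_test_model

# hypothetical list of installed English model packages
_EN_MODELS = ['en_core_web_sm', 'en_core_web_md']


@pytest.fixture(params=_EN_MODELS)
def EN(request):
    # load_test_model() uses pytest.importorskip() internally, so the
    # test is skipped if the requested package isn't installed
    return load_test_model(request.param)
```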
### Testing specific models
Using the `load_test_model()` helper function, you can also write tests for specific models, or combinations of them:
```python
from .util import load_test_model
@pytest.mark.models('en')
def test_en_md_only():
nlp = load_test_model('en_core_web_md')
# test something specific to en_core_web_md
@pytest.mark.models('en', 'fr')
@pytest.mark.parametrize('model', ['en_core_web_md', 'fr_depvec_web_lg'])
def test_different_models(model):
nlp = load_test_model(model)
# test something specific to the parametrized models
```
### Known issues and future improvements
Using `importorskip` on a list of model packages is not ideal and we're looking to improve this in the future. But at the moment, it's the best way to ensure that tests are performed on specific model packages only, and that you'll always be able to run the tests, even if you don't have *all available models* installed. (If the tests made a call to `spacy.load('en')` instead, this would load whichever model you've created an `en` shortcut for. This may be one of spaCy's default models, but it could just as easily be your own custom English model.)
The current setup also doesn't provide an easy way to only run tests on specific model versions. The `minversion` keyword argument on `pytest.importorskip` can take care of this, but it currently only checks for the package's `__version__` attribute. An alternative solution would be to load a model package's meta.json and skip if the model's version does not match the one specified in the test.
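A possible sketch of that alternative, assuming the installed package ships a `meta.json` with a `version` field (the helper name is made up):

```python
import json
from distutils.version import LooseVersion
from pathlib import Path

import pytest


def skip_unless_model_version(model_dir, min_version):
    # read the installed model's meta.json and skip the calling test
    # if the model is older than the version the test requires
    with (Path(model_dir) / 'meta.json').open() as file_:
        meta = json.load(file_)
    version = meta.get('version', '0.0.0')
    if LooseVersion(version) < LooseVersion(min_version):
        pytest.skip("model version %s < required %s" % (version, min_version))
```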
## Helpers and utilities
@ -152,11 +200,11 @@ print([token.dep_ for token in doc])
**Note:** There's currently no way of setting the serializer data for the parser without loading the models. If this is relevant to your test, constructing the `Doc` via `get_doc()` won't work.
### Other utilities
| Name | Description |
| --- | --- |
| `load_test_model` | Load a model if it's installed as a package, otherwise skip test. |
| `apply_transition_sequence(parser, doc, sequence)` | Perform a series of pre-specified transitions, to put the parser in a desired state. |
| `add_vecs_to_vocab(vocab, vectors)` | Add list of vector tuples (`[("text", [1, 2, 3])]`) to given vocab. All vectors need to have the same length. |
| `get_cosine(vec1, vec2)` | Get cosine for two given vectors. |
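A short, hypothetical usage example of the vector helpers listed above (import path assumed):

```python
from .util import add_vecs_to_vocab, get_cosine


def test_vector_helpers(en_vocab):
    # all vectors added to the vocab must have the same length
    vectors = [("apple", [1.0, 2.0, 3.0]), ("orange", [2.0, 3.0, 4.0])]
    add_vecs_to_vocab(en_vocab, vectors)
    assert get_cosine([1.0, 2.0, 3.0], [2.0, 3.0, 4.0]) > 0.9
```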

View File

@ -102,7 +102,7 @@ def test_doc_api_getitem(en_tokenizer):
def test_doc_api_serialize(en_tokenizer, text):
tokens = en_tokenizer(text)
new_tokens = get_doc(tokens.vocab).from_bytes(tokens.to_bytes())
assert tokens.string == new_tokens.string
assert tokens.text == new_tokens.text
assert [t.text for t in tokens] == [t.text for t in new_tokens]
assert [t.orth for t in tokens] == [t.orth for t in new_tokens]

View File

@ -41,7 +41,5 @@ def test_tokenizer_excludes_false_pos_emoticons(tokenizer, text, length):
@pytest.mark.parametrize('text,length', [('can you still dunk?🍕🍔😵LOL', 8),
('i💙you', 3), ('🤘🤘yay!', 4)])
def test_tokenizer_handles_emoji(tokenizer, text, length):
exceptions = ["hu"]
tokens = tokenizer(text)
if tokens[0].lang_ not in exceptions:
assert len(tokens) == length
assert len(tokens) == length

View File

@ -12,6 +12,7 @@ MODELS = {}
def load_test_model(model):
"""Load a model if it's installed as a package, otherwise skip."""
if model not in MODELS:
module = pytest.importorskip(model)
MODELS[model] = module.load()

View File

@ -6,8 +6,8 @@ from cython.operator cimport dereference as deref
from cython.operator cimport preincrement as preinc
from cymem.cymem cimport Pool
from preshed.maps cimport PreshMap
import regex as re
import dill
from .strings cimport hash_string
from . import util
cimport cython
@ -344,8 +344,8 @@ cdef class Tokenizer:
strings or `Path`-like objects.
RETURNS (Tokenizer): The modified `Tokenizer` object.
"""
with path.open('wb') as file_:
bytes_data = file_.read(path)
with path.open('rb') as file_:
bytes_data = file_.read()
self.from_bytes(bytes_data, **exclude)
return self
@ -355,14 +355,13 @@ cdef class Tokenizer:
**exclude: Named attributes to prevent from being serialized.
RETURNS (bytes): The serialized form of the `Tokenizer` object.
"""
# TODO: Improve this so it doesn't need pickle
serializers = {
'vocab': lambda: self.vocab.to_bytes(),
'prefix': lambda: dill.dumps(self.prefix_search),
'suffix_search': lambda: dill.dumps(self.suffix_search),
'infix_finditer': lambda: dill.dumps(self.infix_finditer),
'token_match': lambda: dill.dumps(self.token_match),
'exceptions': lambda: dill.dumps(self._rules)
'prefix': lambda: self.prefix_search.__self__.pattern,
'suffix_search': lambda: self.suffix_search.__self__.pattern,
'infix_finditer': lambda: self.infix_finditer.__self__.pattern,
'token_match': lambda: self.token_match.__self__.pattern,
'exceptions': lambda: self._rules
}
return util.to_bytes(serializers, exclude)
@ -373,26 +372,23 @@ cdef class Tokenizer:
**exclude: Named attributes to prevent from being loaded.
RETURNS (Tokenizer): The `Tokenizer` object.
"""
# TODO: Improve this so it doesn't need pickle
data = {}
deserializers = {
'vocab': lambda b: self.vocab.from_bytes(b),
'prefix': lambda b: data.setdefault('prefix', dill.loads(b)),
'suffix_search': lambda b: data.setdefault('suffix_search', dill.loads(b)),
'infix_finditer': lambda b: data.setdefault('infix_finditer', dill.loads(b)),
'token_match': lambda b: data.setdefault('token_match', dill.loads(b)),
'exceptions': lambda b: data.setdefault('rules', dill.loads(b))
'prefix': lambda b: data.setdefault('prefix', b),
'suffix_search': lambda b: data.setdefault('suffix_search', b),
'infix_finditer': lambda b: data.setdefault('infix_finditer', b),
'token_match': lambda b: data.setdefault('token_match', b),
'exceptions': lambda b: data.setdefault('rules', b)
}
msg = util.from_bytes(bytes_data, deserializers, exclude)
if 'prefix' in data:
self.prefix_search = data['prefix']
self.prefix_search = re.compile(data['prefix']).search
if 'suffix_search' in data:
self.suffix_search = data['suffix_search']
self.suffix_search = re.compile(data['suffix_search']).search
if 'infix_finditer' in data:
self.infix_finditer = data['infix_finditer']
self.infix_finditer = re.compile(data['infix_finditer']).finditer
if 'token_match' in data:
self.token_match = data['token_match']
self.token_match = re.compile(data['token_match']).search
for string, substrings in data.get('rules', {}).items():
self.add_special_case(string, substrings)

View File

@ -30,6 +30,7 @@ from ..syntax.iterators import CHUNKERS
from ..util import normalize_slice
from ..compat import is_config
from .. import about
from .. import util
DEF PADDING = 5
@ -252,8 +253,12 @@ cdef class Doc:
def __get__(self):
if 'has_vector' in self.user_hooks:
return self.user_hooks['has_vector'](self)
return any(token.has_vector for token in self)
elif any(token.has_vector for token in self):
return True
elif self.tensor:
return True
else:
return False
property vector:
"""A real-valued meaning representation. Defaults to an average of the
@ -265,12 +270,16 @@ cdef class Doc:
def __get__(self):
if 'vector' in self.user_hooks:
return self.user_hooks['vector'](self)
if self._vector is None:
if len(self):
self._vector = sum(t.vector for t in self) / len(self)
else:
return numpy.zeros((self.vocab.vectors_length,), dtype='float32')
return self._vector
if self._vector is not None:
return self._vector
elif self.has_vector and len(self):
self._vector = sum(t.vector for t in self) / len(self)
return self._vector
elif self.tensor:
self._vector = self.tensor.mean(axis=0)
return self._vector
else:
return numpy.zeros((self.vocab.vectors_length,), dtype='float32')
def __set__(self, value):
self._vector = value
@ -295,10 +304,6 @@ cdef class Doc:
def __set__(self, value):
self._vector_norm = value
@property
def string(self):
return self.text
property text:
"""A unicode representation of the document text.
@ -598,15 +603,16 @@ cdef class Doc:
self.is_tagged = bool(TAG in attrs or POS in attrs)
return self
def to_disk(self, path):
def to_disk(self, path, **exclude):
"""Save the current state to a directory.
path (unicode or Path): A path to a directory, which will be created if
it doesn't exist. Paths may be either strings or `Path`-like objects.
"""
raise NotImplementedError()
with path.open('wb') as file_:
file_.write(self.to_bytes(**exclude))
def from_disk(self, path):
def from_disk(self, path, **exclude):
"""Loads state from a directory. Modifies the object in place and
returns it.
@ -614,25 +620,28 @@ cdef class Doc:
strings or `Path`-like objects.
RETURNS (Doc): The modified `Doc` object.
"""
raise NotImplementedError()
with path.open('rb') as file_:
bytes_data = file_.read()
self.from_bytes(bytes_data, **exclude)
def to_bytes(self):
def to_bytes(self, **exclude):
"""Serialize, i.e. export the document contents to a binary string.
RETURNS (bytes): A losslessly serialized copy of the `Doc`, including
all annotations.
"""
return dill.dumps(
(self.text,
self.to_array([LENGTH,SPACY,TAG,LEMMA,HEAD,DEP,ENT_IOB,ENT_TYPE]),
self.sentiment,
self.tensor,
self.noun_chunks_iterator,
self.user_data,
(self.user_hooks, self.user_token_hooks, self.user_span_hooks)),
protocol=-1)
array_head = [LENGTH,SPACY,TAG,LEMMA,HEAD,DEP,ENT_IOB,ENT_TYPE]
serializers = {
'text': lambda: self.text,
'array_head': lambda: array_head,
'array_body': lambda: self.to_array(array_head),
'sentiment': lambda: self.sentiment,
'tensor': lambda: self.tensor,
'user_data': lambda: self.user_data
}
return util.to_bytes(serializers, exclude)
def from_bytes(self, data):
def from_bytes(self, bytes_data, **exclude):
"""Deserialize, i.e. import the document contents from a binary string.
data (bytes): The string to load from.
@ -640,27 +649,36 @@ cdef class Doc:
"""
if self.length != 0:
raise ValueError("Cannot load into non-empty Doc")
deserializers = {
'text': lambda b: None,
'array_head': lambda b: None,
'array_body': lambda b: None,
'sentiment': lambda b: None,
'tensor': lambda b: None,
'user_data': lambda user_data: self.user_data.update(user_data)
}
msg = util.from_bytes(bytes_data, deserializers, exclude)
cdef attr_t[:, :] attrs
cdef int i, start, end, has_space
fields = dill.loads(data)
text, attrs = fields[:2]
self.sentiment, self.tensor = fields[2:4]
self.noun_chunks_iterator, self.user_data = fields[4:6]
self.user_hooks, self.user_token_hooks, self.user_span_hooks = fields[6]
self.sentiment = msg['sentiment']
self.tensor = msg['tensor']
start = 0
cdef const LexemeC* lex
cdef unicode orth_
text = msg['text']
attrs = msg['array_body']
for i in range(attrs.shape[0]):
end = start + attrs[i, 0]
has_space = attrs[i, 1]
orth_ = text[start:end]
lex = self.vocab.get(self.mem, orth_)
self.push_back(lex, has_space)
start = end + has_space
self.from_array([TAG,LEMMA,HEAD,DEP,ENT_IOB,ENT_TYPE],
attrs[:, 2:])
self.from_array(msg['array_head'][2:],
attrs[:, 2:])
return self
def merge(self, int start_idx, int end_idx, *args, **attributes):

View File

@ -111,7 +111,7 @@ cdef class Token:
RETURNS (float): A scalar similarity score. Higher is more similar.
"""
if 'similarity' in self.doc.user_token_hooks:
return self.doc.user_token_hooks['similarity'](self)
return self.doc.user_token_hooks['similarity'](self)
if self.vector_norm == 0 or other.vector_norm == 0:
return 0.0
return numpy.dot(self.vector, other.vector) / (self.vector_norm * other.vector_norm)
@ -245,7 +245,10 @@ cdef class Token:
def __get__(self):
if 'vector' in self.doc.user_token_hooks:
return self.doc.user_token_hooks['vector'](self)
return self.vocab.get_vector(self.c.lex.orth)
if self.has_vector:
return self.vocab.get_vector(self.c.lex.orth)
else:
return self.doc.tensor[self.i]
property vector_norm:
"""The L2 norm of the token's vector representation.

View File

@ -13,6 +13,7 @@ import random
import numpy
import io
import dill
from collections import OrderedDict
import msgpack
import msgpack_numpy
@ -418,7 +419,7 @@ def get_raw_input(description, default=False):
def to_bytes(getters, exclude):
serialized = {}
serialized = OrderedDict()
for key, getter in getters.items():
if key not in exclude:
serialized[key] = getter()
@ -433,6 +434,24 @@ def from_bytes(bytes_data, setters, exclude):
return msg
def to_disk(path, writers, exclude):
path = ensure_path(path)
if not path.exists():
path.mkdir()
for key, writer in writers.items():
if key not in exclude:
writer(path / key)
return path
def from_disk(path, readers, exclude):
path = ensure_path(path)
for key, reader in readers.items():
if key not in exclude:
reader(path / key)
return path
# This stuff really belongs in thinc -- but I expect
# to refactor how all this works in thinc anyway.
# What a mess!

View File

@ -27,7 +27,8 @@ cdef struct _Cached:
cdef class Vocab:
cdef Pool mem
cpdef readonly StringStore strings
cpdef readonly Morphology morphology
cpdef public Morphology morphology
cpdef public object vectors
cdef readonly int length
cdef public object data_dir
cdef public object lex_attr_getters
@ -35,11 +36,10 @@ cdef class Vocab:
cdef const LexemeC* get(self, Pool mem, unicode string) except NULL
cdef const LexemeC* get_by_orth(self, Pool mem, attr_t orth) except NULL
cdef const TokenC* make_fused_token(self, substrings) except NULL
cdef const LexemeC* _new_lexeme(self, Pool mem, unicode string) except NULL
cdef int _add_lex_to_vocab(self, hash_t key, const LexemeC* lex) except -1
cdef const LexemeC* _new_lexeme(self, Pool mem, unicode string) except NULL
cdef PreshMap _by_hash
cdef PreshMap _by_orth
cdef readonly int vectors_length

View File

@ -239,6 +239,16 @@ cdef class Vocab:
Token.set_struct_attr(token, attr_id, value)
return tokens
@property
def vectors_length(self):
raise NotImplementedError
def clear_vectors(self):
"""Drop the current vector table. Because all vectors must be the same
width, you have to call this to change the size of the vectors.
"""
raise NotImplementedError
def get_vector(self, orth):
"""Retrieve a vector for a word in the vocabulary.
@ -253,6 +263,16 @@ cdef class Vocab:
"""
raise NotImplementedError
def set_vector(self, orth, vector):
"""Set a vector for a word in the vocabulary.
Words can be referenced by string or int ID.
RETURNS:
None
"""
raise NotImplementedError
def has_vector(self, orth):
"""Check whether a word has a vector. Returns False if no
vectors have been loaded. Words can be looked up by string

View File

@ -108,8 +108,8 @@ mixin quickstart(groups, headline, description, hide_results)
| #[+help(group.help)]
.c-quickstart__fields
for option in group.options
input.c-quickstart__input(class="c-quickstart__input--" + (group.input_style ? group.input_style : group.multiple ? "check" : "radio") type=group.multiple ? "checkbox" : "radio" name=group.id id=option.id value=option.id checked=option.checked)
label.c-quickstart__label(for=option.id)!=option.title
input.c-quickstart__input(class="c-quickstart__input--" + (group.input_style ? group.input_style : group.multiple ? "check" : "radio") type=group.multiple ? "checkbox" : "radio" name=group.id id="qs-#{option.id}" value=option.id checked=option.checked)
label.c-quickstart__label(for="qs-#{option.id}")!=option.title
if option.meta
| #[span.c-quickstart__label__meta (#{option.meta})]
if option.help

View File

@ -354,12 +354,14 @@ p
python -c "import os; import spacy; print(os.path.dirname(spacy.__file__))"
p
| Then run #[code pytest] on that directory. The flags #[code --vectors],
| #[code --slow] and #[code --model] are optional and enable additional
| tests:
| Then run #[code pytest] on that directory. The flags #[code --slow] and
| #[code --models] are optional and enable additional tests.
+code(false, "bash").
# make sure you are using recent pytest version
python -m pip install -U pytest
python -m pytest &lt;spacy-directory&gt; --vectors --models --slow
python -m pytest &lt;spacy-directory&gt; # basic tests
python -m pytest &lt;spacy-directory&gt; --slow # basic and slow tests
python -m pytest &lt;spacy-directory&gt; --models --all # basic and all model tests
python -m pytest &lt;spacy-directory&gt; --models --en # basic and English model tests

View File

@ -408,7 +408,7 @@ p
| To label the hashtags, we first need to add a new custom flag.
| #[code IS_HASHTAG] will be the flag's ID, which you can use to assign it
| to the hashtag's span, and check its value via a token's
| #[+api("token#check_flag") #[code code check_flag()]] method. On each
| #[+api("token#check_flag") #[code check_flag()]] method. On each
| match, we merge the hashtag and assign the flag.
+code.
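As a rough illustration of the flag-and-check pattern described above (a hedged sketch, not the docs' actual example; the flag getter and sample text are made up):

```python
import spacy

nlp = spacy.load('en')

# register a custom flag; the getter decides which lexemes carry it
IS_HASHTAG = nlp.vocab.add_flag(lambda text: text.startswith('#'))

doc = nlp(u"Happy to announce our new release #spacy #nlp")
for token in doc:
    # in the full example, matched hashtag spans are merged first,
    # so the flag applies to the whole '#topic' token
    if token.check_flag(IS_HASHTAG):
        print(token.text)
```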

View File

@ -2,6 +2,13 @@
include ../../_includes/_mixins
p
| Whether you're new to spaCy, or just want to brush up on some
| NLP basics and implementation details, this page should have you covered.
| Each section will explain one of spaCy's features in simple terms and
| with examples or illustrations. Some sections will also reappear across
| the usage guides as a quick introduction.
+aside("Help us improve the docs")
| Did you spot a mistake or come across explanations that
| are unclear? We always appreciate improvement
@ -13,6 +20,23 @@ include ../../_includes/_mixins
+grid.o-no-block
+grid-col("half")
p
| spaCy is a #[strong free, open-source library] for advanced
| #[strong Natural Language Processing] (NLP) in Python.
p
| If you're working with a lot of text, you'll eventually want to
| know more about it. For example, what's it about? What do the
| words mean in context? Who is doing what to whom? What companies
| and products are mentioned? Which texts are similar to each other?
p
| spaCy is designed specifically for #[strong production use] and
| helps you build applications that process and "understand"
| large volumes of text. It can be used to build
| #[strong information extraction] or
| #[strong natural language understanding] systems, or to
| pre-process text for #[strong deep learning].
+grid-col("half")
+infobox
@ -31,6 +55,29 @@ include ../../_includes/_mixins
+item #[+a("#architecture") Architecture]
+item #[+a("#community") Community & FAQ]
+h(3, "what-spacy-isnt") What spaCy isn't
+list
+item #[strong spaCy is not a platform or "an API"].
| Unlike a platform, spaCy does not provide software as a service, or
| a web application. It's an open-source library designed to help you
| build NLP applications, not a consumable service.
+item #[strong spaCy is not an out-of-the-box chat bot engine].
| While spaCy can be used to power conversational applications, it's
| not designed specifically for chat bots, and only provides the
| underlying text processing capabilities.
+item #[strong spaCy is not research software].
| It's built on the latest research, but unlike
| #[+a("https://github.com/nltk/nltk") NLTK], which is intended for
| teaching and research, spaCy follows a more opinionated approach and
| focuses on production usage. Its aim is to provide you with the best
| possible general-purpose solution for text processing and machine learning
| with text input, but this also means that there's only one implementation
| of each component.
+item #[strong spaCy is not a company].
| It's an open-source library. Our company publishing spaCy and other
| software is called #[+a(COMPANY_URL, true) Explosion AI].
+h(2, "features") Features
p