2017-04-15 13:05:47 +03:00
|
|
|
# coding: utf8
|
|
|
|
from __future__ import absolute_import, unicode_literals
|
2015-08-27 10:16:11 +03:00
|
|
|
|
2017-05-25 04:10:54 +03:00
|
|
|
import random
|
2017-05-29 14:42:55 +03:00
|
|
|
import ujson
|
2017-07-25 19:57:59 +03:00
|
|
|
import itertools
|
2017-10-16 20:22:40 +03:00
|
|
|
import weakref
|
2017-10-17 19:18:10 +03:00
|
|
|
import functools
|
2017-10-27 22:07:59 +03:00
|
|
|
from collections import OrderedDict
|
|
|
|
from contextlib import contextmanager
|
|
|
|
from copy import copy
|
|
|
|
from thinc.neural import Model
|
|
|
|
from thinc.neural.optimizers import Adam
|
2017-05-18 12:25:19 +03:00
|
|
|
|
2015-08-26 20:16:09 +03:00
|
|
|
from .tokenizer import Tokenizer
|
|
|
|
from .vocab import Vocab
|
2016-09-25 16:37:33 +03:00
|
|
|
from .lemmatizer import Lemmatizer
|
2017-10-27 22:07:59 +03:00
|
|
|
from .pipeline import DependencyParser, Tensorizer, Tagger, EntityRecognizer
|
2017-11-05 20:45:57 +03:00
|
|
|
from .pipeline import SimilarityHook, TextCategorizer, SentenceSegmenter
|
2018-03-27 20:23:02 +03:00
|
|
|
from .pipeline import merge_noun_chunks, merge_entities, merge_subtokens
|
2017-11-07 00:07:38 +03:00
|
|
|
from .compat import json_dumps, izip, basestring_
|
|
|
|
from .gold import GoldParse
|
2017-10-07 01:26:05 +03:00
|
|
|
from .scorer import Scorer
|
2017-11-06 17:06:27 +03:00
|
|
|
from ._ml import link_vectors_to_models, create_default_optimizer
|
2017-04-15 13:05:47 +03:00
|
|
|
from .attrs import IS_STOP
|
2017-10-27 15:40:14 +03:00
|
|
|
from .lang.punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES
|
|
|
|
from .lang.punctuation import TOKENIZER_INFIXES
|
2017-05-09 00:58:31 +03:00
|
|
|
from .lang.tokenizer_exceptions import TOKEN_MATCH
|
|
|
|
from .lang.tag_map import TAG_MAP
|
2017-10-17 19:18:10 +03:00
|
|
|
from .lang.lex_attrs import LEX_ATTRS, is_stop
|
2018-05-22 19:29:45 +03:00
|
|
|
from .errors import Errors, Warnings, user_warning
|
2017-04-15 13:05:47 +03:00
|
|
|
from . import util
|
2017-10-07 01:26:05 +03:00
|
|
|
from . import about
|
2016-10-09 13:24:24 +03:00
|
|
|
|
2015-08-27 10:16:11 +03:00
|
|
|
|
2016-09-24 21:26:17 +03:00
|
|
|
class BaseDefaults(object):
    """Language-independent defaults: data tables and factory methods used to
    build a `Language` subclass's lemmatizer, vocab and tokenizer. Language
    subclasses override the class attributes below with their own data.
    """
    @classmethod
    def create_lemmatizer(cls, nlp=None):
        # Build a lemmatizer from the class-level lemma tables; `nlp` is
        # accepted for interface symmetry with the other factories but unused.
        return Lemmatizer(cls.lemma_index, cls.lemma_exc, cls.lemma_rules,
                          cls.lemma_lookup)

    @classmethod
    def create_vocab(cls, nlp=None):
        # Assemble a Vocab from the class-level attribute getters, tag map,
        # morphology rules and a freshly created lemmatizer.
        lemmatizer = cls.create_lemmatizer(nlp)
        # Copy so the shared class-level dict is never mutated.
        lex_attr_getters = dict(cls.lex_attr_getters)
        # This is messy, but it's the minimal working fix to Issue #639.
        lex_attr_getters[IS_STOP] = functools.partial(is_stop,
                                                      stops=cls.stop_words)
        vocab = Vocab(lex_attr_getters=lex_attr_getters, tag_map=cls.tag_map,
                      lemmatizer=lemmatizer)
        # Register special-case morphology for each (tag, orth) pair.
        for tag_str, exc in cls.morph_rules.items():
            for orth_str, attrs in exc.items():
                vocab.morphology.add_special_case(tag_str, orth_str, attrs)
        return vocab

    @classmethod
    def create_tokenizer(cls, nlp=None):
        # Compile the class-level prefix/suffix/infix patterns into the
        # callables the Tokenizer expects; empty pattern lists become None.
        rules = cls.tokenizer_exceptions
        token_match = cls.token_match
        prefix_search = (util.compile_prefix_regex(cls.prefixes).search
                         if cls.prefixes else None)
        suffix_search = (util.compile_suffix_regex(cls.suffixes).search
                         if cls.suffixes else None)
        infix_finditer = (util.compile_infix_regex(cls.infixes).finditer
                          if cls.infixes else None)
        # Reuse the nlp object's vocab when available so tokenizer and
        # pipeline share one string store.
        vocab = nlp.vocab if nlp is not None else cls.create_vocab(nlp)
        return Tokenizer(vocab, rules=rules,
                         prefix_search=prefix_search,
                         suffix_search=suffix_search,
                         infix_finditer=infix_finditer,
                         token_match=token_match)

    # Default pipeline component names, in application order.
    pipe_names = ['tagger', 'parser', 'ner']
    # Tokenization data, overridden per language.
    token_match = TOKEN_MATCH
    prefixes = tuple(TOKENIZER_PREFIXES)
    suffixes = tuple(TOKENIZER_SUFFIXES)
    infixes = tuple(TOKENIZER_INFIXES)
    tag_map = dict(TAG_MAP)
    tokenizer_exceptions = {}
    stop_words = set()
    # Lemmatizer tables (rules, exceptions, index, lookup table).
    lemma_rules = {}
    lemma_exc = {}
    lemma_index = {}
    lemma_lookup = {}
    morph_rules = {}
    lex_attr_getters = LEX_ATTRS
    syntax_iterators = {}
|
2015-09-14 10:48:51 +03:00
|
|
|
|
2015-08-26 20:16:09 +03:00
|
|
|
|
2016-09-24 15:08:53 +03:00
|
|
|
class Language(object):
    """A text-processing pipeline. Usually you'll load this once per process,
    and pass the instance around your application.

    Defaults (class): Settings, data and factory methods for creating the `nlp`
        object and processing pipeline.
    lang (unicode): Two-letter language ID, i.e. ISO code.
    """
    Defaults = BaseDefaults
    lang = None

    # Built-in component factories, keyed by component name. Each factory
    # takes the `nlp` object (and optional keyword config) and returns a
    # pipeline component. NOTE: this dict is a class attribute, shared by
    # all Language instances and subclasses.
    factories = {
        'tokenizer': lambda nlp: nlp.Defaults.create_tokenizer(nlp),
        'tensorizer': lambda nlp, **cfg: Tensorizer(nlp.vocab, **cfg),
        'tagger': lambda nlp, **cfg: Tagger(nlp.vocab, **cfg),
        'parser': lambda nlp, **cfg: DependencyParser(nlp.vocab, **cfg),
        'ner': lambda nlp, **cfg: EntityRecognizer(nlp.vocab, **cfg),
        'similarity': lambda nlp, **cfg: SimilarityHook(nlp.vocab, **cfg),
        'textcat': lambda nlp, **cfg: TextCategorizer(nlp.vocab, **cfg),
        # 'sbd' is kept as a backwards-compatible alias of 'sentencizer'.
        'sbd': lambda nlp, **cfg: SentenceSegmenter(nlp.vocab, **cfg),
        'sentencizer': lambda nlp, **cfg: SentenceSegmenter(nlp.vocab, **cfg),
        'merge_noun_chunks': lambda nlp, **cfg: merge_noun_chunks,
        'merge_entities': lambda nlp, **cfg: merge_entities,
        'merge_subtokens': lambda nlp, **cfg: merge_subtokens,
    }
|
|
|
|
|
2018-03-29 22:45:26 +03:00
|
|
|
def __init__(self, vocab=True, make_doc=True, max_length=10**6, meta={}, **kwargs):
|
2017-05-19 00:57:38 +03:00
|
|
|
"""Initialise a Language object.
|
|
|
|
|
|
|
|
vocab (Vocab): A `Vocab` object. If `True`, a vocab is created via
|
|
|
|
`Language.Defaults.create_vocab`.
|
2017-05-21 14:17:40 +03:00
|
|
|
make_doc (callable): A function that takes text and returns a `Doc`
|
2017-05-19 00:57:38 +03:00
|
|
|
object. Usually a `Tokenizer`.
|
|
|
|
pipeline (list): A list of annotation processes or IDs of annotation,
|
|
|
|
processes, e.g. a `Tagger` object, or `'tagger'`. IDs are looked
|
|
|
|
up in `Language.Defaults.factories`.
|
2017-06-05 14:13:07 +03:00
|
|
|
disable (list): A list of component names to exclude from the pipeline.
|
|
|
|
The disable list has priority over the pipeline list -- if the same
|
|
|
|
string occurs in both, the component is not loaded.
|
2017-05-19 00:57:38 +03:00
|
|
|
meta (dict): Custom meta data for the Language class. Is written to by
|
|
|
|
models to add model meta data.
|
2018-03-29 22:45:26 +03:00
|
|
|
max_length (int) :
|
|
|
|
Maximum number of characters in a single text. The current v2 models
|
|
|
|
may run out memory on extremely long texts, due to large internal
|
|
|
|
allocations. You should segment these texts into meaningful units,
|
|
|
|
e.g. paragraphs, subsections etc, before passing them to spaCy.
|
|
|
|
Default maximum length is 1,000,000 characters (1mb). As a rule of
|
|
|
|
thumb, if all pipeline components are enabled, spaCy's default
|
|
|
|
models currently requires roughly 1GB of temporary memory per
|
|
|
|
100,000 characters in one text.
|
2017-05-19 00:57:38 +03:00
|
|
|
RETURNS (Language): The newly constructed object.
|
|
|
|
"""
|
2018-05-22 19:29:45 +03:00
|
|
|
user_factories = util.get_entry_points('spacy_factories')
|
|
|
|
for factory in user_factories.keys():
|
|
|
|
if factory in self.factories:
|
|
|
|
user_warning(Warnings.W009.format(name=factory))
|
|
|
|
self.factories.update(user_factories)
|
2017-07-23 01:50:18 +03:00
|
|
|
self._meta = dict(meta)
|
2017-10-25 12:57:43 +03:00
|
|
|
self._path = None
|
2017-05-16 12:21:59 +03:00
|
|
|
if vocab is True:
|
|
|
|
factory = self.Defaults.create_vocab
|
|
|
|
vocab = factory(self, **meta.get('vocab', {}))
|
2018-03-28 17:02:59 +03:00
|
|
|
if vocab.vectors.name is None:
|
|
|
|
vocab.vectors.name = meta.get('vectors', {}).get('name')
|
2017-05-16 12:21:59 +03:00
|
|
|
self.vocab = vocab
|
|
|
|
if make_doc is True:
|
|
|
|
factory = self.Defaults.create_tokenizer
|
|
|
|
make_doc = factory(self, **meta.get('tokenizer', {}))
|
2017-05-29 16:40:45 +03:00
|
|
|
self.tokenizer = make_doc
|
2017-10-07 01:25:54 +03:00
|
|
|
self.pipeline = []
|
2018-03-29 22:45:26 +03:00
|
|
|
self.max_length = max_length
|
2017-08-20 15:42:07 +03:00
|
|
|
self._optimizer = None
|
2015-10-12 11:33:11 +03:00
|
|
|
|
2017-10-25 12:57:43 +03:00
|
|
|
    @property
    def path(self):
        # Path the model was loaded from, or None if not loaded from disk.
        return self._path
|
|
|
|
|
2017-07-23 01:50:18 +03:00
|
|
|
@property
|
|
|
|
def meta(self):
|
|
|
|
self._meta.setdefault('lang', self.vocab.lang)
|
2017-10-26 17:12:23 +03:00
|
|
|
self._meta.setdefault('name', 'model')
|
2017-07-23 01:50:18 +03:00
|
|
|
self._meta.setdefault('version', '0.0.0')
|
2017-11-26 23:02:07 +03:00
|
|
|
self._meta.setdefault('spacy_version', '>={}'.format(about.__version__))
|
2017-07-23 01:50:18 +03:00
|
|
|
self._meta.setdefault('description', '')
|
|
|
|
self._meta.setdefault('author', '')
|
|
|
|
self._meta.setdefault('email', '')
|
|
|
|
self._meta.setdefault('url', '')
|
|
|
|
self._meta.setdefault('license', '')
|
2017-10-30 20:39:48 +03:00
|
|
|
self._meta['vectors'] = {'width': self.vocab.vectors_length,
|
2017-11-01 03:25:09 +03:00
|
|
|
'vectors': len(self.vocab.vectors),
|
2018-03-28 17:02:59 +03:00
|
|
|
'keys': self.vocab.vectors.n_keys,
|
|
|
|
'name': self.vocab.vectors.name}
|
2017-10-07 01:25:54 +03:00
|
|
|
self._meta['pipeline'] = self.pipe_names
|
2017-07-23 01:50:18 +03:00
|
|
|
return self._meta
|
|
|
|
|
|
|
|
    @meta.setter
    def meta(self, value):
        # Replace the stored meta wholesale; defaults are re-applied lazily
        # by the getter.
        self._meta = value
|
|
|
|
|
2017-06-04 23:52:09 +03:00
|
|
|
# Conveniences to access pipeline components
|
|
|
|
    @property
    def tensorizer(self):
        # Shortcut for the 'tensorizer' pipeline component; raises KeyError
        # via get_pipe() if it is not in the pipeline.
        return self.get_pipe('tensorizer')
|
2017-06-04 23:52:09 +03:00
|
|
|
|
|
|
|
    @property
    def tagger(self):
        # Shortcut for the 'tagger' pipeline component.
        return self.get_pipe('tagger')
|
2017-06-04 23:52:09 +03:00
|
|
|
|
|
|
|
    @property
    def parser(self):
        # Shortcut for the 'parser' pipeline component.
        return self.get_pipe('parser')
|
2017-06-04 23:52:09 +03:00
|
|
|
|
|
|
|
    @property
    def entity(self):
        # Shortcut for the named entity recognizer; note the component is
        # registered under the name 'ner', not 'entity'.
        return self.get_pipe('ner')
|
2017-06-04 23:52:09 +03:00
|
|
|
|
|
|
|
    @property
    def matcher(self):
        # Shortcut for a 'matcher' pipeline component, if one has been added.
        return self.get_pipe('matcher')
|
|
|
|
|
|
|
|
@property
|
|
|
|
def pipe_names(self):
|
|
|
|
"""Get names of available pipeline components.
|
|
|
|
|
|
|
|
RETURNS (list): List of component name strings, in order.
|
|
|
|
"""
|
|
|
|
return [pipe_name for pipe_name, _ in self.pipeline]
|
|
|
|
|
|
|
|
def get_pipe(self, name):
|
|
|
|
"""Get a pipeline component for a given component name.
|
|
|
|
|
|
|
|
name (unicode): Name of pipeline component to get.
|
|
|
|
RETURNS (callable): The pipeline component.
|
|
|
|
"""
|
|
|
|
for pipe_name, component in self.pipeline:
|
|
|
|
if pipe_name == name:
|
|
|
|
return component
|
2018-04-03 16:50:31 +03:00
|
|
|
raise KeyError(Errors.E001.format(name=name, opts=self.pipe_names))
|
2017-10-07 01:25:54 +03:00
|
|
|
|
|
|
|
def create_pipe(self, name, config=dict()):
|
|
|
|
"""Create a pipeline component from a factory.
|
|
|
|
|
|
|
|
name (unicode): Factory name to look up in `Language.factories`.
|
2017-10-07 02:04:50 +03:00
|
|
|
config (dict): Configuration parameters to initialise component.
|
2017-10-07 01:25:54 +03:00
|
|
|
RETURNS (callable): Pipeline component.
|
|
|
|
"""
|
|
|
|
if name not in self.factories:
|
2018-04-03 16:50:31 +03:00
|
|
|
raise KeyError(Errors.E002.format(name=name))
|
2017-10-07 01:25:54 +03:00
|
|
|
factory = self.factories[name]
|
|
|
|
return factory(self, **config)
|
|
|
|
|
|
|
|
    def add_pipe(self, component, name=None, before=None, after=None,
                 first=None, last=None):
        """Add a component to the processing pipeline. Valid components are
        callables that take a `Doc` object, modify it and return it. Only one
        of before/after/first/last can be set. Default behaviour is "last".

        component (callable): The pipeline component.
        name (unicode): Name of pipeline component. Overwrites existing
            component.name attribute if available. If no name is set and
            the component exposes no name attribute, component.__name__ is
            used. An error is raised if a name already exists in the pipeline.
        before (unicode): Component name to insert component directly before.
        after (unicode): Component name to insert component directly after.
        first (bool): Insert component first / not first in the pipeline.
        last (bool): Insert component last / not last in the pipeline.

        EXAMPLE:
            >>> nlp.add_pipe(component, before='ner')
            >>> nlp.add_pipe(component, name='custom_name', last=True)
        """
        if not hasattr(component, '__call__'):
            msg = Errors.E003.format(component=repr(component), name=name)
            # Common mistake: passing a factory name string instead of the
            # component returned by create_pipe(); add a hint in that case.
            if isinstance(component, basestring_) and component in self.factories:
                msg += Errors.E004.format(component=component)
            raise ValueError(msg)
        if name is None:
            # Derive a name: component.name, then __name__ (plain functions),
            # then the class name, finally repr() as a last resort.
            if hasattr(component, 'name'):
                name = component.name
            elif hasattr(component, '__name__'):
                name = component.__name__
            elif (hasattr(component, '__class__') and
                  hasattr(component.__class__, '__name__')):
                name = component.__class__.__name__
            else:
                name = repr(component)
        if name in self.pipe_names:
            raise ValueError(Errors.E007.format(name=name, opts=self.pipe_names))
        # At most one positioning argument may be truthy.
        if sum([bool(before), bool(after), bool(first), bool(last)]) >= 2:
            raise ValueError(Errors.E006)
        pipe = (name, component)
        if last or not any([first, before, after]):
            self.pipeline.append(pipe)
        elif first:
            self.pipeline.insert(0, pipe)
        elif before and before in self.pipe_names:
            self.pipeline.insert(self.pipe_names.index(before), pipe)
        elif after and after in self.pipe_names:
            self.pipeline.insert(self.pipe_names.index(after) + 1, pipe)
        else:
            # before/after referenced a component that isn't in the pipeline.
            raise ValueError(Errors.E001.format(name=before or after,
                                                opts=self.pipe_names))
|
2017-06-04 23:52:09 +03:00
|
|
|
|
2017-10-17 12:20:07 +03:00
|
|
|
    def has_pipe(self, name):
        """Check if a component name is present in the pipeline. Equivalent to
        `name in nlp.pipe_names`.

        name (unicode): Name of the component.
        RETURNS (bool): Whether a component of the name exists in the pipeline.
        """
        return name in self.pipe_names
|
|
|
|
|
2017-10-07 01:25:54 +03:00
|
|
|
def replace_pipe(self, name, component):
|
|
|
|
"""Replace a component in the pipeline.
|
|
|
|
|
|
|
|
name (unicode): Name of the component to replace.
|
|
|
|
component (callable): Pipeline component.
|
|
|
|
"""
|
|
|
|
if name not in self.pipe_names:
|
2018-04-03 16:50:31 +03:00
|
|
|
raise ValueError(Errors.E001.format(name=name, opts=self.pipe_names))
|
2017-10-07 01:25:54 +03:00
|
|
|
self.pipeline[self.pipe_names.index(name)] = (name, component)
|
|
|
|
|
|
|
|
def rename_pipe(self, old_name, new_name):
|
|
|
|
"""Rename a pipeline component.
|
|
|
|
|
|
|
|
old_name (unicode): Name of the component to rename.
|
|
|
|
new_name (unicode): New name of the component.
|
|
|
|
"""
|
|
|
|
if old_name not in self.pipe_names:
|
2018-04-03 16:50:31 +03:00
|
|
|
raise ValueError(Errors.E001.format(name=old_name, opts=self.pipe_names))
|
2017-10-07 01:25:54 +03:00
|
|
|
if new_name in self.pipe_names:
|
2018-04-03 16:50:31 +03:00
|
|
|
raise ValueError(Errors.E007.format(name=new_name, opts=self.pipe_names))
|
2017-10-07 01:25:54 +03:00
|
|
|
i = self.pipe_names.index(old_name)
|
|
|
|
self.pipeline[i] = (new_name, self.pipeline[i][1])
|
|
|
|
|
|
|
|
def remove_pipe(self, name):
|
|
|
|
"""Remove a component from the pipeline.
|
|
|
|
|
|
|
|
name (unicode): Name of the component to remove.
|
2017-10-07 02:04:50 +03:00
|
|
|
RETURNS (tuple): A `(name, component)` tuple of the removed component.
|
2017-10-07 01:25:54 +03:00
|
|
|
"""
|
|
|
|
if name not in self.pipe_names:
|
2018-04-03 16:50:31 +03:00
|
|
|
raise ValueError(Errors.E001.format(name=name, opts=self.pipe_names))
|
2017-10-07 01:25:54 +03:00
|
|
|
return self.pipeline.pop(self.pipe_names.index(name))
|
2017-06-04 23:52:09 +03:00
|
|
|
|
2017-05-26 13:33:54 +03:00
|
|
|
    def __call__(self, text, disable=[]):
        """Apply the pipeline to some text. The text can span multiple sentences,
        and can contain arbtrary whitespace. Alignment into the original string
        is preserved.

        text (unicode): The text to be processed.
        disable (list): Names of the pipeline components to disable.
        RETURNS (Doc): A container for accessing the annotations.

        EXAMPLE:
            >>> tokens = nlp('An example sentence. Another example sentence.')
            >>> tokens[0].text, tokens[0].head.tag_
            ('An', 'NN')
        """
        # Guard against pathological memory use on very long texts.
        if len(text) >= self.max_length:
            raise ValueError(Errors.E088.format(length=len(text),
                                                max_length=self.max_length))
        doc = self.make_doc(text)
        for name, proc in self.pipeline:
            if name in disable:
                continue
            if not hasattr(proc, '__call__'):
                raise ValueError(Errors.E003.format(component=type(proc), name=name))
            doc = proc(doc)
            # A component must return the Doc, not modify it and return None.
            if doc is None:
                raise ValueError(Errors.E005.format(name=name))
        return doc
|
2015-08-25 16:37:17 +03:00
|
|
|
|
2017-10-25 14:46:41 +03:00
|
|
|
    def disable_pipes(self, *names):
        """Disable one or more pipeline components. If used as a context
        manager, the pipeline will be restored to the initial state at the end
        of the block. Otherwise, a DisabledPipes object is returned, that has
        a `.restore()` method you can use to undo your changes.

        EXAMPLE:
            >>> nlp.add_pipe('parser')
            >>> nlp.add_pipe('tagger')
            >>> with nlp.disable_pipes('parser', 'tagger'):
            >>>     assert not nlp.has_pipe('parser')
            >>> assert nlp.has_pipe('parser')
            >>> disabled = nlp.disable_pipes('parser')
            >>> assert len(disabled) == 1
            >>> assert not nlp.has_pipe('parser')
            >>> disabled.restore()
            >>> assert nlp.has_pipe('parser')
        """
        # DisabledPipes removes the named components on construction and
        # re-inserts them on restore() / context exit.
        return DisabledPipes(self, *names)
|
|
|
|
|
2017-05-29 16:40:45 +03:00
|
|
|
    def make_doc(self, text):
        # Tokenize only -- no pipeline components are applied.
        return self.tokenizer(text)
|
|
|
|
|
2017-09-26 13:41:35 +03:00
|
|
|
    def update(self, docs, golds, drop=0., sgd=None, losses=None):
        """Update the models in the pipeline.

        docs (iterable): A batch of `Doc` objects.
        golds (iterable): A batch of `GoldParse` objects.
        drop (float): The droput rate.
        sgd (callable): An optimizer.
        losses (dict): Optional dict updated in place by each component.
        RETURNS (dict): Results from the update.

        EXAMPLE:
            >>> with nlp.begin_training(gold) as (trainer, optimizer):
            >>>    for epoch in trainer.epochs(gold):
            >>>        for docs, golds in epoch:
            >>>            state = nlp.update(docs, golds, sgd=optimizer)
        """
        if len(docs) != len(golds):
            raise IndexError(Errors.E009.format(n_docs=len(docs), n_golds=len(golds)))
        if len(docs) == 0:
            return
        if sgd is None:
            # Create (once) and reuse a default optimizer when none is given.
            if self._optimizer is None:
                self._optimizer = create_default_optimizer(Model.ops)
            sgd = self._optimizer
        # Allow dict of args to GoldParse, instead of GoldParse objects.
        # Raw text inputs are also tokenized on the fly.
        gold_objs = []
        doc_objs = []
        for doc, gold in zip(docs, golds):
            if isinstance(doc, basestring_):
                doc = self.make_doc(doc)
            if not isinstance(gold, GoldParse):
                gold = GoldParse(doc, **gold)
            doc_objs.append(doc)
            gold_objs.append(gold)
        golds = gold_objs
        docs = doc_objs
        grads = {}

        def get_grads(W, dW, key=None):
            # Accumulates into whatever dict `grads` is currently bound to
            # in the enclosing scope (it is rebound per component below).
            grads[key] = (W, dW)

        # Shuffle component order so no component systematically sees
        # gradients before or after the others within a batch.
        pipes = list(self.pipeline)
        random.shuffle(pipes)
        for name, proc in pipes:
            if not hasattr(proc, 'update'):
                continue
            # Fresh gradient buffer per component; get_grads sees this
            # rebinding through its closure.
            grads = {}
            proc.update(docs, golds, drop=drop, sgd=get_grads, losses=losses)
            # Apply the collected gradients with the real optimizer.
            for key, (W, dW) in grads.items():
                sgd(W, dW, key=key)
|
2017-05-16 17:17:30 +03:00
|
|
|
|
2017-05-21 17:07:06 +03:00
|
|
|
def preprocess_gold(self, docs_golds):
|
2017-05-22 13:29:30 +03:00
|
|
|
"""Can be called before training to pre-process gold data. By default,
|
|
|
|
it handles nonprojectivity and adds missing tags to the tag map.
|
|
|
|
|
|
|
|
docs_golds (iterable): Tuples of `Doc` and `GoldParse` objects.
|
|
|
|
YIELDS (tuple): Tuples of preprocessed `Doc` and `GoldParse` objects.
|
|
|
|
"""
|
2017-10-07 01:25:54 +03:00
|
|
|
for name, proc in self.pipeline:
|
2017-05-21 17:07:06 +03:00
|
|
|
if hasattr(proc, 'preprocess_gold'):
|
|
|
|
docs_golds = proc.preprocess_gold(docs_golds)
|
|
|
|
for doc, gold in docs_golds:
|
|
|
|
yield doc, gold
|
|
|
|
|
2017-11-06 16:26:00 +03:00
|
|
|
    def begin_training(self, get_gold_tuples=None, sgd=None, **cfg):
        """Allocate models, pre-process training data and acquire a trainer and
        optimizer. Used as a contextmanager.

        get_gold_tuples (function): Function returning gold data
        sgd (callable): An optimizer; a default one is created if None.
        **cfg: Config parameters.
        RETURNS: An optimizer
        """
        if get_gold_tuples is None:
            get_gold_tuples = lambda: []
        else:
            # Populate vocab: touch every training word so lexemes exist
            # before the models are allocated.
            for _, annots_brackets in get_gold_tuples():
                for annots, _ in annots_brackets:
                    for word in annots[1]:
                        _ = self.vocab[word]
        contexts = []
        # cfg['device'] >= 0 selects a GPU; move the vectors table onto it.
        if cfg.get('device', -1) >= 0:
            device = util.use_gpu(cfg['device'])
            if self.vocab.vectors.data.shape[1] >= 1:
                self.vocab.vectors.data = Model.ops.asarray(
                    self.vocab.vectors.data)
        else:
            device = None
        link_vectors_to_models(self.vocab)
        # Non-zero vector width means pretrained vectors are available.
        if self.vocab.vectors.data.shape[1]:
            cfg['pretrained_vectors'] = self.vocab.vectors.name
        if sgd is None:
            sgd = create_default_optimizer(Model.ops)
        self._optimizer = sgd
        for name, proc in self.pipeline:
            if hasattr(proc, 'begin_training'):
                proc.begin_training(get_gold_tuples,
                                    pipeline=self.pipeline,
                                    sgd=self._optimizer,
                                    **cfg)
        return self._optimizer
|
2017-05-21 17:07:06 +03:00
|
|
|
|
2017-10-03 17:14:57 +03:00
|
|
|
    def evaluate(self, docs_golds, verbose=False):
        """Evaluate the pipeline on a sequence of `(Doc, GoldParse)` pairs.

        docs_golds (iterable): Tuples of `Doc` and `GoldParse` objects.
        verbose (bool): Print each doc and pass verbose flag to the scorer.
        RETURNS (Scorer): Scorer with the accumulated scores.
        """
        scorer = Scorer()
        docs, golds = zip(*docs_golds)
        docs = list(docs)
        golds = list(golds)
        # Run each component over the whole stream; prefer the batched
        # .pipe() API, fall back to calling the component per doc.
        for name, pipe in self.pipeline:
            if not hasattr(pipe, 'pipe'):
                docs = (pipe(doc) for doc in docs)
            else:
                docs = pipe.pipe(docs, batch_size=256)
        for doc, gold in zip(docs, golds):
            if verbose:
                print(doc)
            scorer.score(doc, gold, verbose=verbose)
        return scorer
|
2017-05-16 12:21:59 +03:00
|
|
|
|
2017-05-18 12:25:19 +03:00
|
|
|
    @contextmanager
    def use_params(self, params, **cfg):
        """Replace weights of models in the pipeline with those provided in the
        params dictionary. Can be used as a contextmanager, in which case,
        models go back to their original weights after the block.

        params (dict): A dictionary of parameters keyed by model ID.
        **cfg: Config parameters.

        EXAMPLE:
            >>> with nlp.use_params(optimizer.averages):
            >>>     nlp.to_disk('/tmp/checkpoint')
        """
        contexts = [pipe.use_params(params) for name, pipe
                    in self.pipeline if hasattr(pipe, 'use_params')]
        # TODO: Having trouble with contextlib
        # Workaround: these aren't actually context managers atm.
        # Advancing each generator to its first yield swaps the weights in;
        # advancing again (below) swaps them back.
        for context in contexts:
            try:
                next(context)
            except StopIteration:
                pass
        yield
        for context in contexts:
            try:
                next(context)
            except StopIteration:
                pass
|
|
|
|
|
2017-08-19 13:21:33 +03:00
|
|
|
    def pipe(self, texts, as_tuples=False, n_threads=2, batch_size=1000,
             disable=[], cleanup=False):
        """Process texts as a stream, and yield `Doc` objects in order.

        texts (iterator): A sequence of texts to process.
        as_tuples (bool):
            If set to True, inputs should be a sequence of
            (text, context) tuples. Output will then be a sequence of
            (doc, context) tuples. Defaults to False.
        n_threads (int): Currently inactive.
        batch_size (int): The number of texts to buffer.
        disable (list): Names of the pipeline components to disable.
        cleanup (bool): If True, unneeded strings are freed,
            to control memory use. Experimental.
        YIELDS (Doc): Documents in the order of the original text.

        EXAMPLE:
            >>> texts = [u'One document.', u'...', u'Lots of documents']
            >>> for doc in nlp.pipe(texts, batch_size=50, n_threads=4):
            >>>     assert doc.is_parsed
        """
        if as_tuples:
            # Split the (text, context) pairs into two parallel streams,
            # pipe the texts, then zip the contexts back onto the docs.
            text_context1, text_context2 = itertools.tee(texts)
            texts = (tc[0] for tc in text_context1)
            contexts = (tc[1] for tc in text_context2)
            docs = self.pipe(texts, n_threads=n_threads, batch_size=batch_size,
                             disable=disable)
            for doc, context in izip(docs, contexts):
                yield (doc, context)
            return
        docs = (self.make_doc(text) for text in texts)
        # Compose the pipeline lazily: each component wraps the generator.
        for name, proc in self.pipeline:
            if name in disable:
                continue
            if hasattr(proc, 'pipe'):
                docs = proc.pipe(docs, n_threads=n_threads,
                                 batch_size=batch_size)
            else:
                # Apply the function, but yield the doc
                docs = _pipe(proc, docs)
        # Track weakrefs of "recent" documents, so that we can see when they
        # expire from memory. When they do, we know we don't need old strings.
        # This way, we avoid maintaining an unbounded growth in string entries
        # in the string store.
        recent_refs = weakref.WeakSet()
        old_refs = weakref.WeakSet()
        # Keep track of the original string data, so that if we flush old strings,
        # we can recover the original ones. However, we only want to do this if we're
        # really adding strings, to save up-front costs.
        original_strings_data = None
        nr_seen = 0
        for doc in docs:
            yield doc
            if cleanup:
                recent_refs.add(doc)
                if nr_seen < 10000:
                    old_refs.add(doc)
                    nr_seen += 1
                elif len(old_refs) == 0:
                    # All "old" docs have been garbage-collected, so any
                    # strings not in original_strings_data are stale.
                    old_refs, recent_refs = recent_refs, old_refs
                    if original_strings_data is None:
                        original_strings_data = list(self.vocab.strings)
                    else:
                        keys, strings = self.vocab.strings._cleanup_stale_strings(original_strings_data)
                        self.vocab._reset_cache(keys, strings)
                        self.tokenizer._reset_cache(keys)
                    nr_seen = 0
|
2016-02-01 11:01:13 +03:00
|
|
|
|
2017-05-31 14:42:39 +03:00
|
|
|
def to_disk(self, path, disable=tuple()):
|
2017-05-26 13:33:54 +03:00
|
|
|
"""Save the current state to a directory. If a model is loaded, this
|
|
|
|
will include the model.
|
2017-04-17 02:40:26 +03:00
|
|
|
|
2017-05-19 00:57:38 +03:00
|
|
|
path (unicode or Path): A path to a directory, which will be created if
|
2017-10-27 15:40:14 +03:00
|
|
|
it doesn't exist. Paths may be strings or `Path`-like objects.
|
2017-05-31 14:42:39 +03:00
|
|
|
disable (list): Names of pipeline components to disable and prevent
|
2017-05-26 13:33:54 +03:00
|
|
|
from being saved.
|
2017-05-19 00:57:38 +03:00
|
|
|
|
|
|
|
EXAMPLE:
|
|
|
|
>>> nlp.to_disk('/path/to/models')
|
2017-05-17 13:04:50 +03:00
|
|
|
"""
|
|
|
|
path = util.ensure_path(path)
|
2017-05-31 14:42:39 +03:00
|
|
|
serializers = OrderedDict((
|
|
|
|
('tokenizer', lambda p: self.tokenizer.to_disk(p, vocab=False)),
|
|
|
|
('meta.json', lambda p: p.open('w').write(json_dumps(self.meta)))
|
|
|
|
))
|
2017-10-07 01:25:54 +03:00
|
|
|
for name, proc in self.pipeline:
|
2017-05-31 14:42:39 +03:00
|
|
|
if not hasattr(proc, 'name'):
|
|
|
|
continue
|
2017-10-07 01:25:54 +03:00
|
|
|
if name in disable:
|
2017-05-31 14:42:39 +03:00
|
|
|
continue
|
|
|
|
if not hasattr(proc, 'to_disk'):
|
|
|
|
continue
|
2017-10-07 01:25:54 +03:00
|
|
|
serializers[name] = lambda p, proc=proc: proc.to_disk(p, vocab=False)
|
2017-09-24 13:01:45 +03:00
|
|
|
serializers['vocab'] = lambda p: self.vocab.to_disk(p)
|
2017-05-31 14:42:39 +03:00
|
|
|
util.to_disk(path, serializers, {p: False for p in disable})
|
|
|
|
|
|
|
|
def from_disk(self, path, disable=tuple()):
|
2017-05-19 00:57:38 +03:00
|
|
|
"""Loads state from a directory. Modifies the object in place and
|
2017-05-26 13:33:54 +03:00
|
|
|
returns it. If the saved `Language` object contains a model, the
|
|
|
|
model will be loaded.
|
2017-05-17 13:04:50 +03:00
|
|
|
|
2017-05-19 00:57:38 +03:00
|
|
|
path (unicode or Path): A path to a directory. Paths may be either
|
|
|
|
strings or `Path`-like objects.
|
2017-05-26 13:33:54 +03:00
|
|
|
disable (list): Names of the pipeline components to disable.
|
2017-05-19 00:57:38 +03:00
|
|
|
RETURNS (Language): The modified `Language` object.
|
2017-05-17 13:04:50 +03:00
|
|
|
|
2017-05-19 00:57:38 +03:00
|
|
|
EXAMPLE:
|
|
|
|
>>> from spacy.language import Language
|
|
|
|
>>> nlp = Language().from_disk('/path/to/models')
|
2017-05-17 13:04:50 +03:00
|
|
|
"""
|
|
|
|
path = util.ensure_path(path)
|
2017-05-31 14:42:39 +03:00
|
|
|
deserializers = OrderedDict((
|
2018-04-04 00:11:48 +03:00
|
|
|
('meta.json', lambda p: self.meta.update(util.read_json(p))),
|
|
|
|
('vocab', lambda p: (
|
|
|
|
self.vocab.from_disk(p) and _fix_pretrained_vectors_name(self))),
|
2017-05-31 14:42:39 +03:00
|
|
|
('tokenizer', lambda p: self.tokenizer.from_disk(p, vocab=False)),
|
|
|
|
))
|
2017-10-07 01:25:54 +03:00
|
|
|
for name, proc in self.pipeline:
|
|
|
|
if name in disable:
|
2017-05-31 14:42:39 +03:00
|
|
|
continue
|
2018-06-29 15:32:16 +03:00
|
|
|
if not hasattr(proc, 'from_disk'):
|
2017-05-31 14:42:39 +03:00
|
|
|
continue
|
2017-10-07 01:25:54 +03:00
|
|
|
deserializers[name] = lambda p, proc=proc: proc.from_disk(p, vocab=False)
|
2017-06-01 15:38:35 +03:00
|
|
|
exclude = {p: False for p in disable}
|
|
|
|
if not (path / 'vocab').exists():
|
|
|
|
exclude['vocab'] = True
|
|
|
|
util.from_disk(path, deserializers, exclude)
|
2017-10-25 12:57:43 +03:00
|
|
|
self._path = path
|
2017-05-31 14:42:39 +03:00
|
|
|
return self
|
2017-05-17 13:04:50 +03:00
|
|
|
|
2017-10-17 19:18:10 +03:00
|
|
|
def to_bytes(self, disable=[], **exclude):
|
2017-05-17 13:04:50 +03:00
|
|
|
"""Serialize the current state to a binary string.
|
2016-12-18 18:54:52 +03:00
|
|
|
|
2017-05-26 13:33:54 +03:00
|
|
|
disable (list): Nameds of pipeline components to disable and prevent
|
|
|
|
from being serialized.
|
2017-05-19 00:57:38 +03:00
|
|
|
RETURNS (bytes): The serialized form of the `Language` object.
|
2017-05-17 13:04:50 +03:00
|
|
|
"""
|
2017-05-29 16:40:45 +03:00
|
|
|
serializers = OrderedDict((
|
|
|
|
('vocab', lambda: self.vocab.to_bytes()),
|
|
|
|
('tokenizer', lambda: self.tokenizer.to_bytes(vocab=False)),
|
2017-10-27 22:07:59 +03:00
|
|
|
('meta', lambda: json_dumps(self.meta))
|
2017-05-29 16:40:45 +03:00
|
|
|
))
|
2017-10-07 01:25:54 +03:00
|
|
|
for i, (name, proc) in enumerate(self.pipeline):
|
|
|
|
if name in disable:
|
2017-05-29 12:45:45 +03:00
|
|
|
continue
|
|
|
|
if not hasattr(proc, 'to_bytes'):
|
|
|
|
continue
|
2017-05-29 21:23:28 +03:00
|
|
|
serializers[i] = lambda proc=proc: proc.to_bytes(vocab=False)
|
2017-10-17 19:18:10 +03:00
|
|
|
return util.to_bytes(serializers, exclude)
|
2017-04-15 13:05:47 +03:00
|
|
|
|
2017-05-26 13:33:54 +03:00
|
|
|
def from_bytes(self, bytes_data, disable=[]):
|
2017-05-17 13:04:50 +03:00
|
|
|
"""Load state from a binary string.
|
|
|
|
|
2017-05-19 00:57:38 +03:00
|
|
|
bytes_data (bytes): The data to load from.
|
2017-05-26 13:33:54 +03:00
|
|
|
disable (list): Names of the pipeline components to disable.
|
2017-05-19 00:57:38 +03:00
|
|
|
RETURNS (Language): The `Language` object.
|
2017-05-17 13:04:50 +03:00
|
|
|
"""
|
2017-05-29 16:40:45 +03:00
|
|
|
deserializers = OrderedDict((
|
2018-04-04 00:11:48 +03:00
|
|
|
('meta', lambda b: self.meta.update(ujson.loads(b))),
|
|
|
|
('vocab', lambda b: (
|
|
|
|
self.vocab.from_bytes(b) and _fix_pretrained_vectors_name(self))),
|
2017-05-29 16:40:45 +03:00
|
|
|
('tokenizer', lambda b: self.tokenizer.from_bytes(b, vocab=False)),
|
|
|
|
))
|
2017-10-07 01:25:54 +03:00
|
|
|
for i, (name, proc) in enumerate(self.pipeline):
|
|
|
|
if name in disable:
|
2017-05-29 12:45:45 +03:00
|
|
|
continue
|
2017-05-29 16:40:45 +03:00
|
|
|
if not hasattr(proc, 'from_bytes'):
|
2017-05-29 12:45:45 +03:00
|
|
|
continue
|
2017-05-29 21:23:28 +03:00
|
|
|
deserializers[i] = lambda b, proc=proc: proc.from_bytes(b, vocab=False)
|
|
|
|
msg = util.from_bytes(bytes_data, deserializers, {})
|
2017-05-17 13:04:50 +03:00
|
|
|
return self
|
2017-05-22 02:43:31 +03:00
|
|
|
|
2017-05-29 12:45:45 +03:00
|
|
|
|
2018-03-28 17:02:59 +03:00
|
|
|
def _fix_pretrained_vectors_name(nlp):
|
|
|
|
# TODO: Replace this once we handle vectors consistently as static
|
|
|
|
# data
|
|
|
|
if 'vectors' in nlp.meta and nlp.meta['vectors'].get('name'):
|
|
|
|
nlp.vocab.vectors.name = nlp.meta['vectors']['name']
|
2018-03-28 22:08:58 +03:00
|
|
|
elif not nlp.vocab.vectors.size:
|
|
|
|
nlp.vocab.vectors.name = None
|
2018-03-28 17:02:59 +03:00
|
|
|
elif 'name' in nlp.meta and 'lang' in nlp.meta:
|
|
|
|
vectors_name = '%s_%s.vectors' % (nlp.meta['lang'], nlp.meta['name'])
|
|
|
|
nlp.vocab.vectors.name = vectors_name
|
|
|
|
else:
|
2018-04-03 22:40:29 +03:00
|
|
|
raise ValueError(Errors.E092)
|
2018-04-04 02:31:25 +03:00
|
|
|
if nlp.vocab.vectors.size != 0:
|
|
|
|
link_vectors_to_models(nlp.vocab)
|
2018-03-28 17:02:59 +03:00
|
|
|
for name, proc in nlp.pipeline:
|
|
|
|
if not hasattr(proc, 'cfg'):
|
|
|
|
continue
|
2018-04-04 00:11:48 +03:00
|
|
|
proc.cfg.setdefault('deprecation_fixes', {})
|
|
|
|
proc.cfg['deprecation_fixes']['vectors_name'] = nlp.vocab.vectors.name
|
2018-03-28 17:02:59 +03:00
|
|
|
|
2017-05-29 12:45:45 +03:00
|
|
|
|
2017-10-25 14:46:41 +03:00
|
|
|
class DisabledPipes(list):
    """Manager for temporary pipeline disabling.

    Holds the components removed from the pipeline so they can be put back
    with `restore()`, either explicitly or on exiting a `with` block.
    """
    def __init__(self, nlp, *names):
        self.nlp = nlp
        self.names = names
        # Important! Not deep copy -- we just want the container (but we also
        # want to support people providing arbitrarily typed nlp.pipeline
        # objects.)
        self.original_pipeline = copy(nlp.pipeline)
        list.__init__(self)
        removed = [nlp.remove_pipe(name) for name in names]
        self.extend(removed)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.restore()

    def restore(self):
        """Restore the pipeline to its state when DisabledPipes was created."""
        current = self.nlp.pipeline
        self.nlp.pipeline = self.original_pipeline
        unexpected = []
        for name, pipe in current:
            if not self.nlp.has_pipe(name):
                unexpected.append(name)
        if unexpected:
            # Don't change the pipeline if we're raising an error.
            self.nlp.pipeline = current
            raise ValueError(Errors.E008.format(names=unexpected))
        self[:] = []
|
|
|
|
|
|
|
|
|
2017-05-22 02:43:31 +03:00
|
|
|
def _pipe(func, docs):
|
|
|
|
for doc in docs:
|
2018-02-15 23:51:49 +03:00
|
|
|
doc = func(doc)
|
2017-05-22 02:43:31 +03:00
|
|
|
yield doc
|