mirror of
https://github.com/explosion/spaCy.git
synced 2025-01-13 02:36:32 +03:00
distinct load() and from_package() methods
This commit is contained in:
parent
634ea57876
commit
846fa49b2a
|
@@ -4,4 +4,4 @@ from .en import English
|
||||||
|
|
||||||
def load(name, via=None):
|
def load(name, via=None):
|
||||||
package = util.get_package_by_name(name, via=via)
|
package = util.get_package_by_name(name, via=via)
|
||||||
return English(package)
|
return English(package=package)
|
||||||
|
|
|
@@ -140,7 +140,7 @@ class Language(object):
|
||||||
def default_vocab(cls, package, get_lex_attr=None):
|
def default_vocab(cls, package, get_lex_attr=None):
|
||||||
if get_lex_attr is None:
|
if get_lex_attr is None:
|
||||||
get_lex_attr = cls.default_lex_attrs()
|
get_lex_attr = cls.default_lex_attrs()
|
||||||
return Vocab.load(package, get_lex_attr=get_lex_attr)
|
return Vocab.from_package(package, get_lex_attr=get_lex_attr)
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def default_parser(cls, package, vocab):
|
def default_parser(cls, package, vocab):
|
||||||
|
@@ -164,7 +164,8 @@ class Language(object):
|
||||||
entity=None,
|
entity=None,
|
||||||
matcher=None,
|
matcher=None,
|
||||||
serializer=None,
|
serializer=None,
|
||||||
load_vectors=True):
|
load_vectors=True,
|
||||||
|
package=None):
|
||||||
"""
|
"""
|
||||||
a model can be specified:
|
a model can be specified:
|
||||||
|
|
||||||
|
@@ -182,30 +183,29 @@ class Language(object):
|
||||||
4) by package name with a relocated package base
|
4) by package name with a relocated package base
|
||||||
- spacy.load('en_default', via='/my/package/root')
|
- spacy.load('en_default', via='/my/package/root')
|
||||||
- spacy.load('en_default==1.0.0', via='/my/package/root')
|
- spacy.load('en_default==1.0.0', via='/my/package/root')
|
||||||
|
|
||||||
5) by package object
|
|
||||||
- spacy.en.English(package)
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
if data_dir is not None and via is None:
|
if data_dir is not None and via is None:
|
||||||
warn("Use of data_dir is deprecated, use via instead.", DeprecationWarning)
|
warn("Use of data_dir is deprecated, use via instead.", DeprecationWarning)
|
||||||
via = data_dir
|
via = data_dir
|
||||||
|
|
||||||
if via is None:
|
if package is None:
|
||||||
package = util.get_package_by_name()
|
if via is None:
|
||||||
else:
|
package = util.get_package_by_name()
|
||||||
package = util.get_package(via)
|
else:
|
||||||
|
package = util.get_package(via)
|
||||||
|
|
||||||
if load_vectors is not True:
|
if load_vectors is not True:
|
||||||
warn("load_vectors is deprecated", DeprecationWarning)
|
warn("load_vectors is deprecated", DeprecationWarning)
|
||||||
|
|
||||||
if vocab in (None, True):
|
if vocab in (None, True):
|
||||||
vocab = Vocab.load(package, get_lex_attr=self.default_lex_attrs())
|
vocab = self.default_vocab(package)
|
||||||
self.vocab = vocab
|
self.vocab = vocab
|
||||||
if tokenizer in (None, True):
|
if tokenizer in (None, True):
|
||||||
tokenizer = Tokenizer.load(package, self.vocab)
|
tokenizer = Tokenizer.from_package(package, self.vocab)
|
||||||
self.tokenizer = tokenizer
|
self.tokenizer = tokenizer
|
||||||
if tagger in (None, True):
|
if tagger in (None, True):
|
||||||
tagger = Tagger.load(package, self.vocab)
|
tagger = Tagger.from_package(package, self.vocab)
|
||||||
self.tagger = tagger
|
self.tagger = tagger
|
||||||
if entity in (None, True):
|
if entity in (None, True):
|
||||||
entity = self.default_entity(package, self.vocab)
|
entity = self.default_entity(package, self.vocab)
|
||||||
|
@@ -214,7 +214,7 @@ class Language(object):
|
||||||
parser = self.default_parser(package, self.vocab)
|
parser = self.default_parser(package, self.vocab)
|
||||||
self.parser = parser
|
self.parser = parser
|
||||||
if matcher in (None, True):
|
if matcher in (None, True):
|
||||||
matcher = Matcher.load(package, self.vocab)
|
matcher = Matcher.from_package(package, self.vocab)
|
||||||
self.matcher = matcher
|
self.matcher = matcher
|
||||||
|
|
||||||
def __reduce__(self):
|
def __reduce__(self):
|
||||||
|
|
|
@@ -14,7 +14,10 @@ from .util import get_package
|
||||||
class Lemmatizer(object):
|
class Lemmatizer(object):
|
||||||
@classmethod
|
@classmethod
|
||||||
def load(cls, via):
|
def load(cls, via):
|
||||||
pkg = get_package(via)
|
return cls.from_package(get_package(via))
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def from_package(cls, pkg):
|
||||||
index = {}
|
index = {}
|
||||||
exc = {}
|
exc = {}
|
||||||
for pos in ['adj', 'noun', 'verb']:
|
for pos in ['adj', 'noun', 'verb']:
|
||||||
|
|
|
@@ -171,7 +171,10 @@ cdef class Matcher:
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def load(cls, via, Vocab vocab):
|
def load(cls, via, Vocab vocab):
|
||||||
package = get_package(via)
|
return cls.from_package(get_package(via), vocab=vocab)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def from_package(cls, package, Vocab vocab):
|
||||||
patterns = package.load_json(('vocab', 'gazetteer.json'))
|
patterns = package.load_json(('vocab', 'gazetteer.json'))
|
||||||
return cls(vocab, patterns)
|
return cls(vocab, patterns)
|
||||||
|
|
||||||
|
|
|
@@ -149,7 +149,10 @@ cdef class Tagger:
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def load(cls, via, vocab):
|
def load(cls, via, vocab):
|
||||||
pkg = get_package(via)
|
return cls.from_package(get_package(via), vocab=vocab)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def from_package(cls, pkg, vocab):
|
||||||
# TODO: templates.json deprecated? not present in latest package
|
# TODO: templates.json deprecated? not present in latest package
|
||||||
templates = cls.default_templates()
|
templates = cls.default_templates()
|
||||||
# templates = package.load_utf8(json.load,
|
# templates = package.load_utf8(json.load,
|
||||||
|
|
|
@@ -43,8 +43,11 @@ cdef class Tokenizer:
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def load(cls, via, Vocab vocab):
|
def load(cls, via, Vocab vocab):
|
||||||
pkg = get_package(via)
|
return cls.from_package(get_package(via), vocab=vocab)
|
||||||
rules, prefix_re, suffix_re, infix_re = read_lang_data(pkg)
|
|
||||||
|
@classmethod
|
||||||
|
def from_package(cls, package, Vocab vocab):
|
||||||
|
rules, prefix_re, suffix_re, infix_re = read_lang_data(package)
|
||||||
prefix_re = re.compile(prefix_re)
|
prefix_re = re.compile(prefix_re)
|
||||||
suffix_re = re.compile(suffix_re)
|
suffix_re = re.compile(suffix_re)
|
||||||
infix_re = re.compile(infix_re)
|
infix_re = re.compile(infix_re)
|
||||||
|
|
|
@@ -4,9 +4,9 @@ import json
|
||||||
import re
|
import re
|
||||||
import os.path
|
import os.path
|
||||||
|
|
||||||
|
import six
|
||||||
import sputnik
|
import sputnik
|
||||||
from sputnik.dir_package import DirPackage
|
from sputnik.dir_package import DirPackage
|
||||||
from sputnik.package_stub import PackageStub
|
|
||||||
from sputnik.package_list import (PackageNotFoundException,
|
from sputnik.package_list import (PackageNotFoundException,
|
||||||
CompatiblePackageNotFoundException)
|
CompatiblePackageNotFoundException)
|
||||||
|
|
||||||
|
@@ -15,8 +15,8 @@ from .attrs import TAG, HEAD, DEP, ENT_IOB, ENT_TYPE
|
||||||
|
|
||||||
|
|
||||||
def get_package(via=None):
|
def get_package(via=None):
|
||||||
if isinstance(via, PackageStub):
|
if not isinstance(via, six.string_types):
|
||||||
return via
|
raise RuntimeError('via must be a string')
|
||||||
return DirPackage(via)
|
return DirPackage(via)
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@@ -49,10 +49,13 @@ cdef class Vocab:
|
||||||
'''
|
'''
|
||||||
@classmethod
|
@classmethod
|
||||||
def load(cls, via, get_lex_attr=None):
|
def load(cls, via, get_lex_attr=None):
|
||||||
package = get_package(via)
|
return cls.from_package(get_package(via), get_lex_attr=get_lex_attr)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def from_package(cls, package, get_lex_attr=None):
|
||||||
tag_map = package.load_json(('vocab', 'tag_map.json'), default={})
|
tag_map = package.load_json(('vocab', 'tag_map.json'), default={})
|
||||||
|
|
||||||
lemmatizer = Lemmatizer.load(package)
|
lemmatizer = Lemmatizer.from_package(package)
|
||||||
|
|
||||||
serializer_freqs = package.load_json(('vocab', 'serializer.json'), default={})
|
serializer_freqs = package.load_json(('vocab', 'serializer.json'), default={})
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue
Block a user