strip data/ from package, friendlier Language invocation, make data_dir backward/forward-compatible
This commit is contained in:
parent 970278a3d6
commit 8359bd4d93
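The "friendlier Language invocation" in the commit message refers to the keyword-driven constructor introduced in the Language.__init__ hunk below. A minimal before/after sketch (the model name 'en_default' and the paths are taken from the new docstring; they assume a sputnik-installed model):

    from spacy.language import Language

    # deprecated, but still accepted: point directly at a model directory
    nlp = Language(data_dir='path/to/data')

    # new: identify the model by language id or by name/version
    nlp = Language(lang='en')
    nlp = Language(model='en_default', version='1.0.0')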
@@ -33,6 +33,10 @@ your yours yourself yourselves
 STOPWORDS = set(w for w in STOPWORDS.split() if w)
 
 class English(Language):
+    def __init__(self, **kwargs):
+        kwargs['lang'] = 'en'
+        super(English, self).__init__(**kwargs)
+
     @staticmethod
     def is_stop(string):
         return 1 if string.lower() in STOPWORDS else 0
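English now only injects lang='en' and defers to Language.__init__, so the usual entry point stays a bare constructor call. A short sketch, assuming the default English model is installed:

    from spacy.en import English

    nlp = English()                       # equivalent to Language(lang='en')
    nlp = English(data_dir='spacy/data')  # optionally override the package root dir
    English.is_stop('The')                # lowercases its input before the STOPWORDS lookup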
@@ -20,7 +20,7 @@ from .syntax.ner import BiluoPushDown
 from .syntax.arc_eager import ArcEager
 
 from .attrs import TAG, DEP, ENT_IOB, ENT_TYPE, HEAD
-from .util import default_package
+from .util import get_package
 
 
 class Language(object):
@@ -137,48 +137,100 @@ class Language(object):
     @classmethod
     def default_vocab(cls, package=None, get_lex_attr=None):
         if package is None:
-            package = default_package()
+            package = get_package()
         if get_lex_attr is None:
             get_lex_attr = cls.default_lex_attrs()
         return Vocab.from_package(package, get_lex_attr=get_lex_attr)
 
     @classmethod
     def default_parser(cls, package, vocab):
-        data_dir = package.dir_path('data', 'deps', require=False)
+        data_dir = package.dir_path('deps', require=False)
         if data_dir and path.exists(data_dir):
             return Parser.from_dir(data_dir, vocab.strings, ArcEager)
 
     @classmethod
     def default_entity(cls, package, vocab):
-        data_dir = package.dir_path('data', 'ner', require=False)
+        data_dir = package.dir_path('ner', require=False)
         if data_dir and path.exists(data_dir):
             return Parser.from_dir(data_dir, vocab.strings, BiluoPushDown)
 
-    def __init__(self, package=None, vocab=None, tokenizer=None, tagger=None,
-                 parser=None, entity=None, matcher=None, serializer=None,
-                 load_vectors=True):
+    def __init__(self, **kwargs):
+        """
+        a model can be specified:
+
+        1) by a path to the model directory (DEPRECATED)
+          - Language(data_dir='path/to/data')
+
+        2) by a language identifier (and optionally a package root dir)
+          - Language(lang='en')
+          - Language(lang='en', data_dir='spacy/data')
+
+        3) by a model name/version (and optionally a package root dir)
+          - Language(model='en_default')
+          - Language(model='en_default', version='1.0.0')
+          - Language(model='en_default', version='1.0.0', data_dir='spacy/data')
+        """
+
+        data_dir = kwargs.pop('data_dir', None)
+
+        lang = kwargs.pop('lang', None)
+        model = kwargs.pop('model', None)
+        version = kwargs.pop('version', None)
+
+        vocab = kwargs.pop('vocab', None)
+        tokenizer = kwargs.pop('tokenizer', None)
+        tagger = kwargs.pop('tagger', None)
+        parser = kwargs.pop('parser', None)
+        entity = kwargs.pop('entity', None)
+        matcher = kwargs.pop('matcher', None)
+        serializer = kwargs.pop('serializer', None)
+
+        load_vectors = kwargs.pop('load_vectors', True)
+
+        # support non-package data dirs
+        if data_dir and path.exists(path.join(data_dir, 'vocab')):
+            class Package(object):
+                def __init__(self, root):
+                    self.root = root
+
+                def has_file(self, *path_parts):
+                    return path.exists(path.join(self.root, *path_parts))
+
+                def file_path(self, *path_parts, **kwargs):
+                    return path.join(self.root, *path_parts)
+
+                def dir_path(self, *path_parts, **kwargs):
+                    return path.join(self.root, *path_parts)
+
+                def load_utf8(self, func, *path_parts, **kwargs):
+                    with io.open(self.file_path(path.join(*path_parts)),
+                                 mode='r', encoding='utf8') as f:
+                        return func(f)
+
+            warn("using non-package data_dir", DeprecationWarning)
+            package = Package(data_dir)
+        else:
+            if model is None:
+                model = '%s_default' % (lang or 'en')
+                version = None
+            print(model, version)
+            package = get_package(name=model, version=version,
+                                  data_path=data_dir)
+
         if load_vectors is not True:
             warn("load_vectors is deprecated", DeprecationWarning)
-        if package in (None, True):
-            package = default_package()
         if vocab in (None, True):
-            vocab = self.default_vocab(package)
+            self.vocab = self.default_vocab(package)
         if tokenizer in (None, True):
-            tokenizer = Tokenizer.from_package(package, vocab)
+            self.tokenizer = Tokenizer.from_package(package, self.vocab)
         if tagger in (None, True):
-            tagger = Tagger.from_package(package, vocab)
+            self.tagger = Tagger.from_package(package, self.vocab)
         if entity in (None, True):
-            entity = self.default_entity(package, vocab)
+            self.entity = self.default_entity(package, self.vocab)
         if parser in (None, True):
-            parser = self.default_parser(package, vocab)
+            self.parser = self.default_parser(package, self.vocab)
         if matcher in (None, True):
-            matcher = Matcher.from_package(package, vocab)
-        self.vocab = vocab
-        self.tokenizer = tokenizer
-        self.tagger = tagger
-        self.parser = parser
-        self.entity = entity
-        self.matcher = matcher
+            self.matcher = Matcher.from_package(package, self.vocab)
 
     def __reduce__(self):
         return (self.__class__,
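The backward compatibility promised in the commit message hinges on the vocab/ check above: a plain model directory is wrapped in the ad-hoc Package shim and triggers a DeprecationWarning, while lang/model arguments are resolved through sputnik via get_package. A rough sketch of the deprecated path ('/path/to/model' is a placeholder for an unpacked model directory containing vocab/):

    import warnings
    from spacy.language import Language

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        nlp = Language(data_dir='/path/to/model')   # non-package data dir
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)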
@@ -17,14 +17,14 @@ class Lemmatizer(object):
         exc = {}
         for pos in ['adj', 'noun', 'verb']:
             index[pos] = package.load_utf8(read_index,
-                'data', 'wordnet', 'index.%s' % pos,
+                'wordnet', 'index.%s' % pos,
                 default=set())  # TODO: really optional?
             exc[pos] = package.load_utf8(read_exc,
-                'data', 'wordnet', '%s.exc' % pos,
+                'wordnet', '%s.exc' % pos,
                 default={})  # TODO: really optional?
 
         rules = package.load_utf8(json.load,
-            'data', 'vocab', 'lemma_rules.json',
+            'vocab', 'lemma_rules.json',
             default={})  # TODO: really optional?
 
         return cls(index, exc, rules)
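This hunk and the ones below all apply the same mechanical change: resource paths passed to package.load_utf8, package.has_file and package.file_path drop the leading 'data' segment, since the packaged model no longer nests its files under data/. For example, taking the rules lookup above:

    # before: package.load_utf8(json.load, 'data', 'vocab', 'lemma_rules.json', default={})
    rules = package.load_utf8(json.load, 'vocab', 'lemma_rules.json', default={})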
@@ -171,7 +171,7 @@ cdef class Matcher:
     @classmethod
     def from_package(cls, package, Vocab vocab):
         patterns = package.load_utf8(json.load,
-            'data', 'vocab', 'gazetteer.json',
+            'vocab', 'gazetteer.json',
             default={})  # TODO: really optional?
         return cls(vocab, patterns)
 
@@ -148,15 +148,16 @@ cdef class Tagger:
     @classmethod
     def from_package(cls, package, vocab):
         # TODO: templates.json deprecated? not present in latest package
-        templates = package.load_utf8(json.load,
-            'data', 'pos', 'templates.json',
-            default=cls.default_templates())
+        templates = cls.default_templates()
+        # templates = package.load_utf8(json.load,
+        #     'pos', 'templates.json',
+        #     default=cls.default_templates())
 
         model = TaggerModel(vocab.morphology.n_tags,
             ConjunctionExtracter(N_CONTEXT_FIELDS, templates))
 
-        if package.has_file('data', 'pos', 'model'):  # TODO: really optional?
-            model.load(package.file_path('data', 'pos', 'model'))
+        if package.has_file('pos', 'model'):  # TODO: really optional?
+            model.load(package.file_path('pos', 'model'))
 
         return cls(vocab, model)
 
@@ -20,14 +20,14 @@ def lemmatizer(package):
 
 
 def test_read_index(package):
-    index = package.load_utf8(read_index, 'data', 'wordnet', 'index.noun')
+    index = package.load_utf8(read_index, 'wordnet', 'index.noun')
     assert 'man' in index
    assert 'plantes' not in index
     assert 'plant' in index
 
 
 def test_read_exc(package):
-    exc = package.load_utf8(read_exc, 'data', 'wordnet', 'verb.exc')
+    exc = package.load_utf8(read_exc, 'wordnet', 'verb.exc')
     assert exc['was'] == ('be',)
 
 
@@ -8,16 +8,20 @@ from sputnik import Sputnik
 from .attrs import TAG, HEAD, DEP, ENT_IOB, ENT_TYPE
 
 
-def default_package():
-    if os.environ.get('SPACY_DATA'):
-        data_path = os.environ.get('SPACY_DATA')
-    else:
-        data_path = os.path.abspath(
-            os.path.join(os.path.dirname(__file__), 'data'))
+def get_package(name=None, version=None, data_path=None):
+    if data_path is None:
+        if os.environ.get('SPACY_DATA'):
+            data_path = os.environ.get('SPACY_DATA')
+        else:
+            data_path = os.path.abspath(
+                os.path.join(os.path.dirname(__file__), 'data'))
 
-    sputnik = Sputnik('spacy', '0.99.0')  # TODO: retrieve version
+    sputnik = Sputnik('spacy', '0.100.0')  # TODO: retrieve version
     pool = sputnik.pool(data_path)
-    return pool.get('en_default')
+
+    if version:
+        name += ' ==%s' % version
+    return pool.get(name)
 
 
 def normalize_slice(length, start, stop, step=None):
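get_package replaces default_package: it takes a model name, an optional version constraint and an optional data path instead of hard-coding 'en_default', and SPACY_DATA still overrides the default location. A sketch of direct use (the install path is hypothetical):

    import os
    from spacy.util import get_package

    os.environ.setdefault('SPACY_DATA', '/opt/spacy/data')      # optional override
    package = get_package(name='en_default', version='1.0.0')   # sputnik query 'en_default ==1.0.0'
    print(package.dir_path('deps'))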
@@ -45,10 +49,10 @@ def utf8open(loc, mode='r'):
 
 
 def read_lang_data(package):
-    tokenization = package.load_utf8(json.load, 'data', 'tokenizer', 'specials.json')
-    prefix = package.load_utf8(read_prefix, 'data', 'tokenizer', 'prefix.txt')
-    suffix = package.load_utf8(read_suffix, 'data', 'tokenizer', 'suffix.txt')
-    infix = package.load_utf8(read_infix, 'data', 'tokenizer', 'infix.txt')
+    tokenization = package.load_utf8(json.load, 'tokenizer', 'specials.json')
+    prefix = package.load_utf8(read_prefix, 'tokenizer', 'prefix.txt')
+    suffix = package.load_utf8(read_suffix, 'tokenizer', 'suffix.txt')
+    infix = package.load_utf8(read_infix, 'tokenizer', 'infix.txt')
     return tokenization, prefix, suffix, infix
 
 
@@ -49,24 +49,24 @@ cdef class Vocab:
     @classmethod
     def from_package(cls, package, get_lex_attr=None):
         tag_map = package.load_utf8(json.load,
-            'data', 'vocab', 'tag_map.json')
+            'vocab', 'tag_map.json')
 
         lemmatizer = Lemmatizer.from_package(package)
 
         serializer_freqs = package.load_utf8(json.load,
-            'data', 'vocab', 'serializer.json',
+            'vocab', 'serializer.json',
             require=False)  # TODO: really optional?
 
         cdef Vocab self = cls(get_lex_attr=get_lex_attr, tag_map=tag_map,
                               lemmatizer=lemmatizer, serializer_freqs=serializer_freqs)
 
-        if package.has_file('data', 'vocab', 'strings.json'):  # TODO: really optional?
-            package.load_utf8(self.strings.load, 'data', 'vocab', 'strings.json')
-            self.load_lexemes(package.file_path('data', 'vocab', 'lexemes.bin'))
+        if package.has_file('vocab', 'strings.json'):  # TODO: really optional?
+            package.load_utf8(self.strings.load, 'vocab', 'strings.json')
+            self.load_lexemes(package.file_path('vocab', 'lexemes.bin'))
 
-        if package.has_file('data', 'vocab', 'vec.bin'):  # TODO: really optional?
+        if package.has_file('vocab', 'vec.bin'):  # TODO: really optional?
             self.vectors_length = self.load_vectors_from_bin_loc(
-                package.file_path('data', 'vocab', 'vec.bin'))
+                package.file_path('vocab', 'vec.bin'))
 
         return self
 
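With the same package handle, the individual components can still be wired together by hand when the full Language pipeline is not needed; a minimal sketch, assuming an en_default package is resolvable:

    from spacy.util import get_package
    from spacy.vocab import Vocab
    from spacy.tokenizer import Tokenizer

    package = get_package(name='en_default')
    vocab = Vocab.from_package(package)
    tokenizer = Tokenizer.from_package(package, vocab)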