Mirror of https://github.com/explosion/spaCy.git (synced 2025-01-13 18:56:36 +03:00)

Commit c27393a85d: Merge remote-tracking branch 'refs/remotes/honnibal/master'
@@ -12,9 +12,9 @@ environment:
   PYTHON_VERSION: "2.7.10"
   PYTHON_ARCH: "64"

-  #- PYTHON: "C:\\Python27.10-x32"
-  #  PYTHON_VERSION: "2.7.10"
-  #  PYTHON_ARCH: "32"
+  - PYTHON: "C:\\Python27.10-x32"
+    PYTHON_VERSION: "2.7.10"
+    PYTHON_ARCH: "32"

   # The lastest Python 3.4.
   - PYTHON: "C:\\Python34-x64"
@@ -38,10 +38,11 @@ install:
   - "SET PYTHONPATH=%CD%;%PYTHONPATH%"

   # Filesystem root
-  - ps: "ls \"C:/\""
+  #- ps: "ls \"C:/\""
+  #- SET

   # Installed SDKs
-  - ps: "ls \"C:/Program Files/Microsoft SDKs/Windows\""
+  #- ps: "ls \"C:/Program Files/Microsoft SDKs/Windows\""

   # Checking stdint.h
   #- ps: "ls \"C:/projects/spacy/include/\""
@@ -191,7 +191,8 @@ def setup_vocab(get_lex_attr, tag_map, src_dir, dst_dir):
         else:
             lexeme.cluster = 0
     vocab.dump(str(dst_dir / 'lexemes.bin'))
-    vocab.strings.dump(str(dst_dir / 'strings.txt'))
+    with (dst_dir / 'strings.json').open('w') as file_:
+        vocab.strings.dump(file_)
     with (dst_dir / 'oov_prob').open('w') as file_:
        file_.write('%f' % oov_prob)
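For reference, the string table is now written through an open file handle rather than a path. A minimal sketch of the pattern, assuming a pathlib-style dst_dir and a spaCy Vocab object like the one used above:

from pathlib import Path

def dump_strings(vocab, dst_dir):
    # Sketch only: `vocab` is assumed to be a spaCy Vocab whose
    # StringStore.dump() (per this commit) takes an open file object and
    # writes the interned strings as a single JSON array.
    dst_dir = Path(dst_dir)
    with (dst_dir / 'strings.json').open('w') as file_:
        vocab.strings.dump(file_)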
fabfile.py (vendored): 2 changes
@@ -68,7 +68,7 @@ def publish(version):
     local('git push origin %s' % version)
     local('python setup.py sdist')
     local('python setup.py register')
-    local('twine upload dist/%s.tar.gz' % version)
+    local('twine upload dist/spacy-%s.tar.gz' % version)


 def env(lang="python2.7"):
setup.py: 6 changes
@@ -129,11 +129,13 @@ def cython_setup(mod_names, language, includes):
         version=VERSION,
         url="http://honnibal.github.io/spaCy/",
         package_data={"spacy": ["*.pxd"],
+                      "spacy.tokens": ["*.pxd"],
+                      "spacy.serialize": ["*.pxd"],
                       "spacy.en": ["*.pxd", "data/pos/*",
                                    "data/wordnet/*", "data/tokenizer/*",
                                    "data/vocab/tag_map.json",
                                    "data/vocab/lexemes.bin",
-                                   "data/vocab/strings.txt"],
+                                   "data/vocab/strings.json"],
                       "spacy.syntax": ["*.pxd"]},
         ext_modules=exts,
         cmdclass={'build_ext': build_ext_cython_subclass},
@@ -175,7 +177,7 @@ def run_setup(exts):
     headers_workaround.install_headers('numpy')


-VERSION = '0.96'
+VERSION = '0.97'
 def main(modules, is_pypy):
     language = "cpp"
     includes = ['.', path.join(sys.prefix, 'include')]
@@ -1,5 +1,4 @@
 from __future__ import print_function
-from os import path
 import sys
 import os
 import tarfile
@@ -8,52 +7,58 @@ import plac

 from . import uget


+try:
+    FileExistsError
+except NameError:
+    FileExistsError = Exception


 # TODO: Read this from the same source as the setup
-VERSION = '0.9.5'
+VERSION = '0.9.6'

 AWS_STORE = 'https://s3-us-west-1.amazonaws.com/media.spacynlp.com'

 ALL_DATA_DIR_URL = '%s/en_data_all-%s.tgz' % (AWS_STORE, VERSION)

-DEST_DIR = path.join(path.dirname(path.abspath(__file__)), 'data')
+DEST_DIR = os.path.dirname(os.path.abspath(__file__))


-def download_file(url, dest_dir):
-    return uget.download(url, dest_dir, console=sys.stdout)
+def download_file(url, download_path):
+    return uget.download(url, download_path, console=sys.stdout)


-def install_data(url, dest_dir):
-    filename = download_file(url, dest_dir)
-    t = tarfile.open(filename)
-    t.extractall(dest_dir)
-
-
-def install_parser_model(url, dest_dir):
-    filename = download_file(url, dest_dir)
-    t = tarfile.open(filename, mode=":gz")
-    t.extractall(dest_dir)
-
-
-def install_dep_vectors(url, dest_dir):
-    download_file(url, dest_dir)
+def install_data(url, extract_path, download_path):
+    try:
+        os.makedirs(extract_path)
+    except FileExistsError:
+        pass
+
+    tmp = download_file(url, download_path)
+    assert tmp == download_path
+    t = tarfile.open(download_path)
+    t.extractall(extract_path)


 @plac.annotations(
     force=("Force overwrite", "flag", "f", bool),
 )
 def main(data_size='all', force=False):
-    if data_size == 'all':
-        data_url = ALL_DATA_DIR_URL
-    elif data_size == 'small':
-        data_url = SM_DATA_DIR_URL
+    filename = ALL_DATA_DIR_URL.rsplit('/', 1)[1]
+    download_path = os.path.join(DEST_DIR, filename)
+    data_path = os.path.join(DEST_DIR, 'data')

-    if force and path.exists(DEST_DIR):
-        shutil.rmtree(DEST_DIR)
+    if force and os.path.exists(download_path):
+        os.unlink(download_path)

-    if not os.path.exists(DEST_DIR):
-        os.makedirs(DEST_DIR)
+    if force and os.path.exists(data_path):
+        shutil.rmtree(data_path)

-    install_data(data_url, DEST_DIR)
+    if os.path.exists(data_path):
+        print('data already installed at %s, overwrite with --force' % DEST_DIR)
+        sys.exit(1)
+
+    install_data(ALL_DATA_DIR_URL, DEST_DIR, download_path)


 if __name__ == '__main__':
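Taken together, the rewritten downloader keeps the archive next to the package, creates the target directory idempotently, and extracts into it. A self-contained sketch of that download-and-extract pattern; the URL, paths, and urlretrieve stand-in (replacing spaCy's uget helper) are illustrative assumptions, not the shipped values:

import os
import tarfile
import urllib.request

def install_archive(url, extract_path, download_path):
    try:
        os.makedirs(extract_path)
    except FileExistsError:
        # Already created on a previous run; keep going.
        pass
    if not os.path.exists(download_path):
        # Stand-in for uget.download(url, download_path, console=sys.stdout).
        urllib.request.urlretrieve(url, download_path)
    with tarfile.open(download_path) as t:
        t.extractall(extract_path)

# Example with a hypothetical archive:
# install_archive('https://example.com/en_data_all-0.9.6.tgz', 'data', 'en_data_all-0.9.6.tgz')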
@@ -1,5 +1,6 @@
 from os import path
 from warnings import warn
+import io

 try:
     import ujson as json
@@ -247,7 +248,10 @@ class Language(object):
         self.parser.model.end_training(path.join(data_dir, 'deps', 'model'))
         self.entity.model.end_training(path.join(data_dir, 'ner', 'model'))
         self.tagger.model.end_training(path.join(data_dir, 'pos', 'model'))
-        self.vocab.strings.dump(path.join(data_dir, 'vocab', 'strings.txt'))
+        strings_loc = path.join(data_dir, 'vocab', 'strings.json')
+        with io.open(strings_loc, 'w', encoding='utf8') as file_:
+            self.vocab.strings.dump(file_)

         with open(path.join(data_dir, 'vocab', 'serializer.json'), 'w') as file_:
             file_.write(
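After this change, a trained model directory stores its string table as UTF-8 JSON. A small sketch of reading it back outside spaCy, assuming the file is a plain JSON array, which is what the new StringStore.dump writes (see the strings hunk below); the helper name here is illustrative:

import io
import json
from os import path

def read_strings(data_dir):
    # strings.json is written by Language.end_training as a JSON array of the
    # vocab's interned strings.
    with io.open(path.join(data_dir, 'vocab', 'strings.json'), encoding='utf8') as file_:
        return json.load(file_)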
@@ -12,8 +12,15 @@ from libc.stdint cimport int64_t

 from .typedefs cimport hash_t, attr_t

+try:
+    import codecs as io
+except ImportError:
+    import io

-SEPARATOR = '\n|-SEP-|\n'
+try:
+    import ujson as json
+except ImportError:
+    import json


 cpdef hash_t hash_string(unicode string) except 0:
@@ -114,7 +121,11 @@ cdef class StringStore:
     def __iter__(self):
         cdef int i
         for i in range(self.size):
-            yield self[i]
+            if i == 0:
+                yield u''
+            else:
+                utf8str = &self.c[i]
+                yield _decode(utf8str)

     def __reduce__(self):
         strings = [""]
@@ -138,28 +149,22 @@ cdef class StringStore:
         self.size += 1
         return &self.c[self.size-1]

-    def dump(self, loc):
-        cdef Utf8Str* string
-        cdef unicode py_string
-        cdef int i
-        with codecs.open(loc, 'w', 'utf8') as file_:
-            for i in range(1, self.size):
-                string = &self.c[i]
-                py_string = _decode(string)
-                file_.write(py_string)
-                if (i+1) != self.size:
-                    file_.write(SEPARATOR)
-
-    def load(self, loc):
-        with codecs.open(loc, 'r', 'utf8') as file_:
-            strings = file_.read().split(SEPARATOR)
+    def dump(self, file_):
+        string_data = json.dumps([s for s in self])
+        if not isinstance(string_data, unicode):
+            string_data = string_data.decode('utf8')
+        file_.write(string_data)
+
+    def load(self, file_):
+        strings = json.load(file_)
         if strings == ['']:
             return None
         cdef unicode string
         cdef bytes byte_string
         for string in strings:
-            byte_string = string.encode('utf8')
-            self.intern(byte_string, len(byte_string))
+            if string:
+                byte_string = string.encode('utf8')
+                self.intern(byte_string, len(byte_string))

     def _realloc(self):
         # We want to map straight to pointers, but they'll be invalidated if
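With this change the string table is a single JSON document instead of a SEPARATOR-delimited text file. A sketch of the file format and round trip in plain Python, based only on what the new dump()/load() bodies above do; the temporary path is an example:

import io
import json

# dump() writes a JSON array of every interned string, with the empty string
# at index 0 (see the new __iter__ above), e.g. ["", "Hello", "world"].
strings = ["", "Hello", "world"]
with io.open('/tmp/strings.json', 'w', encoding='utf8') as file_:
    file_.write(json.dumps(strings))

# load() parses the array and re-interns every non-empty entry.
with io.open('/tmp/strings.json', 'r', encoding='utf8') as file_:
    reloaded = [s for s in json.load(file_) if s]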
@@ -120,6 +120,9 @@ cdef class Doc:
     def __str__(self):
         return u''.join([t.string for t in self])

+    def __repr__(self):
+        return u''.join([t.string for t in self])
+
     def similarity(self, other):
         if self.vector_norm == 0 or other.vector_norm == 0:
             return 0.0
@@ -46,6 +46,12 @@ cdef class Span:
             return 0
         return self.end - self.start

+    def __repr__(self):
+        text = self.text_with_ws
+        if self[-1].whitespace_:
+            text = text[:-1]
+        return text
+
     def __getitem__(self, object i):
         if isinstance(i, slice):
             start, end = normalize_slice(len(self), i.start, i.stop, i.step)
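The effect of these __repr__ additions (here and in the Doc and Token hunks) is that objects print as their text at the interpreter instead of the default object repr. A hypothetical interactive session, assuming the English model data is installed:

# Hypothetical REPL usage; requires the data fetched by this commit's downloader.
from spacy.en import English

nlp = English()
doc = nlp(u'Hello world!')
doc        # Doc repr: the document text, not <Doc object at 0x...>
doc[0:2]   # Span repr: the span's text, trailing whitespace trimmed
doc[0]     # Token repr: the token's string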
@@ -43,6 +43,9 @@ cdef class Token:
     def __str__(self):
         return self.string

+    def __repr__(self):
+        return self.string
+
     cpdef bint check_flag(self, attr_id_t flag_id) except -1:
         return Lexeme.c_check_flag(self.c.lex, flag_id)
@@ -62,7 +62,9 @@ cdef class Vocab:
         cdef Vocab self = cls(get_lex_attr=get_lex_attr, tag_map=tag_map,
                               lemmatizer=lemmatizer, serializer_freqs=serializer_freqs)

-        self.load_lexemes(path.join(data_dir, 'strings.txt'), path.join(data_dir, 'lexemes.bin'))
+        with io.open(path.join(data_dir, 'strings.json'), 'r', encoding='utf8') as file_:
+            self.strings.load(file_)
+        self.load_lexemes(path.join(data_dir, 'lexemes.bin'))
         if path.exists(path.join(data_dir, 'vec.bin')):
             self.vectors_length = self.load_vectors_from_bin_loc(path.join(data_dir, 'vec.bin'))
         return self
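The vocabulary directory this loader expects therefore contains strings.json alongside lexemes.bin (and optionally vec.bin). A sketch of loading the pieces by hand, mirroring the calls above; the classmethod wrapping them is not shown in the hunk, so `vocab` is assumed to be an already-constructed spaCy Vocab and the helper name is illustrative:

import io
from os import path

def load_vocab_data(vocab, data_dir):
    # Strings first (JSON), then the binary lexeme records.
    with io.open(path.join(data_dir, 'strings.json'), 'r', encoding='utf8') as file_:
        vocab.strings.load(file_)
    vocab.load_lexemes(path.join(data_dir, 'lexemes.bin'))
    return vocab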
@@ -106,11 +108,12 @@ cdef class Vocab:
         # TODO: Dump vectors
         tmp_dir = tempfile.mkdtemp()
         lex_loc = path.join(tmp_dir, 'lexemes.bin')
-        str_loc = path.join(tmp_dir, 'strings.txt')
+        str_loc = path.join(tmp_dir, 'strings.json')
         vec_loc = path.join(self.data_dir, 'vec.bin') if self.data_dir is not None else None

         self.dump(lex_loc)
-        self.strings.dump(str_loc)
+        with io.open(str_loc, 'w', encoding='utf8') as file_:
+            self.strings.dump(file_)

         state = (str_loc, lex_loc, vec_loc, self.morphology, self.get_lex_attr,
                  self.serializer_freqs, self.data_dir)
@@ -250,8 +253,7 @@ cdef class Vocab:
             fp.write_from(&lexeme.l2_norm, sizeof(lexeme.l2_norm), 1)
         fp.close()

-    def load_lexemes(self, strings_loc, loc):
-        self.strings.load(strings_loc)
+    def load_lexemes(self, loc):
         if not path.exists(loc):
             raise IOError('LexemeCs file not found at %s' % loc)
         fp = CFile(loc, 'rb')
@@ -369,7 +371,9 @@ def unpickle_vocab(strings_loc, lex_loc, vec_loc, morphology, get_lex_attr,
    vocab.data_dir = data_dir
    vocab.serializer_freqs = serializer_freqs

-    vocab.load_lexemes(strings_loc, lex_loc)
+    with io.open(strings_loc, 'r', encoding='utf8') as file_:
+        vocab.strings.load(file_)
+    vocab.load_lexemes(lex_loc)
     if vec_loc is not None:
         vocab.load_vectors_from_bin_loc(vec_loc)
     return vocab
@@ -1,12 +1,13 @@
 # -*- coding: utf8 -*-
 from __future__ import unicode_literals
 import pickle
-import io

 from spacy.strings import StringStore

 import pytest

+import io


 @pytest.fixture
 def sstore():
@@ -92,4 +93,12 @@ def test_pickle_string_store(sstore):
     assert loaded[hello_id] == u'Hi'


+def test_dump_load(sstore):
+    id_ = sstore[u'qqqqq']
+    loc = '/tmp/sstore.json'
+    with io.open(loc, 'w', encoding='utf8') as file_:
+        sstore.dump(file_)
+    new_store = StringStore()
+    with io.open(loc, 'r', encoding='utf8') as file_:
+        new_store.load(file_)
+    assert new_store[id_] == u'qqqqq'