Merge branch 'master' of ssh://github.com/honnibal/spaCy into develop
Commit 23855db3ca
@@ -8,16 +8,24 @@ environment:
   matrix:
 
     # Python 2.7.10 is the latest version and is not pre-installed.
 
     - PYTHON: "C:\\Python27.10-x64"
       PYTHON_VERSION: "2.7.10"
       PYTHON_ARCH: "64"
 
+    - PYTHON: "C:\\Python27.10-x32"
+      PYTHON_VERSION: "2.7.10"
+      PYTHON_ARCH: "32"
+
     # The lastest Python 3.4.
     - PYTHON: "C:\\Python34-x64"
       PYTHON_VERSION: "3.4.x" # currently 3.4.3
       PYTHON_ARCH: "64"
 
+    #- PYTHON: "C:\\Python34-x32"
+    #  PYTHON_VERSION: "3.4.x" # currently 3.4.3
+    #  PYTHON_ARCH: "32"
+
 install:
   # Install Python (from the official .msi of http://python.org) and pip when
   # not already installed.

@@ -31,6 +39,7 @@ install:
 
   # Filesystem root
   #- ps: "ls \"C:/\""
+  #- SET
 
   # Installed SDKs
   #- ps: "ls \"C:/Program Files/Microsoft SDKs/Windows\""
@@ -1,3 +1,6 @@
+<img src="https://ci.appveyor.com/api/projects/status/aoe3dtkep36rdaqf?svg=true" />
+
+
 spaCy: Industrial-strength NLP
 ==============================
 

@@ -49,3 +52,6 @@ Difficult to support:
 
 * PyPy 2.7
 * PyPy 3.4
+
+
+
@@ -1,5 +1,4 @@
 from __future__ import print_function
-from os import path
 import sys
 import os
 import tarfile

@@ -15,45 +14,44 @@ AWS_STORE = 'https://s3-us-west-1.amazonaws.com/media.spacynlp.com'
 
 ALL_DATA_DIR_URL = '%s/en_data_all-%s.tgz' % (AWS_STORE, VERSION)
 
-DEST_DIR = path.join(path.dirname(path.abspath(__file__)), 'data')
+DEST_DIR = os.path.dirname(os.path.abspath(__file__))
 
 
-def download_file(url, dest_dir):
-    return uget.download(url, dest_dir, console=sys.stdout)
+def download_file(url, download_path):
+    return uget.download(url, download_path, console=sys.stdout)
 
 
-def install_data(url, dest_dir):
-    filename = download_file(url, dest_dir)
-    t = tarfile.open(filename)
-    t.extractall(dest_dir)
-
-
-def install_parser_model(url, dest_dir):
-    filename = download_file(url, dest_dir)
-    t = tarfile.open(filename, mode=":gz")
-    t.extractall(dest_dir)
-
-
-def install_dep_vectors(url, dest_dir):
-    download_file(url, dest_dir)
+def install_data(url, extract_path, download_path):
+    try:
+        os.makedirs(extract_path)
+    except FileExistsError:
+        pass
+
+    tmp = download_file(url, download_path)
+    assert tmp == download_path
+    t = tarfile.open(download_path)
+    t.extractall(extract_path)
 
 
 @plac.annotations(
     force=("Force overwrite", "flag", "f", bool),
 )
 def main(data_size='all', force=False):
-    if data_size == 'all':
-        data_url = ALL_DATA_DIR_URL
-    elif data_size == 'small':
-        data_url = SM_DATA_DIR_URL
+    filename = ALL_DATA_DIR_URL.rsplit('/', 1)[1]
+    download_path = os.path.join(DEST_DIR, filename)
+    data_path = os.path.join(DEST_DIR, 'data')
 
-    if force and path.exists(DEST_DIR):
-        shutil.rmtree(DEST_DIR)
+    if force and os.path.exists(download_path):
+        os.unlink(download_path)
 
-    if not os.path.exists(DEST_DIR):
-        os.makedirs(DEST_DIR)
+    if force and os.path.exists(data_path):
+        shutil.rmtree(data_path)
 
-    install_data(data_url, DEST_DIR)
+    if os.path.exists(data_path):
+        print('data already installed at %s, overwrite with --force' % DEST_DIR)
+        sys.exit(1)
+
+    install_data(ALL_DATA_DIR_URL, DEST_DIR, download_path)
 
 
 if __name__ == '__main__':
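Taken together, the rewritten download script now fetches the archive into DEST_DIR (the directory containing the script), extracts it in place so the content ends up in a data/ subdirectory, and refuses to re-install unless --force has first removed the previous download and data directory. A minimal self-contained sketch of that flow, using urllib in place of the uget helper and a plain function argument instead of the plac command line (both are illustrative stand-ins, not the script's actual wiring):

    import os
    import shutil
    import sys
    import tarfile
    import urllib.request  # stand-in for the uget helper used by the real script

    DEST_DIR = os.path.dirname(os.path.abspath(__file__))

    def install_data(url, extract_path, download_path):
        # Make sure the extraction directory exists, then download and unpack.
        try:
            os.makedirs(extract_path)
        except FileExistsError:
            pass
        urllib.request.urlretrieve(url, download_path)
        with tarfile.open(download_path) as archive:
            archive.extractall(extract_path)

    def main(url, force=False):
        filename = url.rsplit('/', 1)[1]
        download_path = os.path.join(DEST_DIR, filename)
        data_path = os.path.join(DEST_DIR, 'data')
        # --force removes both the cached archive and the extracted data first.
        if force and os.path.exists(download_path):
            os.unlink(download_path)
        if force and os.path.exists(data_path):
            shutil.rmtree(data_path)
        if os.path.exists(data_path):
            print('data already installed at %s, overwrite with --force' % DEST_DIR)
            sys.exit(1)
        install_data(url, DEST_DIR, download_path)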
@@ -19,7 +19,7 @@ class Lemmatizer(object):
             index[pos] = read_index(path.join(data_dir, 'wordnet', 'index.%s' % pos))
             exc[pos] = read_exc(path.join(data_dir, 'wordnet', '%s.exc' % pos))
         if path.exists(path.join(data_dir, 'vocab', 'lemma_rules.json')):
-            rules = json.load(open(path.join(data_dir, 'vocab', 'lemma_rules.json')))
+            rules = json.load(codecs.open(path.join(data_dir, 'vocab', 'lemma_rules.json'), encoding='utf_8'))
         else:
             rules = {}
         return cls(index, exc, rules)
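The Lemmatizer change above replaces the built-in open with codecs.open and an explicit utf_8 encoding, so lemma_rules.json is decoded as UTF-8 on both Python 2 and Python 3 instead of whatever the platform default happens to be. A small sketch of the same pattern (load_lemma_rules is a hypothetical helper name, not part of spaCy):

    import codecs
    import json
    from os import path

    def load_lemma_rules(data_dir):
        # Decode the JSON file explicitly as UTF-8 rather than relying on the
        # platform default encoding, mirroring the codecs.open call in the diff.
        rules_loc = path.join(data_dir, 'vocab', 'lemma_rules.json')
        if not path.exists(rules_loc):
            return {}
        with codecs.open(rules_loc, encoding='utf_8') as file_:
            return json.load(file_)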
@@ -120,6 +120,9 @@ cdef class Doc:
     def __str__(self):
         return u''.join([t.string for t in self])
 
+    def __repr__(self):
+        return u''.join([t.string for t in self])
+
     def similarity(self, other):
        if self.vector_norm == 0 or other.vector_norm == 0:
            return 0.0
@@ -46,6 +46,12 @@ cdef class Span:
             return 0
         return self.end - self.start
 
+    def __repr__(self):
+        text = self.text_with_ws
+        if self[-1].whitespace_:
+            text = text[:-1]
+        return text
+
     def __getitem__(self, object i):
         if isinstance(i, slice):
             start, end = normalize_slice(len(self), i.start, i.stop, i.step)
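The new Span.__repr__ returns the span's text with the trailing whitespace character (if any) stripped: text_with_ws includes the whitespace that follows the last token, and the whitespace_ check removes it again. The same trimming logic in plain Python, using a hypothetical stand-in for tokens rather than spaCy's Token objects:

    from collections import namedtuple

    # Hypothetical stand-in for a token: its text plus the whitespace after it
    # (assumed to be at most a single space, as in spaCy).
    Tok = namedtuple('Tok', ['text', 'whitespace_'])

    def span_repr(tokens):
        # Equivalent of text_with_ws: each token's text followed by its whitespace.
        text = u''.join(t.text + t.whitespace_ for t in tokens)
        # Mirror the added __repr__: drop the whitespace after the last token.
        if tokens[-1].whitespace_:
            text = text[:-1]
        return text

    # span_repr([Tok(u'New', u' '), Tok(u'York', u' ')]) == u'New York'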
@@ -43,6 +43,9 @@ cdef class Token:
     def __str__(self):
         return self.string
 
+    def __repr__(self):
+        return self.string
+
     cpdef bint check_flag(self, attr_id_t flag_id) except -1:
         return Lexeme.c_check_flag(self.c.lex, flag_id)
 
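The Doc, Span, and Token hunks all add a __repr__ alongside the existing __str__. The distinction matters because the interactive interpreter and containers echo objects via __repr__, not __str__, so without it these objects display in the default "<... object at 0x...>" form. A generic sketch (ordinary Python classes, not spaCy code) showing the difference:

    class WithStr(object):
        def __str__(self):
            return 'hello world'

    class WithBoth(WithStr):
        def __repr__(self):
            return 'hello world'

    print(str(WithStr()))    # hello world
    print(repr(WithStr()))   # <__main__.WithStr object at 0x...>  (default repr)
    print(repr(WithBoth()))  # hello world  (what the interactive prompt now shows)
    print([WithBoth()])      # [hello world]  (containers use repr for elements)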