# spaCy/spacy/util.py
# coding: utf8
from __future__ import unicode_literals, print_function
import os
import importlib
import re
from pathlib import Path
import random
from collections import OrderedDict
from thinc.neural._classes.model import Model
from thinc.neural.ops import NumpyOps
import functools
import itertools
import numpy.random
import srsly
import sys
try:
import jsonschema
except ImportError:
jsonschema = None
try:
import cupy.random
except ImportError:
cupy = None
from .symbols import ORTH
from .compat import cupy, CudaStream, path2str, basestring_, unicode_
from .compat import import_file, importlib_metadata
from .errors import Errors, Warnings, deprecation_warning
LANGUAGES = {}
ARCHITECTURES = {}
_data_path = Path(__file__).parent / "data"
_PRINT_ENV = False
# NB: Only ever call this once! If called more than once within the
# function, test_issue1506 hangs and it's not 100% clear why.
AVAILABLE_ENTRY_POINTS = importlib_metadata.entry_points()
class ENTRY_POINTS(object):
"""Available entry points to register extensions."""
factories = "spacy_factories"
languages = "spacy_languages"
displacy_colors = "spacy_displacy_colors"
lookups = "spacy_lookups"
architectures = "spacy_architectures"
def set_env_log(value):
global _PRINT_ENV
_PRINT_ENV = value
def lang_class_is_loaded(lang):
"""Check whether a Language class is already loaded. Language classes are
loaded lazily, to avoid expensive setup code associated with the language
data.
lang (unicode): Two-letter language code, e.g. 'en'.
RETURNS (bool): Whether a Language class has been loaded.
"""
global LANGUAGES
return lang in LANGUAGES
def get_lang_class(lang):
"""Import and load a Language class.
lang (unicode): Two-letter language code, e.g. 'en'.
RETURNS (Language): Language class.
"""
global LANGUAGES
# Check if an entry point is exposed for the language code
entry_point = get_entry_point(ENTRY_POINTS.languages, lang)
if entry_point is not None:
LANGUAGES[lang] = entry_point
return entry_point
if lang not in LANGUAGES:
try:
module = importlib.import_module(".lang.%s" % lang, "spacy")
except ImportError as err:
raise ImportError(Errors.E048.format(lang=lang, err=err))
LANGUAGES[lang] = getattr(module, module.__all__[0])
return LANGUAGES[lang]
def set_lang_class(name, cls):
"""Set a custom Language class name that can be loaded via get_lang_class.
name (unicode): Name of Language class.
cls (Language): Language class.
"""
global LANGUAGES
LANGUAGES[name] = cls
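
# Hypothetical usage sketch (not part of the public API of this module): shows
# how set_lang_class(), lang_class_is_loaded() and get_lang_class() fit
# together when registering a custom Language subclass. The "xx_custom" code
# and CustomLanguage class are illustrative assumptions.
def _example_register_custom_language():
    from .language import Language  # imported lazily to avoid a circular import

    class CustomLanguage(Language):
        lang = "xx_custom"

    set_lang_class("xx_custom", CustomLanguage)
    assert lang_class_is_loaded("xx_custom")
    return get_lang_class("xx_custom")
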
def get_data_path(require_exists=True):
"""Get path to spaCy data directory.
require_exists (bool): Only return path if it exists, otherwise None.
RETURNS (Path or None): Data path or None.
"""
if not require_exists:
return _data_path
else:
return _data_path if _data_path.exists() else None
def set_data_path(path):
"""Set path to spaCy data directory.
path (unicode or Path): Path to new data directory.
"""
global _data_path
_data_path = ensure_path(path)
def register_architecture(name, arch=None):
"""Decorator to register an architecture. An architecture is a function
that returns a Thinc Model object.
name (unicode): The name of the architecture to register.
arch (Model): Optional architecture if function is called directly and
not used as a decorator.
RETURNS (callable): Function to register architecture.
"""
global ARCHITECTURES
if arch is not None:
ARCHITECTURES[name] = arch
return arch
def do_registration(arch):
ARCHITECTURES[name] = arch
return arch
return do_registration
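
# Hypothetical sketch of the decorator pattern that register_architecture()
# enables. The "my_custom_tok2vec" name is illustrative; a real architecture
# would build and return a Thinc Model from its config.
def _example_register_architecture():
    @register_architecture("my_custom_tok2vec")
    def my_custom_tok2vec(config):
        # A real implementation would construct a network here.
        raise NotImplementedError

    # The same function is now retrievable by name via get_architecture().
    return get_architecture("my_custom_tok2vec")
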
def get_architecture(name):
"""Get a model architecture function by name. Raises a KeyError if the
architecture is not found.
name (unicode): The name of the architecture.
RETURNS (Model): The architecture.
"""
# Check if an entry point is exposed for the architecture code
entry_point = get_entry_point(ENTRY_POINTS.architectures, name)
if entry_point is not None:
ARCHITECTURES[name] = entry_point
if name not in ARCHITECTURES:
names = ", ".join(sorted(ARCHITECTURES.keys()))
raise KeyError(Errors.E174.format(name=name, names=names))
return ARCHITECTURES[name]
def ensure_path(path):
"""Ensure string is converted to a Path.
path: Anything. If string, it's converted to Path.
RETURNS: Path or original argument.
"""
if isinstance(path, basestring_):
return Path(path)
else:
return path
def load_language_data(path):
"""Load JSON language data using the given path as a base. If the provided
path isn't present, will attempt to load a gzipped version before giving up.
path (unicode / Path): The data to load.
RETURNS: The loaded data.
"""
path = ensure_path(path)
if path.exists():
return srsly.read_json(path)
path = path.with_suffix(path.suffix + ".gz")
if path.exists():
return srsly.read_gzip_json(path)
raise ValueError(Errors.E160.format(path=path2str(path)))
def get_module_path(module):
if not hasattr(module, "__module__"):
raise ValueError(Errors.E169.format(module=repr(module)))
return Path(sys.modules[module.__module__].__file__).parent
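
# Hypothetical sketch combining get_module_path() and load_language_data():
# `defaults_cls` stands for any class defined inside a language package (its
# __module__ is used to locate the package directory), and "lemma_lookup.json"
# is an illustrative filename.
def _example_load_lookup_table(defaults_cls):
    lookup_path = get_module_path(defaults_cls) / "lemma_lookup.json"
    # load_language_data() falls back to lemma_lookup.json.gz if only the
    # compressed file exists, and raises an error if neither is found.
    return load_language_data(lookup_path)
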
def load_model(name, **overrides):
"""Load a model from a shortcut link, package or data path.
name (unicode): Package name, shortcut link or model path.
**overrides: Specific overrides, like pipeline components to disable.
RETURNS (Language): `Language` class with the loaded model.
"""
data_path = get_data_path()
if not data_path or not data_path.exists():
raise IOError(Errors.E049.format(path=path2str(data_path)))
if isinstance(name, basestring_): # in data dir / shortcut
if name in set([d.name for d in data_path.iterdir()]):
return load_model_from_link(name, **overrides)
if is_package(name): # installed as package
return load_model_from_package(name, **overrides)
if Path(name).exists(): # path to model data directory
return load_model_from_path(Path(name), **overrides)
elif hasattr(name, "exists"): # Path or Path-like to model data
return load_model_from_path(name, **overrides)
raise IOError(Errors.E050.format(name=name))
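
# Hypothetical sketch of the look-ups load_model() performs, in order: shortcut
# link in the data directory, installed package, then plain path. The names
# "en", "en_core_web_sm" and "/tmp/my_model" are illustrative only.
def _example_load_model_variants():
    nlp_link = load_model("en")  # shortcut link in the data directory
    nlp_pkg = load_model("en_core_web_sm", disable=["parser"])  # pip package
    nlp_path = load_model("/tmp/my_model")  # model data directory
    return nlp_link, nlp_pkg, nlp_path
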
def load_model_from_link(name, **overrides):
"""Load a model from a shortcut link, or directory in spaCy data path."""
path = get_data_path() / name / "__init__.py"
try:
cls = import_file(name, path)
except AttributeError:
raise IOError(Errors.E051.format(name=name))
return cls.load(**overrides)
def load_model_from_package(name, **overrides):
"""Load a model from an installed package."""
cls = importlib.import_module(name)
return cls.load(**overrides)
def load_model_from_path(model_path, meta=False, **overrides):
"""Load a model from a data directory path. Creates Language class with
pipeline from meta.json and then calls from_disk() with path."""
if not meta:
meta = get_model_meta(model_path)
# Support language factories registered via entry points (e.g. custom
# language subclass) while keeping top-level language identifier "lang"
lang = meta.get("lang_factory", meta["lang"])
cls = get_lang_class(lang)
nlp = cls(meta=meta, **overrides)
pipeline = meta.get("pipeline", [])
disable = overrides.get("disable", [])
if pipeline is True:
pipeline = nlp.Defaults.pipe_names
elif pipeline in (False, None):
pipeline = []
for name in pipeline:
if name not in disable:
config = meta.get("pipeline_args", {}).get(name, {})
component = nlp.create_pipe(name, config=config)
nlp.add_pipe(component, name=name)
return nlp.from_disk(model_path)
def load_model_from_init_py(init_file, **overrides):
"""Helper function to use in the `load()` method of a model package's
__init__.py.
init_file (unicode): Path to model's __init__.py, i.e. `__file__`.
**overrides: Specific overrides, like pipeline components to disable.
RETURNS (Language): `Language` class with loaded model.
"""
model_path = Path(init_file).parent
meta = get_model_meta(model_path)
data_dir = "%s_%s-%s" % (meta["lang"], meta["name"], meta["version"])
data_path = model_path / data_dir
if not model_path.exists():
raise IOError(Errors.E052.format(path=path2str(data_path)))
return load_model_from_path(data_path, meta, **overrides)
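
# Sketch of the `load()` entry point that a model package would define in its
# own __init__.py, which is where load_model_from_init_py() is intended to be
# called from (`__file__` there points at the package's __init__.py):
#
#     from spacy.util import load_model_from_init_py
#
#     def load(**overrides):
#         return load_model_from_init_py(__file__, **overrides)
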
def get_model_meta(path):
"""Get model meta.json from a directory path and validate its contents.
path (unicode or Path): Path to model directory.
RETURNS (dict): The model's meta data.
"""
model_path = ensure_path(path)
if not model_path.exists():
raise IOError(Errors.E052.format(path=path2str(model_path)))
meta_path = model_path / "meta.json"
if not meta_path.is_file():
raise IOError(Errors.E053.format(path=meta_path))
meta = srsly.read_json(meta_path)
for setting in ["lang", "name", "version"]:
if setting not in meta or not meta[setting]:
raise ValueError(Errors.E054.format(setting=setting))
return meta
def is_package(name):
"""Check if string maps to a package installed via pip.
name (unicode): Name of package.
RETURNS (bool): True if installed package, False if not.
"""
import pkg_resources
name = name.lower() # compare package name against lowercase name
packages = pkg_resources.working_set.by_key.keys()
for package in packages:
if package.lower().replace("-", "_") == name:
return True
return False
def get_package_path(name):
"""Get the path to an installed package.
name (unicode): Package name.
RETURNS (Path): Path to installed package.
"""
name = name.lower() # use lowercase version to be safe
# Here we're importing the module just to find it. This is worryingly
# indirect, but it's otherwise very difficult to find the package.
pkg = importlib.import_module(name)
return Path(pkg.__file__).parent
def get_entry_points(key):
"""Get registered entry points from other packages for a given key, e.g.
'spacy_factories' and return them as a dictionary, keyed by name.
key (unicode): Entry point name.
RETURNS (dict): Entry points, keyed by name.
"""
result = {}
for entry_point in AVAILABLE_ENTRY_POINTS.get(key, []):
result[entry_point.name] = entry_point.load()
return result
def get_entry_point(key, value, default=None):
"""Check if registered entry point is available for a given name and
load it. Otherwise, return the default value.
key (unicode): Entry point name.
value (unicode): Name of entry point to load.
default: Optional default value to return.
RETURNS: The loaded entry point or the default value.
"""
for entry_point in AVAILABLE_ENTRY_POINTS.get(key, []):
if entry_point.name == value:
return entry_point.load()
return default
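
# Sketch of how a third-party package could expose the entry points consumed
# by get_entry_points()/get_entry_point(), via its own setup.py. The package,
# module and object names are illustrative assumptions.
#
#     setup(
#         name="spacy-snek",
#         entry_points={
#             "spacy_factories": ["snek = snek:SnekFactory"],
#             "spacy_languages": ["sk = snek:SnekLanguage"],
#         },
#     )
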
def is_in_jupyter():
"""Check if user is running spaCy from a Jupyter notebook by detecting the
IPython kernel. Mainly used for the displaCy visualizer.
RETURNS (bool): True if in Jupyter, False if not.
"""
# https://stackoverflow.com/a/39662359/6400719
try:
shell = get_ipython().__class__.__name__
if shell == "ZMQInteractiveShell":
return True # Jupyter notebook or qtconsole
except NameError:
return False # Probably standard Python interpreter
return False
def get_cuda_stream(require=False):
if CudaStream is None:
return None
elif isinstance(Model.ops, NumpyOps):
return None
else:
return CudaStream()
def get_async(stream, numpy_array):
if cupy is None:
return numpy_array
else:
array = cupy.ndarray(numpy_array.shape, order="C", dtype=numpy_array.dtype)
array.set(numpy_array, stream=stream)
return array
def env_opt(name, default=None):
if type(default) is float:
type_convert = float
else:
type_convert = int
if "SPACY_" + name.upper() in os.environ:
value = type_convert(os.environ["SPACY_" + name.upper()])
if _PRINT_ENV:
print(name, "=", repr(value), "via", "$SPACY_" + name.upper())
return value
elif name in os.environ:
value = type_convert(os.environ[name])
if _PRINT_ENV:
print(name, "=", repr(value), "via", "$" + name)
return value
else:
if _PRINT_ENV:
print(name, "=", repr(default), "by default")
return default
def read_regex(path):
path = ensure_path(path)
with path.open() as file_:
entries = file_.read().split("\n")
expression = "|".join(
["^" + re.escape(piece) for piece in entries if piece.strip()]
)
return re.compile(expression)
def compile_prefix_regex(entries):
"""Compile a sequence of prefix rules into a regex object.
entries (tuple): The prefix rules, e.g. spacy.lang.punctuation.TOKENIZER_PREFIXES.
RETURNS (regex object): The regex object to be used for Tokenizer.prefix_search.
"""
if "(" in entries:
# Handle deprecated data
expression = "|".join(
["^" + re.escape(piece) for piece in entries if piece.strip()]
)
return re.compile(expression)
else:
expression = "|".join(["^" + piece for piece in entries if piece.strip()])
return re.compile(expression)
def compile_suffix_regex(entries):
"""Compile a sequence of suffix rules into a regex object.
entries (tuple): The suffix rules, e.g. spacy.lang.punctuation.TOKENIZER_SUFFIXES.
RETURNS (regex object): The regex object to be used for Tokenizer.suffix_search.
"""
expression = "|".join([piece + "$" for piece in entries if piece.strip()])
return re.compile(expression)
def compile_infix_regex(entries):
"""Compile a sequence of infix rules into a regex object.
entries (tuple): The infix rules, e.g. spacy.lang.punctuation.TOKENIZER_INFIXES.
RETURNS (regex object): The regex object to be used for Tokenizer.infix_finditer.
"""
expression = "|".join([piece for piece in entries if piece.strip()])
return re.compile(expression)
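
# Hypothetical sketch of how the compile_*_regex() helpers are wired into a
# tokenizer: the compiled objects' .search/.finditer methods become the
# tokenizer's prefix_search, suffix_search and infix_finditer callbacks.
def _example_customize_tokenizer(nlp, extra_suffixes=("--",)):
    suffixes = tuple(nlp.Defaults.suffixes) + tuple(extra_suffixes)
    suffix_regex = compile_suffix_regex(suffixes)
    nlp.tokenizer.suffix_search = suffix_regex.search
    return nlp
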
def add_lookups(default_func, *lookups):
"""Extend an attribute function with special cases. If a word is in the
lookups, the value is returned. Otherwise the previous function is used.
default_func (callable): The default function to execute.
*lookups (dict): Lookup dictionary mapping string to attribute value.
RETURNS (callable): Lexical attribute getter.
"""
# This is implemented as functools.partial instead of a closure, to allow
# pickle to work.
return functools.partial(_get_attr_unless_lookup, default_func, lookups)
def _get_attr_unless_lookup(default_func, lookups, string):
for lookup in lookups:
if string in lookup:
return lookup[string]
return default_func(string)
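
# Hypothetical sketch of add_lookups(): a lexical attribute getter is extended
# with a table of exceptions, so the table wins when it has an entry and the
# original getter is used otherwise. The example table is illustrative.
def _example_norm_with_exceptions():
    norm_exceptions = {"dont": "do not"}

    def base_getter(string):
        return string.lower()

    get_norm = add_lookups(base_getter, norm_exceptions)
    assert get_norm("dont") == "do not"
    assert get_norm("Hello") == "hello"
    return get_norm
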
def update_exc(base_exceptions, *addition_dicts):
"""Update and validate tokenizer exceptions. Will overwrite exceptions.
base_exceptions (dict): Base exceptions.
*addition_dicts (dict): Exceptions to add to the base dict, in order.
RETURNS (dict): Combined tokenizer exceptions.
"""
exc = dict(base_exceptions)
for additions in addition_dicts:
for orth, token_attrs in additions.items():
if not all(isinstance(attr[ORTH], unicode_) for attr in token_attrs):
raise ValueError(Errors.E055.format(key=orth, orths=token_attrs))
described_orth = "".join(attr[ORTH] for attr in token_attrs)
if orth != described_orth:
raise ValueError(Errors.E056.format(key=orth, orths=described_orth))
exc.update(additions)
exc = expand_exc(exc, "'", "")
return exc
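
# Hypothetical sketch of update_exc(): base exceptions are merged with
# language-specific additions, and each added entry is validated so that the
# ORTH values of its sub-tokens join back into the original key.
def _example_merge_tokenizer_exceptions():
    base = {"a.m.": [{ORTH: "a.m."}]}
    additions = {"gonna": [{ORTH: "gon"}, {ORTH: "na"}]}
    exc = update_exc(base, additions)
    assert "gonna" in exc and "a.m." in exc
    return exc
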
def expand_exc(excs, search, replace):
"""Find string in tokenizer exceptions, duplicate entry and replace string.
For example, to add additional versions with typographic apostrophes.
excs (dict): Tokenizer exceptions.
search (unicode): String to find and replace.
replace (unicode): Replacement.
RETURNS (dict): Combined tokenizer exceptions.
"""
def _fix_token(token, search, replace):
fixed = dict(token)
fixed[ORTH] = fixed[ORTH].replace(search, replace)
return fixed
new_excs = dict(excs)
for token_string, tokens in excs.items():
if search in token_string:
new_key = token_string.replace(search, replace)
new_value = [_fix_token(t, search, replace) for t in tokens]
new_excs[new_key] = new_value
return new_excs
def normalize_slice(length, start, stop, step=None):
if not (step is None or step == 1):
raise ValueError(Errors.E057)
if start is None:
start = 0
elif start < 0:
start += length
start = min(length, max(0, start))
if stop is None:
stop = length
elif stop < 0:
stop += length
stop = min(length, max(start, stop))
return start, stop
def minibatch(items, size=8):
"""Iterate over batches of items. `size` may be an iterator,
so that batch-size can vary on each step.
"""
if isinstance(size, int):
size_ = itertools.repeat(size)
else:
size_ = size
items = iter(items)
while True:
batch_size = next(size_)
batch = list(itertools.islice(items, int(batch_size)))
if len(batch) == 0:
break
yield list(batch)
def compounding(start, stop, compound):
"""Yield an infinite series of compounding values. Each time the
generator is called, a value is produced by multiplying the previous
value by the compound rate.
EXAMPLE:
>>> sizes = compounding(1., 10., 1.5)
>>> assert next(sizes) == 1.
>>> assert next(sizes) == 1 * 1.5
>>> assert next(sizes) == 1.5 * 1.5
"""
def clip(value):
return max(value, stop) if (start > stop) else min(value, stop)
curr = float(start)
while True:
yield clip(curr)
curr *= compound
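
# Hypothetical training-loop sketch combining minibatch() and compounding():
# the compounding generator is passed as `size`, so each batch drawn from the
# data is slightly larger than the previous one, up to the cap of 32.
def _example_compounding_batches(train_data):
    batch_sizes = compounding(4.0, 32.0, 1.001)
    for batch in minibatch(train_data, size=batch_sizes):
        yield batch
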
def stepping(start, stop, steps):
"""Yield an infinite series of values that step from a start value to a
final value over some number of steps. Each step is (stop-start)/steps.
After the final value is reached, the generator continues yielding that
value.
EXAMPLE:
>>> sizes = stepping(1., 200., 100)
>>> assert next(sizes) == 1.
>>> assert next(sizes) == 1 + (200.-1.) / 100
>>> assert next(sizes) == 1 + (200.-1.) / 100 + (200.-1.) / 100
"""
def clip(value):
return max(value, stop) if (start > stop) else min(value, stop)
curr = float(start)
while True:
yield clip(curr)
curr += (stop - start) / steps
def decaying(start, stop, decay):
"""Yield an infinite series of linearly decaying values."""
curr = float(start)
while True:
yield max(curr, stop)
curr -= decay
def minibatch_by_words(items, size, tuples=True, count_words=len):
"""Create minibatches of a given number of words."""
if isinstance(size, int):
size_ = itertools.repeat(size)
else:
size_ = size
items = iter(items)
while True:
batch_size = next(size_)
batch = []
while batch_size >= 0:
try:
if tuples:
doc, gold = next(items)
else:
doc = next(items)
except StopIteration:
if batch:
yield batch
return
batch_size -= count_words(doc)
if tuples:
batch.append((doc, gold))
else:
batch.append(doc)
if batch:
yield batch
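
# Hypothetical sketch of minibatch_by_words(): batches of (doc, gold) tuples
# are capped by total word count rather than by number of items, which keeps
# the amount of text per batch roughly constant.
def _example_word_budget_batches(train_docs, words_per_batch=1000):
    for batch in minibatch_by_words(train_docs, size=words_per_batch):
        # Each batch is a list of (doc, gold) tuples whose docs together
        # contain roughly `words_per_batch` words.
        yield batch
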
def itershuffle(iterable, bufsize=1000):
"""Shuffle an iterator. This works by holding `bufsize` items back
and yielding them sometime later. Obviously, this is not unbiased
but should be good enough for batching. Larger bufsize means less bias.
From https://gist.github.com/andres-erbsen/1307752
iterable (iterable): Iterator to shuffle.
bufsize (int): Items to hold back.
YIELDS (iterable): The shuffled iterator.
"""
iterable = iter(iterable)
buf = []
try:
while True:
for i in range(random.randint(1, bufsize - len(buf))):
buf.append(next(iterable))
random.shuffle(buf)
for i in range(random.randint(1, bufsize)):
if buf:
yield buf.pop()
else:
break
except StopIteration:
random.shuffle(buf)
while buf:
yield buf.pop()
# NB: don't re-raise StopIteration here. Under PEP 479, raising
# StopIteration inside a generator surfaces as a RuntimeError.
def filter_spans(spans):
"""Filter a sequence of spans and remove duplicates or overlaps. Useful for
creating named entities (where one token can only be part of one entity) or
when merging spans with `Retokenizer.merge`. When spans overlap, the (first)
longest span is preferred over shorter spans.
spans (iterable): The spans to filter.
RETURNS (list): The filtered spans.
"""
get_sort_key = lambda span: (span.end - span.start, -span.start)
sorted_spans = sorted(spans, key=get_sort_key, reverse=True)
result = []
seen_tokens = set()
for span in sorted_spans:
# Check end - 1 here because span.end is exclusive; the last token is at end - 1
if span.start not in seen_tokens and span.end - 1 not in seen_tokens:
result.append(span)
seen_tokens.update(range(span.start, span.end))
result = sorted(result, key=lambda span: span.start)
return result
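
# Hypothetical sketch of filter_spans() together with the retokenizer:
# overlapping matcher spans are reduced to a non-overlapping set
# (longest-first) before merging, because a token can only belong to one span.
def _example_merge_matched_spans(doc, matcher):
    spans = [doc[start:end] for _, start, end in matcher(doc)]
    with doc.retokenize() as retokenizer:
        for span in filter_spans(spans):
            retokenizer.merge(span)
    return doc
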
def to_bytes(getters, exclude):
serialized = OrderedDict()
for key, getter in getters.items():
# Split to support file names like meta.json
if key.split(".")[0] not in exclude:
serialized[key] = getter()
return srsly.msgpack_dumps(serialized)
def from_bytes(bytes_data, setters, exclude):
msg = srsly.msgpack_loads(bytes_data)
for key, setter in setters.items():
# Split to support file names like meta.json
if key.split(".")[0] not in exclude and key in msg:
setter(msg[key])
return msg
def to_disk(path, writers, exclude):
path = ensure_path(path)
if not path.exists():
path.mkdir()
for key, writer in writers.items():
# Split to support file names like meta.json
if key.split(".")[0] not in exclude:
writer(path / key)
return path
def from_disk(path, readers, exclude):
path = ensure_path(path)
for key, reader in readers.items():
# Split to support file names like meta.json
if key.split(".")[0] not in exclude:
reader(path / key)
return path
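
# Hypothetical sketch of a pipeline component built on the to_bytes()/
# from_bytes() helpers above: getters and setters are keyed by name, and
# `exclude` drops individual keys. The `cfg` attribute is illustrative.
class _ExampleSerializableComponent(object):
    def __init__(self):
        self.cfg = {"setting": True}

    def to_bytes(self, exclude=tuple()):
        getters = OrderedDict()
        getters["cfg"] = lambda: srsly.json_dumps(self.cfg)
        return to_bytes(getters, exclude)  # calls the module-level helper

    def from_bytes(self, bytes_data, exclude=tuple()):
        setters = OrderedDict()
        setters["cfg"] = lambda b: self.cfg.update(srsly.json_loads(b))
        from_bytes(bytes_data, setters, exclude)  # module-level helper
        return self
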
def minify_html(html):
"""Perform a template-specific, rudimentary HTML minification for displaCy.
Disclaimer: NOT a general-purpose solution, only removes indentation and
newlines.
html (unicode): Markup to minify.
RETURNS (unicode): "Minified" HTML.
"""
return html.strip().replace("    ", "").replace("\n", "")
def escape_html(text):
"""Replace <, >, &, " with their HTML encoded representation. Intended to
prevent HTML errors in rendered displaCy markup.
text (unicode): The original text.
RETURNS (unicode): Equivalent text to be safely used within HTML.
"""
text = text.replace("&", "&amp;")
text = text.replace("<", "&lt;")
text = text.replace(">", "&gt;")
text = text.replace('"', "&quot;")
return text
def use_gpu(gpu_id):
try:
import cupy.cuda.device
except ImportError:
return None
from thinc.neural.ops import CupyOps
device = cupy.cuda.device.Device(gpu_id)
device.use()
Model.ops = CupyOps()
Model.Ops = CupyOps
return device
def fix_random_seed(seed=0):
random.seed(seed)
numpy.random.seed(seed)
if cupy is not None:
cupy.random.seed(seed)
def get_json_validator(schema):
# We're using a helper function here to make it easier to change the
# validator that's used (e.g. different draft implementation), without
# having to change it all across the codebase.
# TODO: replace with (stable) Draft6Validator, if available
if jsonschema is None:
raise ValueError(Errors.E136)
return jsonschema.Draft4Validator(schema)
def validate_schema(schema):
"""Validate a given schema. This just checks if the schema itself is valid."""
validator = get_json_validator(schema)
validator.check_schema(schema)
def validate_json(data, validator):
"""Validate data against a given JSON schema (see https://json-schema.org).
data: JSON-serializable data to validate.
validator (jsonschema.DraftXValidator): The validator.
RETURNS (list): A list of error messages, if available.
"""
errors = []
for err in sorted(validator.iter_errors(data), key=lambda e: e.path):
if err.path:
err_path = "[{}]".format(" -> ".join([str(p) for p in err.path]))
else:
err_path = ""
msg = err.message + " " + err_path
if err.context: # Error has suberrors, e.g. if schema uses anyOf
suberrs = [" - {}".format(suberr.message) for suberr in err.context]
msg += ":\n{}".format("".join(suberrs))
errors.append(msg)
return errors
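
# Hypothetical sketch of the JSON validation helpers: a schema is compiled once
# with get_json_validator() and reused to collect human-readable error
# messages. The schema shown here is illustrative.
def _example_validate_meta(data):
    schema = {
        "type": "object",
        "properties": {"lang": {"type": "string"}, "version": {"type": "string"}},
        "required": ["lang"],
    }
    validator = get_json_validator(schema)
    # Returns a list of message strings; an empty list means the data is valid.
    return validate_json(data, validator)
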
def get_serialization_exclude(serializers, exclude, kwargs):
"""Helper function to validate serialization args and manage transition from
keyword arguments (pre v2.1) to exclude argument.
"""
exclude = list(exclude)
# Split to support file names like meta.json
options = [name.split(".")[0] for name in serializers]
for key, value in kwargs.items():
if key in ("vocab",) and value is False:
deprecation_warning(Warnings.W015.format(arg=key))
exclude.append(key)
elif key.split(".")[0] in options:
raise ValueError(Errors.E128.format(arg=key))
# TODO: user warning?
return exclude
class SimpleFrozenDict(dict):
"""Simplified implementation of a frozen dict, mainly used as default
function or method argument (for arguments that should default to empty
dictionary). Will raise an error if user or spaCy attempts to add to dict.
"""
def __setitem__(self, key, value):
raise NotImplementedError(Errors.E095)
def pop(self, key, default=None):
raise NotImplementedError(Errors.E095)
def update(self, other):
raise NotImplementedError(Errors.E095)
class DummyTokenizer(object):
# add dummy methods for to_bytes, from_bytes, to_disk and from_disk to
# allow serialization (see #1557)
def to_bytes(self, **kwargs):
return b""
def from_bytes(self, _bytes_data, **kwargs):
return self
def to_disk(self, _path, **kwargs):
return None
def from_disk(self, _path, **kwargs):
return self