from typing import List, Union, Dict, Any, Optional, Iterable, Callable, Tuple
from typing import Iterator, Type, Pattern, Sequence, TYPE_CHECKING
from types import ModuleType
import os
import importlib
import importlib.util
import re
from pathlib import Path
import thinc
from thinc.api import NumpyOps, get_current_ops, Adam, Config, Optimizer
import functools
import itertools
import numpy.random
import numpy
import srsly
import catalogue
import sys
import warnings
from packaging.specifiers import SpecifierSet, InvalidSpecifier
from packaging.version import Version, InvalidVersion
import subprocess
from contextlib import contextmanager
import tempfile
import shutil
import shlex
import inspect

try:
    import cupy.random
except ImportError:
    cupy = None

try:  # Python 3.8
    import importlib.metadata as importlib_metadata
except ImportError:
    import importlib_metadata

# These are functions that were previously (v2.x) available from spacy.util
# and have since moved to Thinc. We're importing them here so people's code
# doesn't break, but they should always be imported from Thinc from now on,
# not from spacy.util.
from thinc.api import fix_random_seed, compounding, decaying  # noqa: F401

from .symbols import ORTH
from .compat import cupy, CudaStream, is_windows
from .errors import Errors, Warnings
from . import about

if TYPE_CHECKING:
    # This lets us add type hints for mypy etc. without causing circular imports
    from .language import Language  # noqa: F401
    from .tokens import Doc, Span  # noqa: F401
    from .vocab import Vocab  # noqa: F401


_PRINT_ENV = False
OOV_RANK = numpy.iinfo(numpy.uint64).max
LEXEME_NORM_LANGS = ["da", "de", "el", "en", "id", "lb", "pt", "ru", "sr", "ta", "th"]


class registry(thinc.registry):
    languages = catalogue.create("spacy", "languages", entry_points=True)
    architectures = catalogue.create("spacy", "architectures", entry_points=True)
    tokenizers = catalogue.create("spacy", "tokenizers", entry_points=True)
    lemmatizers = catalogue.create("spacy", "lemmatizers", entry_points=True)
    lookups = catalogue.create("spacy", "lookups", entry_points=True)
    displacy_colors = catalogue.create("spacy", "displacy_colors", entry_points=True)
    assets = catalogue.create("spacy", "assets", entry_points=True)
    # These are factories registered via third-party packages and the
    # spacy_factories entry point. This registry only exists so we can easily
    # load them via the entry points. The "true" factories are added via the
    # Language.factory decorator (in the spaCy code base and user code) and those
    # are the factories used to initialize components via registry.make_from_config.
    _entry_point_factories = catalogue.create("spacy", "factories", entry_points=True)
    factories = catalogue.create("spacy", "internal_factories")
    # This is mostly used to get a list of all installed models in the current
    # environment. spaCy models packaged with `spacy package` will "advertise"
    # themselves via entry points.
    models = catalogue.create("spacy", "models", entry_points=True)
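

# Usage sketch (illustrative; "xx_custom" is a made-up name): entries can be
# registered via the catalogue decorator API and retrieved by name, e.g.:
#
#     @registry.languages.register("xx_custom")
#     class CustomLanguage(Language):
#         lang = "xx_custom"
#
#     lang_cls = registry.languages.get("xx_custom")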


class SimpleFrozenDict(dict):
    """Simplified implementation of a frozen dict, mainly used as default
    function or method argument (for arguments that should default to an empty
    dictionary). Will raise an error if the user or spaCy attempts to add to
    the dict.
    """

    def __init__(self, *args, error: str = Errors.E095, **kwargs) -> None:
        """Initialize the frozen dict. Can be initialized with pre-defined
        values.

        error (str): The error message when the user tries to assign to the dict.
        """
        super().__init__(*args, **kwargs)
        self.error = error

    def __setitem__(self, key, value):
        raise NotImplementedError(self.error)

    def pop(self, key, default=None):
        raise NotImplementedError(self.error)

    def update(self, other):
        raise NotImplementedError(self.error)
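

# Usage sketch (illustrative): mutating a SimpleFrozenDict raises the
# configured error, which makes it safe as a shared default argument.
#
#     frozen = SimpleFrozenDict(foo="bar")
#     frozen["baz"] = 1   # raises NotImplementedError(Errors.E095)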


def set_env_log(value: bool) -> None:
    """Toggle whether env_opt prints the settings it reads."""
    global _PRINT_ENV
    _PRINT_ENV = value


def lang_class_is_loaded(lang: str) -> bool:
    """Check whether a Language class is already loaded. Language classes are
    loaded lazily, to avoid expensive setup code associated with the language
    data.

    lang (str): Two-letter language code, e.g. 'en'.
    RETURNS (bool): Whether a Language class has been loaded.
    """
    return lang in registry.languages


def get_lang_class(lang: str) -> "Language":
    """Import and load a Language class.

    lang (str): Two-letter language code, e.g. 'en'.
    RETURNS (Language): Language class.
    """
    # Check if language is registered / entry point is available
    if lang in registry.languages:
        return registry.languages.get(lang)
    else:
        try:
            module = importlib.import_module(f".lang.{lang}", "spacy")
        except ImportError as err:
            raise ImportError(Errors.E048.format(lang=lang, err=err))
        set_lang_class(lang, getattr(module, module.__all__[0]))
        return registry.languages.get(lang)
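

# Usage sketch (illustrative):
#
#     English = get_lang_class("en")   # lazily imports spacy.lang.en
#     nlp = English()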


def set_lang_class(name: str, cls: Type["Language"]) -> None:
    """Set a custom Language class name that can be loaded via get_lang_class.

    name (str): Name of Language class.
    cls (Language): Language class.
    """
    registry.languages.register(name, func=cls)


def ensure_path(path: Any) -> Any:
    """Ensure string is converted to a Path.

    path (Any): Anything. If string, it's converted to Path.
    RETURNS: Path or original argument.
    """
    if isinstance(path, str):
        return Path(path)
    else:
        return path


def load_language_data(path: Union[str, Path]) -> Union[dict, list]:
    """Load JSON language data using the given path as a base. If the provided
    path isn't present, will attempt to load a gzipped version before giving up.

    path (str / Path): The data to load.
    RETURNS: The loaded data.
    """
    path = ensure_path(path)
    if path.exists():
        return srsly.read_json(path)
    path = path.with_suffix(path.suffix + ".gz")
    if path.exists():
        return srsly.read_gzip_json(path)
    raise ValueError(Errors.E160.format(path=path))
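

# Usage sketch (illustrative; the path is made up):
#
#     data = load_language_data("spacy/lang/tr/lemma_lookup.json")
#     # Falls back to lemma_lookup.json.gz if the plain file is missing.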


def get_module_path(module: ModuleType) -> Path:
    """Get the path of a Python module.

    module (ModuleType): The Python module.
    RETURNS (Path): The path.
    """
    if not hasattr(module, "__module__"):
        raise ValueError(Errors.E169.format(module=repr(module)))
    return Path(sys.modules[module.__module__].__file__).parent


def load_model(
    name: Union[str, Path],
    disable: Iterable[str] = tuple(),
    component_cfg: Dict[str, Dict[str, Any]] = SimpleFrozenDict(),
) -> "Language":
    """Load a model from a package or data path.

    name (str): Package name or model path.
    disable (Iterable[str]): Names of pipeline components to disable.
    component_cfg (Dict[str, dict]): Config overrides for pipeline components,
        keyed by component names.
    RETURNS (Language): The loaded nlp object.
    """
    cfg = component_cfg
    if isinstance(name, str):  # name or string path
        if name.startswith("blank:"):  # shortcut for blank model
            return get_lang_class(name.replace("blank:", ""))()
        if is_package(name):  # installed as package
            return load_model_from_package(name, disable=disable, component_cfg=cfg)
        if Path(name).exists():  # path to model data directory
            return load_model_from_path(Path(name), disable=disable, component_cfg=cfg)
    elif hasattr(name, "exists"):  # Path or Path-like to model data
        return load_model_from_path(name, disable=disable, component_cfg=cfg)
    raise IOError(Errors.E050.format(name=name))
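

# Usage sketch (illustrative; "en_core_web_sm" assumes that package is installed):
#
#     nlp = load_model("blank:en")              # blank pipeline for a language
#     nlp = load_model("en_core_web_sm")        # installed model package
#     nlp = load_model(Path("/path/to/model"))  # model data directory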


def load_model_from_package(
    name: str,
    disable: Iterable[str] = tuple(),
    component_cfg: Dict[str, Dict[str, Any]] = SimpleFrozenDict(),
) -> "Language":
    """Load a model from an installed package."""
    cls = importlib.import_module(name)
    return cls.load(disable=disable, component_cfg=component_cfg)


def load_model_from_path(
    model_path: Union[str, Path],
    meta: Optional[Dict[str, Any]] = None,
    disable: Iterable[str] = tuple(),
    component_cfg: Dict[str, Dict[str, Any]] = SimpleFrozenDict(),
) -> "Language":
    """Load a model from a data directory path. Creates the Language class and
    pipeline from config.cfg, then calls from_disk() with the path."""
    if not model_path.exists():
        raise IOError(Errors.E052.format(path=model_path))
    if not meta:
        meta = get_model_meta(model_path)
    config_path = model_path / "config.cfg"
    if not config_path.exists() or not config_path.is_file():
        raise IOError(Errors.E053.format(path=config_path, name="config.cfg"))
    config = Config().from_disk(config_path)
    override_cfg = {"components": {p: dict_to_dot(c) for p, c in component_cfg.items()}}
    overrides = dict_to_dot(override_cfg)
    nlp, _ = load_model_from_config(config, disable=disable, overrides=overrides)
    return nlp.from_disk(model_path, exclude=disable)


def load_model_from_config(
    config: Union[Dict[str, Any], Config],
    disable: Iterable[str] = tuple(),
    overrides: Dict[str, Any] = {},
    auto_fill: bool = False,
    validate: bool = True,
) -> Tuple["Language", Config]:
    """Create an nlp object from a config. Expects the full config file including
    a section "nlp" containing the settings for the nlp object.
    """
    if "nlp" not in config:
        raise ValueError(Errors.E985.format(config=config))
    nlp_config = config["nlp"]
    if "lang" not in nlp_config:
        raise ValueError(Errors.E993.format(config=nlp_config))
    # This will automatically handle all codes registered via the languages
    # registry, including custom subclasses provided via entry points
    lang_cls = get_lang_class(nlp_config["lang"])
    nlp = lang_cls.from_config(
        config,
        disable=disable,
        overrides=overrides,
        auto_fill=auto_fill,
        validate=validate,
    )
    return nlp, nlp.resolved
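

# Usage sketch (illustrative; a minimal config, assuming auto_fill supplies
# the remaining defaults):
#
#     config = {"nlp": {"lang": "en", "pipeline": []}}
#     nlp, resolved = load_model_from_config(config, auto_fill=True)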


def load_model_from_init_py(
    init_file: Union[Path, str],
    disable: Iterable[str] = tuple(),
    component_cfg: Dict[str, Dict[str, Any]] = SimpleFrozenDict(),
) -> "Language":
    """Helper function to use in the `load()` method of a model package's
    __init__.py.

    init_file (str): Path to model's __init__.py, i.e. `__file__`.
    disable (Iterable[str]): Names of pipeline components to disable.
    component_cfg (Dict[str, dict]): Config overrides for pipeline components,
        keyed by component names.
    RETURNS (Language): `Language` class with loaded model.
    """
    model_path = Path(init_file).parent
    meta = get_model_meta(model_path)
    data_dir = f"{meta['lang']}_{meta['name']}-{meta['version']}"
    data_path = model_path / data_dir
    if not model_path.exists():
        raise IOError(Errors.E052.format(path=data_path))
    return load_model_from_path(
        data_path, meta, disable=disable, component_cfg=component_cfg
    )


def get_installed_models() -> List[str]:
    """List all model packages currently installed in the environment.

    RETURNS (List[str]): The string names of the models.
    """
    return list(registry.models.get_all().keys())


def get_package_version(name: str) -> Optional[str]:
    """Get the version of an installed package. Typically used to get model
    package versions.

    name (str): The name of the installed Python package.
    RETURNS (str / None): The version or None if package not installed.
    """
    try:
        return importlib_metadata.version(name)
    except importlib_metadata.PackageNotFoundError:
        return None


def is_compatible_version(
    version: str, constraint: str, prereleases: bool = True
) -> Optional[bool]:
    """Check if a version (e.g. "2.0.0") is compatible given a version
    constraint (e.g. ">=1.9.0,<2.2.1"). If the constraint is a specific version,
    it's interpreted as =={version}.

    version (str): The version to check.
    constraint (str): The constraint string.
    prereleases (bool): Whether to allow prereleases. If set to False,
        prerelease versions will be considered incompatible.
    RETURNS (bool / None): Whether the version is compatible, or None if the
        version or constraint are invalid.
    """
    # Handle cases where exact version is provided as constraint
    if constraint[0].isdigit():
        constraint = f"=={constraint}"
    try:
        spec = SpecifierSet(constraint)
        version = Version(version)
    except (InvalidSpecifier, InvalidVersion):
        return None
    spec.prereleases = prereleases
    return version in spec
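

# Usage sketch (illustrative):
#
#     is_compatible_version("3.0.0", ">=2.3.0,<3.1.0")  # True
#     is_compatible_version("3.0.0", "2.3.0")           # False (read as ==2.3.0)
#     is_compatible_version("3.0.0", "not a spec")      # None (invalid constraint)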


def is_unconstrained_version(
    constraint: str, prereleases: bool = True
) -> Optional[bool]:
    """Check whether a version constraint has no upper bound, i.e. whether it
    would also match arbitrarily high future versions.

    constraint (str): The constraint string, e.g. ">=2.3.0".
    prereleases (bool): Whether to allow prereleases.
    RETURNS (bool / None): Whether the constraint is unconstrained, or None if
        the constraint is invalid.
    """
    # We have an exact version, this is the ultimate constrained version
    if constraint[0].isdigit():
        return False
    try:
        spec = SpecifierSet(constraint)
    except InvalidSpecifier:
        return None
    spec.prereleases = prereleases
    specs = [sp for sp in spec]
    # We only have one version spec and it defines > or >=
    if len(specs) == 1 and specs[0].operator in (">", ">="):
        return True
    # One specifier is an exact version
    if any(sp.operator == "==" for sp in specs):
        return False
    has_upper = any(sp.operator in ("<", "<=") for sp in specs)
    has_lower = any(sp.operator in (">", ">=") for sp in specs)
    # We have a version spec that defines an upper and lower bound
    if has_upper and has_lower:
        return False
    # Everything else, like only an upper version, only a lower version etc.
    return True


def get_model_version_range(spacy_version: str) -> str:
    """Generate a version range like >=1.2.3,<1.3.0 based on a given spaCy
    version. Models are always compatible across patch versions but not
    across minor or major versions.
    """
    release = Version(spacy_version).release
    return f">={spacy_version},<{release[0]}.{release[1] + 1}.0"
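

# Usage sketch (illustrative):
#
#     get_model_version_range("3.0.0")       # ">=3.0.0,<3.1.0"
#     get_model_version_range("3.0.0.dev1")  # ">=3.0.0.dev1,<3.1.0"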


def get_base_version(version: str) -> str:
    """Generate the base version without any prerelease identifiers.

    version (str): The version, e.g. "3.0.0.dev1".
    RETURNS (str): The base version, e.g. "3.0.0".
    """
    return Version(version).base_version


def get_model_meta(path: Union[str, Path]) -> Dict[str, Any]:
    """Get model meta.json from a directory path and validate its contents.

    path (str / Path): Path to model directory.
    RETURNS (Dict[str, Any]): The model's meta data.
    """
    model_path = ensure_path(path)
    if not model_path.exists():
        raise IOError(Errors.E052.format(path=model_path))
    meta_path = model_path / "meta.json"
    if not meta_path.is_file():
        raise IOError(Errors.E053.format(path=meta_path, name="meta.json"))
    meta = srsly.read_json(meta_path)
    for setting in ["lang", "name", "version"]:
        if setting not in meta or not meta[setting]:
            raise ValueError(Errors.E054.format(setting=setting))
    if "spacy_version" in meta:
        if not is_compatible_version(about.__version__, meta["spacy_version"]):
            warn_msg = Warnings.W095.format(
                model=f"{meta['lang']}_{meta['name']}",
                model_version=meta["version"],
                version=meta["spacy_version"],
                current=about.__version__,
            )
            warnings.warn(warn_msg)
        if is_unconstrained_version(meta["spacy_version"]):
            warn_msg = Warnings.W094.format(
                model=f"{meta['lang']}_{meta['name']}",
                model_version=meta["version"],
                version=meta["spacy_version"],
                example=get_model_version_range(about.__version__),
            )
            warnings.warn(warn_msg)
    return meta


def is_package(name: str) -> bool:
    """Check if string maps to a package installed via pip.

    name (str): Name of package.
    RETURNS (bool): True if installed package, False if not.
    """
    try:
        importlib_metadata.distribution(name)
        return True
    except:  # noqa: E722
        return False


def get_package_path(name: str) -> Path:
    """Get the path to an installed package.

    name (str): Package name.
    RETURNS (Path): Path to installed package.
    """
    name = name.lower()  # use lowercase version to be safe
    # Here we're importing the module just to find it. This is worryingly
    # indirect, but it's otherwise very difficult to find the package.
    pkg = importlib.import_module(name)
    return Path(pkg.__file__).parent


def split_command(command: str) -> List[str]:
    """Split a string command using shlex. Handles platform compatibility.

    command (str): The command to split.
    RETURNS (List[str]): The split command.
    """
    return shlex.split(command, posix=not is_windows)


def join_command(command: List[str]) -> str:
    """Join a command using shlex. shlex.join is only available for Python 3.8+,
    so we're using a workaround here.

    command (List[str]): The command to join.
    RETURNS (str): The joined command.
    """
    return " ".join(shlex.quote(cmd) for cmd in command)


def run_command(command: Union[str, List[str]]) -> None:
    """Run a command on the command line as a subprocess. If the subprocess
    returns a non-zero exit code, a system exit is performed.

    command (str / List[str]): The command. If provided as a string, the
        string will be split using shlex.split.
    """
    if isinstance(command, str):
        command = split_command(command)
    try:
        status = subprocess.call(command, env=os.environ.copy())
    except FileNotFoundError:
        raise FileNotFoundError(
            Errors.E970.format(str_command=" ".join(command), tool=command[0])
        )
    if status != 0:
        sys.exit(status)
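

# Usage sketch (illustrative):
#
#     run_command("python -m spacy info")             # string is shlex-split
#     run_command(["python", "-m", "spacy", "info"])  # or pass the list directly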


@contextmanager
def working_dir(path: Union[str, Path]) -> None:
    """Change the current working directory and return to the previous one on
    exit.

    path (str / Path): The directory to navigate to.
    YIELDS (Path): The absolute path to the current working directory. This
        should be used if the block needs to perform actions within the working
        directory, to prevent mismatches with relative paths.
    """
    prev_cwd = Path.cwd()
    current = Path(path).resolve()
    os.chdir(str(current))
    try:
        yield current
    finally:
        os.chdir(str(prev_cwd))
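

# Usage sketch (illustrative):
#
#     with working_dir("/tmp") as cwd:
#         (cwd / "output.txt").write_text("...")  # anchored to the new cwd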


@contextmanager
def make_tempdir() -> None:
    """Execute a block in a temporary directory and remove the directory and
    its contents at the end of the with block.

    YIELDS (Path): The path of the temp directory.
    """
    d = Path(tempfile.mkdtemp())
    yield d
    try:
        shutil.rmtree(str(d))
    except PermissionError as e:
        warnings.warn(Warnings.W091.format(dir=d, msg=e))


def is_cwd(path: Union[Path, str]) -> bool:
    """Check whether a path is the current working directory.

    path (Union[Path, str]): The directory path.
    RETURNS (bool): Whether the path is the current working directory.
    """
    return str(Path(path).resolve()).lower() == str(Path.cwd().resolve()).lower()


def is_in_jupyter() -> bool:
    """Check if user is running spaCy from a Jupyter notebook by detecting the
    IPython kernel. Mainly used for the displaCy visualizer.

    RETURNS (bool): True if in Jupyter, False if not.
    """
    # https://stackoverflow.com/a/39662359/6400719
    try:
        shell = get_ipython().__class__.__name__
        if shell == "ZMQInteractiveShell":
            return True  # Jupyter notebook or qtconsole
    except NameError:
        return False  # Probably standard Python interpreter
    return False


def get_object_name(obj: Any) -> str:
    """Get a human-readable name of a Python object, e.g. a pipeline component.

    obj (Any): The Python object, typically a function or class.
    RETURNS (str): A human-readable name.
    """
    if hasattr(obj, "name"):
        return obj.name
    if hasattr(obj, "__name__"):
        return obj.__name__
    if hasattr(obj, "__class__") and hasattr(obj.__class__, "__name__"):
        return obj.__class__.__name__
    return repr(obj)


def get_cuda_stream(
    require: bool = False, non_blocking: bool = True
) -> Optional[CudaStream]:
    """Get a CUDA stream, or None if CUDA isn't available or the current ops
    are CPU-based."""
    ops = get_current_ops()
    if CudaStream is None:
        return None
    elif isinstance(ops, NumpyOps):
        return None
    else:
        return CudaStream(non_blocking=non_blocking)


def get_async(stream, numpy_array):
    """Copy a numpy array to the GPU asynchronously on the given CUDA stream,
    or return the array unchanged if cupy isn't available."""
    if cupy is None:
        return numpy_array
    else:
        array = cupy.ndarray(numpy_array.shape, order="C", dtype=numpy_array.dtype)
        array.set(numpy_array, stream=stream)
        return array


def env_opt(name: str, default: Optional[Any] = None) -> Optional[Any]:
    """Read a numeric setting from the environment, preferring the
    SPACY_-prefixed variable name. The value is converted to float if the
    default is a float, otherwise to int."""
    if type(default) is float:
        type_convert = float
    else:
        type_convert = int
    if "SPACY_" + name.upper() in os.environ:
        value = type_convert(os.environ["SPACY_" + name.upper()])
        if _PRINT_ENV:
            print(name, "=", repr(value), "via", "$SPACY_" + name.upper())
        return value
    elif name in os.environ:
        value = type_convert(os.environ[name])
        if _PRINT_ENV:
            print(name, "=", repr(value), "via", "$" + name)
        return value
    else:
        if _PRINT_ENV:
            print(name, "=", repr(default), "by default")
        return default
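

# Usage sketch (illustrative; "batch_size" is a made-up setting name):
#
#     os.environ["SPACY_BATCH_SIZE"] = "128"
#     env_opt("batch_size", default=32)   # 128 (int, converted from the env var)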


def read_regex(path: Union[str, Path]) -> Pattern:
    path = ensure_path(path)
    with path.open(encoding="utf8") as file_:
        entries = file_.read().split("\n")
    expression = "|".join(
        ["^" + re.escape(piece) for piece in entries if piece.strip()]
    )
    return re.compile(expression)
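

# Usage sketch (not part of the original module; the file path and contents
# are made up): each non-empty line of the file becomes one escaped,
# "^"-anchored alternative in the compiled pattern.
def _example_read_regex() -> None:
    import tempfile

    path = Path(tempfile.mkdtemp()) / "prefixes.txt"
    path.write_text("(\n[\n...", encoding="utf8")
    pattern = read_regex(path)
    assert pattern.match("(hello")  # "(" is escaped and anchored with "^"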


def compile_prefix_regex(entries: Iterable[Union[str, Pattern]]) -> Pattern:
    """Compile a sequence of prefix rules into a regex object.

    entries (Iterable[Union[str, Pattern]]): The prefix rules, e.g.
        spacy.lang.punctuation.TOKENIZER_PREFIXES.
    RETURNS (Pattern): The regex object, to be used for Tokenizer.prefix_search.
    """
    expression = "|".join(["^" + piece for piece in entries if piece.strip()])
    return re.compile(expression)


def compile_suffix_regex(entries: Iterable[Union[str, Pattern]]) -> Pattern:
    """Compile a sequence of suffix rules into a regex object.

    entries (Iterable[Union[str, Pattern]]): The suffix rules, e.g.
        spacy.lang.punctuation.TOKENIZER_SUFFIXES.
    RETURNS (Pattern): The regex object, to be used for Tokenizer.suffix_search.
    """
    expression = "|".join([piece + "$" for piece in entries if piece.strip()])
    return re.compile(expression)


def compile_infix_regex(entries: Iterable[Union[str, Pattern]]) -> Pattern:
    """Compile a sequence of infix rules into a regex object.

    entries (Iterable[Union[str, Pattern]]): The infix rules, e.g.
        spacy.lang.punctuation.TOKENIZER_INFIXES.
    RETURNS (Pattern): The regex object, to be used for Tokenizer.infix_finditer.
    """
    expression = "|".join([piece for piece in entries if piece.strip()])
    return re.compile(expression)
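

# Usage sketch (not part of the original module): toy rules showing how the
# three compilers anchor their patterns; prefixes with "^", suffixes with "$",
# infixes unanchored. These rules are made up, not spaCy's real tables.
def _example_compile_rules() -> None:
    prefix_re = compile_prefix_regex([r"\(", r"\$"])
    suffix_re = compile_suffix_regex([r"\)", r"%"])
    infix_re = compile_infix_regex([r"(?<=[0-9])\+(?=[0-9])"])
    assert prefix_re.search("(hello")       # leading "("
    assert suffix_re.search("100%")         # trailing "%"
    assert list(infix_re.finditer("1+2"))   # "+" between digits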


def add_lookups(default_func: Callable[[str], Any], *lookups) -> Callable[[str], Any]:
    """Extend an attribute function with special cases. If a word is in the
    lookups, the value is returned. Otherwise the previous function is used.

    default_func (callable): The default function to execute.
    *lookups (dict): Lookup dictionary mapping string to attribute value.
    RETURNS (callable): Lexical attribute getter.
    """
    # This is implemented as functools.partial instead of a closure, to allow
    # pickle to work.
    return functools.partial(_get_attr_unless_lookup, default_func, lookups)


def _get_attr_unless_lookup(
    default_func: Callable[[str], Any], lookups: Dict[str, Any], string: str
) -> Any:
    for lookup in lookups:
        if string in lookup:
            return lookup[string]
    return default_func(string)
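

# Usage sketch (not part of the original module; the getter and lookup table
# are made up): the lookup dicts are checked in order before falling back to
# the default function.
def _example_add_lookups() -> None:
    special = {"iPhone": True}
    get_is_title = add_lookups(lambda string: string.istitle(), special)
    assert get_is_title("iPhone") is True    # from the lookup table
    assert get_is_title("hello") is False    # falls back to the default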


def update_exc(
    base_exceptions: Dict[str, List[dict]], *addition_dicts
) -> Dict[str, List[dict]]:
    """Update and validate tokenizer exceptions. Will overwrite exceptions.

    base_exceptions (Dict[str, List[dict]]): Base exceptions.
    *addition_dicts (Dict[str, List[dict]]): Exceptions to add to the base dict, in order.
    RETURNS (Dict[str, List[dict]]): Combined tokenizer exceptions.
    """
    exc = dict(base_exceptions)
    for additions in addition_dicts:
        for orth, token_attrs in additions.items():
            if not all(isinstance(attr[ORTH], str) for attr in token_attrs):
                raise ValueError(Errors.E055.format(key=orth, orths=token_attrs))
            described_orth = "".join(attr[ORTH] for attr in token_attrs)
            if orth != described_orth:
                raise ValueError(Errors.E056.format(key=orth, orths=described_orth))
        exc.update(additions)
    exc = expand_exc(exc, "'", "’")
    return exc
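

# Usage sketch (not part of the original module; ORTH is the module-level
# symbol already used above, and the exception strings are made up): the ORTH
# values of the sub-tokens must concatenate back to the key, or E056 is raised.
def _example_update_exc() -> None:
    base = {"dont": [{ORTH: "do"}, {ORTH: "nt"}]}
    extra = {"gonna": [{ORTH: "gon"}, {ORTH: "na"}]}
    merged = update_exc(base, extra)
    assert set(merged) >= {"dont", "gonna"}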


def expand_exc(
    excs: Dict[str, List[dict]], search: str, replace: str
) -> Dict[str, List[dict]]:
    """Find string in tokenizer exceptions, duplicate entry and replace string.
    For example, to add additional versions with typographic apostrophes.

    excs (Dict[str, List[dict]]): Tokenizer exceptions.
    search (str): String to find and replace.
    replace (str): Replacement.
    RETURNS (Dict[str, List[dict]]): Combined tokenizer exceptions.
    """

    def _fix_token(token, search, replace):
        fixed = dict(token)
        fixed[ORTH] = fixed[ORTH].replace(search, replace)
        return fixed

    new_excs = dict(excs)
    for token_string, tokens in excs.items():
        if search in token_string:
            new_key = token_string.replace(search, replace)
            new_value = [_fix_token(t, search, replace) for t in tokens]
            new_excs[new_key] = new_value
    return new_excs
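

# Usage sketch (not part of the original module): every key containing the
# search string gets a duplicate entry under the replaced key, with the
# sub-token ORTH values rewritten to match.
def _example_expand_exc() -> None:
    excs = {"don't": [{ORTH: "do"}, {ORTH: "n't"}]}
    expanded = expand_exc(excs, "'", "’")
    assert "don't" in expanded and "don’t" in expanded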


def normalize_slice(
    length: int, start: int, stop: int, step: Optional[int] = None
) -> Tuple[int, int]:
    if not (step is None or step == 1):
        raise ValueError(Errors.E057)
    if start is None:
        start = 0
    elif start < 0:
        start += length
    start = min(length, max(0, start))
    if stop is None:
        stop = length
    elif stop < 0:
        stop += length
    stop = min(length, max(start, stop))
    return start, stop
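

# Quick checks (not part of the original module): negative indices wrap, then
# both ends are clamped to [0, length] with stop never below start.
def _example_normalize_slice() -> None:
    assert normalize_slice(10, -3, None) == (7, 10)
    assert normalize_slice(10, 5, 100) == (5, 10)
    assert normalize_slice(10, 8, 2) == (8, 8)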


def minibatch(
    items: Iterable[Any], size: Union[Iterator[int], int] = 8
) -> Iterator[Any]:
    """Iterate over batches of items. `size` may be an iterator,
    so that batch-size can vary on each step.
    """
    if isinstance(size, int):
        size_ = itertools.repeat(size)
    else:
        size_ = size
    items = iter(items)
    while True:
        batch_size = next(size_)
        batch = list(itertools.islice(items, int(batch_size)))
        if len(batch) == 0:
            break
        yield list(batch)
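

# Usage sketch (not part of the original module): a fixed size gives equal
# batches plus a remainder; passing an iterator lets the batch size change per
# step (the growing schedule below is a plain generator, for illustration).
def _example_minibatch() -> None:
    assert list(minibatch(range(5), size=2)) == [[0, 1], [2, 3], [4]]

    def growing(start):
        while True:
            yield start
            start += 1

    batches = list(minibatch("abcdef", size=growing(1)))
    assert batches == [["a"], ["b", "c"], ["d", "e", "f"]]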


def minibatch_by_padded_size(
    docs: Iterator["Doc"],
    size: Union[Iterator[int], int],
    buffer: int = 256,
    discard_oversize: bool = False,
) -> Iterator[Iterator["Doc"]]:
    if isinstance(size, int):
        size_ = itertools.repeat(size)
    else:
        size_ = size
    for outer_batch in minibatch(docs, buffer):
        outer_batch = list(outer_batch)
        target_size = next(size_)
        for indices in _batch_by_length(outer_batch, target_size):
            subbatch = [outer_batch[i] for i in indices]
            padded_size = max(len(seq) for seq in subbatch) * len(subbatch)
            if discard_oversize and padded_size >= target_size:
                pass
            else:
                yield subbatch


def _batch_by_length(seqs: Sequence[Any], max_words: int) -> List[List[Any]]:
    """Given a list of sequences, return a batched list of indices into the
    list, where the batches are grouped by length, in descending order.

    Batches may be at most max_words in size, defined as max sequence length * size.
    """
    # Sort by length (ascending), using the original index to break ties.
    lengths_indices = [(len(seq), i) for i, seq in enumerate(seqs)]
    lengths_indices.sort()
    batches = []
    batch = []
    for length, i in lengths_indices:
        if not batch:
            batch.append(i)
        elif length * (len(batch) + 1) <= max_words:
            batch.append(i)
        else:
            batches.append(batch)
            batch = [i]
    if batch:
        batches.append(batch)
    # Check lengths match
    assert sum(len(b) for b in batches) == len(seqs)
    batches = [list(sorted(batch)) for batch in batches]
    batches.reverse()
    return batches
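

# Worked example (not part of the original module): with max_words=6, the
# ascending pass packs "a" and "bb" together (2 items * max length 2 = 4 <= 6),
# while longer sequences get their own batch; the final reverse puts the
# longest batches first.
def _example_batch_by_length() -> None:
    seqs = ["a", "bb", "ccc", "ddddd"]
    assert _batch_by_length(seqs, 6) == [[3], [2], [0, 1]]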


def minibatch_by_words(docs, size, tolerance=0.2, discard_oversize=False):
    """Create minibatches of roughly a given number of words. If any examples
    are longer than the specified batch length, they will appear in a batch by
    themselves, or be discarded if discard_oversize=True.
    The argument 'docs' can be a list of strings, Docs or Examples.
    """
    from .gold import Example

    if isinstance(size, int):
        size_ = itertools.repeat(size)
    elif isinstance(size, list):
        size_ = iter(size)
    else:
        size_ = size
    target_size = next(size_)
    tol_size = target_size * tolerance
    batch = []
    overflow = []
    batch_size = 0
    overflow_size = 0
    for doc in docs:
        if isinstance(doc, Example):
            n_words = len(doc.reference)
        elif isinstance(doc, str):
            n_words = len(doc.split())
        else:
            n_words = len(doc)
        # if the current example exceeds the maximum batch size, it is returned separately
        # but only if discard_oversize=False.
        if n_words > target_size + tol_size:
            if not discard_oversize:
                yield [doc]
        # add the example to the current batch if there's no overflow yet and it still fits
        elif overflow_size == 0 and (batch_size + n_words) <= target_size:
            batch.append(doc)
            batch_size += n_words
        # add the example to the overflow buffer if it fits in the tolerance margin
        elif (batch_size + overflow_size + n_words) <= (target_size + tol_size):
            overflow.append(doc)
            overflow_size += n_words
        # yield the previous batch and start a new one. The new one gets the overflow examples.
        else:
            if batch:
                yield batch
            target_size = next(size_)
            tol_size = target_size * tolerance
            batch = overflow
            batch_size = overflow_size
            overflow = []
            overflow_size = 0
            # this example still fits
            if (batch_size + n_words) <= target_size:
                batch.append(doc)
                batch_size += n_words
            # this example fits in overflow
            elif (batch_size + n_words) <= (target_size + tol_size):
                overflow.append(doc)
                overflow_size += n_words
            # this example does not fit with the previous overflow: start another new batch
            else:
                if batch:
                    yield batch
                target_size = next(size_)
                tol_size = target_size * tolerance
                batch = [doc]
                batch_size = n_words
    batch.extend(overflow)
    if batch:
        yield batch
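

# Usage sketch (not part of the original module; relies on the relative .gold
# import, so it runs inside the spaCy package): word counts are 3, 2, 4 and 1,
# and each batch stays within the 5-word target plus the 20% tolerance margin.
def _example_minibatch_by_words() -> None:
    docs = ["a b c", "d e", "f g h i", "j"]
    batches = list(minibatch_by_words(docs, size=5, tolerance=0.2))
    assert batches == [["a b c", "d e"], ["f g h i", "j"]]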


def filter_spans(spans: Iterable["Span"]) -> List["Span"]:
    """Filter a sequence of spans and remove duplicates or overlaps. Useful for
    creating named entities (where one token can only be part of one entity) or
    when merging spans with `Retokenizer.merge`. When spans overlap, the (first)
    longest span is preferred over shorter spans.

    spans (Iterable[Span]): The spans to filter.
    RETURNS (List[Span]): The filtered spans.
    """
    get_sort_key = lambda span: (span.end - span.start, -span.start)
    sorted_spans = sorted(spans, key=get_sort_key, reverse=True)
    result = []
    seen_tokens = set()
    for span in sorted_spans:
        # Check for end - 1 here because boundaries are inclusive
        if span.start not in seen_tokens and span.end - 1 not in seen_tokens:
            result.append(span)
            seen_tokens.update(range(span.start, span.end))
    result = sorted(result, key=lambda span: span.start)
    return result
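

# Usage sketch (not part of the original module): with a blank English
# pipeline, the overlapping "New York" span loses to the longer
# "New York City" span. spacy.blank and span slicing are standard spaCy API.
def _example_filter_spans() -> None:
    import spacy

    nlp = spacy.blank("en")
    doc = nlp("New York City is big")
    spans = [doc[0:2], doc[0:3], doc[3:4]]
    kept = filter_spans(spans)
    assert [span.text for span in kept] == ["New York City", "is"]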


def to_bytes(getters: Dict[str, Callable[[], bytes]], exclude: Iterable[str]) -> bytes:
    return srsly.msgpack_dumps(to_dict(getters, exclude))


def from_bytes(
    bytes_data: bytes,
    setters: Dict[str, Callable[[bytes], Any]],
    exclude: Iterable[str],
) -> Dict[str, Any]:
    return from_dict(srsly.msgpack_loads(bytes_data), setters, exclude)


def to_dict(
    getters: Dict[str, Callable[[], Any]], exclude: Iterable[str]
) -> Dict[str, Any]:
    serialized = {}
    for key, getter in getters.items():
        # Split to support file names like meta.json
        if key.split(".")[0] not in exclude:
            serialized[key] = getter()
    return serialized


def from_dict(
    msg: Dict[str, Any],
    setters: Dict[str, Callable[[Any], Any]],
    exclude: Iterable[str],
) -> Dict[str, Any]:
    for key, setter in setters.items():
        # Split to support file names like meta.json
        if key.split(".")[0] not in exclude and key in msg:
            setter(msg[key])
    return msg
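

# Round-trip sketch (not part of the original module; the keys and state are
# made up): `exclude` matches the part of the key before the first dot, so
# "meta.json" can be excluded as "meta".
def _example_bytes_roundtrip() -> None:
    state = {"weights": [1, 2, 3]}
    getters = {
        "weights": lambda: state["weights"],
        "meta.json": lambda: {"lang": "en"},
    }
    data = to_bytes(getters, exclude=["meta"])  # "meta.json" is skipped
    restored = {}
    setters = {"weights": lambda value: restored.setdefault("weights", value)}
    from_bytes(data, setters, exclude=[])
    assert restored == {"weights": [1, 2, 3]}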


def to_disk(
    path: Union[str, Path],
    writers: Dict[str, Callable[[Path], None]],
    exclude: Iterable[str],
) -> Path:
    path = ensure_path(path)
    if not path.exists():
        path.mkdir()
    for key, writer in writers.items():
        # Split to support file names like meta.json
        if key.split(".")[0] not in exclude:
            writer(path / key)
    return path


def from_disk(
    path: Union[str, Path],
    readers: Dict[str, Callable[[Path], None]],
    exclude: Iterable[str],
) -> Path:
    path = ensure_path(path)
    for key, reader in readers.items():
        # Split to support file names like meta.json
        if key.split(".")[0] not in exclude:
            reader(path / key)
    return path


def import_file(name: str, loc: Union[str, Path]) -> ModuleType:
    """Import module from a file. Used to load models from a directory.

    name (str): Name of module to load.
    loc (str / Path): Path to the file.
    RETURNS: The loaded module.
    """
    loc = str(loc)
    spec = importlib.util.spec_from_file_location(name, loc)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
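

# Usage sketch (not part of the original module): the module file is written
# on the fly purely for illustration.
def _example_import_file() -> None:
    import tempfile

    mod_path = Path(tempfile.mkdtemp()) / "my_module.py"
    mod_path.write_text("ANSWER = 42\n", encoding="utf8")
    module = import_file("my_module", mod_path)
    assert module.ANSWER == 42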


def minify_html(html: str) -> str:
    """Perform a template-specific, rudimentary HTML minification for displaCy.
    Disclaimer: NOT a general-purpose solution, only removes indentation and
    newlines.

    html (str): Markup to minify.
    RETURNS (str): "Minified" HTML.
    """
    return html.strip().replace("    ", "").replace("\n", "")


def escape_html(text: str) -> str:
    """Replace <, >, &, " with their HTML encoded representation. Intended to
    prevent HTML errors in rendered displaCy markup.

    text (str): The original text.
    RETURNS (str): Equivalent text to be safely used within HTML.
    """
    text = text.replace("&", "&amp;")
    text = text.replace("<", "&lt;")
    text = text.replace(">", "&gt;")
    text = text.replace('"', "&quot;")
    return text
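

# One-line check (not part of the original module): ampersands are replaced
# first, so the entities added afterwards are not themselves re-escaped.
def _example_escape_html() -> None:
    escaped = escape_html('<b title="x & y">')
    assert escaped == "&lt;b title=&quot;x &amp; y&quot;&gt;"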


def get_words_and_spaces(
    words: Iterable[str], text: str
) -> Tuple[List[str], List[bool]]:
    if "".join("".join(words).split()) != "".join(text.split()):
        raise ValueError(Errors.E194.format(text=text, words=words))
    text_words = []
    text_spaces = []
    text_pos = 0
    # normalize words to remove all whitespace tokens
    norm_words = [word for word in words if not word.isspace()]
    # align words with text
    for word in norm_words:
        try:
            word_start = text[text_pos:].index(word)
        except ValueError:
            raise ValueError(Errors.E194.format(text=text, words=words))
        if word_start > 0:
            text_words.append(text[text_pos : text_pos + word_start])
            text_spaces.append(False)
            text_pos += word_start
        text_words.append(word)
        text_spaces.append(False)
        text_pos += len(word)
        if text_pos < len(text) and text[text_pos] == " ":
            text_spaces[-1] = True
            text_pos += 1
    if text_pos < len(text):
        text_words.append(text[text_pos:])
        text_spaces.append(False)
    return (text_words, text_spaces)
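

# Round-trip sketch (not part of the original module): the returned flags say
# whether each word is followed by a single space, so the original text can be
# reconstructed from the words and spaces.
def _example_get_words_and_spaces() -> None:
    words, spaces = get_words_and_spaces(["Hello", "world", "!"], "Hello world!")
    assert words == ["Hello", "world", "!"]
    assert spaces == [True, False, False]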


def copy_config(config: Union[Dict[str, Any], Config]) -> Config:
    """Deep copy a Config. Will raise an error if the config contents are not
    JSON-serializable.

    config (Config): The config to copy.
    RETURNS (Config): The copied config.
    """
    try:
        return Config(config).copy()
    except ValueError:
        raise ValueError(Errors.E961.format(config=config))
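

# Usage sketch (not part of the original module), using thinc's Config
# imported at the top of this module: the copy is deep, so mutating it leaves
# the source untouched.
def _example_copy_config() -> None:
    original = {"training": {"seed": 0}}
    cfg = copy_config(original)
    cfg["training"]["seed"] = 1
    assert original["training"]["seed"] == 0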


def deep_merge_configs(
    config: Union[Dict[str, Any], Config], defaults: Union[Dict[str, Any], Config]
) -> Config:
    """Deep merge two configs, a base config and its defaults. Ignores
    references to registered functions, to avoid filling in a block from the
    defaults when it refers to a different registered function.

    config (Dict[str, Any]): The config.
    defaults (Dict[str, Any]): The config defaults.
    RETURNS (Dict[str, Any]): The merged config.
    """
    config = copy_config(config)
    merged = _deep_merge_configs(config, defaults)
    return Config(merged)


def _deep_merge_configs(
    config: Union[Dict[str, Any], Config], defaults: Union[Dict[str, Any], Config]
) -> Union[Dict[str, Any], Config]:
    for key, value in defaults.items():
        if isinstance(value, dict):
            node = config.setdefault(key, {})
            if not isinstance(node, dict):
                continue
            promises = [key for key in value if key.startswith("@")]
            promise = promises[0] if promises else None
            # We only update the block from defaults if it refers to the same
            # registered function
            if (
                promise
                and any(k.startswith("@") for k in node)
                and (promise in node and node[promise] != value[promise])
            ):
                continue
            defaults = _deep_merge_configs(node, value)
        elif key not in config:
            config[key] = value
    return config
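

# Usage sketch (not part of the original module; "Adam.v1" and "SGD.v1" are
# illustrative registry names): "seed" is filled in from the defaults, but the
# optimizer block is left alone because it refers to a different registered
# function.
def _example_deep_merge_configs() -> None:
    config = {"optimizer": {"@optimizers": "Adam.v1", "learn_rate": 0.002}}
    defaults = {
        "optimizer": {"@optimizers": "SGD.v1", "learn_rate": 0.001, "eps": 1e-8},
        "seed": 0,
    }
    merged = deep_merge_configs(config, defaults)
    assert merged["seed"] == 0
    assert merged["optimizer"]["@optimizers"] == "Adam.v1"
    assert "eps" not in merged["optimizer"]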
def dot_to_dict(values: Dict[str, Any]) -> Dict[str, dict]:
    """Convert dot notation to a dict. For example: {"token.pos": True,
    "token._.xyz": True} becomes {"token": {"pos": True, "_": {"xyz": True}}}.

    values (Dict[str, Any]): The key/value pairs to convert.
    RETURNS (Dict[str, dict]): The converted values.
    """
    result = {}
    for key, value in values.items():
        path = result
        parts = key.lower().split(".")
        for i, item in enumerate(parts):
            is_last = i == len(parts) - 1
            path = path.setdefault(item, value if is_last else {})
    return result
def dict_to_dot(obj: Dict[str, dict]) -> Dict[str, Any]:
    """Convert a dict to dot notation. For example: {"token": {"pos": True,
    "_": {"xyz": True}}} becomes {"token.pos": True, "token._.xyz": True}.

    obj (Dict[str, dict]): The dict to convert.
    RETURNS (Dict[str, Any]): The key/value pairs.
    """
    return {".".join(key): value for key, value in walk_dict(obj)}


def walk_dict(
    node: Dict[str, Any], parent: List[str] = []
) -> Iterator[Tuple[List[str], Any]]:
    """Walk a dict and yield the path and values of the leaves."""
    for key, value in node.items():
        key_parent = [*parent, key]
        if isinstance(value, dict):
            yield from walk_dict(value, key_parent)
        else:
            yield (key_parent, value)
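

# Round-trip check (not part of the original module): the two converters are
# inverses of each other for nested dicts of plain values.
def _example_dot_notation() -> None:
    nested = dot_to_dict({"token.pos": True, "token._.xyz": True})
    assert nested == {"token": {"pos": True, "_": {"xyz": True}}}
    assert dict_to_dot(nested) == {"token.pos": True, "token._.xyz": True}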


def get_arg_names(func: Callable) -> List[str]:
    """Get a list of all named arguments of a function (regular,
    keyword-only).

    func (Callable): The function to inspect.
    RETURNS (List[str]): The argument names.
    """
    argspec = inspect.getfullargspec(func)
    return list(set([*argspec.args, *argspec.kwonlyargs]))
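

# Usage sketch (not part of the original module; the function below is made
# up): the result is built from a set, so compare it order-independently.
def _example_get_arg_names() -> None:
    def make_component(name: str, *, factor: float = 1.0):
        return name, factor

    assert set(get_arg_names(make_component)) == {"name", "factor"}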


def combine_score_weights(weights: List[Dict[str, float]]) -> Dict[str, float]:
    """Combine and normalize score weights defined by components, e.g.
    {"ents_r": 0.2, "ents_p": 0.3, "ents_f": 0.5} and {"some_other_score": 1.0}.

    weights (List[dict]): The weights defined by the components.
    RETURNS (Dict[str, float]): The combined and normalized weights.
    """
    result = {}
    for w_dict in weights:
        # We need to account for weights that don't sum to 1.0 and normalize
        # the score weights accordingly, then divide each weight by the number
        # of components.
        total = sum(w_dict.values())
        for key, value in w_dict.items():
            weight = round(value / total / len(weights), 2)
            result[key] = result.get(key, 0.0) + weight
    return result
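

# Worked example (not part of the original module): each component contributes
# 1 / len(weights) = 0.5 of the total mass, so the combined weights sum to 1.0.
def _example_combine_score_weights() -> None:
    weights = [{"ents_f": 0.5, "ents_p": 0.5}, {"tags_acc": 1.0}]
    combined = combine_score_weights(weights)
    assert combined == {"ents_f": 0.25, "ents_p": 0.25, "tags_acc": 0.5}
    assert sum(combined.values()) == 1.0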


class DummyTokenizer:
    # add dummy methods for to_bytes, from_bytes, to_disk and from_disk to
    # allow serialization (see #1557)
    def to_bytes(self, **kwargs):
        return b""

    def from_bytes(self, _bytes_data, **kwargs):
        return self

    def to_disk(self, _path, **kwargs):
        return None

    def from_disk(self, _path, **kwargs):
        return self


def link_vectors_to_models(vocab: "Vocab") -> None:
    vectors = vocab.vectors
    if vectors.name is None:
        vectors.name = VECTORS_KEY
        if vectors.data.size != 0:
            warnings.warn(Warnings.W020.format(shape=vectors.data.shape))
    for word in vocab:
        if word.orth in vectors.key2row:
            word.rank = vectors.key2row[word.orth]
        else:
            word.rank = 0


VECTORS_KEY = "spacy_pretrained_vectors"


def create_default_optimizer() -> Optimizer:
    learn_rate = env_opt("learn_rate", 0.001)
    beta1 = env_opt("optimizer_B1", 0.9)
    beta2 = env_opt("optimizer_B2", 0.999)
    eps = env_opt("optimizer_eps", 1e-8)
    L2 = env_opt("L2_penalty", 1e-6)
    grad_clip = env_opt("grad_norm_clip", 10.0)
    L2_is_weight_decay = env_opt("L2_is_weight_decay", False)
    optimizer = Adam(
        learn_rate,
        L2=L2,
        beta1=beta1,
        beta2=beta2,
        eps=eps,
        grad_clip=grad_clip,
        L2_is_weight_decay=L2_is_weight_decay,
    )
    return optimizer
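

# Usage sketch (not part of the original module): each hyperparameter above
# can be overridden through env_opt's environment lookups; the returned
# optimizer plugs straight into training, e.g. as the `sgd` argument of
# Language.update.
def _example_create_default_optimizer() -> None:
    optimizer = create_default_optimizer()
    assert optimizer is not None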