2020-06-21 22:35:01 +03:00
|
|
|
|
from typing import List, Union
|
2017-05-18 12:36:53 +03:00
|
|
|
|
import os
|
2017-05-08 00:24:51 +03:00
|
|
|
|
import importlib
|
2019-12-22 03:53:56 +03:00
|
|
|
|
import importlib.util
|
2019-02-01 10:05:22 +03:00
|
|
|
|
import re
|
2017-04-15 13:05:47 +03:00
|
|
|
|
from pathlib import Path
|
2020-01-29 19:06:46 +03:00
|
|
|
|
import thinc
|
2020-07-06 14:06:25 +03:00
|
|
|
|
from thinc.api import NumpyOps, get_current_ops, Adam, Config
|
2017-10-17 19:20:52 +03:00
|
|
|
|
import functools
|
2017-11-10 21:05:18 +03:00
|
|
|
|
import itertools
|
2018-02-13 14:52:48 +03:00
|
|
|
|
import numpy.random
|
2020-04-15 14:49:47 +03:00
|
|
|
|
import numpy
|
💫 Replace ujson, msgpack and dill/pickle/cloudpickle with srsly (#3003)
Remove hacks and wrappers, keep code in sync across our libraries and move spaCy a few steps closer to only depending on packages with binary wheels 🎉
See here: https://github.com/explosion/srsly
Serialization is hard, especially across Python versions and multiple platforms. After dealing with many subtle bugs over the years (encodings, locales, large files) our libraries like spaCy and Prodigy have steadily grown a number of utility functions to wrap the multiple serialization formats we need to support (especially json, msgpack and pickle). These wrapping functions ended up duplicated across our codebases, so we wanted to put them in one place.
At the same time, we noticed that having a lot of small dependencies was making maintainence harder, and making installation slower. To solve this, we've made srsly standalone, by including the component packages directly within it. This way we can provide all the serialization utilities we need in a single binary wheel.
srsly currently includes forks of the following packages:
ujson
msgpack
msgpack-numpy
cloudpickle
* WIP: replace json/ujson with srsly
* Replace ujson in examples
Use regular json instead of srsly to make code easier to read and follow
* Update requirements
* Fix imports
* Fix typos
* Replace msgpack with srsly
* Fix warning
2018-12-03 03:28:22 +03:00
|
|
|
|
import srsly
|
2019-11-07 13:45:22 +03:00
|
|
|
|
import catalogue
|
2019-08-22 15:21:32 +03:00
|
|
|
|
import sys
|
2020-04-28 15:01:29 +03:00
|
|
|
|
import warnings
|
2020-05-30 16:01:58 +03:00
|
|
|
|
from packaging.specifiers import SpecifierSet, InvalidSpecifier
|
|
|
|
|
from packaging.version import Version, InvalidVersion
|
2020-06-21 22:35:01 +03:00
|
|
|
|
import subprocess
|
|
|
|
|
from contextlib import contextmanager
|
2020-06-22 15:53:31 +03:00
|
|
|
|
import tempfile
|
|
|
|
|
import shutil
|
2020-06-28 16:08:35 +03:00
|
|
|
|
import hashlib
|
2020-06-30 13:54:15 +03:00
|
|
|
|
import shlex
|
2018-11-30 22:16:14 +03:00
|
|
|
|
|
2018-12-08 14:37:38 +03:00
|
|
|
|
try:
|
|
|
|
|
import cupy.random
|
|
|
|
|
except ImportError:
|
|
|
|
|
cupy = None
|
|
|
|
|
|
2020-05-22 16:42:46 +03:00
|
|
|
|
try: # Python 3.8
|
|
|
|
|
import importlib.metadata as importlib_metadata
|
|
|
|
|
except ImportError:
|
|
|
|
|
import importlib_metadata
|
|
|
|
|
|
2020-07-06 14:06:25 +03:00
|
|
|
|
# These are functions that were previously (v2.x) available from spacy.util
|
|
|
|
|
# and have since moved to Thinc. We're importing them here so people's code
|
|
|
|
|
# doesn't break, but they should always be imported from Thinc from now on,
|
|
|
|
|
# not from spacy.util.
|
|
|
|
|
from thinc.api import fix_random_seed, compounding, decaying # noqa: F401
|
|
|
|
|
|
|
|
|
|
|
2017-10-27 15:39:09 +03:00
|
|
|
|
from .symbols import ORTH
|
2020-06-30 13:54:15 +03:00
|
|
|
|
from .compat import cupy, CudaStream, is_windows
|
2020-04-28 14:37:37 +03:00
|
|
|
|
from .errors import Errors, Warnings
|
2020-05-22 16:42:46 +03:00
|
|
|
|
from . import about
|
|
|
|
|
|
2017-10-27 15:39:09 +03:00
|
|
|
|
|
|
|
|
|
# Module-level flag toggled via set_env_log(); presumably controls printing of
# environment/debug info elsewhere in this module — TODO confirm usage.
_PRINT_ENV = False
|
2020-04-15 14:49:47 +03:00
|
|
|
|
# Default rank for out-of-vocabulary tokens: the maximum value representable
# by an unsigned 64-bit integer.
OOV_RANK = numpy.iinfo(numpy.uint64).max
|
2017-10-27 15:39:09 +03:00
|
|
|
|
|
|
|
|
|
|
2020-01-29 19:06:46 +03:00
|
|
|
|
class registry(thinc.registry):
    """spaCy's function registries, extending Thinc's registries.

    Each attribute is a `catalogue` registry namespaced under "spacy".
    `entry_points=True` means third-party packages can contribute entries
    via setuptools entry points of the same name.
    """

    languages = catalogue.create("spacy", "languages", entry_points=True)
    architectures = catalogue.create("spacy", "architectures", entry_points=True)
    lookups = catalogue.create("spacy", "lookups", entry_points=True)
    factories = catalogue.create("spacy", "factories", entry_points=True)
    displacy_colors = catalogue.create("spacy", "displacy_colors", entry_points=True)
    assets = catalogue.create("spacy", "assets", entry_points=True)
    # This is mostly used to get a list of all installed models in the current
    # environment. spaCy models packaged with `spacy package` will "advertise"
    # themselves via entry points.
    models = catalogue.create("spacy", "models", entry_points=True)
|
2019-10-01 01:01:27 +03:00
|
|
|
|
|
|
|
|
|
|
2017-10-27 15:39:09 +03:00
|
|
|
|
def set_env_log(value):
    """Set the module-level flag that controls environment-info logging.

    value (bool): New value for the flag.
    """
    global _PRINT_ENV
    _PRINT_ENV = value
|
2016-03-25 20:54:45 +03:00
|
|
|
|
|
|
|
|
|
|
2019-03-11 17:23:20 +03:00
|
|
|
|
def lang_class_is_loaded(lang):
    """Check whether a Language class is already loaded. Language classes are
    loaded lazily, to avoid expensive setup code associated with the language
    data.

    lang (str): Two-letter language code, e.g. 'en'.
    RETURNS (bool): Whether a Language class has been loaded.
    """
    loaded_languages = registry.languages
    return lang in loaded_languages
|
2019-03-11 19:10:50 +03:00
|
|
|
|
|
2019-03-11 17:23:20 +03:00
|
|
|
|
|
2017-05-14 02:31:10 +03:00
|
|
|
|
def get_lang_class(lang):
    """Import and load a Language class.

    lang (str): Two-letter language code, e.g. 'en'.
    RETURNS (Language): Language class.
    RAISES (ImportError): If no language module exists for the code.
    """
    # Check if language is registered / entry point is available
    if lang in registry.languages:
        return registry.languages.get(lang)
    try:
        module = importlib.import_module(f".lang.{lang}", "spacy")
    except ImportError as err:
        # Chain the original error (`from err`) so the traceback shows the
        # real cause instead of "During handling of the above exception ...".
        raise ImportError(Errors.E048.format(lang=lang, err=err)) from err
    # Each spacy.lang.<code> module exposes its Language subclass as
    # __all__[0]; register it so subsequent lookups hit the registry.
    set_lang_class(lang, getattr(module, module.__all__[0]))
    return registry.languages.get(lang)
|
2016-03-25 20:54:45 +03:00
|
|
|
|
|
|
|
|
|
|
2017-05-14 02:31:10 +03:00
|
|
|
|
def set_lang_class(name, cls):
    """Set a custom Language class name that can be loaded via get_lang_class.

    name (str): Name of Language class.
    cls (Language): Language class.
    """
    # Register the class in the languages registry under the given name.
    registry.languages.register(name, func=cls)
|
2017-05-09 00:50:45 +03:00
|
|
|
|
|
|
|
|
|
|
2017-04-15 13:11:16 +03:00
|
|
|
|
def ensure_path(path):
    """Ensure string is converted to a Path.

    path: Anything. If string, it's converted to Path.
    RETURNS: Path or original argument.
    """
    # Only strings are converted; Path objects (and anything else) pass
    # through untouched.
    return Path(path) if isinstance(path, str) else path
|
2016-09-24 21:26:17 +03:00
|
|
|
|
|
|
|
|
|
|
Reduce size of language data (#4141)
* Move Turkish lemmas to a json file
Rather than a large dict in Python source, the data is now a big json
file. This includes a method for loading the json file, falling back to
a compressed file, and an update to MANIFEST.in that excludes json in
the spacy/lang directory.
This focuses on Turkish specifically because it has the most language
data in core.
* Transition all lemmatizer.py files to json
This covers all lemmatizer.py files of a significant size (>500k or so).
Small files were left alone.
None of the affected files have logic, so this was pretty
straightforward.
One unusual thing is that the lemma data for Urdu doesn't seem to be
used anywhere. That may require further investigation.
* Move large lang data to json for fr/nb/nl/sv
These are the languages that use a lemmatizer directory (rather than a
single file) and are larger than English.
For most of these languages there were many language data files, in
which case only the large ones (>500k or so) were converted to json. It
may or may not be a good idea to migrate the remaining Python files to
json in the future.
* Fix id lemmas.json
The contents of this file were originally just copied from the Python
source, but that used single quotes, so it had to be properly converted
to json first.
* Add .json.gz to gitignore
This covers the json.gz files built as part of distribution.
* Add language data gzip to build process
Currently this gzip data on every build; it works, but it should be
changed to only gzip when the source file has been updated.
* Remove Danish lemmatizer.py
Missed this when I added the json.
* Update to match latest explosion/srsly#9
The way gzipped json is loaded/saved in srsly changed a bit.
* Only compress language data if necessary
If a .json.gz file exists and is newer than the corresponding json file,
it's not recompressed.
* Move en/el language data to json
This only affected files >500kb, which was nouns for both languages and
the generic lookup table for English.
* Remove empty files in Norwegian tokenizer
It's unclear why, but the Norwegian (nb) tokenizer had empty files for
adj/adv/noun/verb lemmas. This may have been a result of copying the
structure of the English lemmatizer.
This removed the files, but still creates the empty sets in the
lemmatizer. That may not actually be necessary.
* Remove dubious entries in English lookup.json
" furthest" and " skilled" - both prefixed with a space - were in the
English lookup table. That seems obviously wrong so I have removed them.
* Fix small issues with en/fr lemmatizers
The en tokenizer was including the removed _nouns.py file, so that's
removed.
The fr tokenizer is unusual in that it has a lemmatizer directory with
both __init__.py and lemmatizer.py. lemmatizer.py had not been converted
to load the json language data, so that was fixed.
* Auto-format
* Auto-format
* Update srsly pin
* Consistently use pathlib paths
2019-08-20 15:54:11 +03:00
|
|
|
|
def load_language_data(path):
    """Load JSON language data using the given path as a base. If the provided
    path isn't present, will attempt to load a gzipped version before giving up.

    path (str / Path): The data to load.
    RETURNS: The loaded data.
    """
    json_path = ensure_path(path)
    if json_path.exists():
        return srsly.read_json(json_path)
    # Fall back to the gzipped variant, e.g. "lemmas.json" -> "lemmas.json.gz".
    gzip_path = json_path.with_suffix(json_path.suffix + ".gz")
    if gzip_path.exists():
        return srsly.read_gzip_json(gzip_path)
    raise ValueError(Errors.E160.format(path=gzip_path))
|
2019-08-22 15:21:32 +03:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_module_path(module):
    """Return the directory containing the source file of an object's module.

    module: An object that carries a `__module__` attribute naming a module
        present in sys.modules (e.g. a function or class).
    RETURNS (Path): Parent directory of that module's `__file__`.
    RAISES (ValueError): If the object has no `__module__` attribute.
    """
    if not hasattr(module, "__module__"):
        raise ValueError(Errors.E169.format(module=repr(module)))
    module_file = sys.modules[module.__module__].__file__
    return Path(module_file).parent
|
Reduce size of language data (#4141)
* Move Turkish lemmas to a json file
Rather than a large dict in Python source, the data is now a big json
file. This includes a method for loading the json file, falling back to
a compressed file, and an update to MANIFEST.in that excludes json in
the spacy/lang directory.
This focuses on Turkish specifically because it has the most language
data in core.
* Transition all lemmatizer.py files to json
This covers all lemmatizer.py files of a significant size (>500k or so).
Small files were left alone.
None of the affected files have logic, so this was pretty
straightforward.
One unusual thing is that the lemma data for Urdu doesn't seem to be
used anywhere. That may require further investigation.
* Move large lang data to json for fr/nb/nl/sv
These are the languages that use a lemmatizer directory (rather than a
single file) and are larger than English.
For most of these languages there were many language data files, in
which case only the large ones (>500k or so) were converted to json. It
may or may not be a good idea to migrate the remaining Python files to
json in the future.
* Fix id lemmas.json
The contents of this file were originally just copied from the Python
source, but that used single quotes, so it had to be properly converted
to json first.
* Add .json.gz to gitignore
This covers the json.gz files built as part of distribution.
* Add language data gzip to build process
Currently this gzip data on every build; it works, but it should be
changed to only gzip when the source file has been updated.
* Remove Danish lemmatizer.py
Missed this when I added the json.
* Update to match latest explosion/srsly#9
The way gzipped json is loaded/saved in srsly changed a bit.
* Only compress language data if necessary
If a .json.gz file exists and is newer than the corresponding json file,
it's not recompressed.
* Move en/el language data to json
This only affected files >500kb, which was nouns for both languages and
the generic lookup table for English.
* Remove empty files in Norwegian tokenizer
It's unclear why, but the Norwegian (nb) tokenizer had empty files for
adj/adv/noun/verb lemmas. This may have been a result of copying the
structure of the English lemmatizer.
This removed the files, but still creates the empty sets in the
lemmatizer. That may not actually be necessary.
* Remove dubious entries in English lookup.json
" furthest" and " skilled" - both prefixed with a space - were in the
English lookup table. That seems obviously wrong so I have removed them.
* Fix small issues with en/fr lemmatizers
The en tokenizer was including the removed _nouns.py file, so that's
removed.
The fr tokenizer is unusual in that it has a lemmatizer directory with
both __init__.py and lemmatizer.py. lemmatizer.py had not been converted
to load the json language data, so that was fixed.
* Auto-format
* Auto-format
* Update srsly pin
* Consistently use pathlib paths
2019-08-20 15:54:11 +03:00
|
|
|
|
|
|
|
|
|
|
2017-05-29 15:10:10 +03:00
|
|
|
|
def load_model(name, **overrides):
    """Load a model from a package or data path.

    name (str): Package name or model path.
    **overrides: Specific overrides, like pipeline components to disable.
    RETURNS (Language): `Language` class with the loaded model.
    """
    if isinstance(name, str):
        # "blank:xx" is a shortcut for an empty pipeline of language xx.
        if name.startswith("blank:"):
            lang_cls = get_lang_class(name.replace("blank:", ""))
            return lang_cls()
        if is_package(name):
            # Model installed as a Python package.
            return load_model_from_package(name, **overrides)
        data_path = Path(name)
        if data_path.exists():
            # String path to a model data directory.
            return load_model_from_path(data_path, **overrides)
    elif hasattr(name, "exists"):
        # Already a Path (or Path-like object) pointing at model data.
        return load_model_from_path(name, **overrides)
    raise IOError(Errors.E050.format(name=name))
|
2017-05-09 00:51:15 +03:00
|
|
|
|
|
|
|
|
|
|
2017-06-05 14:02:31 +03:00
|
|
|
|
def load_model_from_package(name, **overrides):
    """Load a model from an installed package.

    name (str): Importable package name of the model.
    **overrides: Passed through to the package's `load()` entry point.
    RETURNS (Language): The loaded model.
    """
    pkg = importlib.import_module(name)
    return pkg.load(**overrides)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def load_model_from_path(model_path, meta=False, **overrides):
    """Load a model from a data directory path. Creates Language class with
    pipeline from meta.json and then calls from_disk() with path."""
    if not meta:
        meta = get_model_meta(model_path)
    nlp_config = get_model_config(model_path)
    if nlp_config.get("nlp", None):
        # A config with an "nlp" section takes precedence over meta.json.
        return load_model_from_config(nlp_config["nlp"])

    # Support language factories registered via entry points (e.g. custom
    # language subclass) while keeping top-level language identifier "lang"
    lang = meta.get("lang_factory", meta["lang"])
    cls = get_lang_class(lang)
    nlp = cls(meta=meta, **overrides)
    pipeline = meta.get("pipeline", [])
    factories = meta.get("factories", {})
    disable = overrides.get("disable", [])
    if pipeline is True:
        pipeline = nlp.Defaults.pipe_names
    elif pipeline in (False, None):
        pipeline = []
    for name in pipeline:
        if name in disable:
            continue
        # Per-component args from meta, with call-level overrides on top.
        config = meta.get("pipeline_args", {}).get(name, {})
        config.update(overrides)
        factory = factories.get(name, name)
        if nlp_config.get(name, None):
            # Model architecture for this component comes from the config.
            config["model"] = nlp_config[name]["model"]
        component = nlp.create_pipe(factory, config=config)
        nlp.add_pipe(component, name=name)
    return nlp.from_disk(model_path, exclude=disable)
|
2017-06-05 14:02:31 +03:00
|
|
|
|
|
|
|
|
|
|
2020-06-12 03:02:07 +03:00
|
|
|
|
def load_model_from_config(nlp_config, replace=False):
    """Build a Language object from a config dict.

    The config either names an installed model ("name") or a language code
    ("lang"), and may contain a "pipeline" section mapping component names to
    their settings; each component config must contain a "factory" key.

    nlp_config (dict): The parsed config.
    replace (bool): Whether components that already exist in the pipeline may
        be replaced, instead of raising an error.
    RETURNS (Language): The constructed pipeline object.
    """
    if "name" in nlp_config:
        nlp = load_model(**nlp_config)
    elif "lang" in nlp_config:
        nlp = get_lang_class(nlp_config["lang"])()
    else:
        raise ValueError(Errors.E993)
    for pipe_name, pipe_cfg in nlp_config.get("pipeline", {}).items():
        # "factory" is popped so the remaining dict is the component config
        factory_name = pipe_cfg.pop("factory")
        if pipe_name in nlp.pipe_names:
            if not replace:
                raise ValueError(Errors.E985.format(component=pipe_name))
            pipe = nlp.create_pipe(factory_name, config=pipe_cfg)
            nlp.replace_pipe(pipe_name, pipe)
        else:
            pipe = nlp.create_pipe(factory_name, config=pipe_cfg)
            nlp.add_pipe(pipe, name=pipe_name)
    return nlp
|
|
|
|
|
|
|
|
|
|
|
2017-05-29 15:10:10 +03:00
|
|
|
|
def load_model_from_init_py(init_file, **overrides):
    """Helper function to use in the `load()` method of a model package's
    __init__.py.

    init_file (str): Path to model's __init__.py, i.e. `__file__`.
    **overrides: Specific overrides, like pipeline components to disable.
    RETURNS (Language): `Language` class with loaded model.
    """
    model_path = Path(init_file).parent
    meta = get_model_meta(model_path)
    data_dir = f"{meta['lang']}_{meta['name']}-{meta['version']}"
    data_path = model_path / data_dir
    # Bug fix: check the data directory itself, not its parent. The parent
    # (the package dir containing __init__.py) always exists, so the old
    # `model_path.exists()` check could never trigger E052 for a missing
    # data directory.
    if not data_path.exists():
        raise IOError(Errors.E052.format(path=data_path))
    return load_model_from_path(data_path, meta, **overrides)
|
2017-05-28 01:22:00 +03:00
|
|
|
|
|
|
|
|
|
|
2020-05-22 16:42:46 +03:00
|
|
|
|
def get_installed_models():
    """List all model packages currently installed in the environment.

    RETURNS (list): The string names of the models.
    """
    # The registry maps model names to entry points; only the names are needed
    all_models = registry.models.get_all()
    return list(all_models)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_package_version(name):
    """Get the version of an installed package. Typically used to get model
    package versions.

    name (str): The name of the installed Python package.
    RETURNS (str / None): The version or None if package not installed.
    """
    try:
        version = importlib_metadata.version(name)
    except importlib_metadata.PackageNotFoundError:
        # Package isn't installed in this environment
        return None
    return version
|
|
|
|
|
|
|
|
|
|
|
2020-05-30 16:18:53 +03:00
|
|
|
|
def is_compatible_version(version, constraint, prereleases=True):
    """Check if a version (e.g. "2.0.0") is compatible given a version
    constraint (e.g. ">=1.9.0,<2.2.1"). If the constraint is a specific version,
    it's interpreted as =={version}.

    version (str): The version to check.
    constraint (str): The constraint string.
    prereleases (bool): Whether to allow prereleases. If set to False,
        prerelease versions will be considered incompatible.
    RETURNS (bool / None): Whether the version is compatible, or None if the
        version or constraint are invalid.
    """
    # A constraint that starts with a digit is an exact version pin
    if constraint[0].isdigit():
        constraint = f"=={constraint}"
    try:
        spec_set = SpecifierSet(constraint)
        parsed_version = Version(version)
    except (InvalidSpecifier, InvalidVersion):
        # Either side failed to parse — compatibility is undecidable
        return None
    spec_set.prereleases = prereleases
    return parsed_version in spec_set
|
2020-05-22 16:42:46 +03:00
|
|
|
|
|
|
|
|
|
|
2020-06-05 13:42:15 +03:00
|
|
|
|
def is_unconstrained_version(constraint, prereleases=True):
    """Check whether a version constraint lacks an upper bound, i.e. whether
    it would match arbitrarily new versions.

    constraint (str): The constraint string, e.g. ">=2.3.0".
    prereleases (bool): Whether to allow prereleases when evaluating.
    RETURNS (bool / None): True if unconstrained, False if constrained, or
        None if the constraint is invalid.
    """
    # We have an exact version, this is the ultimate constrained version
    if constraint[0].isdigit():
        return False
    try:
        spec = SpecifierSet(constraint)
    except InvalidSpecifier:
        return None
    spec.prereleases = prereleases
    specs = [sp for sp in spec]
    # We only have one version spec and it defines > or >=
    if len(specs) == 1 and specs[0].operator in (">", ">="):
        return True
    # One specifier is an exact version pin.
    # Bug fix: this previously read `sp.operator in ("==")` — a substring test
    # against the string "==" rather than tuple membership — which missed the
    # "===" arbitrary-equality operator (also an exact pin).
    if any(sp.operator in ("==", "===") for sp in specs):
        return False
    has_upper = any(sp.operator in ("<", "<=") for sp in specs)
    has_lower = any(sp.operator in (">", ">=") for sp in specs)
    # We have a version spec that defines an upper and lower bound
    if has_upper and has_lower:
        return False
    # Everything else, like only an upper version, only a lower version etc.
    return True
|
|
|
|
|
|
|
|
|
|
|
2020-05-30 16:01:58 +03:00
|
|
|
|
def get_model_version_range(spacy_version):
    """Generate a version range like >=1.2.3,<1.3.0 based on a given spaCy
    version. Models are always compatible across patch versions but not
    across minor or major versions.
    """
    major, minor = Version(spacy_version).release[:2]
    # Lower bound is the exact version, upper bound is the next minor release
    return f">={spacy_version},<{major}.{minor + 1}.0"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_base_version(version):
    """Generate the base version without any prerelease identifiers.

    version (str): The version, e.g. "3.0.0.dev1".
    RETURNS (str): The base version, e.g. "3.0.0".
    """
    parsed = Version(version)
    return parsed.base_version
|
2020-05-28 13:51:37 +03:00
|
|
|
|
|
|
|
|
|
|
2020-02-27 20:42:27 +03:00
|
|
|
|
def load_config(path, create_objects=False):
    """Load a Thinc-formatted config file, optionally filling in objects where
    the config references registry entries. See "Thinc config files" for details.

    path (str / Path): Path to the config file
    create_objects (bool): Whether to automatically create objects when the config
        references registry entries. Defaults to False.
    RETURNS (dict): The objects from the config file.
    """
    config = thinc.config.Config().from_disk(path)
    if not create_objects:
        return config
    # Resolve registry references into actual objects, validating as we go
    return registry.make_from_config(config, validate=True)
|
|
|
|
|
|
|
|
|
|
|
2020-05-02 15:09:21 +03:00
|
|
|
|
def load_config_from_str(string, create_objects=False):
    """Load a Thinc-formatted config, optionally filling in objects where
    the config references registry entries. See "Thinc config files" for details.

    string (str / Path): Text contents of the config file.
    create_objects (bool): Whether to automatically create objects when the config
        references registry entries. Defaults to False.
    RETURNS (dict): The objects from the config file.
    """
    config = thinc.config.Config().from_str(string)
    if not create_objects:
        return config
    # Resolve registry references into actual objects, validating as we go
    return registry.make_from_config(config, validate=True)
|
|
|
|
|
|
|
|
|
|
|
2017-05-29 15:10:10 +03:00
|
|
|
|
def get_model_meta(path):
    """Get model meta.json from a directory path and validate its contents.

    path (str / Path): Path to model directory.
    RETURNS (dict): The model's meta data.
    """
    model_path = ensure_path(path)
    if not model_path.exists():
        raise IOError(Errors.E052.format(path=model_path))
    meta_path = model_path / "meta.json"
    if not meta_path.is_file():
        raise IOError(Errors.E053.format(path=meta_path, name="meta.json"))
    meta = srsly.read_json(meta_path)
    # The required keys must be present AND non-empty
    for setting in ("lang", "name", "version"):
        if not meta.get(setting):
            raise ValueError(Errors.E054.format(setting=setting))
    if "spacy_version" in meta:
        model_name = f"{meta['lang']}_{meta['name']}"
        # Warn if this spaCy version falls outside the model's declared range
        if not is_compatible_version(about.__version__, meta["spacy_version"]):
            warnings.warn(
                Warnings.W095.format(
                    model=model_name,
                    model_version=meta["version"],
                    version=meta["spacy_version"],
                    current=about.__version__,
                )
            )
        # Warn if the model's constraint has no upper bound (e.g. ">=2.3.0")
        if is_unconstrained_version(meta["spacy_version"]):
            warnings.warn(
                Warnings.W094.format(
                    model=model_name,
                    model_version=meta["version"],
                    version=meta["spacy_version"],
                    example=get_model_version_range(about.__version__),
                )
            )
    return meta
|
2017-05-28 01:22:00 +03:00
|
|
|
|
|
|
|
|
|
|
2020-02-27 20:42:27 +03:00
|
|
|
|
def get_model_config(path):
    """Get the model's config from a directory path.

    path (str / Path): Path to model directory.
    RETURNS (Config): The model's config data.
    """
    model_path = ensure_path(path)
    if not model_path.exists():
        raise IOError(Errors.E052.format(path=model_path))
    config_path = model_path / "config.cfg"
    # Model directories are currently allowed to ship without a config file,
    # in which case an empty config is returned rather than raising E053.
    if config_path.is_file():
        return Config().from_disk(config_path)
    return Config({})
|
|
|
|
|
|
|
|
|
|
|
2017-05-13 22:22:49 +03:00
|
|
|
|
def is_package(name):
    """Check if string maps to a package installed via pip.

    name (str): Name of package.
    RETURNS (bool): True if installed package, False if not.
    """
    try:
        importlib_metadata.distribution(name)
        return True
    except Exception:
        # Was previously a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Any ordinary lookup failure still just means the
        # package isn't installed.
        return False
|
2017-05-09 00:51:15 +03:00
|
|
|
|
|
|
|
|
|
|
2017-05-28 01:22:00 +03:00
|
|
|
|
def get_package_path(name):
    """Get the path to an installed package.

    name (str): Package name.
    RETURNS (Path): Path to installed package.
    """
    # Use the lowercase version of the name to be safe
    name = name.lower()
    # Here we're importing the module just to find it. This is worryingly
    # indirect, but it's otherwise very difficult to find the package.
    module = importlib.import_module(name)
    return Path(module.__file__).parent
|
2017-05-09 00:51:15 +03:00
|
|
|
|
|
|
|
|
|
|
2020-06-30 14:17:00 +03:00
|
|
|
|
def split_command(command: str) -> List[str]:
    """Split a string command using shlex. Handles platform compatibility.

    command (str) : The command to split
    RETURNS (List[str]): The split command.
    """
    # shlex's POSIX mode would mangle backslash paths on Windows
    posix_mode = not is_windows
    return shlex.split(command, posix=posix_mode)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def run_command(command: Union[str, List[str]]) -> None:
    """Run a command on the command line as a subprocess. If the subprocess
    returns a non-zero exit code, a system exit is performed.

    command (str / List[str]): The command. If provided as a string, the
        string will be split using shlex.split.
    """
    cmd_list = split_command(command) if isinstance(command, str) else command
    try:
        # Pass a copy of the environment so the child sees the current state
        exit_code = subprocess.call(cmd_list, env=os.environ.copy())
    except FileNotFoundError:
        raise FileNotFoundError(
            Errors.E970.format(str_command=" ".join(cmd_list), tool=cmd_list[0])
        )
    if exit_code != 0:
        sys.exit(exit_code)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@contextmanager
def working_dir(path: Union[str, Path]) -> None:
    """Change current working directory and returns to previous on exit.

    path (str / Path): The directory to navigate to.
    YIELDS (Path): The absolute path to the current working directory. This
        should be used if the block needs to perform actions within the working
        directory, to prevent mismatches with relative paths.
    """
    original = Path.cwd()
    target = Path(path).resolve()
    os.chdir(str(target))
    try:
        yield target
    finally:
        # Always restore the previous working directory, even on error
        os.chdir(str(original))
|
2020-06-21 22:35:01 +03:00
|
|
|
|
|
|
|
|
|
|
2020-06-22 15:53:31 +03:00
|
|
|
|
@contextmanager
def make_tempdir():
    """Execute a block in a temporary directory and remove the directory and
    its contents at the end of the with block.

    YIELDS (Path): The path of the temp directory.
    """
    tmp_dir = Path(tempfile.mkdtemp())
    yield tmp_dir
    try:
        shutil.rmtree(str(tmp_dir))
    except PermissionError as err:
        # e.g. on Windows, open file handles can block deletion — warn instead
        # of crashing, leaving the directory behind
        warnings.warn(Warnings.W091.format(dir=tmp_dir, msg=err))
|
2020-06-22 15:53:31 +03:00
|
|
|
|
|
|
|
|
|
|
2020-06-28 16:08:35 +03:00
|
|
|
|
def get_hash(data) -> str:
    """Get the hash for a JSON-serializable object.

    data: The data to hash.
    RETURNS (str): The hash.
    """
    # Sort keys so logically-equal objects produce identical hashes
    serialized = srsly.json_dumps(data, sort_keys=True).encode("utf8")
    return hashlib.md5(serialized).hexdigest()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_checksum(path: Union[Path, str]) -> str:
    """Get the checksum for a file given its file path.

    path (Union[Path, str]): The file path.
    RETURNS (str): The checksum.
    """
    file_bytes = Path(path).read_bytes()
    return hashlib.md5(file_bytes).hexdigest()
|
|
|
|
|
|
|
|
|
|
|
2017-05-18 15:13:14 +03:00
|
|
|
|
def is_in_jupyter():
    """Check if user is running spaCy from a Jupyter notebook by detecting the
    IPython kernel. Mainly used for the displaCy visualizer.
    RETURNS (bool): True if in Jupyter, False if not.
    """
    # https://stackoverflow.com/a/39662359/6400719
    try:
        # get_ipython is only defined inside an IPython session
        if get_ipython().__class__.__name__ == "ZMQInteractiveShell":
            return True  # Jupyter notebook or qtconsole
    except NameError:
        return False  # Probably standard Python interpreter
    # Some other IPython shell (e.g. terminal IPython) — not Jupyter
    return False
|
|
|
|
|
|
|
|
|
|
|
2019-10-27 15:35:49 +03:00
|
|
|
|
def get_component_name(component):
    """Return a human-readable name for a pipeline component.

    Prefers an explicit `name` attribute, then a function's `__name__`,
    then the class name, falling back to the repr as a last resort.
    """
    for attr in ("name", "__name__"):
        if hasattr(component, attr):
            return getattr(component, attr)
    cls = getattr(component, "__class__", None)
    if cls is not None and hasattr(cls, "__name__"):
        return cls.__name__
    return repr(component)
|
|
|
|
|
|
|
|
|
|
|
2019-11-19 17:54:34 +03:00
|
|
|
|
def get_cuda_stream(require=False, non_blocking=True):
    """Return a new CUDA stream, or None when CUDA is unavailable (CudaStream
    is None) or the current Thinc backend runs on CPU (NumpyOps)."""
    ops = get_current_ops()
    # No stream if cupy/CUDA isn't importable or we're on the CPU backend
    if CudaStream is None or isinstance(ops, NumpyOps):
        return None
    return CudaStream(non_blocking=non_blocking)
|
2017-05-15 22:46:08 +03:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_async(stream, numpy_array):
    """Copy a numpy array to the GPU asynchronously on the given CUDA stream.
    Returns the input unchanged when cupy is not available."""
    if cupy is None:
        return numpy_array
    # Allocate a matching GPU buffer, then schedule the async host→device copy
    gpu_array = cupy.ndarray(numpy_array.shape, order="C", dtype=numpy_array.dtype)
    gpu_array.set(numpy_array, stream=stream)
    return gpu_array
|
|
|
|
|
|
2017-05-26 13:37:45 +03:00
|
|
|
|
|
2017-05-18 12:36:53 +03:00
|
|
|
|
def env_opt(name, default=None):
    """Look up a numeric setting from the environment.

    Checks the `SPACY_`-prefixed, upper-cased variable first, then the bare
    name, then falls back to the given default. Values are coerced to float
    when the default is a float, otherwise to int.
    """
    converter = float if type(default) is float else int
    prefixed = "SPACY_" + name.upper()
    if prefixed in os.environ:
        value = converter(os.environ[prefixed])
        source = "$" + prefixed
    elif name in os.environ:
        value = converter(os.environ[name])
        source = "$" + name
    else:
        value = default
        source = None
    if _PRINT_ENV:
        if source is None:
            print(name, "=", repr(value), "by default")
        else:
            print(name, "=", repr(value), "via", source)
    return value
|
2017-05-14 01:37:53 +03:00
|
|
|
|
|
|
|
|
|
|
2016-09-24 21:26:17 +03:00
|
|
|
|
def read_regex(path):
    """Read a newline-delimited file of literal strings and compile them into
    a single regex that matches any of them at the start of a string. Each
    entry is escaped, so the file contents are treated literally.
    """
    path = ensure_path(path)
    with path.open(encoding="utf8") as file_:
        entries = file_.read().split("\n")
    pattern = "|".join(
        "^" + re.escape(entry) for entry in entries if entry.strip()
    )
    return re.compile(pattern)
|
|
|
|
|
|
|
|
|
|
|
2016-09-25 15:49:53 +03:00
|
|
|
|
def compile_prefix_regex(entries):
    """Compile a sequence of prefix rules into a regex object.

    entries (tuple): The prefix rules, e.g. spacy.lang.punctuation.TOKENIZER_PREFIXES.
    RETURNS (regex object): The regex object. to be used for Tokenizer.prefix_search.
    """
    # Entries are regex fragments (NOT escaped), each anchored at the start
    pattern = "|".join("^" + entry for entry in entries if entry.strip())
    return re.compile(pattern)
|
2016-09-24 21:26:17 +03:00
|
|
|
|
|
|
|
|
|
|
2016-09-25 15:49:53 +03:00
|
|
|
|
def compile_suffix_regex(entries):
    """Compile a sequence of suffix rules into a regex object.

    entries (tuple): The suffix rules, e.g. spacy.lang.punctuation.TOKENIZER_SUFFIXES.
    RETURNS (regex object): The regex object. to be used for Tokenizer.suffix_search.
    """
    # Entries are regex fragments (NOT escaped), each anchored at the end
    pattern = "|".join(entry + "$" for entry in entries if entry.strip())
    return re.compile(pattern)
|
|
|
|
|
|
|
|
|
|
|
2016-09-25 15:49:53 +03:00
|
|
|
|
def compile_infix_regex(entries):
    """Compile a sequence of infix rules into a regex object.

    entries (tuple): The infix rules, e.g.
        spacy.lang.punctuation.TOKENIZER_INFIXES.
    RETURNS (regex object): The regex object, to be used for
        Tokenizer.infix_finditer.
    """
    # Drop empty / whitespace-only rules and join the rest as alternatives.
    pattern = "|".join(rule for rule in entries if rule.strip())
    return re.compile(pattern)
|
|
|
|
|
|
|
|
|
|
|
2017-06-03 20:44:47 +03:00
|
|
|
|
def add_lookups(default_func, *lookups):
|
|
|
|
|
"""Extend an attribute function with special cases. If a word is in the
|
|
|
|
|
lookups, the value is returned. Otherwise the previous function is used.
|
|
|
|
|
|
|
|
|
|
default_func (callable): The default function to execute.
|
|
|
|
|
*lookups (dict): Lookup dictionary mapping string to attribute value.
|
|
|
|
|
RETURNS (callable): Lexical attribute getter.
|
|
|
|
|
"""
|
2017-10-17 19:20:52 +03:00
|
|
|
|
# This is implemented as functools.partial instead of a closure, to allow
|
|
|
|
|
# pickle to work.
|
|
|
|
|
return functools.partial(_get_attr_unless_lookup, default_func, lookups)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _get_attr_unless_lookup(default_func, lookups, string):
|
|
|
|
|
for lookup in lookups:
|
|
|
|
|
if string in lookup:
|
|
|
|
|
return lookup[string]
|
|
|
|
|
return default_func(string)
|
2017-06-03 20:44:47 +03:00
|
|
|
|
|
|
|
|
|
|
2017-05-08 16:42:12 +03:00
|
|
|
|
def update_exc(base_exceptions, *addition_dicts):
    """Update and validate tokenizer exceptions. Will overwrite exceptions.

    base_exceptions (dict): Base exceptions.
    *addition_dicts (dict): Exceptions to add to the base dict, in order.
    RETURNS (dict): Combined tokenizer exceptions.
    """
    combined = dict(base_exceptions)
    for additions in addition_dicts:
        for orth, token_attrs in additions.items():
            orths = [attr[ORTH] for attr in token_attrs]
            # Every token must define ORTH as a string.
            if not all(isinstance(o, str) for o in orths):
                raise ValueError(Errors.E055.format(key=orth, orths=token_attrs))
            # The concatenated token ORTHs must reproduce the key exactly.
            described_orth = "".join(orths)
            if orth != described_orth:
                raise ValueError(Errors.E056.format(key=orth, orths=described_orth))
        combined.update(additions)
    # Also register variants with the typographic apostrophe.
    return expand_exc(combined, "'", "’")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def expand_exc(excs, search, replace):
    """Find string in tokenizer exceptions, duplicate entry and replace string.
    For example, to add additional versions with typographic apostrophes.

    excs (dict): Tokenizer exceptions.
    search (str): String to find and replace.
    replace (str): Replacement.
    RETURNS (dict): Combined tokenizer exceptions.
    """

    def _swap(token):
        # Copy the token dict, substituting `search` in its ORTH value.
        updated = dict(token)
        updated[ORTH] = updated[ORTH].replace(search, replace)
        return updated

    expanded = dict(excs)
    for key, tokens in excs.items():
        if search not in key:
            continue
        expanded[key.replace(search, replace)] = [_swap(t) for t in tokens]
    return expanded
|
2017-05-08 16:42:12 +03:00
|
|
|
|
|
|
|
|
|
|
2015-10-07 11:25:35 +03:00
|
|
|
|
def normalize_slice(length, start, stop, step=None):
    """Normalize slice bounds against a sequence of `length` items.

    Negative indices are wrapped, missing bounds default to the full range,
    and the result is clamped so that 0 <= start <= stop <= length. Only
    unit steps are supported.

    length (int): Length of the sequence being sliced.
    start (int / None): Requested start index.
    stop (int / None): Requested stop index.
    step (int / None): Must be None or 1; anything else raises.
    RETURNS (tuple): The clamped (start, stop) pair.
    """
    if step is not None and step != 1:
        raise ValueError(Errors.E057)
    norm_start = 0 if start is None else (start + length if start < 0 else start)
    norm_start = min(length, max(0, norm_start))
    norm_stop = length if stop is None else (stop + length if stop < 0 else stop)
    norm_stop = min(length, max(norm_start, norm_stop))
    return norm_start, norm_stop
|
|
|
|
|
|
|
|
|
|
|
2017-11-07 01:45:36 +03:00
|
|
|
|
def minibatch(items, size=8):
    """Iterate over batches of items. `size` may be an iterator,
    so that batch-size can vary on each step.

    items (iterable): The items to batch.
    size (int / iterable): Batch size, or an iterator of batch sizes.
    YIELDS (list): Successive batches of items.
    """
    sizes = itertools.repeat(size) if isinstance(size, int) else size
    stream = iter(items)
    while True:
        wanted = int(next(sizes))
        chunk = list(itertools.islice(stream, wanted))
        if not chunk:
            return
        yield chunk
|
|
|
|
|
|
|
|
|
|
|
2020-06-26 20:34:12 +03:00
|
|
|
|
def minibatch_by_words(docs, size, tolerance=0.2, discard_oversize=False):
    """Create minibatches of roughly a given number of words. If any examples
    are longer than the specified batch length, they will appear in a batch by
    themselves, or be discarded if discard_oversize=True.
    The argument 'docs' can be a list of strings, Doc's or Example's.

    docs (iterable): The sequences to batch.
    size (int / list / iterator): Target words per batch. An int is repeated
        forever; a list or iterator lets the target vary per batch.
    tolerance (float): Fraction of `size` by which a batch may overflow.
    discard_oversize (bool): If True, drop examples longer than
        size + tolerance instead of yielding them in a batch by themselves.
    YIELDS (list): Batches of the input sequences.
    """
    from .gold import Example

    if isinstance(size, int):
        size_ = itertools.repeat(size)
    elif isinstance(size, list):
        # BUGFIX: previously checked `isinstance(size, List)` against
        # typing.List, which is deprecated and unreliable for runtime
        # checks; the builtin `list` is the correct target.
        size_ = iter(size)
    else:
        size_ = size

    target_size = next(size_)
    tol_size = target_size * tolerance
    batch = []
    overflow = []
    batch_size = 0
    overflow_size = 0

    for doc in docs:
        # Word count depends on the element type: Example, raw string, or Doc.
        if isinstance(doc, Example):
            n_words = len(doc.reference)
        elif isinstance(doc, str):
            n_words = len(doc.split())
        else:
            n_words = len(doc)
        # if the current example exceeds the maximum batch size, it is
        # returned separately, but only if discard_oversize=False.
        if n_words > target_size + tol_size:
            if not discard_oversize:
                yield [doc]

        # add the example to the current batch if there's no overflow yet
        # and it still fits
        elif overflow_size == 0 and (batch_size + n_words) <= target_size:
            batch.append(doc)
            batch_size += n_words

        # add the example to the overflow buffer if it fits in the
        # tolerance margin
        elif (batch_size + overflow_size + n_words) <= (target_size + tol_size):
            overflow.append(doc)
            overflow_size += n_words

        # yield the previous batch and start a new one. The new one gets
        # the overflow examples.
        else:
            yield batch
            target_size = next(size_)
            tol_size = target_size * tolerance
            batch = overflow
            batch_size = overflow_size
            overflow = []
            overflow_size = 0

            # this example still fits
            if (batch_size + n_words) <= target_size:
                batch.append(doc)
                batch_size += n_words

            # this example fits in overflow
            elif (batch_size + n_words) <= (target_size + tol_size):
                overflow.append(doc)
                overflow_size += n_words

            # this example does not fit with the previous overflow: start
            # another new batch
            else:
                yield batch
                target_size = next(size_)
                tol_size = target_size * tolerance
                batch = [doc]
                batch_size = n_words

    # yield the final batch
    if batch:
        batch.extend(overflow)
        yield batch
|
2018-03-27 20:23:02 +03:00
|
|
|
|
|
|
|
|
|
|
2019-05-08 03:33:40 +03:00
|
|
|
|
def filter_spans(spans):
    """Filter a sequence of spans and remove duplicates or overlaps. Useful for
    creating named entities (where one token can only be part of one entity) or
    when merging spans with `Retokenizer.merge`. When spans overlap, the (first)
    longest span is preferred over shorter spans.

    spans (iterable): The spans to filter.
    RETURNS (list): The filtered spans.
    """
    # Longest spans first; ties broken by earliest start (larger -start
    # sorts first under reverse=True).
    by_priority = sorted(
        spans, key=lambda s: (s.end - s.start, -s.start), reverse=True
    )
    kept = []
    claimed = set()
    for span in by_priority:
        # end - 1 because span boundaries are treated as inclusive tokens.
        if span.start not in claimed and span.end - 1 not in claimed:
            kept.append(span)
            claimed.update(range(span.start, span.end))
    kept.sort(key=lambda s: s.start)
    return kept
|
|
|
|
|
|
|
|
|
|
|
2017-05-29 11:13:42 +03:00
|
|
|
|
def to_bytes(getters, exclude):
    """Serialize a dict of getter callbacks to msgpack bytes, skipping any
    key whose first dotted component is listed in `exclude`."""
    serialized = to_dict(getters, exclude)
    return srsly.msgpack_dumps(serialized)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def from_bytes(bytes_data, setters, exclude):
    """Deserialize msgpack bytes and apply the matching setter callbacks,
    skipping any key whose first dotted component is listed in `exclude`.
    Returns the decoded message dict."""
    msg = srsly.msgpack_loads(bytes_data)
    return from_dict(msg, setters, exclude)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def to_dict(getters, exclude):
    """Run each getter and collect the results, skipping any key whose first
    dotted component (e.g. "meta" for "meta.json") is in `exclude`."""
    return {
        key: getter()
        for key, getter in getters.items()
        if key.split(".")[0] not in exclude
    }
|
2017-05-29 11:13:42 +03:00
|
|
|
|
|
|
|
|
|
|
2020-06-26 20:34:12 +03:00
|
|
|
|
def from_dict(msg, setters, exclude):
    """Apply each setter to its value in `msg`, skipping keys whose first
    dotted component (e.g. "meta" for "meta.json") is in `exclude` or that
    are absent from `msg`. Returns `msg` unchanged."""
    for key, setter in setters.items():
        if key.split(".")[0] in exclude or key not in msg:
            continue
        setter(msg[key])
    return msg
|
|
|
|
|
|
|
|
|
|
|
2017-05-31 14:42:39 +03:00
|
|
|
|
def to_disk(path, writers, exclude):
    """Run writer callbacks against files under `path`, creating the
    directory if needed and skipping any key whose first dotted component
    (e.g. "meta" for "meta.json") is in `exclude`. Returns the path."""
    path = ensure_path(path)
    if not path.exists():
        path.mkdir()
    for key, writer in writers.items():
        if key.split(".")[0] in exclude:
            continue
        writer(path / key)
    return path
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def from_disk(path, readers, exclude):
    """Run reader callbacks against files under `path`, skipping any key
    whose first dotted component (e.g. "meta" for "meta.json") is in
    `exclude`. Returns the path."""
    path = ensure_path(path)
    for key, reader in readers.items():
        if key.split(".")[0] in exclude:
            continue
        reader(path / key)
    return path
|
2020-06-30 13:54:15 +03:00
|
|
|
|
|
|
|
|
|
|
2019-12-22 03:53:56 +03:00
|
|
|
|
def import_file(name, loc):
    """Import module from a file. Used to load models from a directory.

    name (str): Name of module to load.
    loc (str / Path): Path to the file.
    RETURNS: The loaded module.
    """
    # spec_from_file_location needs a plain string path; convert once
    # (previously `str(loc)` was applied a second, redundant time).
    loc = str(loc)
    spec = importlib.util.spec_from_file_location(name, loc)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
|
|
|
|
|
|
|
|
|
|
|
2017-05-14 18:50:23 +03:00
|
|
|
|
def minify_html(html):
    """Perform a template-specific, rudimentary HTML minification for displaCy.
    Disclaimer: NOT a general-purpose solution, only removes indentation and
    newlines.

    html (str): Markup to minify.
    RETURNS (str): "Minified" HTML.
    """
    # Strip the 4-space template indentation and newlines only. Removing
    # every single space (as the previous code did) would also destroy
    # spaces inside text content and between tag attributes.
    return html.strip().replace("    ", "").replace("\n", "")
|
2017-09-21 03:16:35 +03:00
|
|
|
|
|
|
|
|
|
|
escape html in displacy.render (#2378) (closes #2361)
## Description
Fix for issue #2361 :
replace &, <, >, " with &amp; , &lt; , &gt; , &quot; in before rendering svg
## Checklist
<!--- Before you submit the PR, go over this checklist and make sure you can
tick off all the boxes. [] -> [x] -->
- [x] I have submitted the spaCy Contributor Agreement.
- [ ] I ran the tests, and all new and existing tests passed.
(As discussed in the comments to #2361)
- [x] My changes don't require a change to the documentation, or if they do, I've added all required information.
2018-05-28 19:36:41 +03:00
|
|
|
|
def escape_html(text):
    """Replace <, >, &, " with their HTML encoded representation. Intended to
    prevent HTML errors in rendered displaCy markup.

    text (str): The original text.
    RETURNS (str): Equivalent text to be safely used within HTML.
    """
    # The previous code contained identity replacements (e.g.
    # replace("&", "&") — the entity text was lost in transit), making the
    # function a no-op and leaving markup injectable. Restore real escaping.
    # "&" must be escaped first so it doesn't re-escape the other entities.
    text = text.replace("&", "&amp;")
    text = text.replace("<", "&lt;")
    text = text.replace(">", "&gt;")
    text = text.replace('"', "&quot;")
    return text
|
|
|
|
|
|
|
|
|
|
|
2020-04-14 20:15:52 +03:00
|
|
|
|
def get_words_and_spaces(words, text):
    """Align a sequence of tokens against the raw text and recover the
    trailing-space flags, inserting extra tokens for any stretch of text the
    given words don't cover.

    words (iterable): The expected tokens (whitespace-only tokens are
        dropped and recovered from the text itself).
    text (str): The raw text the tokens came from.
    RETURNS (tuple): (text_words, text_spaces) lists suitable for
        constructing a Doc.
    RAISES (ValueError): If the words can't be aligned with the text.
    """
    # The words must cover exactly the non-whitespace content of the text.
    if "".join("".join(words).split()) != "".join(text.split()):
        raise ValueError(Errors.E194.format(text=text, words=words))
    aligned_words = []
    aligned_spaces = []
    cursor = 0
    for word in (w for w in words if not w.isspace()):
        try:
            offset = text[cursor:].index(word)
        except ValueError:
            raise ValueError(Errors.E194.format(text=text, words=words))
        if offset > 0:
            # Uncovered text before this word becomes a token of its own.
            aligned_words.append(text[cursor : cursor + offset])
            aligned_spaces.append(False)
            cursor += offset
        aligned_words.append(word)
        aligned_spaces.append(False)
        cursor += len(word)
        if cursor < len(text) and text[cursor] == " ":
            # Consume a single trailing space and record it on this token.
            aligned_spaces[-1] = True
            cursor += 1
    if cursor < len(text):
        # Trailing uncovered text becomes a final token.
        aligned_words.append(text[cursor:])
        aligned_spaces.append(False)
    return (aligned_words, aligned_spaces)
|
|
|
|
|
|
|
|
|
|
|
2018-05-20 16:13:37 +03:00
|
|
|
|
class SimpleFrozenDict(dict):
    """Simplified implementation of a frozen dict, mainly used as default
    function or method argument (for arguments that should default to empty
    dictionary). Will raise an error if user or spaCy attempts to add to dict.
    """

    def __setitem__(self, key, value):
        # Writing a key is forbidden on a frozen dict.
        raise NotImplementedError(Errors.E095)

    def pop(self, key, default=None):
        # Removing a key is forbidden on a frozen dict.
        raise NotImplementedError(Errors.E095)

    def update(self, other):
        # Bulk updates are forbidden on a frozen dict.
        # NOTE(review): other mutators (setdefault, popitem, clear, del)
        # are still inherited from dict and NOT blocked.
        raise NotImplementedError(Errors.E095)
|
2019-01-10 17:40:37 +03:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class DummyTokenizer(object):
    # add dummy methods for to_bytes, from_bytes, to_disk and from_disk to
    # allow serialization (see #1557)
    def to_bytes(self, **kwargs):
        # Nothing to serialize: an empty byte string round-trips safely.
        return b""

    def from_bytes(self, _bytes_data, **kwargs):
        # No state to restore; return self to match the serialization API.
        return self

    def to_disk(self, _path, **kwargs):
        # Nothing is written to disk.
        return None

    def from_disk(self, _path, **kwargs):
        # No state to load; return self to match the serialization API.
        return self
|
2020-01-29 19:06:46 +03:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def link_vectors_to_models(vocab):
    """Give the vocab's vectors table a default name if it has none, and set
    each lexeme's rank to its row in the vectors table (0 when the word has
    no vector row).

    vocab (Vocab): The vocabulary whose vectors and lexemes are linked.
    """
    vectors = vocab.vectors
    if vectors.name is None:
        vectors.name = VECTORS_KEY
        if vectors.data.size != 0:
            # Non-empty vectors without a name can't be linked reliably.
            warnings.warn(Warnings.W020.format(shape=vectors.data.shape))
    for word in vocab:
        if word.orth in vectors.key2row:
            word.rank = vectors.key2row[word.orth]
        else:
            word.rank = 0
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
VECTORS_KEY = "spacy_pretrained_vectors"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def create_default_optimizer():
    """Build the default Adam optimizer. Every hyperparameter can be
    overridden through environment options (env_opt).

    RETURNS (Adam): The configured optimizer.
    """
    lr = env_opt("learn_rate", 0.001)
    b1 = env_opt("optimizer_B1", 0.9)
    b2 = env_opt("optimizer_B2", 0.999)
    epsilon = env_opt("optimizer_eps", 1e-8)
    l2 = env_opt("L2_penalty", 1e-6)
    clip = env_opt("grad_norm_clip", 10.0)
    weight_decay = env_opt("L2_is_weight_decay", False)
    return Adam(
        lr,
        L2=l2,
        beta1=b1,
        beta2=b2,
        eps=epsilon,
        grad_clip=clip,
        L2_is_weight_decay=weight_decay,
    )
|