Mirror of https://github.com/explosion/spaCy.git, synced 2025-08-06 05:10:21 +03:00
Merge remote-tracking branch 'upstream/master' into bugfix/remotes-pathy-fluid
This commit is contained in: commit e6d01e9804
@@ -5,7 +5,7 @@ repos:
      - id: black
        language_version: python3.7
        additional_dependencies: ['click==8.0.4']
  - repo: https://gitlab.com/pycqa/flake8
  - repo: https://github.com/pycqa/flake8
    rev: 5.0.4
    hooks:
      - id: flake8

@@ -27,6 +27,7 @@ from .project.dvc import project_update_dvc  # noqa: F401
from .project.push import project_push  # noqa: F401
from .project.pull import project_pull  # noqa: F401
from .project.document import project_document  # noqa: F401
from .find_threshold import find_threshold  # noqa: F401


@app.command("link", no_args_is_help=True, deprecated=True, hidden=True)

spacy/cli/find_threshold.py (new file, 233 lines)
@@ -0,0 +1,233 @@
import functools
import operator
from pathlib import Path
import logging
from typing import Optional, Tuple, Any, Dict, List

import numpy
import wasabi.tables

from ..pipeline import TextCategorizer, MultiLabel_TextCategorizer
from ..errors import Errors
from ..training import Corpus
from ._util import app, Arg, Opt, import_code, setup_gpu
from .. import util

_DEFAULTS = {
    "n_trials": 11,
    "use_gpu": -1,
    "gold_preproc": False,
}


@app.command(
    "find-threshold",
    context_settings={"allow_extra_args": False, "ignore_unknown_options": True},
)
def find_threshold_cli(
    # fmt: off
    model: str = Arg(..., help="Model name or path"),
    data_path: Path = Arg(..., help="Location of binary evaluation data in .spacy format", exists=True),
    pipe_name: str = Arg(..., help="Name of pipe to examine thresholds for"),
    threshold_key: str = Arg(..., help="Key of threshold attribute in component's configuration"),
    scores_key: str = Arg(..., help="Metric to optimize"),
    n_trials: int = Opt(_DEFAULTS["n_trials"], "--n_trials", "-n", help="Number of trials to determine optimal thresholds"),
    code_path: Optional[Path] = Opt(None, "--code", "-c", help="Path to Python file with additional code (registered functions) to be imported"),
    use_gpu: int = Opt(_DEFAULTS["use_gpu"], "--gpu-id", "-g", help="GPU ID or -1 for CPU"),
    gold_preproc: bool = Opt(_DEFAULTS["gold_preproc"], "--gold-preproc", "-G", help="Use gold preprocessing"),
    verbose: bool = Opt(False, "--silent", "-V", "-VV", help="Display more information for debugging purposes"),
    # fmt: on
):
    """
    Runs prediction trials for a trained model with varying thresholds to maximize
    the specified metric. The search space for the threshold is traversed linearly
    from 0 to 1 in `n_trials` steps. Results are displayed in a table on `stdout`
    (the corresponding API call to `spacy.cli.find_threshold.find_threshold()`
    returns all results).

    This is applicable only for components whose predictions are influenced by
    thresholds - e.g. `textcat_multilabel` and `spancat`, but not `textcat`. Note
    that the full path to the corresponding threshold attribute in the config has to
    be provided.

    DOCS: https://spacy.io/api/cli#find-threshold
    """

    util.logger.setLevel(logging.DEBUG if verbose else logging.INFO)
    import_code(code_path)
    find_threshold(
        model=model,
        data_path=data_path,
        pipe_name=pipe_name,
        threshold_key=threshold_key,
        scores_key=scores_key,
        n_trials=n_trials,
        use_gpu=use_gpu,
        gold_preproc=gold_preproc,
        silent=False,
    )


def find_threshold(
    model: str,
    data_path: Path,
    pipe_name: str,
    threshold_key: str,
    scores_key: str,
    *,
    n_trials: int = _DEFAULTS["n_trials"],  # type: ignore
    use_gpu: int = _DEFAULTS["use_gpu"],  # type: ignore
    gold_preproc: bool = _DEFAULTS["gold_preproc"],  # type: ignore
    silent: bool = True,
) -> Tuple[float, float, Dict[float, float]]:
    """
    Runs prediction trials for models with varying thresholds to maximize the specified metric.
    model (Union[str, Path]): Pipeline to evaluate. Can be a package or a path to a data directory.
    data_path (Path): Path to file with DocBin with docs to use for threshold search.
    pipe_name (str): Name of pipe to examine thresholds for.
    threshold_key (str): Key of threshold attribute in component's configuration.
    scores_key (str): Name of the score metric to optimize.
    n_trials (int): Number of trials to determine optimal thresholds.
    use_gpu (int): GPU ID or -1 for CPU.
    gold_preproc (bool): Whether to use gold preprocessing. Gold preprocessing helps the annotations align to the
        tokenization, and may result in sequences of more consistent length. However, it may reduce runtime accuracy due
        to train/test skew.
    silent (bool): Whether to print non-error-related output to stdout.
    RETURNS (Tuple[float, float, Dict[float, float]]): Best found threshold, the corresponding score, scores for all
        evaluated thresholds.
    """

    setup_gpu(use_gpu, silent=silent)
    data_path = util.ensure_path(data_path)
    if not data_path.exists():
        wasabi.msg.fail("Evaluation data not found", data_path, exits=1)
    nlp = util.load_model(model)

    if pipe_name not in nlp.component_names:
        raise AttributeError(
            Errors.E001.format(name=pipe_name, opts=nlp.component_names)
        )
    pipe = nlp.get_pipe(pipe_name)
    if not hasattr(pipe, "scorer"):
        raise AttributeError(Errors.E1045)

    if type(pipe) == TextCategorizer:
        wasabi.msg.warn(
            "The `textcat` component doesn't use a threshold as it's not applicable to the concept of "
            "exclusive classes. All thresholds will yield the same results."
        )

    if not silent:
        wasabi.msg.info(
            title=f"Optimizing for {scores_key} for component '{pipe_name}' with {n_trials} "
            f"trials."
        )

    # Load evaluation corpus.
    corpus = Corpus(data_path, gold_preproc=gold_preproc)
    dev_dataset = list(corpus(nlp))
    config_keys = threshold_key.split(".")

    def set_nested_item(
        config: Dict[str, Any], keys: List[str], value: float
    ) -> Dict[str, Any]:
        """Set item in nested dictionary. Adapted from https://stackoverflow.com/a/54138200.
        config (Dict[str, Any]): Configuration dictionary.
        keys (List[Any]): Path to value to set.
        value (float): Value to set.
        RETURNS (Dict[str, Any]): Updated dictionary.
        """
        functools.reduce(operator.getitem, keys[:-1], config)[keys[-1]] = value
        return config

    def filter_config(
        config: Dict[str, Any], keys: List[str], full_key: str
    ) -> Dict[str, Any]:
        """Filters provided config dictionary so that only the specified keys path remains.
        config (Dict[str, Any]): Configuration dictionary.
        keys (List[Any]): Path to value to set.
        full_key (str): Full user-specified key.
        RETURNS (Dict[str, Any]): Filtered dictionary.
        """
        if keys[0] not in config:
            wasabi.msg.fail(
                title=f"Failed to look up `{full_key}` in config: sub-key {[keys[0]]} not found.",
                text=f"Make sure you specified {[keys[0]]} correctly. The following sub-keys are available instead: "
                f"{list(config.keys())}",
                exits=1,
            )
        return {
            keys[0]: filter_config(config[keys[0]], keys[1:], full_key)
            if len(keys) > 1
            else config[keys[0]]
        }

    # Evaluate with varying threshold values.
    scores: Dict[float, float] = {}
    config_keys_full = ["components", pipe_name, *config_keys]
    table_col_widths = (10, 10)
    thresholds = numpy.linspace(0, 1, n_trials)
    print(wasabi.tables.row(["Threshold", f"{scores_key}"], widths=table_col_widths))
    for threshold in thresholds:
        # Reload pipeline with overrides specifying the new threshold.
        nlp = util.load_model(
            model,
            config=set_nested_item(
                filter_config(
                    nlp.config, config_keys_full, ".".join(config_keys_full)
                ).copy(),
                config_keys_full,
                threshold,
            ),
        )
        if hasattr(pipe, "cfg"):
            setattr(
                nlp.get_pipe(pipe_name),
                "cfg",
                set_nested_item(getattr(pipe, "cfg"), config_keys, threshold),
            )

        eval_scores = nlp.evaluate(dev_dataset)
        if scores_key not in eval_scores:
            wasabi.msg.fail(
                title=f"Failed to look up score `{scores_key}` in evaluation results.",
                text=f"Make sure you specified the correct value for `scores_key`. The following scores are "
                f"available: {list(eval_scores.keys())}",
                exits=1,
            )
        scores[threshold] = eval_scores[scores_key]

        if not isinstance(scores[threshold], (float, int)):
            wasabi.msg.fail(
                f"Returned score for key '{scores_key}' is not numeric. Threshold optimization only works for numeric "
                f"scores.",
                exits=1,
            )
        print(
            wasabi.row(
                [round(threshold, 3), round(scores[threshold], 3)],
                widths=table_col_widths,
            )
        )

    best_threshold = max(scores.keys(), key=(lambda key: scores[key]))

    # If all scores are identical, emit warning.
    if len(set(scores.values())) == 1:
        wasabi.msg.warn(
            title="All scores are identical. Verify that all settings are correct.",
            text=""
            if (
                not isinstance(pipe, MultiLabel_TextCategorizer)
                or scores_key in ("cats_macro_f", "cats_micro_f")
            )
            else "Use `cats_macro_f` or `cats_micro_f` when optimizing the threshold for `textcat_multilabel`.",
        )

    else:
        if not silent:
            print(
                f"\nBest threshold: {round(best_threshold, ndigits=4)} with {scores_key} value of {scores[best_threshold]}."
            )

    return best_threshold, scores[best_threshold], scores

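For orientation, a minimal sketch of calling the new API above directly instead of through the CLI; the pipeline path, data file and component name are placeholders, not part of the commit:

    from spacy.cli.find_threshold import find_threshold

    # hypothetical paths/names: any trained textcat_multilabel or spancat
    # pipeline plus a DocBin of annotated dev docs would work the same way
    best_threshold, best_score, all_scores = find_threshold(
        model="training/model-best",
        data_path="corpus/dev.spacy",
        pipe_name="textcat_multilabel",
        threshold_key="threshold",
        scores_key="cats_macro_f",
        silent=True,
    )
    print(best_threshold, best_score, all_scores)
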
@@ -90,6 +90,8 @@ dev_corpus = "corpora.dev"
train_corpus = "corpora.train"
# Optional callback before nlp object is saved to disk after training
before_to_disk = null
# Optional callback that is invoked at the start of each training step
before_update = null

[training.logger]
@loggers = "spacy.ConsoleLogger.v1"

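The new `before_update` setting defaults to `null`; to use it you would point it at a registered callback, mirroring how `before_to_disk` is wired up. A sketch under that assumption (the registry name `customize_before_update.v1` and the printed message are made up; the `step` and `epoch` keys come from the schema and docs further down):

    from typing import Any, Dict
    import spacy
    from spacy.language import Language

    @spacy.registry.callbacks("customize_before_update.v1")
    def create_before_update():
        def before_update(nlp: Language, args: Dict[str, Any]) -> None:
            # invoked once at the start of every training step
            print(f"starting step {args['step']} of epoch {args['epoch']}")
        return before_update

In the training config this would then be referenced as `[training.before_update]` with `@callbacks = "customize_before_update.v1"`.
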
@@ -956,6 +956,7 @@ class Errors(metaclass=ErrorsWithCodes):
             "sure it's overwritten on the subclass.")
    E1046 = ("{cls_name} is an abstract class and cannot be instantiated. If you are looking for spaCy's default "
             "knowledge base, use `InMemoryLookupKB`.")
    E1047 = ("`find_threshold()` only supports components with a `scorer` attribute.")


    # Deprecated model shortcuts, only used in errors and warnings

@@ -28,34 +28,39 @@ class RussianLemmatizer(Lemmatizer):
                from pymorphy2 import MorphAnalyzer
            except ImportError:
                raise ImportError(
                    "The Russian lemmatizer mode 'pymorphy2' requires the "
                    "pymorphy2 library. Install it with: pip install pymorphy2"
                    "The lemmatizer mode 'pymorphy2' requires the "
                    "pymorphy2 library and dictionaries. Install them with: "
                    "pip install pymorphy2"
                    "# for Ukrainian dictionaries:"
                    "pip install pymorphy2-dicts-uk"
                ) from None
            if getattr(self, "_morph", None) is None:
                self._morph = MorphAnalyzer()
        elif mode == "pymorphy3":
                self._morph = MorphAnalyzer(lang="ru")
        elif mode in {"pymorphy3", "pymorphy3_lookup"}:
            try:
                from pymorphy3 import MorphAnalyzer
            except ImportError:
                raise ImportError(
                    "The Russian lemmatizer mode 'pymorphy3' requires the "
                    "pymorphy3 library. Install it with: pip install pymorphy3"
                    "The lemmatizer mode 'pymorphy3' requires the "
                    "pymorphy3 library and dictionaries. Install them with: "
                    "pip install pymorphy3"
                    "# for Ukrainian dictionaries:"
                    "pip install pymorphy3-dicts-uk"
                ) from None
            if getattr(self, "_morph", None) is None:
                self._morph = MorphAnalyzer()
                self._morph = MorphAnalyzer(lang="ru")
        super().__init__(
            vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer
        )

    def pymorphy2_lemmatize(self, token: Token) -> List[str]:
    def _pymorphy_lemmatize(self, token: Token) -> List[str]:
        string = token.text
        univ_pos = token.pos_
        morphology = token.morph.to_dict()
        if univ_pos == "PUNCT":
            return [PUNCT_RULES.get(string, string)]
        if univ_pos not in ("ADJ", "DET", "NOUN", "NUM", "PRON", "PROPN", "VERB"):
            # Skip unchangeable pos
            return [string.lower()]
            return self._pymorphy_lookup_lemmatize(token)
        analyses = self._morph.parse(string)
        filtered_analyses = []
        for analysis in analyses:

@@ -63,8 +68,10 @@ class RussianLemmatizer(Lemmatizer):
                # Skip suggested parse variant for unknown word for pymorphy
                continue
            analysis_pos, _ = oc2ud(str(analysis.tag))
            if analysis_pos == univ_pos or (
                analysis_pos in ("NOUN", "PROPN") and univ_pos in ("NOUN", "PROPN")
            if (
                analysis_pos == univ_pos
                or (analysis_pos in ("NOUN", "PROPN") and univ_pos in ("NOUN", "PROPN"))
                or ((analysis_pos == "PRON") and (univ_pos == "DET"))
            ):
                filtered_analyses.append(analysis)
        if not len(filtered_analyses):

@@ -107,15 +114,27 @@ class RussianLemmatizer(Lemmatizer):
            dict.fromkeys([analysis.normal_form for analysis in filtered_analyses])
        )

    def pymorphy2_lookup_lemmatize(self, token: Token) -> List[str]:
    def _pymorphy_lookup_lemmatize(self, token: Token) -> List[str]:
        string = token.text
        analyses = self._morph.parse(string)
        if len(analyses) == 1:
            return [analyses[0].normal_form]
        # often multiple forms would derive from the same normal form
        # thus check _unique_ normal forms
        normal_forms = set([an.normal_form for an in analyses])
        if len(normal_forms) == 1:
            return [next(iter(normal_forms))]
        return [string]

    def pymorphy2_lemmatize(self, token: Token) -> List[str]:
        return self._pymorphy_lemmatize(token)

    def pymorphy2_lookup_lemmatize(self, token: Token) -> List[str]:
        return self._pymorphy_lookup_lemmatize(token)

    def pymorphy3_lemmatize(self, token: Token) -> List[str]:
        return self.pymorphy2_lemmatize(token)
        return self._pymorphy_lemmatize(token)

    def pymorphy3_lookup_lemmatize(self, token: Token) -> List[str]:
        return self._pymorphy_lookup_lemmatize(token)


def oc2ud(oc_tag: str) -> Tuple[str, Dict[str, str]]:

@@ -29,7 +29,7 @@ class UkrainianLemmatizer(RussianLemmatizer):
                ) from None
            if getattr(self, "_morph", None) is None:
                self._morph = MorphAnalyzer(lang="uk")
        elif mode == "pymorphy3":
        elif mode in {"pymorphy3", "pymorphy3_lookup"}:
            try:
                from pymorphy3 import MorphAnalyzer
            except ImportError:

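For context, a small sketch of selecting the new lookup mode from user code (it mirrors the test fixtures further down and assumes pymorphy3 and, for Ukrainian, pymorphy3-dicts-uk are installed):

    import spacy

    nlp = spacy.blank("uk")
    lemmatizer = nlp.add_pipe("lemmatizer", config={"mode": "pymorphy3_lookup"})
    doc = lemmatizer(nlp.make_doc("розповідають"))
    print([token.lemma_ for token in doc])  # ["розповідати"] according to the tests below
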
@@ -784,14 +784,6 @@ class Language:
                factory_name, source, name=name
            )
        else:
            if not self.has_factory(factory_name):
                err = Errors.E002.format(
                    name=factory_name,
                    opts=", ".join(self.factory_names),
                    method="add_pipe",
                    lang=util.get_object_name(self),
                    lang_code=self.lang,
                )
            pipe_component = self.create_pipe(
                factory_name,
                name=name,

@@ -1,4 +1,4 @@
# cython: infer_types=True, cython: profile=True
# cython: infer_types=True, profile=True
from typing import List, Iterable

from libcpp.vector cimport vector

@@ -1,7 +1,7 @@
from typing import List, Dict, Callable, Tuple, Optional, Iterable, Any, cast
from typing import List, Dict, Callable, Tuple, Optional, Iterable, Any
from thinc.api import Config, Model, get_current_ops, set_dropout_rate, Ops
from thinc.api import Optimizer
from thinc.types import Ragged, Ints2d, Floats2d, Ints1d
from thinc.types import Ragged, Ints2d, Floats2d

import numpy

@@ -293,7 +293,7 @@ class TextCategorizer(TrainablePipe):
        bp_scores(gradient)
        if sgd is not None:
            self.finish_update(sgd)
        losses[self.name] += (gradient ** 2).sum()
        losses[self.name] += (gradient**2).sum()
        return losses

    def _examples_to_truth(

@@ -327,7 +327,7 @@ class TextCategorizer(TrainablePipe):
        not_missing = self.model.ops.asarray(not_missing)  # type: ignore
        d_scores = scores - truths
        d_scores *= not_missing
        mean_square_error = (d_scores ** 2).mean()
        mean_square_error = (d_scores**2).mean()
        return float(mean_square_error), d_scores

    def add_label(self, label: str) -> int:

@@ -329,6 +329,7 @@ class ConfigSchemaTraining(BaseModel):
    frozen_components: List[str] = Field(..., title="Pipeline components that shouldn't be updated during training")
    annotating_components: List[str] = Field(..., title="Pipeline components that should set annotations during training")
    before_to_disk: Optional[Callable[["Language"], "Language"]] = Field(..., title="Optional callback to modify nlp object after training, before it's saved to disk")
    before_update: Optional[Callable[["Language", Dict[str, Any]], None]] = Field(..., title="Optional callback that is invoked at the start of each training step")
    # fmt: on

    class Config:

@@ -337,17 +337,17 @@ def ru_tokenizer():
    return get_lang_class("ru")().tokenizer


@pytest.fixture
@pytest.fixture(scope="session")
def ru_lemmatizer():
    pytest.importorskip("pymorphy3")
    return get_lang_class("ru")().add_pipe("lemmatizer")


@pytest.fixture
@pytest.fixture(scope="session")
def ru_lookup_lemmatizer():
    pytest.importorskip("pymorphy2")
    pytest.importorskip("pymorphy3")
    return get_lang_class("ru")().add_pipe(
        "lemmatizer", config={"mode": "pymorphy2_lookup"}
        "lemmatizer", config={"mode": "pymorphy3_lookup"}
    )

@@ -423,19 +423,19 @@ def uk_tokenizer():
    return get_lang_class("uk")().tokenizer


@pytest.fixture
@pytest.fixture(scope="session")
def uk_lemmatizer():
    pytest.importorskip("pymorphy3")
    pytest.importorskip("pymorphy3_dicts_uk")
    return get_lang_class("uk")().add_pipe("lemmatizer")


@pytest.fixture
@pytest.fixture(scope="session")
def uk_lookup_lemmatizer():
    pytest.importorskip("pymorphy2")
    pytest.importorskip("pymorphy2_dicts_uk")
    pytest.importorskip("pymorphy3")
    pytest.importorskip("pymorphy3_dicts_uk")
    return get_lang_class("uk")().add_pipe(
        "lemmatizer", config={"mode": "pymorphy2_lookup"}
        "lemmatizer", config={"mode": "pymorphy3_lookup"}
    )

@@ -81,6 +81,7 @@ def test_ru_lemmatizer_punct(ru_lemmatizer):


def test_ru_doc_lookup_lemmatization(ru_lookup_lemmatizer):
    assert ru_lookup_lemmatizer.mode == "pymorphy3_lookup"
    words = ["мама", "мыла", "раму"]
    pos = ["NOUN", "VERB", "NOUN"]
    morphs = [

@@ -92,3 +93,17 @@ def test_ru_doc_lookup_lemmatization(ru_lookup_lemmatizer):
    doc = ru_lookup_lemmatizer(doc)
    lemmas = [token.lemma_ for token in doc]
    assert lemmas == ["мама", "мыла", "раму"]


@pytest.mark.parametrize(
    "word,lemma",
    (
        ("бременем", "бремя"),
        ("будешь", "быть"),
        ("какая-то", "какой-то"),
    ),
)
def test_ru_lookup_lemmatizer(ru_lookup_lemmatizer, word, lemma):
    assert ru_lookup_lemmatizer.mode == "pymorphy3_lookup"
    doc = Doc(ru_lookup_lemmatizer.vocab, words=[word])
    assert ru_lookup_lemmatizer(doc)[0].lemma_ == lemma

@@ -8,12 +8,20 @@ pytestmark = pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_uk_lemmatizer(uk_lemmatizer):
    """Check that the default uk lemmatizer runs."""
    doc = Doc(uk_lemmatizer.vocab, words=["a", "b", "c"])
    assert uk_lemmatizer.mode == "pymorphy3"
    uk_lemmatizer(doc)
    assert [token.lemma for token in doc]


def test_uk_lookup_lemmatizer(uk_lookup_lemmatizer):
    """Check that the lookup uk lemmatizer runs."""
    doc = Doc(uk_lookup_lemmatizer.vocab, words=["a", "b", "c"])
    uk_lookup_lemmatizer(doc)
    assert [token.lemma for token in doc]
@pytest.mark.parametrize(
    "word,lemma",
    (
        ("якийсь", "якийсь"),
        ("розповідають", "розповідати"),
        ("розповіси", "розповісти"),
    ),
)
def test_uk_lookup_lemmatizer(uk_lookup_lemmatizer, word, lemma):
    assert uk_lookup_lemmatizer.mode == "pymorphy3_lookup"
    doc = Doc(uk_lookup_lemmatizer.vocab, words=[word])
    assert uk_lookup_lemmatizer(doc)[0].lemma_ == lemma

@@ -1,10 +1,12 @@
import os
import math
from collections import Counter
from typing import Tuple, List, Dict, Any
import pkg_resources
from random import sample
from typing import Counter
import time

import numpy
import pytest
import srsly
from click import NoSuchOption

@@ -17,6 +19,7 @@ from spacy.cli._util import is_subpath_of, load_project_config
from spacy.cli._util import parse_config_overrides, string_to_list
from spacy.cli._util import substitute_project_variables
from spacy.cli._util import validate_project_commands
from spacy.cli._util import upload_file, download_file
from spacy.cli.debug_data import _compile_gold, _get_labels_from_model
from spacy.cli.debug_data import _get_labels_from_spancat
from spacy.cli.debug_data import _get_distribution, _get_kl_divergence

@@ -30,11 +33,12 @@ from spacy.cli.package import _is_permitted_package_name
from spacy.cli.project.remote_storage import RemoteStorage
from spacy.cli.project.run import _check_requirements
from spacy.cli.validate import get_model_pkgs
from spacy.cli.find_threshold import find_threshold
from spacy.lang.en import English
from spacy.lang.nl import Dutch
from spacy.language import Language
from spacy.schemas import ProjectConfigSchema, RecommendationSchema, validate
from spacy.tokens import Doc
from spacy.tokens import Doc, DocBin
from spacy.tokens.span import Span
from spacy.training import Example, docs_to_json, offsets_to_biluo_tags
from spacy.training.converters import conll_ner_to_docs, conllu_to_docs

@@ -915,6 +919,122 @@ def test_local_remote_storage_pull_missing():
    assert remote.pull(filename) is None


def test_cli_find_threshold(capsys):
    thresholds = numpy.linspace(0, 1, 10)

    def make_examples(nlp: Language) -> List[Example]:
        docs: List[Example] = []

        for t in [
            (
                "I am angry and confused in the Bank of America.",
                {
                    "cats": {"ANGRY": 1.0, "CONFUSED": 1.0, "HAPPY": 0.0},
                    "spans": {"sc": [(31, 46, "ORG")]},
                },
            ),
            (
                "I am confused but happy in New York.",
                {
                    "cats": {"ANGRY": 0.0, "CONFUSED": 1.0, "HAPPY": 1.0},
                    "spans": {"sc": [(27, 35, "GPE")]},
                },
            ),
        ]:
            doc = nlp.make_doc(t[0])
            docs.append(Example.from_dict(doc, t[1]))

        return docs

    def init_nlp(
        components: Tuple[Tuple[str, Dict[str, Any]], ...] = ()
    ) -> Tuple[Language, List[Example]]:
        new_nlp = English()
        new_nlp.add_pipe(  # type: ignore
            factory_name="textcat_multilabel",
            name="tc_multi",
            config={"threshold": 0.9},
        )

        # Append additional components to pipeline.
        for cfn, comp_config in components:
            new_nlp.add_pipe(cfn, config=comp_config)

        new_examples = make_examples(new_nlp)
        new_nlp.initialize(get_examples=lambda: new_examples)
        for i in range(5):
            new_nlp.update(new_examples)

        return new_nlp, new_examples

    with make_tempdir() as docs_dir:
        # Check whether find_threshold() identifies lowest threshold above 0 as (first) ideal threshold, as this matches
        # the current model behavior with the examples above. This can break once the model behavior changes and serves
        # mostly as a smoke test.
        nlp, examples = init_nlp()
        DocBin(docs=[example.reference for example in examples]).to_disk(
            docs_dir / "docs.spacy"
        )
        with make_tempdir() as nlp_dir:
            nlp.to_disk(nlp_dir)
            res = find_threshold(
                model=nlp_dir,
                data_path=docs_dir / "docs.spacy",
                pipe_name="tc_multi",
                threshold_key="threshold",
                scores_key="cats_macro_f",
                silent=True,
            )
            assert res[0] != thresholds[0]
            assert thresholds[0] < res[0] < thresholds[9]
            assert res[1] == 1.0
            assert res[2][1.0] == 0.0

        # Test with spancat.
        nlp, _ = init_nlp((("spancat", {}),))
        with make_tempdir() as nlp_dir:
            nlp.to_disk(nlp_dir)
            res = find_threshold(
                model=nlp_dir,
                data_path=docs_dir / "docs.spacy",
                pipe_name="spancat",
                threshold_key="threshold",
                scores_key="spans_sc_f",
                silent=True,
            )
            assert res[0] != thresholds[0]
            assert thresholds[0] < res[0] < thresholds[8]
            assert res[1] >= 0.6
            assert res[2][1.0] == 0.0

        # Having multiple textcat_multilabel components should work, since the name has to be specified.
        nlp, _ = init_nlp((("textcat_multilabel", {}),))
        with make_tempdir() as nlp_dir:
            nlp.to_disk(nlp_dir)
            assert find_threshold(
                model=nlp_dir,
                data_path=docs_dir / "docs.spacy",
                pipe_name="tc_multi",
                threshold_key="threshold",
                scores_key="cats_macro_f",
                silent=True,
            )

        # Specifying the name of an non-existing pipe should fail.
        nlp, _ = init_nlp()
        with make_tempdir() as nlp_dir:
            nlp.to_disk(nlp_dir)
            with pytest.raises(AttributeError):
                find_threshold(
                    model=nlp_dir,
                    data_path=docs_dir / "docs.spacy",
                    pipe_name="_",
                    threshold_key="threshold",
                    scores_key="cats_macro_f",
                    silent=True,
                )


@pytest.mark.parametrize(
    "reqs,output",
    [

@@ -952,3 +1072,18 @@ def test_project_check_requirements(reqs, output):
        pkg_resources.require("spacyunknowndoesnotexist12345")
    except pkg_resources.DistributionNotFound:
        assert output == _check_requirements([req.strip() for req in reqs.split("\n")])


def test_upload_download_local_file():
    with make_tempdir() as d1, make_tempdir() as d2:
        filename = "f.txt"
        content = "content"
        local_file = d1 / filename
        remote_file = d2 / filename
        with local_file.open(mode="w") as file_:
            file_.write(content)
        upload_file(local_file, remote_file)
        local_file.unlink()
        download_file(remote_file, local_file)
        with local_file.open(mode="r") as file_:
            assert file_.read() == content

@@ -2,6 +2,7 @@ import random

import numpy
import pytest
import spacy
import srsly
from spacy.lang.en import English
from spacy.tokens import Doc, DocBin

@@ -11,9 +12,10 @@ from spacy.training import offsets_to_biluo_tags
from spacy.training.alignment_array import AlignmentArray
from spacy.training.align import get_alignments
from spacy.training.converters import json_to_docs
from spacy.training.loop import train_while_improving
from spacy.util import get_words_and_spaces, load_model_from_path, minibatch
from spacy.util import load_config_from_str
from thinc.api import compounding
from thinc.api import compounding, Adam

from ..util import make_tempdir

@@ -1112,3 +1114,39 @@ def test_retokenized_docs(doc):
        retokenizer.merge(doc1[0:2])
        retokenizer.merge(doc1[5:7])
    assert example.get_aligned("ORTH", as_string=True) == expected2


def test_training_before_update(doc):
    def before_update(nlp, args):
        assert args["step"] == 0
        assert args["epoch"] == 1

        # Raise an error here as the rest of the loop
        # will not run to completion due to uninitialized
        # models.
        raise ValueError("ran_before_update")

    def generate_batch():
        yield 1, [Example(doc, doc)]

    nlp = spacy.blank("en")
    nlp.add_pipe("tagger")
    optimizer = Adam()
    generator = train_while_improving(
        nlp,
        optimizer,
        generate_batch(),
        lambda: None,
        dropout=0.1,
        eval_frequency=100,
        accumulate_gradient=10,
        patience=10,
        max_steps=100,
        exclude=[],
        annotating_components=[],
        before_update=before_update,
    )

    with pytest.raises(ValueError, match="ran_before_update"):
        for _ in generator:
            pass

@@ -1,8 +1,13 @@
import os

import pytest
from spacy.attrs import IS_ALPHA, LEMMA, ORTH
from spacy.lang.en import English
from spacy.parts_of_speech import NOUN, VERB
from spacy.vocab import Vocab

from ..util import make_tempdir


@pytest.mark.issue(1868)
def test_issue1868():

@@ -59,3 +64,19 @@ def test_vocab_api_contains(en_vocab, text):
def test_vocab_writing_system(en_vocab):
    assert en_vocab.writing_system["direction"] == "ltr"
    assert en_vocab.writing_system["has_case"] is True


def test_to_disk():
    nlp = English()
    with make_tempdir() as d:
        nlp.vocab.to_disk(d)
        assert "vectors" in os.listdir(d)
        assert "lookups.bin" in os.listdir(d)


def test_to_disk_exclude():
    nlp = English()
    with make_tempdir() as d:
        nlp.vocab.to_disk(d, exclude=("vectors", "lookups"))
        assert "vectors" not in os.listdir(d)
        assert "lookups.bin" not in os.listdir(d)

@@ -59,6 +59,7 @@ def train(
    batcher = T["batcher"]
    train_logger = T["logger"]
    before_to_disk = create_before_to_disk_callback(T["before_to_disk"])
    before_update = T["before_update"]

    # Helper function to save checkpoints. This is a closure for convenience,
    # to avoid passing in all the args all the time.

@@ -89,6 +90,7 @@ def train(
        eval_frequency=T["eval_frequency"],
        exclude=frozen_components,
        annotating_components=annotating_components,
        before_update=before_update,
    )
    clean_output_dir(output_path)
    stdout.write(msg.info(f"Pipeline: {nlp.pipe_names}") + "\n")

@@ -150,6 +152,7 @@ def train_while_improving(
    max_steps: int,
    exclude: List[str],
    annotating_components: List[str],
    before_update: Optional[Callable[["Language", Dict[str, Any]], None]],
):
    """Train until an evaluation stops improving. Works as a generator,
    with each iteration yielding a tuple `(batch, info, is_best_checkpoint)`,

@@ -198,6 +201,9 @@ def train_while_improving(
    words_seen = 0
    start_time = timer()
    for step, (epoch, batch) in enumerate(train_data):
        if before_update:
            before_update_args = {"step": step, "epoch": epoch}
            before_update(nlp, before_update_args)
        dropout = next(dropouts)  # type: ignore
        for subbatch in subdivide_batch(batch, accumulate_gradient):
            nlp.update(

@@ -468,9 +468,9 @@ cdef class Vocab:
        setters = ["strings", "vectors"]
        if "strings" not in exclude:
            self.strings.to_disk(path / "strings.json")
        if "vectors" not in "exclude":
        if "vectors" not in exclude:
            self.vectors.to_disk(path, exclude=["strings"])
        if "lookups" not in "exclude":
        if "lookups" not in exclude:
            self.lookups.to_disk(path)

    def from_disk(self, path, *, exclude=tuple()):

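The hunk above fixes a typo where the string literal `"exclude"` was tested instead of the `exclude` argument, so vectors and lookups were always written to disk. A minimal sketch of the intended behavior, matching the new tests earlier in this commit (the output directory name is a placeholder):

    from spacy.lang.en import English

    nlp = English()
    # with the fix, neither "vectors" nor "lookups.bin" is written to the directory
    nlp.vocab.to_disk("vocab_dir", exclude=("vectors", "lookups"))
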
@@ -12,6 +12,7 @@ menu:
  - ['train', 'train']
  - ['pretrain', 'pretrain']
  - ['evaluate', 'evaluate']
  - ['find-threshold', 'find-threshold']
  - ['assemble', 'assemble']
  - ['package', 'package']
  - ['project', 'project']

@@ -1162,6 +1163,46 @@ $ python -m spacy evaluate [model] [data_path] [--output] [--code] [--gold-prepr
| `--help`, `-h` | Show help message and available arguments. ~~bool (flag)~~ |
| **CREATES**    | Training results and optional metrics and visualizations.  |

## find-threshold {#find-threshold new="3.5" tag="command"}

Runs prediction trials for a trained model with varying thresholds to maximize
the specified metric. The search space for the threshold is traversed linearly
from 0 to 1 in `n_trials` steps. Results are displayed in a table on `stdout`
(the corresponding API call to `spacy.cli.find_threshold.find_threshold()`
returns all results).

This is applicable only for components whose predictions are influenced by
thresholds - e.g. `textcat_multilabel` and `spancat`, but not `textcat`. Note
that the full path to the corresponding threshold attribute in the config has to
be provided.

> #### Examples
>
> ```cli
> # For textcat_multilabel:
> $ python -m spacy find-threshold my_nlp data.spacy textcat_multilabel threshold cats_macro_f
> ```
>
> ```cli
> # For spancat:
> $ python -m spacy find-threshold my_nlp data.spacy spancat threshold spans_sc_f
> ```

| Name                    | Description                                                                                                                                                                           |
| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `model`                 | Pipeline to evaluate. Can be a package or a path to a data directory. ~~str (positional)~~                                                                                           |
| `data_path`             | Path to file with DocBin with docs to use for threshold search. ~~Path (positional)~~                                                                                                |
| `pipe_name`             | Name of pipe to examine thresholds for. ~~str (positional)~~                                                                                                                         |
| `threshold_key`         | Key of threshold attribute in component's configuration. ~~str (positional)~~                                                                                                        |
| `scores_key`            | Name of the score metric to optimize. ~~str (positional)~~                                                                                                                           |
| `--n_trials`, `-n`      | Number of trials to determine optimal thresholds. ~~int (option)~~                                                                                                                   |
| `--code`, `-c`          | Path to Python file with additional code to be imported. Allows [registering custom functions](/usage/training#custom-functions) for new architectures. ~~Optional[Path] \(option)~~ |
| `--gpu-id`, `-g`        | GPU to use, if any. Defaults to `-1` for CPU. ~~int (option)~~                                                                                                                       |
| `--gold-preproc`, `-G`  | Use gold preprocessing. ~~bool (flag)~~                                                                                                                                              |
| `--silent`, `-V`, `-VV` | Display more information for debugging purposes. ~~bool (flag)~~                                                                                                                     |
| `--help`, `-h`          | Show help message and available arguments. ~~bool (flag)~~                                                                                                                           |

## assemble {#assemble tag="command"}

Assemble a pipeline from a config file without additional training. Expects a

@@ -186,6 +186,7 @@ process that are used when you run [`spacy train`](/api/cli#train).
| `accumulate_gradient` | Whether to divide the batch up into substeps. Defaults to `1`. ~~int~~ |
| `batcher` | Callable that takes an iterator of [`Doc`](/api/doc) objects and yields batches of `Doc`s. Defaults to [`batch_by_words`](/api/top-level#batch_by_words). ~~Callable[[Iterator[Doc], Iterator[List[Doc]]]]~~ |
| `before_to_disk` | Optional callback to modify `nlp` object right before it is saved to disk during and after training. Can be used to remove or reset config values or disable components. Defaults to `null`. ~~Optional[Callable[[Language], Language]]~~ |
| `before_update` | Optional callback that is invoked at the start of each training step with the `nlp` object and a `Dict` containing the following entries: `step`, `epoch`. Can be used to make deferred changes to components. Defaults to `null`. ~~Optional[Callable[[Language, Dict[str, Any]], None]]~~ |
| `dev_corpus` | Dot notation of the config location defining the dev corpus. Defaults to `corpora.dev`. ~~str~~ |
| `dropout` | The dropout rate. Defaults to `0.1`. ~~float~~ |
| `eval_frequency` | How often to evaluate during training (steps). Defaults to `200`. ~~int~~ |

@@ -50,7 +50,7 @@ modified later.
| _keyword-only_ | |
| `strings` | The string store. A new string store is created if one is not provided. Defaults to `None`. ~~Optional[StringStore]~~ |
| `shape` | Size of the table as `(n_entries, n_columns)`, the number of entries and number of columns. Not required if you're initializing the object with `data` and `keys`. ~~Tuple[int, int]~~ |
| `data` | The vector data. ~~numpy.ndarray[ndim=1, dtype=float32]~~ |
| `data` | The vector data. ~~numpy.ndarray[ndim=2, dtype=float32]~~ |
| `keys` | A sequence of keys aligned with the data. ~~Iterable[Union[str, int]]~~ |
| `name` | A name to identify the vectors table. ~~str~~ |
| `mode` <Tag variant="new">3.2</Tag> | Vectors mode: `"default"` or [`"floret"`](https://github.com/explosion/floret) (default: `"default"`). ~~str~~ |

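The corrected type reflects that `data` is a two-dimensional table with one row per key. A minimal sketch with made-up toy dimensions and keys (not taken from the docs):

    import numpy
    from spacy.vectors import Vectors

    data = numpy.zeros((3, 4), dtype="f")  # three 4-dimensional vectors
    keys = ["cat", "dog", "rat"]           # one key per row of `data`
    vectors = Vectors(data=data, keys=keys)
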
@@ -461,37 +461,6 @@
            },
            "category": ["standalone"]
        },
        {
            "id": "spikex",
            "title": "SpikeX - SpaCy Pipes for Knowledge Extraction",
            "slogan": "Use SpikeX to build knowledge extraction tools with almost-zero effort",
            "description": "SpikeX is a collection of pipes ready to be plugged in a spaCy pipeline. It aims to help in building knowledge extraction tools with almost-zero effort.",
            "github": "erre-quadro/spikex",
            "pip": "spikex",
            "code_example": [
                "from spacy import load as spacy_load",
                "from spikex.wikigraph import load as wg_load",
                "from spikex.pipes import WikiPageX",
                "",
                "# load a spacy model and get a doc",
                "nlp = spacy_load('en_core_web_sm')",
                "doc = nlp('An apple a day keeps the doctor away')",
                "# load a WikiGraph",
                "wg = wg_load('simplewiki_core')",
                "# get a WikiPageX and extract all pages",
                "wikipagex = WikiPageX(wg)",
                "doc = wikipagex(doc)",
                "# see all pages extracted from the doc",
                "for span in doc._.wiki_spans:",
                " print(span._.wiki_pages)"
            ],
            "category": ["pipeline", "standalone"],
            "author": "Erre Quadro",
            "author_links": {
                "github": "erre-quadro",
                "website": "https://www.errequadrosrl.com"
            }
        },
        {
            "id": "spacy-dbpedia-spotlight",
            "title": "DBpedia Spotlight for SpaCy",

@@ -2024,17 +1993,6 @@
            },
            "category": ["books"]
        },
        {
            "type": "education",
            "id": "learning-path-spacy",
            "title": "Learning Path: Mastering spaCy for Natural Language Processing",
            "slogan": "O'Reilly, 2017",
            "description": "spaCy, a fast, user-friendly library for teaching computers to understand text, simplifies NLP techniques, such as speech tagging and syntactic dependencies, so you can easily extract information, attributes, and objects from massive amounts of text to then document, measure, and analyze. This Learning Path is a hands-on introduction to using spaCy to discover insights through natural language processing. While end-to-end natural language processing solutions can be complex, you’ll learn the linguistics, algorithms, and machine learning skills to get the job done.",
            "url": "https://www.safaribooksonline.com/library/view/learning-path-mastering/9781491986653/",
            "thumb": "https://i.imgur.com/9MIgMAc.jpg",
            "author": "Aaron Kramer",
            "category": ["courses"]
        },
        {
            "type": "education",
            "id": "introduction-into-spacy-3",
