import re
from pathlib import Path
from typing import Any, Dict, List, Optional, Union

import srsly
from thinc.api import fix_random_seed
from wasabi import Printer

from .. import displacy, util
from ..scorer import Scorer
from ..tokens import Doc
from ..training import Corpus
from ._util import Arg, Opt, app, benchmark_cli, import_code, setup_gpu


@benchmark_cli.command(
    "accuracy",
)
@app.command("evaluate")
def evaluate_cli(
    # fmt: off
    model: str = Arg(..., help="Model name or path"),
    data_path: Path = Arg(..., help="Location of binary evaluation data in .spacy format", exists=True),
    output: Optional[Path] = Opt(None, "--output", "-o", help="Output JSON file for metrics", dir_okay=False),
    code_path: Optional[Path] = Opt(None, "--code", "-c", help="Path to Python file with additional code (registered functions) to be imported"),
    use_gpu: int = Opt(-1, "--gpu-id", "-g", help="GPU ID or -1 for CPU"),
    gold_preproc: bool = Opt(False, "--gold-preproc", "-G", help="Use gold preprocessing"),
    displacy_path: Optional[Path] = Opt(None, "--displacy-path", "-dp", help="Directory to output rendered parses as HTML", exists=True, file_okay=False),
    displacy_limit: int = Opt(25, "--displacy-limit", "-dl", help="Limit of parses to render as HTML"),
    per_component: bool = Opt(False, "--per-component", "-P", help="Return scores per component; only applicable when an output JSON file is specified."),
    spans_key: str = Opt("sc", "--spans-key", "-sk", help="Spans key to use when evaluating Doc.spans"),
    # fmt: on
):
    """
    Evaluate a trained pipeline. Expects a loadable spaCy pipeline and evaluation
    data in the binary .spacy format. The --gold-preproc option sets up the
    evaluation examples with gold-standard sentences and tokens for the
    predictions. Gold preprocessing helps the annotations align to the
    tokenization, and may result in sequences of more consistent length. However,
    it may reduce runtime accuracy due to train/test skew. To render a sample of
    dependency parses in an HTML file, pass an output directory as the
    displacy_path argument.

    DOCS: https://spacy.io/api/cli#benchmark-accuracy
    """
    import_code(code_path)
    evaluate(
        model,
        data_path,
        output=output,
        use_gpu=use_gpu,
        gold_preproc=gold_preproc,
        displacy_path=displacy_path,
        displacy_limit=displacy_limit,
        per_component=per_component,
        silent=False,
        spans_key=spans_key,
    )


def evaluate(
    model: str,
    data_path: Path,
    output: Optional[Path] = None,
    use_gpu: int = -1,
    gold_preproc: bool = False,
    displacy_path: Optional[Path] = None,
    displacy_limit: int = 25,
    silent: bool = True,
    spans_key: str = "sc",
    per_component: bool = False,
) -> Dict[str, Any]:
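    # Programmatic counterpart of evaluate_cli: loads the pipeline, scores the
    # .spacy corpus, optionally renders displaCy HTML and writes a metrics JSON.
    # Minimal sketch of a direct call (illustrative paths):
    #   from spacy.cli.evaluate import evaluate
    #   scores = evaluate("./model-best", Path("./corpus/dev.spacy"), silent=False)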
    msg = Printer(no_print=silent, pretty=not silent)
    fix_random_seed()
    setup_gpu(use_gpu, silent=silent)
    data_path = util.ensure_path(data_path)
    output_path = util.ensure_path(output)
    displacy_path = util.ensure_path(displacy_path)
    if not data_path.exists():
        msg.fail("Evaluation data not found", data_path, exits=1)
    if displacy_path and not displacy_path.exists():
        msg.fail("Visualization output directory not found", displacy_path, exits=1)
    corpus = Corpus(data_path, gold_preproc=gold_preproc)
    nlp = util.load_model(model)
    dev_dataset = list(corpus(nlp))
    scores = nlp.evaluate(dev_dataset, per_component=per_component)
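    # `scores` maps score names (e.g. "token_acc", "ents_f") to values; with
    # per_component=True, Language.evaluate keys the scores by component name.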
    if per_component:
        data = scores
        if output is None:
            msg.warn(
                "The per-component option is enabled but there is no output JSON file provided to save the scores to."
            )
        else:
            msg.info("Per-component scores will be saved to output JSON file.")
    else:
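        # Map human-readable table labels to the Scorer's score keys; the span
        # metrics are parameterized by the configured spans_key.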
        metrics = {
            "TOK": "token_acc",
            "TAG": "tag_acc",
            "POS": "pos_acc",
            "MORPH": "morph_acc",
            "LEMMA": "lemma_acc",
            "UAS": "dep_uas",
            "LAS": "dep_las",
            "NER P": "ents_p",
            "NER R": "ents_r",
            "NER F": "ents_f",
            "TEXTCAT": "cats_score",
            "SENT P": "sents_p",
            "SENT R": "sents_r",
            "SENT F": "sents_f",
            "SPAN P": f"spans_{spans_key}_p",
            "SPAN R": f"spans_{spans_key}_r",
            "SPAN F": f"spans_{spans_key}_f",
            "SPEED": "speed",
        }
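        # Format the table values: "speed" is shown as-is, other numeric scores
        # as percentages; raw values are collected in `data` for the JSON output.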
        results = {}
        data = {}
        for metric, key in metrics.items():
            if key in scores:
                if key == "cats_score":
                    metric = metric + " (" + scores.get("cats_score_desc", "unk") + ")"
                if isinstance(scores[key], (int, float)):
                    if key == "speed":
                        results[metric] = f"{scores[key]:.0f}"
                    else:
                        results[metric] = f"{scores[key]*100:.2f}"
                else:
                    results[metric] = "-"
                data[re.sub(r"[\s/]", "_", key.lower())] = scores[key]

        msg.table(results, title="Results")
        data = handle_scores_per_type(scores, data, spans_key=spans_key, silent=silent)

    if displacy_path:
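        # Re-run the pipeline on the reference texts and pick which displaCy
        # views to render based on the factories present in the pipeline.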
        factory_names = [nlp.get_pipe_meta(pipe).factory for pipe in nlp.pipe_names]
        docs = list(nlp.pipe(ex.reference.text for ex in dev_dataset[:displacy_limit]))
        render_deps = "parser" in factory_names
        render_ents = "ner" in factory_names
        render_spans = "spancat" in factory_names

        render_parses(
            docs,
            displacy_path,
            model_name=model,
            limit=displacy_limit,
            deps=render_deps,
            ents=render_ents,
            spans=render_spans,
        )
        msg.good(f"Generated {displacy_limit} parses as HTML", displacy_path)

    if output_path is not None:
        srsly.write_json(output_path, data)
        msg.good(f"Saved results to {output_path}")
    return data


def handle_scores_per_type(
    scores: Dict[str, Any],
    data: Dict[str, Any] = {},
    *,
    spans_key: str = "sc",
    silent: bool = False,
) -> Dict[str, Any]:
    msg = Printer(no_print=silent, pretty=not silent)
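    # Print per-type P/R/F (and textcat AUC) tables where available and copy
    # each breakdown into `data` so it ends up in the JSON output.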
    if "morph_per_feat" in scores:
        if scores["morph_per_feat"]:
            print_prf_per_type(msg, scores["morph_per_feat"], "MORPH", "feat")
        data["morph_per_feat"] = scores["morph_per_feat"]
    if "dep_las_per_type" in scores:
        if scores["dep_las_per_type"]:
            print_prf_per_type(msg, scores["dep_las_per_type"], "LAS", "type")
        data["dep_las_per_type"] = scores["dep_las_per_type"]
if "ents_per_type" in scores:
|
|
|
|
if scores["ents_per_type"]:
|
2020-10-19 14:18:47 +03:00
|
|
|
print_prf_per_type(msg, scores["ents_per_type"], "NER", "type")
|
2020-10-19 13:07:46 +03:00
|
|
|
data["ents_per_type"] = scores["ents_per_type"]
|
2021-06-24 13:35:27 +03:00
|
|
|
if f"spans_{spans_key}_per_type" in scores:
|
|
|
|
if scores[f"spans_{spans_key}_per_type"]:
|
2021-06-28 12:48:00 +03:00
|
|
|
print_prf_per_type(
|
|
|
|
msg, scores[f"spans_{spans_key}_per_type"], "SPANS", "type"
|
|
|
|
)
|
2021-06-24 13:35:27 +03:00
|
|
|
data[f"spans_{spans_key}_per_type"] = scores[f"spans_{spans_key}_per_type"]
|
2020-07-27 12:17:52 +03:00
|
|
|
if "cats_f_per_type" in scores:
|
|
|
|
if scores["cats_f_per_type"]:
|
2020-10-19 14:18:47 +03:00
|
|
|
print_prf_per_type(msg, scores["cats_f_per_type"], "Textcat F", "label")
|
2020-10-19 13:07:46 +03:00
|
|
|
data["cats_f_per_type"] = scores["cats_f_per_type"]
|
2020-07-27 12:17:52 +03:00
|
|
|
if "cats_auc_per_type" in scores:
|
|
|
|
if scores["cats_auc_per_type"]:
|
|
|
|
print_textcats_auc_per_cat(msg, scores["cats_auc_per_type"])
|
2020-10-19 13:07:46 +03:00
|
|
|
data["cats_auc_per_type"] = scores["cats_auc_per_type"]
|
2021-07-06 14:02:37 +03:00
|
|
|
return scores


def render_parses(
    docs: List[Doc],
    output_path: Path,
    model_name: str = "",
    limit: int = 250,
    deps: bool = True,
    ents: bool = True,
    spans: bool = True,
):
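    # Write each requested visualization as a standalone HTML page
    # (entities.html, parses.html, spans.html) into output_path.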
    docs[0].user_data["title"] = model_name
    if ents:
        html = displacy.render(docs[:limit], style="ent", page=True)
        with (output_path / "entities.html").open("w", encoding="utf8") as file_:
            file_.write(html)
    if deps:
        html = displacy.render(
            docs[:limit], style="dep", page=True, options={"compact": True}
        )
        with (output_path / "parses.html").open("w", encoding="utf8") as file_:
            file_.write(html)

    if spans:
        html = displacy.render(docs[:limit], style="span", page=True)
        with (output_path / "spans.html").open("w", encoding="utf8") as file_:
            file_.write(html)


def print_prf_per_type(
    msg: Printer, scores: Dict[str, Dict[str, float]], name: str, type: str
) -> None:
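    # Build one row per label with P/R/F rendered as percentages, then print
    # the rows as a table titled "<name> (per <type>)".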
    data = []
    for key, value in scores.items():
        row = [key]
        for k in ("p", "r", "f"):
            v = value[k]
            row.append(f"{v * 100:.2f}" if isinstance(v, (int, float)) else v)
        data.append(row)
    msg.table(
        data,
        header=("", "P", "R", "F"),
        aligns=("l", "r", "r", "r"),
        title=f"{name} (per {type})",
    )


def print_textcats_auc_per_cat(
    msg: Printer, scores: Dict[str, Dict[str, float]]
) -> None:
    msg.table(
        [
            (k, f"{v:.2f}" if isinstance(v, (float, int)) else v)
            for k, v in scores.items()
        ],
        header=("", "ROC AUC"),
        aligns=("l", "r"),
        title="Textcat ROC AUC (per label)",
    )