Restore spacy evaluate, make spacy benchmark speed an alias

Daniël de Kok 2022-12-13 17:16:14 +01:00
parent e09b00fe30
commit 05ae048e75
5 changed files with 15 additions and 15 deletions
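
Taken together, the diffs below rename the "evaluate" Typer group to "benchmark": the accuracy command is exposed both as top-level "spacy evaluate" and as "spacy benchmark accuracy", while the speed benchmark lands under "spacy benchmark speed". The dual registration works because Typer's command decorators register the callback and return it unchanged, so one function can be attached to both the root app and a sub-app. A minimal standalone sketch of the pattern (illustrative names, not spaCy's actual modules):

import typer

app = typer.Typer(name="spacy")
benchmark_cli = typer.Typer(name="benchmark", no_args_is_help=True)
app.add_typer(benchmark_cli)


@benchmark_cli.command("accuracy")  # reachable as `spacy benchmark accuracy` ...
@app.command("evaluate")            # ... and as plain `spacy evaluate`
def evaluate_cli(model: str = typer.Argument(..., help="Model name or path")) -> None:
    typer.echo(f"evaluating {model}")


if __name__ == "__main__":
    app()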

View File

@@ -4,6 +4,7 @@ from ._util import app, setup_cli  # noqa: F401
 # These are the actual functions, NOT the wrapped CLI commands. The CLI commands
 # are registered automatically and won't have to be imported here.
+from .benchmark_speed import benchmark_speed_cli  # noqa: F401
 from .download import download  # noqa: F401
 from .info import info  # noqa: F401
 from .package import package  # noqa: F401
@@ -15,8 +16,7 @@ from .debug_data import debug_data  # noqa: F401
 from .debug_config import debug_config  # noqa: F401
 from .debug_model import debug_model  # noqa: F401
 from .debug_diff import debug_diff  # noqa: F401
-from .evaluate_accuracy import evaluate  # noqa: F401
-from .evaluate_speed import evaluate_cli  # noqa: F401
+from .evaluate import evaluate  # noqa: F401
 from .convert import convert  # noqa: F401
 from .init_pipeline import init_pipeline_cli  # noqa: F401
 from .init_config import init_config, fill_config  # noqa: F401

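The noqa: F401 re-exports above keep the public import path stable even though the underlying module changed; for instance, this import (assuming an installed spaCy that includes this commit) still resolves to the plain evaluation function rather than the wrapped CLI command:

from spacy.cli import evaluate  # the function itself, not the Typer command
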
View File

@@ -46,7 +46,7 @@ DEBUG_HELP = """Suite of helpful commands for debugging and profiling. Includes
 commands to check and validate your config files, training and evaluation data,
 and custom model implementations.
 """
-EVALUATE_HELP = """Commands for evaluating pipelines."""
+BENCHMARK_HELP = """Commands for benchmarking pipelines."""
 INIT_HELP = """Commands for initializing configs and pipeline packages."""
 # Wrappers for Typer's annotations. Initially created to set defaults and to
@@ -55,14 +55,14 @@ Arg = typer.Argument
 Opt = typer.Option
 app = typer.Typer(name=NAME, help=HELP)
+benchmark_cli = typer.Typer(name="benchmark", help=BENCHMARK_HELP, no_args_is_help=True)
 project_cli = typer.Typer(name="project", help=PROJECT_HELP, no_args_is_help=True)
 debug_cli = typer.Typer(name="debug", help=DEBUG_HELP, no_args_is_help=True)
-evaluate_cli = typer.Typer(name="evaluate", help=EVALUATE_HELP, no_args_is_help=True)
 init_cli = typer.Typer(name="init", help=INIT_HELP, no_args_is_help=True)
 app.add_typer(project_cli)
 app.add_typer(debug_cli)
-app.add_typer(evaluate_cli)
+app.add_typer(benchmark_cli)
 app.add_typer(init_cli)
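
A side note on no_args_is_help=True above: running the bare group (e.g. "spacy benchmark" with no subcommand) prints the group's usage text instead of exiting with an error. A small standalone illustration using Typer's test runner (assumed names, not spaCy's code):

import typer
from typer.testing import CliRunner

app = typer.Typer()
benchmark_cli = typer.Typer(name="benchmark", no_args_is_help=True)
app.add_typer(benchmark_cli)


@benchmark_cli.command("speed")
def speed() -> None:
    typer.echo("ok")


result = CliRunner().invoke(app, ["benchmark"])
assert "Usage" in result.output  # help text rather than an error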

View File

@@ -11,14 +11,14 @@ from .. import util
 from ..language import Language
 from ..tokens import Doc
 from ..training import Corpus
-from ._util import Arg, Opt, evaluate_cli, setup_gpu
+from ._util import Arg, Opt, benchmark_cli, setup_gpu
 
 
-@evaluate_cli.command(
+@benchmark_cli.command(
     "speed",
     context_settings={"allow_extra_args": True, "ignore_unknown_options": True},
 )
-def benchmark_cli(
+def benchmark_speed_cli(
     ctx: typer.Context,
     model: str = Arg(..., help="Model name or path"),
     data_path: Path = Arg(
View File

@@ -4,20 +4,20 @@ from pathlib import Path
 import re
 import srsly
 from thinc.api import fix_random_seed
 import typer
 
 from ..training import Corpus
 from ..tokens import Doc
-from ._util import Arg, Opt, evaluate_cli, setup_gpu, import_code
+from ._util import app, Arg, Opt, setup_gpu, import_code, benchmark_cli
 from ..scorer import Scorer
 from .. import util
 from .. import displacy
 
 
-@evaluate_cli.command(
-    "accuracy",
-    context_settings={"allow_extra_args": True, "ignore_unknown_options": True},
-)
-def accuracy_cli(
+@benchmark_cli.command(
+    "accuracy",
+)
+@app.command("evaluate")
+def evaluate_cli(
     # fmt: off
     ctx: typer.Context,
     model: str = Arg(..., help="Model name or path"),
     data_path: Path = Arg(..., help="Location of binary evaluation data in .spacy format", exists=True),
     output: Optional[Path] = Opt(None, "--output", "-o", help="Output JSON file for metrics", dir_okay=False),

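The stacked decorators above are the entire alias mechanism: spacy evaluate and spacy benchmark accuracy now share a single callback. A hypothetical smoke test of that behavior (standalone, not part of spaCy's test suite):

import typer
from typer.testing import CliRunner

app = typer.Typer(name="spacy")
benchmark_cli = typer.Typer(name="benchmark")
app.add_typer(benchmark_cli)


@benchmark_cli.command("accuracy")
@app.command("evaluate")
def evaluate_cli(model: str = typer.Argument(...)) -> None:
    typer.echo(f"evaluating {model}")


runner = CliRunner()
old_path = runner.invoke(app, ["evaluate", "some_model"]).output
new_path = runner.invoke(app, ["benchmark", "accuracy", "some_model"]).output
assert old_path == new_path == "evaluating some_model\n"
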
View File

@@ -8,7 +8,7 @@ from wasabi import msg
 import spacy
 from spacy import util
-from spacy.cli.evaluate_accuracy import print_prf_per_type, print_textcats_auc_per_cat
+from spacy.cli.evaluate import print_prf_per_type, print_textcats_auc_per_cat
 from spacy.lang.en import English
 from spacy.language import Language
 from spacy.pipeline import TextCategorizer