2017-10-01 22:04:32 +03:00
|
|
|
from timeit import default_timer as timer
|
2019-11-04 04:38:45 +03:00
|
|
|
from wasabi import msg
|
2017-10-01 22:04:32 +03:00
|
|
|
|
2017-10-27 15:38:39 +03:00
|
|
|
from ..gold import GoldCorpus
|
2017-10-01 22:04:32 +03:00
|
|
|
from .. import util
|
|
|
|
from .. import displacy
|
2017-10-27 15:38:39 +03:00
|
|
|
|
2017-10-01 22:04:32 +03:00
|
|
|
|
2018-11-30 22:16:14 +03:00
|
|
|
def evaluate(
    # fmt: off
    model: ("Model name or path", "positional", None, str),
    data_path: ("Location of JSON-formatted evaluation data", "positional", None, str),
    gpu_id: ("Use GPU", "option", "g", int) = -1,
    gold_preproc: ("Use gold preprocessing", "flag", "G", bool) = False,
    displacy_path: ("Directory to output rendered parses as HTML", "option", "dp", str) = None,
    displacy_limit: ("Limit of parses to render as HTML", "option", "dl", int) = 25,
    return_scores: ("Return dict containing model scores", "flag", "R", bool) = False,
    # fmt: on
):
    """
    Evaluate a model. To render a sample of parses in a HTML file, set an
    output directory as the displacy_path argument.

    model (str): Loadable model name/path, or "blank:<lang>" for a blank
        pipeline of the given language.
    data_path (str): Path to the JSON-formatted evaluation data.
    gpu_id (int): GPU device ID, or -1 for CPU.
    gold_preproc (bool): Use gold-standard preprocessing for the dev set.
    displacy_path (str): If set, directory to write rendered parses to.
    displacy_limit (int): Maximum number of parses to render as HTML.
    return_scores (bool): If True, return the scorer's scores dict.

    RETURNS (dict / None): The scores dict if return_scores is set.
    """
    util.fix_random_seed()
    if gpu_id >= 0:
        util.use_gpu(gpu_id)
    util.set_env_log(False)
    data_path = util.ensure_path(data_path)
    displacy_path = util.ensure_path(displacy_path)
    if not data_path.exists():
        msg.fail("Evaluation data not found", data_path, exits=1)
    if displacy_path and not displacy_path.exists():
        msg.fail("Visualization output directory not found", displacy_path, exits=1)
    corpus = GoldCorpus(data_path, data_path)
    if model.startswith("blank:"):
        # "blank:xx" means: instantiate an empty pipeline for language "xx".
        nlp = util.get_lang_class(model.replace("blank:", ""))()
    else:
        nlp = util.load_model(model)
    # Materialize the dev set so it can be both scored and re-used below.
    dev_dataset = list(corpus.dev_dataset(nlp, gold_preproc=gold_preproc))
    begin = timer()
    scorer = nlp.evaluate(dev_dataset, verbose=False)
    end = timer()
    nwords = sum(len(ex.doc) for ex in dev_dataset)
    results = {
        "Time": f"{end - begin:.2f} s",
        "Words": nwords,
        "Words/s": f"{nwords / (end - begin):.0f}",
        "TOK": f"{scorer.token_acc:.2f}",
        "TAG": f"{scorer.tags_acc:.2f}",
        "POS": f"{scorer.pos_acc:.2f}",
        "MORPH": f"{scorer.morphs_acc:.2f}",
        "UAS": f"{scorer.uas:.2f}",
        "LAS": f"{scorer.las:.2f}",
        "NER P": f"{scorer.ents_p:.2f}",
        "NER R": f"{scorer.ents_r:.2f}",
        "NER F": f"{scorer.ents_f:.2f}",
        "Textcat AUC": f"{scorer.textcat_auc:.2f}",
        "Textcat F": f"{scorer.textcat_f:.2f}",
        "Sent P": f"{scorer.sent_p:.2f}",
        "Sent R": f"{scorer.sent_r:.2f}",
        "Sent F": f"{scorer.sent_f:.2f}",
    }
    msg.table(results, title="Results")

    if displacy_path:
        docs = [ex.doc for ex in dev_dataset]
        # Only render visualizations for components the model actually has.
        render_deps = "parser" in nlp.meta.get("pipeline", [])
        render_ents = "ner" in nlp.meta.get("pipeline", [])
        render_parses(
            docs,
            displacy_path,
            model_name=model,
            limit=displacy_limit,
            deps=render_deps,
            ents=render_ents,
        )
        # Report the number actually rendered, which may be fewer than the
        # configured limit when the dev set is small.
        num_parses = min(len(docs), displacy_limit)
        msg.good(f"Generated {num_parses} parses as HTML", displacy_path)
    if return_scores:
        return scorer.scores
|
2017-10-01 22:04:32 +03:00
|
|
|
|
|
|
|
|
2018-11-30 22:16:14 +03:00
|
|
|
def render_parses(docs, output_path, model_name="", limit=250, deps=True, ents=True):
    """Render a sample of parses to static HTML files using displaCy.

    docs (list): Parsed Doc objects; only the first `limit` are rendered.
    output_path (Path): Existing directory the HTML files are written to.
    model_name (str): Title stored on the first doc and shown on the page.
    limit (int): Maximum number of parses to render.
    deps (bool): Write dependency visualizations to "parses.html".
    ents (bool): Write entity visualizations to "entities.html".
    """
    if not docs:
        # Nothing to render — avoid IndexError on docs[0] below.
        return
    docs[0].user_data["title"] = model_name
    if ents:
        html = displacy.render(docs[:limit], style="ent", page=True)
        with (output_path / "entities.html").open("w", encoding="utf8") as file_:
            file_.write(html)
    if deps:
        html = displacy.render(
            docs[:limit], style="dep", page=True, options={"compact": True}
        )
        with (output_path / "parses.html").open("w", encoding="utf8") as file_:
            file_.write(html)
|