From e765940583a8eca60242010aff3bbaa98f165b26 Mon Sep 17 00:00:00 2001
From: Adriane Boyd
Date: Mon, 9 Jan 2023 10:07:52 +0100
Subject: [PATCH] Restore v2 token_acc score implementation

In the v3 scorer refactoring, `token_acc` was implemented incorrectly.
It should use `precision` instead of `fscore` for the measure of
correctly aligned tokens / number of predicted tokens.

Fix the docs to reflect that the measure uses the number of predicted
tokens rather than the number of gold tokens.
---
 spacy/scorer.py            | 2 +-
 spacy/tests/test_scorer.py | 2 +-
 website/docs/api/scorer.md | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/spacy/scorer.py b/spacy/scorer.py
index 16fc303a0..28f504fc3 100644
--- a/spacy/scorer.py
+++ b/spacy/scorer.py
@@ -174,7 +174,7 @@ class Scorer:
             prf_score.score_set(pred_spans, gold_spans)
         if len(acc_score) > 0:
             return {
-                "token_acc": acc_score.fscore,
+                "token_acc": acc_score.precision,
                 "token_p": prf_score.precision,
                 "token_r": prf_score.recall,
                 "token_f": prf_score.fscore,
diff --git a/spacy/tests/test_scorer.py b/spacy/tests/test_scorer.py
index b903f1669..dbb47b423 100644
--- a/spacy/tests/test_scorer.py
+++ b/spacy/tests/test_scorer.py
@@ -110,7 +110,7 @@ def test_tokenization(sented_doc):
     )
     example.predicted[1].is_sent_start = False
     scores = scorer.score([example])
-    assert scores["token_acc"] == approx(0.66666666)
+    assert scores["token_acc"] == 0.5
     assert scores["token_p"] == 0.5
     assert scores["token_r"] == approx(0.33333333)
     assert scores["token_f"] == 0.4
diff --git a/website/docs/api/scorer.md b/website/docs/api/scorer.md
index 9ef36e6fc..86e61da1e 100644
--- a/website/docs/api/scorer.md
+++ b/website/docs/api/scorer.md
@@ -76,7 +76,7 @@ core pipeline components, the individual score names start with the `Token` or
 
 Scores the tokenization:
 
-- `token_acc`: number of correct tokens / number of gold tokens
+- `token_acc`: number of correct tokens / number of predicted tokens
 - `token_p`, `token_r`, `token_f`: precision, recall and F-score for token
   character spans
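
Note (not part of the patch): a minimal sketch of the arithmetic behind the fix.
`token_acc` is meant to be the number of correctly aligned tokens divided by the
number of predicted tokens, i.e. a precision, whereas the buggy version returned
an F-score. The counts and helper functions below are illustrative assumptions;
they do not reproduce spaCy's `PRFScore` internals or the alignment logic in
`Scorer.score_tokenization`, although the toy counts happen to yield the same
token_p/token_r/token_f values asserted in the updated test.

    # Toy illustration of precision vs. F-score for tokenization scoring.
    # Assumed counts: 1 predicted token is correct, 1 is spurious, and
    # 2 gold tokens were missed entirely.

    def precision(tp: int, fp: int) -> float:
        return tp / (tp + fp) if tp + fp else 0.0

    def recall(tp: int, fn: int) -> float:
        return tp / (tp + fn) if tp + fn else 0.0

    def fscore(p: float, r: float) -> float:
        return 2 * p * r / (p + r) if p + r else 0.0

    tp, fp, fn = 1, 1, 2
    p = precision(tp, fp)   # 0.5    -> the quantity token_acc should report
    r = recall(tp, fn)      # ~0.333
    f = fscore(p, r)        # 0.4    -> an F-score, the wrong quantity for token_acc

    print(p, r, f)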