Fix bug in scorer

The scoring code was only using one metric (B-cubed), not all three of interest (B-cubed, MUC, and CEAF).
Paul O'Leary McCann 2021-08-12 18:22:08 +09:00
parent 00d481dd12
commit 230698dc83


@@ -357,7 +357,7 @@ class CoreferenceResolver(TrainablePipe):
         # we need to handle the average ourselves.
         scores = []
         for metric in (b_cubed, muc, ceafe):
-            evaluator = Evaluator(b_cubed)
+            evaluator = Evaluator(metric)
             for ex in examples:
                 p_clusters = doc2clusters(ex.predicted, self.span_cluster_prefix)
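
For context, the pattern this fix restores is building one evaluator per metric and averaging the results across all three. Below is a minimal, runnable sketch of that pattern; the metric functions are hypothetical placeholders, not coval's actual b_cubed/muc/ceafe implementations or spaCy's scorer code.

# Sketch of averaging several coreference metrics (hypothetical placeholders).

def b_cubed(pred, gold):
    return 0.70  # placeholder score

def muc(pred, gold):
    return 0.80  # placeholder score

def ceafe(pred, gold):
    return 0.75  # placeholder score

def average_coref_score(pred, gold):
    scores = []
    for metric in (b_cubed, muc, ceafe):
        # The bug: passing a fixed function (b_cubed) here scored B-cubed
        # three times; passing the loop variable scores each metric once.
        scores.append(metric(pred, gold))
    return sum(scores) / len(scores)

print(average_coref_score(None, None))  # 0.75 with the placeholders above

With the pre-fix code, the loop evaluated B-cubed on every iteration, so the reported average was simply the B-cubed score rather than the mean over all three metrics.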