# cython: profile=False
from collections.abc import Iterable as IterableInstance

import numpy

from murmurhash.mrmr cimport hash64

from ..tokens.doc cimport Doc
from ..tokens.span cimport Span

from ..attrs import IDS
from ..errors import Errors, Warnings
from ..pipeline._parser_internals import nonproj
from ..tokens.span import Span
from .alignment import Alignment
from .iob_utils import (
    biluo_tags_to_spans,
    biluo_to_iob,
    doc_to_biluo_tags,
    offsets_to_biluo_tags,
    remove_bilu_prefix,
)

from ..tokens.token cimport MISSING_DEP

from ..util import all_equal, logger, to_ternary_int


cpdef Doc annotations_to_doc(vocab, tok_annot, doc_annot):
    """ Create a Doc from dictionaries with token and doc annotations. """
    attrs, array = _annot2array(vocab, tok_annot, doc_annot)
    output = Doc(vocab, words=tok_annot["ORTH"], spaces=tok_annot["SPACY"])
    if "entities" in doc_annot:
        _add_entities_to_doc(output, doc_annot["entities"])
    if "spans" in doc_annot:
        _add_spans_to_doc(output, doc_annot["spans"])
    if array.size:
        output = output.from_array(attrs, array)
    # links are currently added with ENT_KB_ID on the token level
    output.cats.update(doc_annot.get("cats", {}))
    return output

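
# Usage sketch for annotations_to_doc (hypothetical vocab and values, for
# illustration only; the dict layout mirrors what Example.to_dict() produces):
#
#     doc = annotations_to_doc(
#         nlp.vocab,
#         {"ORTH": ["San", "Francisco"], "SPACY": [True, False], "TAG": ["NNP", "NNP"]},
#         {"entities": ["B-GPE", "L-GPE"], "cats": {}},
#     )
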
def validate_examples(examples, method):
    """Check that a batch of examples received during processing is valid.
    This function lives here to prevent circular imports.

    examples (Iterable[Example]): A batch of examples.
    method (str): The method name to show in error messages.
    """
    if not isinstance(examples, IterableInstance):
        err = Errors.E978.format(name=method, types=type(examples))
        raise TypeError(err)
    wrong = set([type(eg) for eg in examples if not isinstance(eg, Example)])
    if wrong:
        err = Errors.E978.format(name=method, types=wrong)
        raise TypeError(err)


def validate_get_examples(get_examples, method):
    """Check that a callable producing a batch of examples is valid:
    the callable produces a non-empty list of Example objects.
    This function lives here to prevent circular imports.

    get_examples (Callable[[], Iterable[Example]]): A function that produces a batch of examples.
    method (str): The method name to show in error messages.
    """
    if get_examples is None or not hasattr(get_examples, "__call__"):
        err = Errors.E930.format(method=method, obj=type(get_examples))
        raise TypeError(err)
    examples = get_examples()
    if not examples:
        err = Errors.E930.format(method=method, obj=examples)
        raise TypeError(err)
    validate_examples(examples, method)

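
# Usage sketch (hypothetical component, for illustration only): pipeline components
# typically call this at the start of initialize() to check the data callback.
#
#     def initialize(self, get_examples, *, nlp=None):
#         validate_get_examples(get_examples, "MyComponent.initialize")
#         for example in get_examples():
#             ...
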
cdef class Example:
    def __init__(self, Doc predicted, Doc reference, *, alignment=None):
        if predicted is None:
            raise TypeError(Errors.E972.format(arg="predicted"))
        if reference is None:
            raise TypeError(Errors.E972.format(arg="reference"))
        self.predicted = predicted
        self.reference = reference
        self._cached_alignment = alignment

    def __len__(self):
        return len(self.predicted)

    property predicted:
        def __get__(self):
            return self.x

        def __set__(self, doc):
            self.x = doc
            self._cached_alignment = None
            self._cached_words_x = [t.text for t in doc]

    property reference:
        def __get__(self):
            return self.y

        def __set__(self, doc):
            self.y = doc
            self._cached_alignment = None
            self._cached_words_y = [t.text for t in doc]

    def copy(self):
        return Example(
            self.x.copy(),
            self.y.copy()
        )

    @classmethod
    def from_dict(cls, Doc predicted, dict example_dict):
        if predicted is None:
            raise ValueError(Errors.E976.format(n="first", type="Doc"))
        if example_dict is None:
            raise ValueError(Errors.E976.format(n="second", type="dict"))
        example_dict = _fix_legacy_dict_data(example_dict)
        tok_dict, doc_dict = _parse_example_dict_data(example_dict)
        if "ORTH" not in tok_dict:
            tok_dict["ORTH"] = [tok.text for tok in predicted]
            tok_dict["SPACY"] = [tok.whitespace_ for tok in predicted]
        return Example(
            predicted,
            annotations_to_doc(predicted.vocab, tok_dict, doc_dict)
        )

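    # Usage sketch (hypothetical Doc and annotations, for illustration only):
    #
    #     predicted = Doc(vocab, words=["Facebook", "released", "React"])
    #     eg = Example.from_dict(
    #         predicted,
    #         {"words": ["Facebook", "released", "React"],
    #          "entities": [(0, 8, "ORG"), (18, 23, "PRODUCT")]},
    #     )
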
    @property
    def alignment(self):
        x_sig = hash64(self.x.c, sizeof(self.x.c[0]) * self.x.length, 0)
        y_sig = hash64(self.y.c, sizeof(self.y.c[0]) * self.y.length, 0)
        if self._cached_alignment is None:
            words_x = [token.text for token in self.x]
            words_y = [token.text for token in self.y]
            self._x_sig = x_sig
            self._y_sig = y_sig
            self._cached_words_x = words_x
            self._cached_words_y = words_y
            self._cached_alignment = Alignment.from_strings(words_x, words_y)
            return self._cached_alignment
        elif self._x_sig == x_sig and self._y_sig == y_sig:
            # If we have a cached alignment, check whether the cache is invalid
            # due to retokenization. To make this check fast in loops, we first
            # check a hash of the TokenC arrays.
            return self._cached_alignment
        else:
            words_x = [token.text for token in self.x]
            words_y = [token.text for token in self.y]
            if words_x == self._cached_words_x and words_y == self._cached_words_y:
                self._x_sig = x_sig
                self._y_sig = y_sig
                return self._cached_alignment
            else:
                self._cached_alignment = Alignment.from_strings(words_x, words_y)
                self._cached_words_x = words_x
                self._cached_words_y = words_y
                self._x_sig = x_sig
                self._y_sig = y_sig
                return self._cached_alignment

    def _get_aligned_vectorized(self, align, gold_values):
        # Fast path for Doc attributes/fields that are predominantly a single value,
        # i.e., TAG, POS, MORPH.
        x2y_single_toks = []
        x2y_single_toks_i = []

        x2y_multiple_toks = []
        x2y_multiple_toks_i = []

        # Gather indices of gold tokens aligned to the candidate tokens into two buckets.
        # Bucket 1: All tokens that have a one-to-one alignment.
        # Bucket 2: All tokens that have a one-to-many alignment.
        for idx, token in enumerate(self.predicted):
            aligned_gold_i = align[token.i]
            aligned_gold_len = len(aligned_gold_i)

            if aligned_gold_len == 1:
                x2y_single_toks.append(aligned_gold_i.item())
                x2y_single_toks_i.append(idx)
            elif aligned_gold_len > 1:
                x2y_multiple_toks.append(aligned_gold_i)
                x2y_multiple_toks_i.append(idx)

        # Map elements of the first bucket directly to the output array.
        output = numpy.full(len(self.predicted), None)
        output[x2y_single_toks_i] = gold_values[x2y_single_toks].squeeze()

        # Collapse many-to-one alignments into one-to-one alignments if they
        # share the same value. Map to None in all other cases.
        for i in range(len(x2y_multiple_toks)):
            aligned_gold_values = gold_values[x2y_multiple_toks[i]]

            # If all aligned tokens have the same value, use it.
            if all_equal(aligned_gold_values):
                x2y_multiple_toks[i] = aligned_gold_values[0].item()
            else:
                x2y_multiple_toks[i] = None

        output[x2y_multiple_toks_i] = x2y_multiple_toks

        return output.tolist()

    def _get_aligned_non_vectorized(self, align, gold_values):
        # Slower path for fields that return multiple values (resulting
        # in ragged arrays that cannot be vectorized trivially).
        output = [None] * len(self.predicted)

        for token in self.predicted:
            aligned_gold_i = align[token.i]
            values = gold_values[aligned_gold_i].ravel()
            if len(values) == 1:
                output[token.i] = values.item()
            elif all_equal(values):
                # If all aligned tokens have the same value, use it.
                output[token.i] = values[0].item()

        return output

    def get_aligned(self, field, as_string=False):
        """Return an aligned array for a token attribute."""
        align = self.alignment.x2y
        gold_values = self.reference.to_array([field])

        if len(gold_values.shape) == 1:
            output = self._get_aligned_vectorized(align, gold_values)
        else:
            output = self._get_aligned_non_vectorized(align, gold_values)

        vocab = self.reference.vocab
        if as_string and field not in ["ENT_IOB", "SENT_START"]:
            output = [vocab.strings[o] if o is not None else o for o in output]

        return output

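    # Usage sketch (hypothetical Example, for illustration only): project gold TAG
    # values onto the predicted tokenization; unaligned tokens come back as None.
    #
    #     tags = eg.get_aligned("TAG", as_string=True)
    #     pos = eg.get_aligned("POS", as_string=True)
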
    def get_aligned_parse(self, projectivize=True):
        cand_to_gold = self.alignment.x2y
        gold_to_cand = self.alignment.y2x
        heads = [token.head.i for token in self.y]
        deps = [token.dep_ for token in self.y]

        if projectivize:
            proj_heads, proj_deps = nonproj.projectivize(heads, deps)
            has_deps = [token.has_dep() for token in self.y]
            has_heads = [token.has_head() for token in self.y]

            # ensure that missing data remains missing
            heads = [h if has_heads[i] else heads[i] for i, h in enumerate(proj_heads)]
            deps = [d if has_deps[i] else deps[i] for i, d in enumerate(proj_deps)]

        # Select all candidate tokens that are aligned to a single gold token.
        c2g_single_toks = numpy.where(cand_to_gold.lengths == 1)[0]

        # Fetch all aligned gold token indices.
        if c2g_single_toks.shape == cand_to_gold.lengths.shape:
            # This is the most likely case.
            gold_i = cand_to_gold[:]
        else:
            gold_i = numpy.vectorize(lambda x: cand_to_gold[int(x)][0], otypes='i')(c2g_single_toks)

        # Fetch indices of all gold heads for the aligned gold tokens.
        heads = numpy.asarray(heads, dtype='i')
        gold_head_i = heads[gold_i]

        # Select all gold tokens that are heads of the previously selected
        # gold tokens (and are aligned to a single candidate token).
        g2c_len_heads = gold_to_cand.lengths[gold_head_i]
        g2c_len_heads = numpy.where(g2c_len_heads == 1)[0]
        g2c_i = numpy.vectorize(lambda x: gold_to_cand[int(x)][0], otypes='i')(gold_head_i[g2c_len_heads]).squeeze()

        # Update head/dep alignments with the above.
        aligned_heads = numpy.full((self.x.length), None)
        aligned_heads[c2g_single_toks[g2c_len_heads]] = g2c_i

        deps = numpy.asarray(deps)
        aligned_deps = numpy.full((self.x.length), None)
        aligned_deps[c2g_single_toks] = deps[gold_i]

        return aligned_heads.tolist(), aligned_deps.tolist()

    def get_aligned_sent_starts(self):
        """Get list of SENT_START attributes aligned to the predicted tokenization.
        If the reference does not have sentence starts, return a list of None values.
        """
        if self.y.has_annotation("SENT_START"):
            align = self.alignment.y2x
            sent_starts = [False] * len(self.x)
            for y_sent in self.y.sents:
                x_start = int(align[y_sent.start][0])
                sent_starts[x_start] = True
            return sent_starts
        else:
            return [None] * len(self.x)

    def get_aligned_spans_x2y(self, x_spans, allow_overlap=False):
        return self._get_aligned_spans(self.y, x_spans, self.alignment.x2y, allow_overlap)

    def get_aligned_spans_y2x(self, y_spans, allow_overlap=False):
        return self._get_aligned_spans(self.x, y_spans, self.alignment.y2x, allow_overlap)

    def _get_aligned_spans(self, doc, spans, align, allow_overlap):
        seen = set()
        output = []
        for span in spans:
            indices = align[span.start : span.end]
            if not allow_overlap:
                indices = [idx for idx in indices if idx not in seen]
            if len(indices) >= 1:
                aligned_span = Span(doc, indices[0], indices[-1] + 1, label=span.label)
                target_text = span.text.lower().strip().replace(" ", "")
                our_text = aligned_span.text.lower().strip().replace(" ", "")
                if our_text == target_text:
                    output.append(aligned_span)
                    seen.update(indices)
        return output

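    # Usage sketch (hypothetical Example, for illustration only): map gold entity
    # spans onto the predicted tokenization, dropping spans whose text no longer
    # matches after alignment.
    #
    #     pred_ents = eg.get_aligned_spans_y2x(eg.reference.ents)
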
    def get_aligned_ents_and_ner(self):
        if not self.y.has_annotation("ENT_IOB"):
            return [], [None] * len(self.x)
        x_ents = self.get_aligned_spans_y2x(self.y.ents, allow_overlap=False)
        # Default to 'None' for missing values
        x_tags = offsets_to_biluo_tags(
            self.x,
            [(e.start_char, e.end_char, e.label_) for e in x_ents],
            missing=None
        )
        # Now fill the tokens we can align to O.
        O = 2  # I=1, O=2, B=3 # no-cython-lint: E741
        for i, ent_iob in enumerate(self.get_aligned("ENT_IOB")):
            if x_tags[i] is None:
                if ent_iob == O:
                    x_tags[i] = "O"
                elif self.x[i].is_space:
                    x_tags[i] = "O"
        return x_ents, x_tags

    def get_aligned_ner(self):
        _x_ents, x_tags = self.get_aligned_ents_and_ner()
        return x_tags

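    # Usage sketch (hypothetical Example, for illustration only): BILUO tags over
    # the predicted tokenization, with None for tokens that cannot be aligned.
    #
    #     ner_tags = eg.get_aligned_ner()
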
    def get_matching_ents(self, check_label=True):
        """Return entities that are shared between predicted and reference docs.

        If `check_label` is True, entities must have matching labels to be
        kept. Otherwise only the character indices need to match.
        """
        gold = {}
        for ent in self.reference.ents:
            gold[(ent.start_char, ent.end_char)] = ent.label

        keep = []
        for ent in self.predicted.ents:
            key = (ent.start_char, ent.end_char)
            if key not in gold:
                continue

            if check_label and ent.label != gold[key]:
                continue

            keep.append(ent)

        return keep

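    # Usage sketch (hypothetical Example, for illustration only): keep only the
    # predicted entities whose boundaries (and labels) also appear in the reference.
    #
    #     shared = eg.get_matching_ents(check_label=True)
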
    def to_dict(self):
        return {
            "doc_annotation": {
                "cats": dict(self.reference.cats),
                "entities": doc_to_biluo_tags(self.reference),
                "spans": self._spans_to_dict(),
                "links": self._links_to_dict()
            },
            "token_annotation": {
                "ORTH": [t.text for t in self.reference],
                "SPACY": [bool(t.whitespace_) for t in self.reference],
                "TAG": [t.tag_ for t in self.reference],
                "LEMMA": [t.lemma_ for t in self.reference],
                "POS": [t.pos_ for t in self.reference],
                "MORPH": [str(t.morph) for t in self.reference],
                "HEAD": [t.head.i for t in self.reference],
                "DEP": [t.dep_ for t in self.reference],
                "SENT_START": [int(bool(t.is_sent_start)) for t in self.reference]
            }
        }

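    # Usage sketch (hypothetical Example, for illustration only): to_dict() and
    # from_dict() are intended to round-trip the gold-standard annotations.
    #
    #     eg2 = Example.from_dict(eg.predicted, eg.to_dict())
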
    def _spans_to_dict(self):
        span_dict = {}
        for key in self.reference.spans:
            span_tuples = []
            for span in self.reference.spans[key]:
                span_tuple = (span.start_char, span.end_char, span.label_, span.kb_id_)
                span_tuples.append(span_tuple)
            span_dict[key] = span_tuples

        return span_dict

    def _links_to_dict(self):
        links = {}
        for ent in self.reference.ents:
            if ent.kb_id_:
                links[(ent.start_char, ent.end_char)] = {ent.kb_id_: 1.0}
        return links

    def split_sents(self):
        """ Split the token annotations into multiple Examples based on
        sent_starts and return a list of the new Examples."""
        if not self.reference.has_annotation("SENT_START"):
            return [self]

        align = self.alignment.y2x
        seen_indices = set()
        output = []
        for y_sent in self.reference.sents:
            indices = align[y_sent.start : y_sent.end]
            indices = [idx for idx in indices if idx not in seen_indices]
            if indices:
                x_sent = self.predicted[indices[0] : indices[-1] + 1]
                output.append(Example(x_sent.as_doc(), y_sent.as_doc()))
                seen_indices.update(indices)
        return output

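    # Usage sketch (hypothetical Example, for illustration only): split a
    # multi-sentence Example into one Example per reference sentence.
    #
    #     sentence_examples = eg.split_sents()
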
    property text:
        def __get__(self):
            return self.x.text

    def __str__(self):
        return str(self.to_dict())

    def __repr__(self):
        return str(self.to_dict())


def _annot2array(vocab, tok_annot, doc_annot):
    attrs = []
    values = []

    for key, value in doc_annot.items():
        if value:
            if key in ["entities", "cats", "spans"]:
                pass
            elif key == "links":
                ent_kb_ids = _parse_links(vocab, tok_annot["ORTH"], tok_annot["SPACY"], value)
                tok_annot["ENT_KB_ID"] = ent_kb_ids
            else:
                raise ValueError(Errors.E974.format(obj="doc", key=key))

    for key, value in tok_annot.items():
        if key not in IDS:
            raise ValueError(Errors.E974.format(obj="token", key=key))
        elif key in ["ORTH", "SPACY"]:
            continue
        elif key == "HEAD":
            attrs.append(key)
            row = [h-i if h is not None else 0 for i, h in enumerate(value)]
        elif key == "DEP":
            attrs.append(key)
            row = [vocab.strings.add(h) if h is not None else MISSING_DEP for h in value]
        elif key == "SENT_START":
            attrs.append(key)
            row = [to_ternary_int(v) for v in value]
        elif key == "MORPH":
            attrs.append(key)
            row = [vocab.morphology.add(v) for v in value]
        else:
            attrs.append(key)
            if not all(isinstance(v, str) for v in value):
                types = set([type(v) for v in value])
                raise TypeError(Errors.E969.format(field=key, types=types)) from None
            row = [vocab.strings.add(v) for v in value]
        values.append([numpy.array(v, dtype=numpy.int32).astype(numpy.uint64) if v < 0 else v for v in row])
    array = numpy.array(values, dtype=numpy.uint64)
    return attrs, array.T


def _add_spans_to_doc(doc, spans_data):
    if not isinstance(spans_data, dict):
        raise ValueError(Errors.E879)
    for key, span_list in spans_data.items():
        spans = []
        if not isinstance(span_list, list):
            raise ValueError(Errors.E879)
        for span_tuple in span_list:
            if not isinstance(span_tuple, (list, tuple)) or len(span_tuple) < 2:
                raise ValueError(Errors.E879)
            start_char = span_tuple[0]
            end_char = span_tuple[1]
            label = 0
            kb_id = 0
            if len(span_tuple) > 2:
                label = span_tuple[2]
            if len(span_tuple) > 3:
                kb_id = span_tuple[3]
            span = doc.char_span(start_char, end_char, label=label, kb_id=kb_id)
            spans.append(span)
        doc.spans[key] = spans


def _add_entities_to_doc(doc, ner_data):
    if ner_data is None:
        return
    elif ner_data == []:
        doc.ents = []
    elif not isinstance(ner_data, (list, tuple)):
        raise ValueError(Errors.E973)
    elif isinstance(ner_data[0], (list, tuple)):
        return _add_entities_to_doc(
            doc,
            offsets_to_biluo_tags(doc, ner_data)
        )
    elif isinstance(ner_data[0], str) or ner_data[0] is None:
        return _add_entities_to_doc(
            doc,
            biluo_tags_to_spans(doc, ner_data)
        )
    elif isinstance(ner_data[0], Span):
        entities = []
        missing = []
        for span in ner_data:
            if span.label:
                entities.append(span)
            else:
                missing.append(span)
        doc.set_ents(entities, missing=missing)
    else:
        raise ValueError(Errors.E973)

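
# Accepted "entities" formats (illustrative, hypothetical values): character offsets
# such as [(0, 8, "ORG")], BILUO tag strings such as ["B-ORG", "L-ORG", "O"], or a
# list of Span objects; offsets and tag strings are converted via the iob_utils helpers.

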
def _parse_example_dict_data(example_dict):
    return (
        example_dict["token_annotation"],
        example_dict["doc_annotation"]
    )


def _fix_legacy_dict_data(example_dict):
    token_dict = example_dict.get("token_annotation", {})
    doc_dict = example_dict.get("doc_annotation", {})
    for key, value in example_dict.items():
        if value is not None:
            if key in ("token_annotation", "doc_annotation"):
                pass
            elif key == "ids":
                pass
            elif key in ("cats", "links", "spans"):
                doc_dict[key] = value
            elif key in ("ner", "entities"):
                doc_dict["entities"] = value
            else:
                token_dict[key] = value
    # Remap keys
    remapping = {
        "words": "ORTH",
        "tags": "TAG",
        "pos": "POS",
        "lemmas": "LEMMA",
        "deps": "DEP",
        "heads": "HEAD",
        "sent_starts": "SENT_START",
        "morphs": "MORPH",
        "spaces": "SPACY",
    }
    old_token_dict = token_dict
    token_dict = {}
    for key, value in old_token_dict.items():
        if key in ("text", "ids", "brackets"):
            pass
        elif key in remapping.values():
            token_dict[key] = value
        elif key.lower() in remapping:
            token_dict[remapping[key.lower()]] = value
        else:
            all_keys = set(remapping.values())
            all_keys.update(remapping.keys())
            raise KeyError(Errors.E983.format(key=key, dict="token_annotation", keys=all_keys))
    text = example_dict.get("text", example_dict.get("raw"))
    if _has_field(token_dict, "ORTH") and not _has_field(token_dict, "SPACY"):
        token_dict["SPACY"] = _guess_spaces(text, token_dict["ORTH"])
    if "HEAD" in token_dict and "SENT_START" in token_dict:
        # If heads are set, we don't also redundantly specify SENT_START.
        token_dict.pop("SENT_START")
        logger.debug(Warnings.W092)
    return {
        "token_annotation": token_dict,
        "doc_annotation": doc_dict
    }


def _has_field(annot, field):
    if field not in annot:
        return False
    elif annot[field] is None:
        return False
    elif len(annot[field]) == 0:
        return False
    elif all([value is None for value in annot[field]]):
        return False
    else:
        return True


def _parse_ner_tags(biluo_or_offsets, vocab, words, spaces):
    if isinstance(biluo_or_offsets[0], (list, tuple)):
        # Convert to biluo if necessary
        # This is annoying but to convert the offsets we need a Doc
        # that has the target tokenization.
        reference = Doc(vocab, words=words, spaces=spaces)
        biluo = offsets_to_biluo_tags(reference, biluo_or_offsets)
    else:
        biluo = biluo_or_offsets
    ent_iobs = []
    ent_types = []
    for iob_tag in biluo_to_iob(biluo):
        if iob_tag in (None, "-"):
            ent_iobs.append("")
            ent_types.append("")
        else:
            ent_iobs.append(iob_tag.split("-")[0])
            if iob_tag.startswith("I") or iob_tag.startswith("B"):
                ent_types.append(remove_bilu_prefix(iob_tag))
            else:
                ent_types.append("")
    return ent_iobs, ent_types


def _parse_links(vocab, words, spaces, links):
    reference = Doc(vocab, words=words, spaces=spaces)
    starts = {token.idx: token.i for token in reference}
    ends = {token.idx + len(token): token.i for token in reference}
    ent_kb_ids = ["" for _ in reference]

    for index, annot_dict in links.items():
        true_kb_ids = []
        for key, value in annot_dict.items():
            if value == 1.0:
                true_kb_ids.append(key)
        if len(true_kb_ids) > 1:
            raise ValueError(Errors.E980)

        if len(true_kb_ids) == 1:
            start_char, end_char = index
            start_token = starts.get(start_char)
            end_token = ends.get(end_char)
            if start_token is None or end_token is None:
                raise ValueError(Errors.E981)
            for i in range(start_token, end_token+1):
                ent_kb_ids[i] = true_kb_ids[0]

    return ent_kb_ids


def _guess_spaces(text, words):
    if text is None:
        return None
    spaces = []
    text_pos = 0
    # align words with text
    for word in words:
        try:
            word_start = text[text_pos:].index(word)
        except ValueError:
            spaces.append(True)
            continue
        text_pos += word_start + len(word)
        if text_pos < len(text) and text[text_pos] == " ":
            spaces.append(True)
        else:
            spaces.append(False)
    return spaces