import pytest

from spacy import util
from spacy.lang.en import English
from spacy.pipeline.defaults import default_ner
from spacy.pipeline import EntityRecognizer, EntityRuler
from spacy.vocab import Vocab
from spacy.syntax.ner import BiluoPushDown
from spacy.gold import GoldParse
from spacy.tokens import Doc

from ..util import make_tempdir
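
# These tests cover the NER transition system (BiluoPushDown) and its oracle
# moves, blocking tokens from entities, the interaction between the
# EntityRecognizer and the EntityRuler in a pipeline, and serialization of a
# trained NER model.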


TRAIN_DATA = [
    ("Who is Shaka Khan?", {"entities": [(7, 17, "PERSON")]}),
    ("I like London and Berlin.", {"entities": [(7, 13, "LOC"), (18, 24, "LOC")]}),
]


@pytest.fixture
def vocab():
    return Vocab()


@pytest.fixture
def doc(vocab):
    return Doc(vocab, words=["Casey", "went", "to", "New", "York", "."])


@pytest.fixture
def entity_annots(doc):
    casey = doc[0:1]
    ny = doc[3:5]
    return [
        (casey.start_char, casey.end_char, "PERSON"),
        (ny.start_char, ny.end_char, "GPE"),
    ]


@pytest.fixture
def entity_types(entity_annots):
    return sorted(set([label for (s, e, label) in entity_annots]))


@pytest.fixture
def tsys(vocab, entity_types):
    actions = BiluoPushDown.get_actions(entity_types=entity_types)
    return BiluoPushDown(vocab.strings, actions)
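
# BiluoPushDown is the transition system behind spaCy's EntityRecognizer:
# get_actions() derives the available parser actions (B/I/L/U moves per entity
# label, plus O) from the entity types, and the instance can then map gold
# BILUO tags to an oracle action sequence, as the tests below exercise.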


def test_get_oracle_moves(tsys, doc, entity_annots):
    gold = GoldParse(doc, entities=entity_annots)
    tsys.preprocess_gold(gold)
    act_classes = tsys.get_oracle_sequence(doc, gold)
    names = [tsys.get_class_name(act) for act in act_classes]
    assert names == ["U-PERSON", "O", "O", "B-GPE", "L-GPE", "O"]
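
# The expected sequence encodes the BILUO scheme: "Casey" is a single-token
# entity (U-PERSON), "New York" spans two tokens (B-GPE, L-GPE), and the
# remaining tokens are outside any entity (O).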


def test_get_oracle_moves_negative_entities(tsys, doc, entity_annots):
    entity_annots = [(s, e, "!" + label) for s, e, label in entity_annots]
    gold = GoldParse(doc, entities=entity_annots)
    for i, tag in enumerate(gold.ner):
        if tag == "L-!GPE":
            gold.ner[i] = "-"
    tsys.preprocess_gold(gold)
    act_classes = tsys.get_oracle_sequence(doc, gold)
    names = [tsys.get_class_name(act) for act in act_classes]
    assert names


def test_get_oracle_moves_negative_entities2(tsys, vocab):
    doc = Doc(vocab, words=["A", "B", "C", "D"])
    gold = GoldParse(doc, entities=[])
    gold.ner = ["B-!PERSON", "L-!PERSON", "B-!PERSON", "L-!PERSON"]
    tsys.preprocess_gold(gold)
    act_classes = tsys.get_oracle_sequence(doc, gold)
    names = [tsys.get_class_name(act) for act in act_classes]
    assert names


def test_get_oracle_moves_negative_O(tsys, vocab):
    doc = Doc(vocab, words=["A", "B", "C", "D"])
    gold = GoldParse(doc, entities=[])
    gold.ner = ["O", "!O", "O", "!O"]
    tsys.preprocess_gold(gold)
    act_classes = tsys.get_oracle_sequence(doc, gold)
    names = [tsys.get_class_name(act) for act in act_classes]
    assert names
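
# The three tests above feed negative annotations to the oracle: a "!" prefix
# marks a label the span should *not* receive, and "-" marks a token whose
# gold tag is unknown. They only assert that a non-empty oracle sequence comes
# back, i.e. that negative and missing tags don't crash the transition system.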


def test_oracle_moves_missing_B(en_vocab):
    words = ["B", "52", "Bomber"]
    biluo_tags = [None, None, "L-PRODUCT"]

    doc = Doc(en_vocab, words=words)
    gold = GoldParse(doc, words=words, entities=biluo_tags)

    moves = BiluoPushDown(en_vocab.strings)
    move_types = ("M", "B", "I", "L", "U", "O")
    for tag in biluo_tags:
        if tag is None:
            continue
        elif tag == "O":
            moves.add_action(move_types.index("O"), "")
        else:
            action, label = tag.split("-")
            moves.add_action(move_types.index("B"), label)
            moves.add_action(move_types.index("I"), label)
            moves.add_action(move_types.index("L"), label)
            moves.add_action(move_types.index("U"), label)
    moves.preprocess_gold(gold)
    moves.get_oracle_sequence(doc, gold)
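
# Here the entity's B tag is missing (the first two tags are None), so the
# oracle has to cope with an L-PRODUCT that was never opened; the test passes
# as long as get_oracle_sequence() doesn't raise.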


def test_oracle_moves_whitespace(en_vocab):
    words = ["production", "\n", "of", "Northrop", "\n", "Corp.", "\n", "'s", "radar"]
    biluo_tags = ["O", "O", "O", "B-ORG", None, "I-ORG", "L-ORG", "O", "O"]

    doc = Doc(en_vocab, words=words)
    gold = GoldParse(doc, words=words, entities=biluo_tags)

    moves = BiluoPushDown(en_vocab.strings)
    move_types = ("M", "B", "I", "L", "U", "O")
    for tag in biluo_tags:
        if tag is None:
            continue
        elif tag == "O":
            moves.add_action(move_types.index("O"), "")
        else:
            action, label = tag.split("-")
            moves.add_action(move_types.index(action), label)
    moves.preprocess_gold(gold)
    moves.get_oracle_sequence(doc, gold)


def test_accept_blocked_token():
    """Test successful blocking of tokens from being part of an entity."""
    # 1. test normal behaviour
    nlp1 = English()
    doc1 = nlp1("I live in New York")
    ner1 = EntityRecognizer(doc1.vocab, default_ner())
    assert [token.ent_iob_ for token in doc1] == ["", "", "", "", ""]
    assert [token.ent_type_ for token in doc1] == ["", "", "", "", ""]

    # Add the OUT action
    ner1.moves.add_action(5, "")
    ner1.add_label("GPE")
    # Get into the state just before "New"
    state1 = ner1.moves.init_batch([doc1])[0]
    ner1.moves.apply_transition(state1, "O")
    ner1.moves.apply_transition(state1, "O")
    ner1.moves.apply_transition(state1, "O")
    # Check that B-GPE is valid.
    assert ner1.moves.is_valid(state1, "B-GPE")

    # 2. test blocking behaviour
    nlp2 = English()
    doc2 = nlp2("I live in New York")
    ner2 = EntityRecognizer(doc2.vocab, default_ner())

    # set "New York" to a blocked entity
    doc2.ents = [(0, 3, 5)]
    assert [token.ent_iob_ for token in doc2] == ["", "", "", "B", "B"]
    assert [token.ent_type_ for token in doc2] == ["", "", "", "", ""]

    # Check that B-GPE is now invalid.
    # Add the UNIT (index 4) and OUT (index 5) actions.
    ner2.moves.add_action(4, "")
    ner2.moves.add_action(5, "")
    ner2.add_label("GPE")
    state2 = ner2.moves.init_batch([doc2])[0]
    ner2.moves.apply_transition(state2, "O")
    ner2.moves.apply_transition(state2, "O")
    ner2.moves.apply_transition(state2, "O")
    # we can only use U- for "New"
    assert not ner2.moves.is_valid(state2, "B-GPE")
    assert ner2.moves.is_valid(state2, "U-")
    ner2.moves.apply_transition(state2, "U-")
    # we can only use U- for "York"
    assert not ner2.moves.is_valid(state2, "B-GPE")
    assert ner2.moves.is_valid(state2, "U-")
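
# Blocking works through entities with no label: assigning doc.ents a span
# with label 0 sets ent_iob_ to "B" while leaving ent_type_ empty. As the
# assertions above show, the labelled B-GPE move then becomes invalid over
# those tokens, while the unlabelled "U-" move remains valid.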


def test_train_empty():
    """Test that training an empty text does not throw errors."""
    train_data = [
        ("Who is Shaka Khan?", {"entities": [(7, 17, "PERSON")]}),
        ("", {"entities": []}),
    ]

    nlp = English()
    ner = nlp.create_pipe("ner")
    ner.add_label("PERSON")
    nlp.add_pipe(ner, last=True)

    nlp.begin_training()
    for itn in range(2):
        losses = {}
        batches = util.minibatch(train_data)
        for batch in batches:
            texts, annotations = zip(*batch)
            nlp.update(
                texts,  # batch of texts
                annotations,  # batch of annotations
                losses=losses,
            )


def test_overwrite_token():
    nlp = English()
    ner1 = nlp.create_pipe("ner")
    nlp.add_pipe(ner1, name="ner")
    nlp.begin_training()

    # The untrained NER will predict O for each token
    doc = nlp("I live in New York")
    assert [token.ent_iob_ for token in doc] == ["O", "O", "O", "O", "O"]
    assert [token.ent_type_ for token in doc] == ["", "", "", "", ""]

    # Check that a new ner can overwrite O
    ner2 = EntityRecognizer(doc.vocab, default_ner())
    ner2.moves.add_action(5, "")
    ner2.add_label("GPE")
    state = ner2.moves.init_batch([doc])[0]
    assert ner2.moves.is_valid(state, "B-GPE")
    assert ner2.moves.is_valid(state, "U-GPE")
    ner2.moves.apply_transition(state, "B-GPE")
    assert ner2.moves.is_valid(state, "I-GPE")
    assert ner2.moves.is_valid(state, "L-GPE")


def test_empty_ner():
    nlp = English()
    ner = nlp.create_pipe("ner")
    ner.add_label("MY_LABEL")
    nlp.add_pipe(ner)
    nlp.begin_training()
    doc = nlp("John is watching the news about Croatia's elections")
    # if this goes wrong, the initialization of the parser's upper layer is probably broken
    result = ["O", "O", "O", "O", "O", "O", "O", "O", "O"]
    assert [token.ent_iob_ for token in doc] == result


def test_ruler_before_ner():
    """Test that an NER works after an entity_ruler: the second can add annotations."""
    nlp = English()

    # 1: Entity Ruler - should set "This" to B and everything else to empty
    ruler = EntityRuler(nlp)
    patterns = [{"label": "THING", "pattern": "This"}]
    ruler.add_patterns(patterns)
    nlp.add_pipe(ruler)

    # 2: untrained NER - should set everything else to O
    untrained_ner = nlp.create_pipe("ner")
    untrained_ner.add_label("MY_LABEL")
    nlp.add_pipe(untrained_ner)
    nlp.begin_training()
    doc = nlp("This is Antti Korhonen speaking in Finland")
    expected_iobs = ["B", "O", "O", "O", "O", "O", "O"]
    expected_types = ["THING", "", "", "", "", "", ""]
    assert [token.ent_iob_ for token in doc] == expected_iobs
    assert [token.ent_type_ for token in doc] == expected_types


def test_ner_before_ruler():
    """Test that an entity_ruler works after an NER: the second can overwrite O annotations."""
    nlp = English()

    # 1: untrained NER - should set everything to O
    untrained_ner = nlp.create_pipe("ner")
    untrained_ner.add_label("MY_LABEL")
    nlp.add_pipe(untrained_ner, name="uner")
    nlp.begin_training()

    # 2: Entity Ruler - should set "This" to B and keep everything else O
    ruler = EntityRuler(nlp)
    patterns = [{"label": "THING", "pattern": "This"}]
    ruler.add_patterns(patterns)
    nlp.add_pipe(ruler)

    doc = nlp("This is Antti Korhonen speaking in Finland")
    expected_iobs = ["B", "O", "O", "O", "O", "O", "O"]
    expected_types = ["THING", "", "", "", "", "", ""]
    assert [token.ent_iob_ for token in doc] == expected_iobs
    assert [token.ent_type_ for token in doc] == expected_types


def test_block_ner():
    """Test functionality for blocking tokens so they can't be in a named entity."""
    # block "Antti L Korhonen" from being a named entity
    nlp = English()
    nlp.add_pipe(BlockerComponent1(2, 5))
    untrained_ner = nlp.create_pipe("ner")
    untrained_ner.add_label("MY_LABEL")
    nlp.add_pipe(untrained_ner, name="uner")
    nlp.begin_training()
    doc = nlp("This is Antti L Korhonen speaking in Finland")
    expected_iobs = ["O", "O", "B", "B", "B", "O", "O", "O"]
    expected_types = ["", "", "", "", "", "", "", ""]
    assert [token.ent_iob_ for token in doc] == expected_iobs
    assert [token.ent_type_ for token in doc] == expected_types


def test_overfitting_IO():
    # Simple test to try and quickly overfit the NER component - ensuring the ML models work correctly
    nlp = English()
    ner = nlp.create_pipe("ner")
    for _, annotations in TRAIN_DATA:
        for ent in annotations.get("entities"):
            ner.add_label(ent[2])
    nlp.add_pipe(ner)
    optimizer = nlp.begin_training()

    for i in range(50):
        losses = {}
        nlp.update(TRAIN_DATA, sgd=optimizer, losses=losses)
    assert losses["ner"] < 0.00001

    # test the trained model
    test_text = "I like London."
    doc = nlp(test_text)
    ents = doc.ents
    assert len(ents) == 1
    assert ents[0].text == "London"
    assert ents[0].label_ == "LOC"

    # Also test the results are still the same after IO
    with make_tempdir() as tmp_dir:
        nlp.to_disk(tmp_dir)
        nlp2 = util.load_model_from_path(tmp_dir)
        doc2 = nlp2(test_text)
        ents2 = doc2.ents
        assert len(ents2) == 1
        assert ents2[0].text == "London"
        assert ents2[0].label_ == "LOC"
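
# The round-trip through make_tempdir() checks that the trained model survives
# serialization: nlp.to_disk() writes the pipeline, load_model_from_path()
# restores it, and the reloaded model must reproduce the same entities.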


class BlockerComponent1(object):
    name = "my_blocker"

    def __init__(self, start, end):
        self.start = start
        self.end = end

    def __call__(self, doc):
        # An entity with label 0 (no label) over [start, end) blocks those
        # tokens: they get ent_iob_ "B" but no type, as test_block_ner asserts.
        doc.ents = [(0, self.start, self.end)]
        return doc