Mirror of https://github.com/explosion/spaCy.git, synced 2024-11-13 13:17:06 +03:00
06f0a8daa0
* fix grad_clip naming * cleaning up pretrained_vectors out of cfg * further refactoring Model init's * move Model building out of pipes * further refactor to require a model config when creating a pipe * small fixes * making cfg in nn_parser more consistent * fixing nr_class for parser * fixing nn_parser's nO * fix printing of loss * architectures in own file per type, consistent naming * convenience methods default_tagger_config and default_tok2vec_config * let create_pipe access default config if available for that component * default_parser_config * move defaults to separate folder * allow reading nlp from package or dir with argument 'name' * architecture spacy.VocabVectors.v1 to read static vectors from file * cleanup * default configs for nel, textcat, morphologizer, tensorizer * fix imports * fixing unit tests * fixes and clean up * fixing defaults, nO, fix unit tests * restore parser IO * fix IO * 'fix' serialization test * add *.cfg to manifest * fix example configs with additional arguments * replace Morpohologizer with Tagger * add IO bit when testing overfitting of tagger (currently failing) * fix IO - don't initialize when reading from disk * expand overfitting tests to also check IO goes OK * remove dropout from HashEmbed to fix Tagger performance * add defaults for sentrec * update thinc * always pass a Model instance to a Pipe * fix piped_added statement * remove obsolete W029 * remove obsolete errors * restore byte checking tests (work again) * clean up test * further test cleanup * convert from config to Model in create_pipe * bring back error when component is not initialized * cleanup * remove calls for nlp2.begin_training * use thinc.api in imports * allow setting charembed's nM and nC * fix for hardcoded nM/nC + unit test * formatting fixes * trigger build
38 lines
1.1 KiB
Python
from collections import defaultdict

from spacy.ml.models.defaults import default_ner
from spacy.pipeline import EntityRecognizer
from spacy.lang.en import English
from spacy.tokens import Span
|
def test_issue4313():
    """Regression test: beam parsing with a label the model never trained on.

    This should not crash or exit with some strange error code.
    """
    width, density = 16, 0.0001

    # Build a minimal pipeline with a single trained label.
    nlp = English()
    ner = EntityRecognizer(nlp.vocab, default_ner())
    ner.add_label("SOME_LABEL")
    ner.begin_training([])
    nlp.add_pipe(ner)

    # Manually attach an entity whose label the NER component has never seen.
    doc = nlp("What do you think about Apple ?")
    assert len(ner.labels) == 1
    assert "SOME_LABEL" in ner.labels
    doc.ents = list(doc.ents) + [Span(doc, 5, 6, label="MY_ORG")]

    # Beam parsing must still succeed despite the unknown label.
    docs = [doc]
    beams = nlp.entity.beam_parse(docs, beam_width=width, beam_density=density)

    # Accumulate per-entity scores across beam parses; no assertion here —
    # the test only requires that this runs without crashing.
    for parsed, beam in zip(docs, beams):
        scores = defaultdict(float)
        for prob, ents in nlp.entity.moves.get_beam_parses(beam):
            for start, end, label in ents:
                scores[(start, end, label)] += prob