💫 Replace ujson, msgpack and dill/pickle/cloudpickle with srsly (#3003)

Remove hacks and wrappers, keep code in sync across our libraries, and move spaCy a few steps closer to depending only on packages with binary wheels 🎉

See here: https://github.com/explosion/srsly

Serialization is hard, especially across Python versions and multiple platforms. After dealing with many subtle bugs over the years (encodings, locales, large files), our libraries like spaCy and Prodigy have steadily grown a number of utility functions that wrap the multiple serialization formats we need to support (especially json, msgpack and pickle). These wrapper functions ended up duplicated across our codebases, so we wanted to put them in one place.

At the same time, we noticed that having a lot of small dependencies was making maintenance harder and installation slower. To solve this, we've made srsly standalone by including the component packages directly within it. This way, we can provide all the serialization utilities we need in a single binary wheel.

srsly currently includes forks of the following packages (a usage sketch follows the list):

* ujson
* msgpack
* msgpack-numpy
* cloudpickle
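For reference, a minimal sketch of the unified API this gives us. The function names below match the srsly README; treat the exact keyword arguments as assumptions that may vary between versions:

    import srsly

    data = {"foo": "bar", "baz": 123}

    # JSON round-trip via the bundled ujson fork (unicode in, unicode out)
    assert srsly.json_loads(srsly.json_dumps(data)) == data

    # msgpack round-trip via the bundled msgpack/msgpack-numpy forks (bytes)
    assert srsly.msgpack_loads(srsly.msgpack_dumps(data)) == data

    # pickle round-trip via the bundled cloudpickle fork (bytes)
    assert srsly.pickle_loads(srsly.pickle_dumps(data)) == data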
The individual commits:

* WIP: replace json/ujson with srsly
* Replace ujson in examples: use regular json instead of srsly to make the example code easier to read and follow
* Update requirements
* Fix imports
* Fix typos
* Replace msgpack with srsly
* Fix warning

# coding: utf8
from __future__ import unicode_literals

import srsly

from ...gold import docs_to_json
from ...util import get_lang_class, minibatch


def ner_jsonl2json(input_data, lang=None, n_sents=10, use_morphology=False, **_):
    # A tokenizer is needed to align the character offsets, so a language
    # must be specified; extra CLI arguments are accepted but ignored
    if lang is None:
        raise ValueError("No --lang specified, but tokenization required")
    json_docs = []
    input_examples = [srsly.json_loads(line) for line in input_data.strip().split("\n")]
    nlp = get_lang_class(lang)()
    sentencizer = nlp.create_pipe("sentencizer")
    for i, batch in enumerate(minibatch(input_examples, size=n_sents)):
        docs = []
        for record in batch:
            raw_text = record["text"]
            # Records may store their annotations under "entities" or "spans"
            if "entities" in record:
                ents = record["entities"]
            else:
                ents = record["spans"]
            ents = [(e["start"], e["end"], e["label"]) for e in ents]
            doc = nlp.make_doc(raw_text)
            sentencizer(doc)
            # char_span returns None for offsets that don't align to token
            # boundaries; _cleanup_spans drops those, trims whitespace and
            # resolves overlaps
            spans = [doc.char_span(s, e, label=L) for s, e, L in ents]
            doc.ents = _cleanup_spans(spans)
            docs.append(doc)
        json_docs.append(docs_to_json(docs, id=i))
    return json_docs
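
# An illustration (hypothetical record) of the JSONL input this converter
# expects, one JSON object per line:
#
#     {"text": "I like London.", "spans": [{"start": 7, "end": 13, "label": "GPE"}]}
#
# ner_jsonl2json(input_data, lang="en") tokenizes each text with the blank
# language class, splits sentences with the sentencizer, aligns the character
# offsets to tokens and groups every n_sents records into one output document
# in spaCy's JSON training format via docs_to_json.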


def _cleanup_spans(spans):
    output = []
    seen = set()
    for span in spans:
        if span is not None:
            # Trim leading and trailing whitespace tokens
            while len(span) and span[0].is_space:
                span = span[1:]
            while len(span) and span[-1].is_space:
                span = span[:-1]
            if not len(span):
                continue
            # Keep the span only if none of its tokens are covered by a span
            # we've already kept; the for/else runs the else branch when the
            # loop finishes without hitting break
            for i in range(span.start, span.end):
                if i in seen:
                    break
            else:
                output.append(span)
                seen.update(range(span.start, span.end))
    return output
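
# For instance (hypothetical token indices): given spans covering tokens
# (3, 5), (4, 6) and (7, 8), _cleanup_spans keeps (3, 5) and (7, 8) but
# drops (4, 6), because token 4 is already covered by the first span.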