spaCy/spacy/cli/converters/iob2json.py

# coding: utf8
from __future__ import unicode_literals

import re

from cytoolz import partition_all

from .._messages import Messages
from ...compat import json_dumps, path2str
from ...util import prints
from ...gold import iob_to_biluo

def iob2json(input_path, output_path, n_sents=10, *a, **k):
"""
Convert IOB files into JSON format for use with train cli.
"""
2017-05-28 16:11:39 +03:00
with input_path.open('r', encoding='utf8') as file_:
2017-10-02 18:02:10 +03:00
sentences = read_iob(file_)
docs = merge_sentences(sentences, n_sents)
output_filename = (input_path.parts[-1]
.replace(".iob2", ".json")
.replace(".iob", ".json"))
2017-05-19 21:27:51 +03:00
output_file = output_path / output_filename
with output_file.open('w', encoding='utf-8') as f:
2017-10-02 18:02:10 +03:00
f.write(json_dumps(docs))
prints(Messages.M033.format(n_docs=len(docs)),
title=Messages.M032.format(name=path2str(output_file)))
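
# A minimal usage sketch (hypothetical paths; in practice the converter
# is wired up through spaCy's command-line interface):
#
#     from pathlib import Path
#     iob2json(Path('train.iob'), Path('output'), n_sents=10)
#
# This would write output/train.json, with every 10 consecutive
# sentences merged into a single doc.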

def read_iob(raw_sents):
    sentences = []
    for line in raw_sents:
        if not line.strip():
            continue
        # Each whitespace-separated token carries its annotation inline,
        # e.g. "London|NNP|B-GPE" (word, POS tag, IOB tag) or
        # "London|B-GPE" (no POS column); split every token on the
        # non-word separator to recover its fields.
        tokens = [re.split(r'[^\w\-]', t) for t in line.strip().split()]
        if len(tokens[0]) == 3:
            words, pos, iob = zip(*tokens)
        else:
            words, iob = zip(*tokens)
            pos = ['-'] * len(words)
        biluo = iob_to_biluo(iob)
        sentences.append([
            {'orth': w, 'tag': p, 'ner': ent}
            for (w, p, ent) in zip(words, pos, biluo)
        ])
    sentences = [{'tokens': sent} for sent in sentences]
    paragraphs = [{'sentences': [sent]} for sent in sentences]
    docs = [{'id': 0, 'paragraphs': [para]} for para in paragraphs]
    return docs
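
# A worked example of the structure read_iob() returns, assuming the
# hypothetical input line "I|PRP|O like|VBP|O London|NNP|B-GPE" (note
# that iob_to_biluo turns the single-token B-GPE span into U-GPE):
#
#     [{'id': 0, 'paragraphs': [{'sentences': [{'tokens': [
#         {'orth': 'I', 'tag': 'PRP', 'ner': 'O'},
#         {'orth': 'like', 'tag': 'VBP', 'ner': 'O'},
#         {'orth': 'London', 'tag': 'NNP', 'ner': 'U-GPE'},
#     ]}]}]}]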

def merge_sentences(docs, n_sents):
    merged = []
    for group in partition_all(n_sents, docs):
        group = list(group)
        # Keep the first doc of each group and fold the sentences of all
        # remaining docs into it. The first doc has already been popped,
        # so iterate over the whole remainder; slicing with group[1:]
        # here would silently drop one sentence per group.
        first = group.pop(0)
        to_extend = first['paragraphs'][0]['sentences']
        for sent in group:
            to_extend.extend(sent['paragraphs'][0]['sentences'])
        merged.append(first)
    return merged
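
# For example, with n_sents=10 and 25 single-sentence docs from
# read_iob(), merge_sentences() returns 3 docs holding 10, 10 and 5
# sentences respectively (partition_all yields the short final group
# as-is).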