2017-03-15 17:29:42 +03:00
|
|
|
# cython: profile=True
|
2017-04-15 13:05:47 +03:00
|
|
|
# coding: utf8
|
2016-10-09 13:24:24 +03:00
|
|
|
from __future__ import unicode_literals, print_function
|
|
|
|
|
2015-05-24 22:50:48 +03:00
|
|
|
import re
|
2017-05-21 17:06:17 +03:00
|
|
|
import random
|
2018-03-27 20:23:02 +03:00
|
|
|
import numpy
|
|
|
|
import tempfile
|
|
|
|
import shutil
|
2019-08-28 10:14:20 +03:00
|
|
|
import itertools
|
2018-03-27 20:23:02 +03:00
|
|
|
from pathlib import Path
|
💫 Replace ujson, msgpack and dill/pickle/cloudpickle with srsly (#3003)
Remove hacks and wrappers, keep code in sync across our libraries and move spaCy a few steps closer to only depending on packages with binary wheels 🎉
See here: https://github.com/explosion/srsly
Serialization is hard, especially across Python versions and multiple platforms. After dealing with many subtle bugs over the years (encodings, locales, large files) our libraries like spaCy and Prodigy have steadily grown a number of utility functions to wrap the multiple serialization formats we need to support (especially json, msgpack and pickle). These wrapping functions ended up duplicated across our codebases, so we wanted to put them in one place.
At the same time, we noticed that having a lot of small dependencies was making maintainence harder, and making installation slower. To solve this, we've made srsly standalone, by including the component packages directly within it. This way we can provide all the serialization utilities we need in a single binary wheel.
srsly currently includes forks of the following packages:
ujson
msgpack
msgpack-numpy
cloudpickle
* WIP: replace json/ujson with srsly
* Replace ujson in examples
Use regular json instead of srsly to make code easier to read and follow
* Update requirements
* Fix imports
* Fix typos
* Replace msgpack with srsly
* Fix warning
2018-12-03 03:28:22 +03:00
|
|
|
import srsly
|
2018-03-27 20:23:02 +03:00
|
|
|
|
2018-11-30 22:16:14 +03:00
|
|
|
from . import _align
|
2016-03-01 12:09:08 +03:00
|
|
|
from .syntax import nonproj
|
2019-02-06 13:50:26 +03:00
|
|
|
from .tokens import Doc, Span
|
2018-04-03 16:50:31 +03:00
|
|
|
from .errors import Errors
|
2019-03-08 13:42:26 +03:00
|
|
|
from .compat import path2str
|
2017-10-27 22:07:59 +03:00
|
|
|
from . import util
|
2018-03-27 20:23:02 +03:00
|
|
|
from .util import minibatch, itershuffle
|
2016-02-22 16:40:40 +03:00
|
|
|
|
2018-03-27 20:23:02 +03:00
|
|
|
from libc.stdio cimport FILE, fopen, fclose, fread, fwrite, feof, fseek
|
2015-02-21 19:06:58 +03:00
|
|
|
|
2018-11-28 20:04:58 +03:00
|
|
|
|
2019-03-08 13:42:26 +03:00
|
|
|
# Matches any single non-word character (punctuation/whitespace/symbols).
punct_re = re.compile(r"\W")
|
|
|
|
|
|
|
|
|
2015-06-08 01:54:13 +03:00
|
|
|
def tags_to_entities(tags):
    """Convert a sequence of BILUO tags into entity spans.

    tags (iterable): Per-token BILUO strings, e.g. "B-PER", "U-LOC".
        `None` and "-" entries are skipped.
    RETURNS (list): `(label, start, end)` tuples with inclusive token ends.
    RAISES (ValueError): On a dangling "I" tag or an unrecognised prefix.
    """
    spans = []
    span_start = None
    for idx, tag in enumerate(tags):
        if tag is None or tag == "-":
            continue
        prefix = tag[:1]
        if prefix == "O":
            # TODO: We shouldn't be getting these malformed inputs. Fix this.
            span_start = None
            continue
        if prefix == "I":
            if span_start is None:
                raise ValueError(Errors.E067.format(tags=tags[:idx + 1]))
            continue
        if prefix == "U":
            # Single-token entity.
            spans.append((tag[2:], idx, idx))
        elif prefix == "B":
            span_start = idx
        elif prefix == "L":
            # Close the span opened by the matching "B" tag.
            spans.append((tag[2:], span_start, idx))
            span_start = None
        else:
            raise ValueError(Errors.E068.format(tag=tag))
    return spans
|
|
|
|
|
|
|
|
|
2016-10-13 04:24:29 +03:00
|
|
|
def merge_sents(sents):
    """Flatten per-sentence annotations into one paragraph-level annotation.

    Token ids and head indices are re-offset so they stay consistent after
    concatenation; bracket offsets are shifted the same way.

    sents (list): `((ids, words, tags, heads, labels, ner), (cats, brackets))`
        tuples, one per sentence.
    RETURNS (list): A single-element list holding the merged annotation in
        the same tuple layout.
    """
    ids_out = []
    words_out = []
    tags_out = []
    heads_out = []
    labels_out = []
    ner_out = []
    cats_out = {}
    brackets_out = []
    offset = 0
    for (ids, words, tags, heads, labels, ner), (cats, brackets) in sents:
        ids_out.extend(id_ + offset for id_ in ids)
        words_out.extend(words)
        tags_out.extend(tags)
        heads_out.extend(head + offset for head in heads)
        labels_out.extend(labels)
        ner_out.extend(ner)
        brackets_out.extend((b["first"] + offset, b["last"] + offset, b["label"])
                            for b in brackets)
        cats_out.update(cats)
        offset += len(ids)
    merged = [ids_out, words_out, tags_out, heads_out, labels_out, ner_out]
    return [(merged, (cats_out, brackets_out))]
|
2016-10-13 04:24:29 +03:00
|
|
|
|
2015-06-08 01:54:13 +03:00
|
|
|
|
2019-07-17 15:29:52 +03:00
|
|
|
def align(tokens_a, tokens_b):
    """Calculate alignment tables between two tokenizations, using the
    Levenshtein algorithm. The alignment is case-insensitive.

    tokens_a (List[str]): The candidate tokenization.
    tokens_b (List[str]): The reference tokenization.
    RETURNS: (tuple): A 5-tuple consisting of the following information:
        * cost (int): The number of misaligned tokens.
        * a2b (List[int]): Mapping of indices in `tokens_a` to indices in
          `tokens_b`. If there's no one-to-one alignment for a token, it has
          the value -1.
        * b2a (List[int]): The same as `a2b`, but mapping the other direction.
        * a2b_multi (Dict[int, int]): Indices in `tokens_a` mapped to indices
          in `tokens_b` where multiple `tokens_a` tokens align to the same
          `tokens_b` token.
        * b2a_multi (Dict[int, int]): As with `a2b_multi`, the other way.
    """
    # Fast path: identical tokenizations align trivially.
    if tokens_a == tokens_b:
        identity = numpy.arange(len(tokens_a))
        return 0, identity, identity, {}, {}
    # Normalize case and strip internal spaces before aligning.
    norm_a = [tok.replace(" ", "").lower() for tok in tokens_a]
    norm_b = [tok.replace(" ", "").lower() for tok in tokens_b]
    cost, a2b, b2a, matrix = _align.align(norm_a, norm_b)
    a2b_multi, b2a_multi = _align.multi_align(
        a2b, b2a, [len(tok) for tok in norm_a], [len(tok) for tok in norm_b])
    # Promote multi-alignments that are actually unambiguous: if neither
    # neighbouring token maps to the same target, treat it as one-to-one.
    for idx, target in list(a2b_multi.items()):
        if a2b_multi.get(idx + 1) != target and a2b_multi.get(idx - 1) != target:
            a2b[idx] = target
            a2b_multi.pop(idx)
    for idx, target in list(b2a_multi.items()):
        if b2a_multi.get(idx + 1) != target and b2a_multi.get(idx - 1) != target:
            b2a[idx] = target
            b2a_multi.pop(idx)
    return cost, a2b, b2a, a2b_multi, b2a_multi
|
2015-05-24 22:50:48 +03:00
|
|
|
|
2015-05-27 20:13:11 +03:00
|
|
|
|
2017-05-21 17:06:17 +03:00
|
|
|
class GoldCorpus(object):
    """An annotated corpus, using the JSON file format. Manages
    annotations for tagging, dependency parsing and NER.

    DOCS: https://spacy.io/api/goldcorpus
    """
    def __init__(self, train, dev, gold_preproc=False, limit=None):
        """Create a GoldCorpus.

        train (unicode / Path / iterable): File or directory of training
            data, or an iterable of already-loaded gold tuples.
        dev (unicode / Path / iterable): Same, for the development split.
        gold_preproc (bool): Accepted but not used in __init__; preprocessing
            is chosen per call in `train_docs`/`dev_docs`.
        limit (int or None): Cap on how many examples are written per split.
        RETURNS (GoldCorpus): The newly created object.
        """
        self.limit = limit
        # NOTE(review): only `train` is type-checked here; assumes `dev` is
        # the same kind of input (path vs. iterable) as `train`.
        if isinstance(train, str) or isinstance(train, Path):
            train = self.read_tuples(self.walk_corpus(train))
            dev = self.read_tuples(self.walk_corpus(dev))
        # Write temp directory with one doc per file, so we can shuffle and stream
        self.tmp_dir = Path(tempfile.mkdtemp())
        self.write_msgpack(self.tmp_dir / "train", train, limit=self.limit)
        self.write_msgpack(self.tmp_dir / "dev", dev, limit=self.limit)

    def __del__(self):
        # Best-effort cleanup of the temp directory created in __init__.
        shutil.rmtree(path2str(self.tmp_dir))

    @staticmethod
    def write_msgpack(directory, doc_tuples, limit=0):
        """Serialize each doc tuple to its own numbered .msg file under
        `directory`, stopping once `limit` (counted via len(doc_tuple[1]))
        is reached. A limit of 0 means no limit.
        """
        if not directory.exists():
            directory.mkdir()
        n = 0
        for i, doc_tuple in enumerate(doc_tuples):
            srsly.write_msgpack(directory / "{}.msg".format(i), [doc_tuple])
            n += len(doc_tuple[1])
            if limit and n >= limit:
                break

    @staticmethod
    def walk_corpus(path):
        """Collect all .json/.jsonl files under `path` (recursively),
        skipping dot-files/dot-directories. A non-directory `path` is
        returned as the single location.
        """
        path = util.ensure_path(path)
        if not path.is_dir():
            return [path]
        paths = [path]
        locs = []
        seen = set()
        # Breadth-first walk: directories found along the way are appended
        # to `paths` while it is being iterated.
        for path in paths:
            if str(path) in seen:
                continue
            seen.add(str(path))
            if path.parts[-1].startswith("."):
                continue
            elif path.is_dir():
                paths.extend(path.iterdir())
            elif path.parts[-1].endswith((".json", ".jsonl")):
                locs.append(path)
        return locs

    @staticmethod
    def read_tuples(locs, limit=0):
        """Yield gold tuples from .json, .jsonl or .msg files, stopping
        after `limit` items (counted via len(item[1])). 0 means no limit.

        RAISES (ValueError): For an unsupported file extension.
        """
        i = 0
        for loc in locs:
            loc = util.ensure_path(loc)
            if loc.parts[-1].endswith("json"):
                gold_tuples = read_json_file(loc)
            elif loc.parts[-1].endswith("jsonl"):
                gold_tuples = srsly.read_jsonl(loc)
            elif loc.parts[-1].endswith("msg"):
                gold_tuples = srsly.read_msgpack(loc)
            else:
                supported = ("json", "jsonl", "msg")
                raise ValueError(Errors.E124.format(path=path2str(loc), formats=supported))
            for item in gold_tuples:
                yield item
                i += len(item[1])
                if limit and i >= limit:
                    return

    @property
    def dev_tuples(self):
        # Stream dev tuples back from the temp directory written in __init__.
        locs = (self.tmp_dir / "dev").iterdir()
        yield from self.read_tuples(locs, limit=self.limit)

    @property
    def train_tuples(self):
        # Stream training tuples back from the temp directory.
        locs = (self.tmp_dir / "train").iterdir()
        yield from self.read_tuples(locs, limit=self.limit)

    def count_train(self):
        """Return the number of words in the training data, honouring
        `self.limit` (which caps the number of documents counted).
        """
        n = 0
        i = 0
        for raw_text, paragraph_tuples in self.train_tuples:
            # sent_tuples[1] is the list of words for the sentence.
            for sent_tuples, brackets in paragraph_tuples:
                n += len(sent_tuples[1])
            if self.limit and i >= self.limit:
                break
            i += 1
        return n

    def train_docs(self, nlp, gold_preproc=False, max_length=None,
                   noise_level=0.0, orth_variant_level=0.0):
        """Yield (Doc, GoldParse) pairs over the training data, in a fresh
        random order each call. Training always projectivizes heads.
        """
        locs = list((self.tmp_dir / 'train').iterdir())
        random.shuffle(locs)
        train_tuples = self.read_tuples(locs, limit=self.limit)
        gold_docs = self.iter_gold_docs(nlp, train_tuples, gold_preproc,
                                        max_length=max_length,
                                        noise_level=noise_level,
                                        orth_variant_level=orth_variant_level,
                                        make_projective=True)
        yield from gold_docs

    def train_docs_without_preprocessing(self, nlp, gold_preproc=False):
        """Yield (Doc, GoldParse) pairs over the training data without
        shuffling, noise, orth variants or projectivization.
        """
        gold_docs = self.iter_gold_docs(nlp, self.train_tuples, gold_preproc=gold_preproc)
        yield from gold_docs

    def dev_docs(self, nlp, gold_preproc=False):
        """Yield (Doc, GoldParse) pairs over the development data."""
        gold_docs = self.iter_gold_docs(nlp, self.dev_tuples, gold_preproc=gold_preproc)
        yield from gold_docs

    @classmethod
    def iter_gold_docs(cls, nlp, tuples, gold_preproc, max_length=None,
                       noise_level=0.0, orth_variant_level=0.0, make_projective=False):
        """Turn raw gold tuples into (Doc, GoldParse) pairs.

        With gold_preproc, gold sentence boundaries/tokenization are kept
        (raw text is discarded); otherwise sentences are merged into one
        paragraph per doc. Docs of `max_length` tokens or more are skipped.
        """
        for raw_text, paragraph_tuples in tuples:
            if gold_preproc:
                raw_text = None
            else:
                paragraph_tuples = merge_sents(paragraph_tuples)
            docs, paragraph_tuples = cls._make_docs(nlp, raw_text,
                    paragraph_tuples, gold_preproc, noise_level=noise_level,
                    orth_variant_level=orth_variant_level)
            golds = cls._make_golds(docs, paragraph_tuples, make_projective)
            for doc, gold in zip(docs, golds):
                if (not max_length) or len(doc) < max_length:
                    yield doc, gold

    @classmethod
    def _make_docs(cls, nlp, raw_text, paragraph_tuples, gold_preproc, noise_level=0.0, orth_variant_level=0.0):
        """Build Doc objects: one doc from raw text when available, else one
        doc per sentence from the gold tokenization. Returns the (possibly
        orth-variant-modified) paragraph tuples alongside the docs.
        """
        if raw_text is not None:
            raw_text, paragraph_tuples = make_orth_variants(nlp, raw_text, paragraph_tuples, orth_variant_level=orth_variant_level)
            raw_text = add_noise(raw_text, noise_level)
            return [nlp.make_doc(raw_text)], paragraph_tuples
        else:
            raw_text, paragraph_tuples = make_orth_variants(nlp, None, paragraph_tuples, orth_variant_level=orth_variant_level)
            # sent_tuples[1] is the word list for each sentence.
            return [Doc(nlp.vocab, words=add_noise(sent_tuples[1], noise_level))
                    for (sent_tuples, brackets) in paragraph_tuples], paragraph_tuples

    @classmethod
    def _make_golds(cls, docs, paragraph_tuples, make_projective):
        """Pair each doc with a GoldParse built from its annotation tuple.

        RAISES (ValueError): If the number of docs and annotations differ.
        """
        if len(docs) != len(paragraph_tuples):
            n_annots = len(paragraph_tuples)
            raise ValueError(Errors.E070.format(n_docs=len(docs), n_annots=n_annots))
        return [GoldParse.from_annot_tuples(doc, sent_tuples, cats=cats,
                                            make_projective=make_projective)
                for doc, (sent_tuples, (cats, brackets))
                in zip(docs, paragraph_tuples)]
|
2017-05-21 17:06:17 +03:00
|
|
|
|
|
|
|
|
2019-08-28 10:14:20 +03:00
|
|
|
def make_orth_variants(nlp, raw, paragraph_tuples, orth_variant_level=0.0):
    """Randomly introduce orthographic variants (alternative punctuation,
    lowercasing) into gold annotations, to make trained models more robust.

    nlp (Language): Pipeline providing `Defaults.single_orth_variants` and
        `Defaults.paired_orth_variants`.
    raw (unicode or None): The raw paragraph text, if available.
    paragraph_tuples (list): `((ids, words, tags, heads, labels, ner),
        brackets)` sentence tuples for the paragraph.
    orth_variant_level (float): Probability in [0, 1] of applying variants.
    RETURNS (tuple): (raw, paragraph_tuples), either modified copies or the
        unchanged inputs (also on failure to re-match `raw`).
    """
    if random.random() >= orth_variant_level:
        return raw, paragraph_tuples
    # BUGFIX: `lower` must be initialised here. Previously it was only
    # assigned inside the branch below, so `if lower:` later raised
    # NameError whenever the coin flip failed.
    lower = False
    if random.random() >= 0.5:
        lower = True
        if raw is not None:
            raw = raw.lower()
    ndsv = nlp.Defaults.single_orth_variants
    ndpv = nlp.Defaults.paired_orth_variants
    # modify words in paragraph_tuples
    variant_paragraph_tuples = []
    for sent_tuples, brackets in paragraph_tuples:
        ids, words, tags, heads, labels, ner = sent_tuples
        if lower:
            words = [w.lower() for w in words]
        # single variants: pick one variant per group, applied consistently
        # across the sentence
        punct_choices = [random.choice(x["variants"]) for x in ndsv]
        for word_idx in range(len(words)):
            for punct_idx in range(len(ndsv)):
                if tags[word_idx] in ndsv[punct_idx]["tags"] \
                        and words[word_idx] in ndsv[punct_idx]["variants"]:
                    words[word_idx] = punct_choices[punct_idx]
        # paired variants (e.g. opening/closing quotes)
        punct_choices = [random.choice(x["variants"]) for x in ndpv]
        for word_idx in range(len(words)):
            for punct_idx in range(len(ndpv)):
                if tags[word_idx] in ndpv[punct_idx]["tags"] \
                        and words[word_idx] in itertools.chain.from_iterable(ndpv[punct_idx]["variants"]):
                    # backup option: random left vs. right from pair
                    pair_idx = random.choice([0, 1])
                    # best option: rely on paired POS tags like `` / ''
                    if len(ndpv[punct_idx]["tags"]) == 2:
                        pair_idx = ndpv[punct_idx]["tags"].index(tags[word_idx])
                    # next best option: rely on position in variants
                    # (may not be unambiguous, so order of variants matters)
                    else:
                        for pair in ndpv[punct_idx]["variants"]:
                            if words[word_idx] in pair:
                                pair_idx = pair.index(words[word_idx])
                    words[word_idx] = punct_choices[punct_idx][pair_idx]
        variant_paragraph_tuples.append(((ids, words, tags, heads, labels, ner), brackets))
    # modify raw to match variant_paragraph_tuples
    if raw is not None:
        variants = []
        for single_variants in ndsv:
            variants.extend(single_variants["variants"])
        for paired_variants in ndpv:
            variants.extend(list(itertools.chain.from_iterable(paired_variants["variants"])))
        # store variants in reverse length order to be able to prioritize
        # longer matches (e.g., "---" before "--")
        variants = sorted(variants, key=lambda x: len(x))
        variants.reverse()
        variant_raw = ""
        raw_idx = 0
        # add initial whitespace (raw strings: "\s" would be an invalid
        # escape sequence)
        while raw_idx < len(raw) and re.match(r"\s", raw[raw_idx]):
            variant_raw += raw[raw_idx]
            raw_idx += 1
        for sent_tuples, brackets in variant_paragraph_tuples:
            ids, words, tags, heads, labels, ner = sent_tuples
            for word in words:
                match_found = False
                # add identical word
                if word not in variants and raw[raw_idx:].startswith(word):
                    variant_raw += word
                    raw_idx += len(word)
                    match_found = True
                # add variant word
                else:
                    for variant in variants:
                        if not match_found and \
                                raw[raw_idx:].startswith(variant):
                            raw_idx += len(variant)
                            variant_raw += word
                            match_found = True
                # something went wrong, abort
                # (add a warning message?)
                if not match_found:
                    return raw, paragraph_tuples
                # add following whitespace
                while raw_idx < len(raw) and re.match(r"\s", raw[raw_idx]):
                    variant_raw += raw[raw_idx]
                    raw_idx += 1
        return variant_raw, variant_paragraph_tuples
    return raw, variant_paragraph_tuples
|
2019-08-28 10:14:20 +03:00
|
|
|
|
|
|
|
|
2017-06-05 04:16:57 +03:00
|
|
|
def add_noise(orig, noise_level):
    """With probability `noise_level`, corrupt the input via `_corrupt`.

    orig (list or unicode): A list of words, or a raw text string.
    noise_level (float): Chance of corruption, in [0, 1].
    RETURNS: The input unchanged, or a corrupted copy of the same type
        (corrupted-away words are dropped from lists).
    """
    if random.random() >= noise_level:
        return orig
    if type(orig) == list:
        noisy = (_corrupt(word, noise_level) for word in orig)
        # Drop words that were corrupted to the empty string.
        return [word for word in noisy if word]
    return "".join(_corrupt(char, noise_level) for char in orig)
|
2017-06-05 04:16:57 +03:00
|
|
|
|
|
|
|
|
2019-08-29 16:39:32 +03:00
|
|
|
def _corrupt(c, noise_level):
|
2017-06-05 04:16:57 +03:00
|
|
|
if random.random() >= noise_level:
|
|
|
|
return c
|
2019-03-08 13:42:26 +03:00
|
|
|
elif c in [".", "'", "!", "?", ","]:
|
2019-08-29 16:39:32 +03:00
|
|
|
return "\n"
|
2017-06-05 04:16:57 +03:00
|
|
|
else:
|
|
|
|
return c.lower()
|
|
|
|
|
|
|
|
|
2018-11-30 22:16:14 +03:00
|
|
|
def read_json_object(json_corpus_section):
    """Take a list of JSON-formatted documents (e.g. from an already loaded
    training data file) and yield tuples in the GoldParse format.

    json_corpus_section (list): The data.
    YIELDS (tuple): The reformatted data.
    """
    for json_doc in json_corpus_section:
        yield from json_to_tuple(json_doc)
|
|
|
|
|
|
|
|
|
|
|
|
def json_to_tuple(doc):
    """Convert an item in the JSON-formatted training data to the tuple
    format used by GoldParse.

    doc (dict): One entry in the training data.
    YIELDS (tuple): `[raw_text, sentences]` per paragraph; paragraphs with
        no sentences are skipped.
    """
    for paragraph in doc["paragraphs"]:
        cats = {}
        for cat in paragraph.get("cats", {}):
            cats[cat["label"]] = cat["value"]
        sents = []
        for sent in paragraph["sentences"]:
            ids = []
            words = []
            tags = []
            heads = []
            labels = []
            ner = []
            for i, token in enumerate(sent["tokens"]):
                words.append(token["orth"])
                ids.append(i)
                tags.append(token.get('tag', "-"))
                # Heads are stored as relative offsets; make them absolute.
                heads.append(token.get("head", 0) + i)
                dep = token.get("dep", "")
                # Ensure ROOT label is case-insensitive
                labels.append("ROOT" if dep.lower() == "root" else dep)
                ner.append(token.get("ner", "-"))
            sents.append([[ids, words, tags, heads, labels, ner],
                          [cats, sent.get("brackets", [])]])
        if sents:
            yield [paragraph.get("raw", None), sents]
|
2018-11-30 22:16:14 +03:00
|
|
|
|
|
|
|
|
2017-05-22 12:48:02 +03:00
|
|
|
def read_json_file(loc, docs_filter=None, limit=None):
    """Read gold tuples from a JSON-format file, or from every file inside
    a directory.

    loc (unicode or Path): The file or directory to read.
    docs_filter (callable or None): Optional predicate over the parsed JSON
        document; documents for which it returns falsy are skipped.
    limit (int or None): Accepted for API compatibility; not applied here.
    YIELDS (tuple): Paragraph tuples in the GoldParse format.
    """
    loc = util.ensure_path(loc)
    if loc.is_dir():
        for filename in loc.iterdir():
            # Fix: propagate docs_filter on the recursive call — it was
            # previously dropped, silently disabling filtering for
            # directory inputs.
            yield from read_json_file(loc / filename, docs_filter=docs_filter,
                                      limit=limit)
    else:
        for doc in _json_iterate(loc):
            if docs_filter is not None and not docs_filter(doc):
                continue
            for json_tuple in json_to_tuple(doc):
                yield json_tuple
|
2015-05-06 17:27:31 +03:00
|
|
|
|
|
|
|
|
2018-03-27 20:23:02 +03:00
|
|
|
def _json_iterate(loc):
    """Yield each top-level JSON object from a file laid out as one big
    JSON array of documents, without parsing the whole array at once.

    loc (unicode or Path): Path to the JSON file.
    YIELDS (dict): One parsed document per top-level object in the array.
    """
    # We should've made these files jsonl...But since we didn't, parse out
    # the docs one-by-one to reduce memory usage.
    # It's okay to read in the whole file -- just don't parse it into JSON.
    cdef bytes py_raw
    loc = util.ensure_path(loc)
    with loc.open("rb") as file_:
        py_raw = file_.read()
    raw = <char*>py_raw
    # Bracket-depth scanner state: track square/curly nesting plus string
    # and escape state to find the byte span of each top-level object.
    cdef int square_depth = 0
    cdef int curly_depth = 0
    cdef int inside_string = 0
    cdef int escape = 0
    cdef int start = -1
    cdef char c
    cdef char quote = ord('"')
    cdef char backslash = ord("\\")
    cdef char open_square = ord("[")
    cdef char close_square = ord("]")
    cdef char open_curly = ord("{")
    cdef char close_curly = ord("}")
    for i in range(len(py_raw)):
        c = raw[i]
        if escape:
            # Previous byte was a backslash: this byte is escaped, skip it.
            escape = False
            continue
        if c == backslash:
            escape = True
            continue
        if c == quote:
            inside_string = not inside_string
            continue
        if inside_string:
            # Brackets inside string literals don't affect nesting.
            continue
        if c == open_square:
            square_depth += 1
        elif c == close_square:
            square_depth -= 1
        elif c == open_curly:
            if square_depth == 1 and curly_depth == 0:
                # Start of a top-level document object inside the array.
                start = i
            curly_depth += 1
        elif c == close_curly:
            curly_depth -= 1
            if square_depth == 1 and curly_depth == 0:
                # End of a top-level object: parse just this span.
                py_str = py_raw[start : i + 1].decode("utf8")
                try:
                    yield srsly.json_loads(py_str)
                except Exception:
                    # Print the offending span to ease debugging, then re-raise.
                    print(py_str)
                    raise
                start = -1
|
|
|
|
|
|
|
|
|
2017-05-26 19:32:55 +03:00
|
|
|
def iob_to_biluo(tags):
    """Convert IOB entity tags to the BILUO scheme.

    tags (iterable): IOB tag strings, e.g. ["B-PER", "I-PER", "O"].
    RETURNS (list): The tags rewritten as BILUO, e.g. ["B-PER", "L-PER", "O"].
    """
    # Removed unused local `curr_label` from the original implementation.
    out = []
    tags = list(tags)
    while tags:
        # Alternate between consuming a run of "O" tags and one entity,
        # both of which pop from the front of `tags`.
        out.extend(_consume_os(tags))
        out.extend(_consume_ent(tags))
    return out
|
|
|
|
|
|
|
|
|
|
|
|
def _consume_os(tags):
|
2019-03-08 13:42:26 +03:00
|
|
|
while tags and tags[0] == "O":
|
2015-04-10 05:59:11 +03:00
|
|
|
yield tags.pop(0)
|
|
|
|
|
|
|
|
|
|
|
|
def _consume_ent(tags):
|
|
|
|
if not tags:
|
|
|
|
return []
|
2018-05-30 13:28:44 +03:00
|
|
|
tag = tags.pop(0)
|
2019-03-08 13:42:26 +03:00
|
|
|
target_in = "I" + tag[1:]
|
|
|
|
target_last = "L" + tag[1:]
|
2015-04-10 05:59:11 +03:00
|
|
|
length = 1
|
2018-05-30 13:28:44 +03:00
|
|
|
while tags and tags[0] in {target_in, target_last}:
|
2015-04-10 05:59:11 +03:00
|
|
|
length += 1
|
|
|
|
tags.pop(0)
|
2018-05-30 13:28:44 +03:00
|
|
|
label = tag[2:]
|
2015-04-10 05:59:11 +03:00
|
|
|
if length == 1:
|
2019-03-08 13:42:26 +03:00
|
|
|
return ["U-" + label]
|
2015-04-10 05:59:11 +03:00
|
|
|
else:
|
2019-03-08 13:42:26 +03:00
|
|
|
start = "B-" + label
|
|
|
|
end = "L-" + label
|
|
|
|
middle = ["I-%s" % label for _ in range(1, length - 1)]
|
2015-04-10 05:59:11 +03:00
|
|
|
return [start] + middle + [end]
|
|
|
|
|
|
|
|
|
2015-03-09 08:46:22 +03:00
|
|
|
cdef class GoldParse:
    """Collection for training annotations.

    DOCS: https://spacy.io/api/goldparse
    """
    @classmethod
    def from_annot_tuples(cls, doc, annot_tuples, cats=None, make_projective=False):
        # Unpack the (ids, words, tags, heads, deps, entities) tuple produced
        # by the training-data readers; the ids column is not needed here.
        _, words, tags, heads, deps, entities = annot_tuples
        return cls(doc, words=words, tags=tags, heads=heads, deps=deps,
                   entities=entities, cats=cats,
                   make_projective=make_projective)

    def __init__(self, doc, annot_tuples=None, words=None, tags=None, morphology=None,
                 heads=None, deps=None, entities=None, make_projective=False,
                 cats=None, links=None, **_):
        """Create a GoldParse.

        doc (Doc): The document the annotations refer to.
        words (iterable): A sequence of unicode word strings.
        tags (iterable): A sequence of strings, representing tag annotations.
        morphology (iterable): A sequence of per-word morphology annotations.
        heads (iterable): A sequence of integers, representing syntactic
            head offsets.
        deps (iterable): A sequence of strings, representing the syntactic
            relation types.
        entities (iterable): A sequence of named entity annotations, either as
            BILUO tag strings, or as `(start_char, end_char, label)` tuples,
            representing the entity positions.
        make_projective (bool): Whether to projectivize the heads/deps before
            alignment.
        cats (dict): Labels for text classification. Each key in the dictionary
            may be a string or an int, or a `(start_char, end_char, label)`
            tuple, indicating that the label is applied to only part of the
            document (usually a sentence). Unlike entity annotations, label
            annotations can overlap, i.e. a single word can be covered by
            multiple labelled spans. The TextCategorizer component expects
            true examples of a label to have the value 1.0, and negative
            examples of a label to have the value 0.0. Labels not in the
            dictionary are treated as missing - the gradient for those labels
            will be zero.
        links (dict): A dict with `(start_char, end_char)` keys,
            and the values being dicts with kb_id:value entries,
            representing the external IDs in a knowledge base (KB)
            mapped to either 1.0 or 0.0, indicating positive and
            negative examples respectively.
        RETURNS (GoldParse): The newly constructed object.
        """
        # Fill in defaults for any annotation layer that wasn't supplied.
        if words is None:
            words = [token.text for token in doc]
        if tags is None:
            tags = [None for _ in words]
        if heads is None:
            heads = [None for _ in words]
        if deps is None:
            deps = [None for _ in words]
        if morphology is None:
            morphology = [None for _ in words]
        if entities is None:
            # "-" means the NER annotation is missing for this token.
            entities = ["-" for _ in doc]
        elif len(entities) == 0:
            entities = ["O" for _ in doc]
        else:
            # Translate the None values to '-', to make processing easier.
            # See Issue #2603
            entities = [(ent if ent is not None else "-") for ent in entities]
            if not isinstance(entities[0], basestring):
                # Assume we have entities specified by character offset.
                entities = biluo_tags_from_offsets(doc, entities)
        self.mem = Pool()
        self.loss = 0
        self.length = len(doc)

        # These are filled by the tagger/parser/entity recogniser
        self.c.tags = <int*>self.mem.alloc(len(doc), sizeof(int))
        self.c.heads = <int*>self.mem.alloc(len(doc), sizeof(int))
        self.c.labels = <attr_t*>self.mem.alloc(len(doc), sizeof(attr_t))
        self.c.has_dep = <int*>self.mem.alloc(len(doc), sizeof(int))
        self.c.sent_start = <int*>self.mem.alloc(len(doc), sizeof(int))
        self.c.ner = <Transition*>self.mem.alloc(len(doc), sizeof(Transition))

        self.cats = {} if cats is None else dict(cats)
        self.links = links
        # Per-candidate-token gold annotations, filled by the alignment below.
        self.words = [None] * len(doc)
        self.tags = [None] * len(doc)
        self.heads = [None] * len(doc)
        self.labels = [None] * len(doc)
        self.ner = [None] * len(doc)
        self.morphology = [None] * len(doc)

        # This needs to be done before we align the words
        if make_projective and heads is not None and deps is not None:
            heads, deps = nonproj.projectivize(heads, deps)

        # Do many-to-one alignment for misaligned tokens.
        # If we over-segment, we'll have one gold word that covers a sequence
        # of predicted words
        # If we under-segment, we'll have one predicted word that covers a
        # sequence of gold words.
        # If we "mis-segment", we'll have a sequence of predicted words covering
        # a sequence of gold words. That's many-to-many -- we don't do that.
        # NOTE(review): the alignment cost returned by align() is unused here.
        cost, i2j, j2i, i2j_multi, j2i_multi = align([t.orth_ for t in doc], words)

        # Negative indices from align() mean "no one-to-one alignment".
        self.cand_to_gold = [(j if j >= 0 else None) for j in i2j]
        self.gold_to_cand = [(i if i >= 0 else None) for i in j2i]

        annot_tuples = (range(len(words)), words, tags, heads, deps, entities)
        self.orig_annot = list(zip(*annot_tuples))

        for i, gold_i in enumerate(self.cand_to_gold):
            if doc[i].text.isspace():
                # Whitespace tokens get the special "_SP" tag and no syntax.
                self.words[i] = doc[i].text
                self.tags[i] = "_SP"
                self.heads[i] = None
                self.labels[i] = None
                self.ner[i] = None
                self.morphology[i] = set()
            if gold_i is None:
                if i in i2j_multi:
                    # This candidate token is one piece of an over-segmented
                    # gold word: copy the gold word's annotations onto it.
                    self.words[i] = words[i2j_multi[i]]
                    self.tags[i] = tags[i2j_multi[i]]
                    self.morphology[i] = morphology[i2j_multi[i]]
                    is_last = i2j_multi[i] != i2j_multi.get(i+1)
                    is_first = i2j_multi[i] != i2j_multi.get(i-1)
                    # Set next word in multi-token span as head, until last
                    if not is_last:
                        self.heads[i] = i+1
                        self.labels[i] = "subtok"
                    else:
                        self.heads[i] = self.gold_to_cand[heads[i2j_multi[i]]]
                        self.labels[i] = deps[i2j_multi[i]]
                    # Now set NER...This is annoying because if we've split
                    # got an entity word split into two, we need to adjust the
                    # BILUO tags. We can't have BB or LL etc.
                    # Case 1: O -- easy.
                    ner_tag = entities[i2j_multi[i]]
                    if ner_tag == "O":
                        self.ner[i] = "O"
                    # Case 2: U. This has to become a B I* L sequence.
                    elif ner_tag.startswith("U-"):
                        if is_first:
                            self.ner[i] = ner_tag.replace("U-", "B-", 1)
                        elif is_last:
                            self.ner[i] = ner_tag.replace("U-", "L-", 1)
                        else:
                            self.ner[i] = ner_tag.replace("U-", "I-", 1)
                    # Case 3: L. If not last, change to I.
                    elif ner_tag.startswith("L-"):
                        if is_last:
                            self.ner[i] = ner_tag
                        else:
                            self.ner[i] = ner_tag.replace("L-", "I-", 1)
                    # Case 4: I. Stays correct
                    elif ner_tag.startswith("I-"):
                        self.ner[i] = ner_tag
            else:
                # One-to-one alignment: copy the gold annotations directly.
                self.words[i] = words[gold_i]
                self.tags[i] = tags[gold_i]
                self.morphology[i] = morphology[gold_i]
                if heads[gold_i] is None:
                    self.heads[i] = None
                else:
                    # Map the gold head index back into candidate-token space.
                    self.heads[i] = self.gold_to_cand[heads[gold_i]]
                self.labels[i] = deps[gold_i]
                self.ner[i] = entities[gold_i]

        # Prevent whitespace that isn't within entities from being tagged as
        # an entity.
        for i in range(len(self.ner)):
            if self.tags[i] == "_SP":
                prev_ner = self.ner[i-1] if i >= 1 else None
                next_ner = self.ner[i+1] if (i+1) < len(self.ner) else None
                if prev_ner == "O" or next_ner == "O":
                    self.ner[i] = "O"

        # Projectivization above should prevent cycles, but a malformed tree
        # in the input can still produce one -- fail loudly rather than loop.
        cycle = nonproj.contains_cycle(self.heads)
        if cycle is not None:
            raise ValueError(Errors.E069.format(cycle=cycle,
                cycle_tokens=" ".join(["'{}'".format(self.words[tok_id]) for tok_id in cycle]),
                doc_tokens=" ".join(words[:50])))

    def __len__(self):
        """Get the number of gold-standard tokens.

        RETURNS (int): The number of gold-standard tokens.
        """
        return self.length

    @property
    def is_projective(self):
        """Whether the provided syntactic annotations form a projective
        dependency tree.
        """
        return not nonproj.is_nonproj_tree(self.heads)

    property sent_starts:
        # Sentence-boundary flags per candidate token: 1 = starts a sentence,
        # -1 = does not, 0 = unknown/missing.
        def __get__(self):
            return [self.c.sent_start[i] for i in range(self.length)]

        def __set__(self, sent_starts):
            # Incoming flags are indexed by gold token; map each one to the
            # aligned candidate token, skipping gold tokens with no alignment.
            for gold_i, is_sent_start in enumerate(sent_starts):
                i = self.gold_to_cand[gold_i]
                if i is not None:
                    if is_sent_start in (1, True):
                        self.c.sent_start[i] = 1
                    elif is_sent_start in (-1, False):
                        self.c.sent_start[i] = -1
                    else:
                        self.c.sent_start[i] = 0
|
2017-08-26 04:03:14 +03:00
|
|
|
|
2015-02-21 19:06:58 +03:00
|
|
|
|
2019-03-18 00:12:54 +03:00
|
|
|
def docs_to_json(docs, id=0):
    """Convert a list of Doc objects into the JSON-serializable format used by
    the spacy train command.

    docs (iterable / Doc): The Doc object(s) to convert.
    id (int): Id for the JSON.
    RETURNS (dict): The data in spaCy's JSON format.
    """
    if isinstance(docs, Doc):
        docs = [docs]
    json_doc = {"id": id, "paragraphs": []}
    for doc in docs:
        json_para = {"raw": doc.text, "sentences": [], "cats": []}
        for cat, val in doc.cats.items():
            json_para["cats"].append({"label": cat, "value": val})
        # NER is stored per token as BILUO tags derived from the entity spans.
        ent_offsets = [(e.start_char, e.end_char, e.label_) for e in doc.ents]
        biluo_tags = biluo_tags_from_offsets(doc, ent_offsets)
        for sent in doc.sents:
            json_sent = {"tokens": [], "brackets": []}
            for token in sent:
                json_token = {"id": token.i, "orth": token.text}
                if doc.is_tagged:
                    json_token["tag"] = token.tag_
                if doc.is_parsed:
                    # Heads are encoded as offsets relative to the token.
                    json_token["head"] = token.head.i - token.i
                    json_token["dep"] = token.dep_
                json_token["ner"] = biluo_tags[token.i]
                json_sent["tokens"].append(json_token)
            json_para["sentences"].append(json_sent)
        json_doc["paragraphs"].append(json_para)
    return json_doc
|
2018-08-14 14:13:10 +03:00
|
|
|
|
|
|
|
|
2019-03-08 13:42:26 +03:00
|
|
|
def biluo_tags_from_offsets(doc, entities, missing="O"):
    """Encode labelled spans into per-token tags, using the
    Begin/In/Last/Unit/Out scheme (BILUO).

    doc (Doc): The document that the entity offsets refer to. The output tags
        will refer to the token boundaries within the document.
    entities (iterable): A sequence of `(start, end, label)` triples. `start`
        and `end` should be character-offset integers denoting the slice into
        the original string.
    RETURNS (list): A list of unicode strings, describing the tags. Each tag
        string will be of the form either "", "O" or "{action}-{label}", where
        action is one of "B", "I", "L", "U". The string "-" is used where the
        entity offsets don't align with the tokenization in the `Doc` object.
        The training algorithm will view these as missing values. "O" denotes a
        non-entity token. "B" denotes the beginning of a multi-token entity,
        "I" the inside of an entity of three or more tokens, and "L" the end
        of an entity of two or more tokens. "U" denotes a single-token entity.

    EXAMPLE:
        >>> text = 'I like London.'
        >>> entities = [(len('I like '), len('I like London'), 'LOC')]
        >>> doc = nlp.tokenizer(text)
        >>> tags = biluo_tags_from_offsets(doc, entities)
        >>> assert tags == ["O", "O", 'U-LOC', "O"]
    """
    # Track which character positions are already claimed by an entity, so
    # overlapping spans raise instead of silently clobbering each other.
    claimed_chars = {}

    token_starts = {token.idx: token.i for token in doc}
    token_ends = {token.idx + len(token): token.i for token in doc}
    biluo = ["-" for _ in doc]
    for start_char, end_char, label in entities:
        for char_idx in range(start_char, end_char):
            if char_idx in claimed_chars:
                other = claimed_chars[char_idx]
                raise ValueError(Errors.E103.format(
                    span1=(other[0], other[1], other[2]),
                    span2=(start_char, end_char, label)))
            claimed_chars[char_idx] = (start_char, end_char, label)

        first_token = token_starts.get(start_char)
        last_token = token_ends.get(end_char)
        # Only mark the span if it aligns exactly with token boundaries;
        # misaligned spans leave their tokens at "-" (missing).
        if first_token is None or last_token is None:
            continue
        if first_token == last_token:
            biluo[first_token] = "U-%s" % label
        else:
            biluo[first_token] = "B-%s" % label
            for inner in range(first_token + 1, last_token):
                biluo[inner] = "I-%s" % label
            biluo[last_token] = "L-%s" % label
    # Now distinguish the O cases from ones where we miss the tokenization:
    # a token that shares no character with any entity is a true non-entity.
    entity_chars = set()
    for start_char, end_char, label in entities:
        entity_chars.update(range(start_char, end_char))
    for token in doc:
        overlaps = any(
            char_idx in entity_chars
            for char_idx in range(token.idx, token.idx + len(token)))
        if not overlaps:
            biluo[token.i] = missing
    return biluo
|
|
|
|
|
|
|
|
|
2019-02-06 13:50:26 +03:00
|
|
|
def spans_from_biluo_tags(doc, tags):
    """Encode per-token tags following the BILUO scheme into Span object, e.g.
    to overwrite the doc.ents.

    doc (Doc): The document that the BILUO tags refer to.
    entities (iterable): A sequence of BILUO tags with each tag describing one
        token. Each tags string will be of the form of either "", "O" or
        "{action}-{label}", where action is one of "B", "I", "L", "U".
    RETURNS (list): A sequence of Span objects.
    """
    # tags_to_entities yields inclusive (label, start, end) token offsets;
    # Span expects an exclusive end, hence the + 1.
    return [Span(doc, start, end + 1, label=label)
            for label, start, end in tags_to_entities(tags)]
|
|
|
|
|
|
|
|
|
2017-11-26 18:38:01 +03:00
|
|
|
def offsets_from_biluo_tags(doc, tags):
    """Encode per-token tags following the BILUO scheme into entity offsets.

    doc (Doc): The document that the BILUO tags refer to.
    entities (iterable): A sequence of BILUO tags with each tag describing one
        token. Each tags string will be of the form of either "", "O" or
        "{action}-{label}", where action is one of "B", "I", "L", "U".
    RETURNS (list): A sequence of `(start, end, label)` triples. `start` and
        `end` will be character-offset integers denoting the slice into the
        original string.
    """
    entity_spans = spans_from_biluo_tags(doc, tags)
    return [(ent.start_char, ent.end_char, ent.label_) for ent in entity_spans]
|
2017-11-26 18:38:01 +03:00
|
|
|
|
|
|
|
|
2015-02-21 19:06:58 +03:00
|
|
|
def is_punct_label(label):
    """Check whether a dependency label marks punctuation ("P" or any-case
    "punct").
    """
    if label == "P":
        return True
    return label.lower() == "punct"
|