from typing import Callable, Iterator, Dict, List, Tuple, TYPE_CHECKING
import random
import itertools
import copy
from functools import partial
from pydantic import BaseModel, StrictStr

from ..util import registry
from ..tokens import Doc
from .example import Example

if TYPE_CHECKING:
    from ..language import Language  # noqa: F401


class OrthVariantsSingle(BaseModel):
    tags: List[StrictStr]
    variants: List[StrictStr]


class OrthVariantsPaired(BaseModel):
    tags: List[StrictStr]
    variants: List[List[StrictStr]]


class OrthVariants(BaseModel):
    paired: List[OrthVariantsPaired] = []
    single: List[OrthVariantsSingle] = []
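
# The schemas above describe the expected data shape. A hedged illustration
# (the values are hypothetical, not shipped data): single variants list
# interchangeable forms for one tag, paired variants list two-sided forms
# such as opening/closing quotes.
#
#     orth_variants = {
#         "single": [{"tags": ["NFP"], "variants": ["…", "..."]}],
#         "paired": [{"tags": ["``", "''"], "variants": [["'", "'"], ["‘", "’"]]}],
#     }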


@registry.augmenters("spacy.orth_variants.v1")
def create_orth_variants_augmenter(
    level: float, lower: float, orth_variants: OrthVariants
) -> Callable[["Language", Example], Iterator[Example]]:
    """Create a data augmentation callback that uses orth-variant replacement.
    The callback can be added to a corpus or other data iterator during training.

    level (float): The proportion of texts that will be augmented (0.0 to 1.0).
    lower (float): The proportion of texts that will be lowercased (0.0 to 1.0).
    orth_variants (OrthVariants): The single and paired orth variants,
        typically loaded from a JSON file.
    RETURNS (Callable[[Language, Example], Iterator[Example]]): The augmenter.
    """
    return partial(
        orth_variants_augmenter, orth_variants=orth_variants, level=level, lower=lower
    )
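
# A minimal usage sketch (hedged; the path and variants dict are placeholders,
# and `Corpus` accepts an `augmenter` callback in spaCy v3):
#
#     from spacy.training import Corpus
#     augmenter = create_orth_variants_augmenter(
#         level=0.1, lower=0.5, orth_variants={"single": [...], "paired": [...]}
#     )
#     corpus = Corpus("corpus/train.spacy", augmenter=augmenter)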


@registry.augmenters("spacy.lower_case.v1")
def create_lower_casing_augmenter(
    level: float,
) -> Callable[["Language", Example], Iterator[Example]]:
    """Create a data augmentation callback that converts documents to lowercase.
    The callback can be added to a corpus or other data iterator during training.

    level (float): The proportion of texts that will be augmented (0.0 to 1.0).
    RETURNS (Callable[[Language, Example], Iterator[Example]]): The augmenter.
    """
    return partial(lower_casing_augmenter, level=level)
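
# In a training config, this factory would typically be referenced by its
# registry name rather than called directly (a sketch; the level value is
# illustrative):
#
#     [corpora.train.augmenter]
#     @augmenters = "spacy.lower_case.v1"
#     level = 0.1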


def dont_augment(nlp: "Language", example: Example) -> Iterator[Example]:
    yield example


def lower_casing_augmenter(
    nlp: "Language", example: Example, *, level: float
) -> Iterator[Example]:
    if random.random() >= level:
        yield example
    else:
        example_dict = example.to_dict()
        # lowercase the raw text and keep the gold tokenization aligned by
        # lowercasing the ORTH annotation to match
        doc = nlp.make_doc(example.text.lower())
        example_dict["token_annotation"]["ORTH"] = [t.lower_ for t in doc]
        yield example.from_dict(doc, example_dict)
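
# For instance (a hypothetical run with level=1.0), an Example over the text
# "Apple is big." yields an augmented Example whose tokens are
# ["apple", "is", "big", "."], with the remaining gold annotations carried over.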


def orth_variants_augmenter(
    nlp: "Language",
    example: Example,
    orth_variants: dict,
    *,
    level: float = 0.0,
    lower: float = 0.0,
) -> Iterator[Example]:
    if random.random() >= level:
        yield example
    else:
        raw_text = example.text
        orig_dict = example.to_dict()
        if not orig_dict["token_annotation"]:
            yield example
        else:
            variant_text, variant_token_annot = make_orth_variants(
                nlp,
                raw_text,
                orig_dict["token_annotation"],
                orth_variants,
                lower=raw_text is not None and random.random() < lower,
            )
            if variant_text:
                doc = nlp.make_doc(variant_text)
            else:
                # no variant raw text: rebuild the doc from the token
                # annotation and realign ORTH/SPACY with the new tokens
                doc = Doc(nlp.vocab, words=variant_token_annot["ORTH"])
                variant_token_annot["ORTH"] = [w.text for w in doc]
                variant_token_annot["SPACY"] = [w.whitespace_ for w in doc]
            orig_dict["token_annotation"] = variant_token_annot
            yield example.from_dict(doc, orig_dict)
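
# Direct use outside a corpus loop (a sketch; `nlp`, `example` and a populated
# `orth_variants` dict are assumed, and level=1.0 forces augmentation since
# random.random() is always < 1.0):
#
#     augmented = list(
#         orth_variants_augmenter(nlp, example, orth_variants, level=1.0)
#     )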


def make_orth_variants(
    nlp: "Language",
    raw: str,
    token_dict: Dict[str, List[str]],
    orth_variants: Dict[str, List[Dict[str, List[str]]]],
    *,
    lower: bool = False,
) -> Tuple[str, Dict[str, List[str]]]:
    orig_token_dict = copy.deepcopy(token_dict)
    ndsv = orth_variants.get("single", [])
    ndpv = orth_variants.get("paired", [])
    words = token_dict.get("ORTH", [])
    tags = token_dict.get("TAG", [])
    # keep unmodified if words or tags are not defined
    if words and tags:
        if lower:
            words = [w.lower() for w in words]
        # single variants
        punct_choices = [random.choice(x["variants"]) for x in ndsv]
        for word_idx in range(len(words)):
            for punct_idx in range(len(ndsv)):
                if (
                    tags[word_idx] in ndsv[punct_idx]["tags"]
                    and words[word_idx] in ndsv[punct_idx]["variants"]
                ):
                    words[word_idx] = punct_choices[punct_idx]
        # paired variants
        punct_choices = [random.choice(x["variants"]) for x in ndpv]
        for word_idx in range(len(words)):
            for punct_idx in range(len(ndpv)):
                word = words[word_idx]
                flat_variants = itertools.chain.from_iterable(
                    ndpv[punct_idx]["variants"]
                )
                if tags[word_idx] in ndpv[punct_idx]["tags"] and word in flat_variants:
                    # backup option: random left vs. right from pair
                    pair_idx = random.choice([0, 1])
                    # best option: rely on paired POS tags like `` / ''
                    if len(ndpv[punct_idx]["tags"]) == 2:
                        pair_idx = ndpv[punct_idx]["tags"].index(tags[word_idx])
                    # next best option: rely on position in variants
                    # (may not be unambiguous, so order of variants matters)
                    else:
                        for pair in ndpv[punct_idx]["variants"]:
                            if word in pair:
                                pair_idx = pair.index(word)
                    words[word_idx] = punct_choices[punct_idx][pair_idx]
        token_dict["ORTH"] = words
        token_dict["TAG"] = tags
    if raw is not None:
        variants = []
        for single_variants in ndsv:
            variants.extend(single_variants["variants"])
        for paired_variants in ndpv:
            variants.extend(
                list(itertools.chain.from_iterable(paired_variants["variants"]))
            )
        # store variants in reverse length order to be able to prioritize
        # longer matches (e.g., "---" before "--")
        variants = sorted(variants, key=len, reverse=True)
        variant_raw = ""
        raw_idx = 0
        # add initial whitespace
        while raw_idx < len(raw) and raw[raw_idx].isspace():
            variant_raw += raw[raw_idx]
            raw_idx += 1
        for word in words:
            match_found = False
            # skip whitespace words
            if word.isspace():
                match_found = True
            # add identical word
            elif word not in variants and raw[raw_idx:].startswith(word):
                variant_raw += word
                raw_idx += len(word)
                match_found = True
            # add variant word
            else:
                for variant in variants:
                    if not match_found and raw[raw_idx:].startswith(variant):
                        raw_idx += len(variant)
                        variant_raw += word
                        match_found = True
            # something went wrong, abort
            # (add a warning message?)
            if not match_found:
                return raw, orig_token_dict
            # add following whitespace
            while raw_idx < len(raw) and raw[raw_idx].isspace():
                variant_raw += raw[raw_idx]
                raw_idx += 1
        raw = variant_raw
    return raw, token_dict
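
# A worked sketch (hypothetical variants, not shipped data): given
# orth_variants = {"single": [{"tags": ["NFP"], "variants": ["…", "..."]}]},
# calling make_orth_variants(nlp, "Wait…", {"ORTH": ["Wait", "…"],
# "TAG": ["VB", "NFP"]}, orth_variants) may return the raw text "Wait..."
# with the matching token rewritten to "...", or leave it unchanged,
# depending on the random choice among the variants.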