from typing import Dict, Any

import srsly

from ..language import Language
from ..matcher import Matcher
from ..tokens import Doc
from .. import util


@Language.component(
    "merge_noun_chunks",
    requires=["token.dep", "token.tag", "token.pos"],
    retokenizes=True,
)
def merge_noun_chunks(doc: Doc) -> Doc:
    """Merge noun chunks into a single token.

    doc (Doc): The Doc object.
    RETURNS (Doc): The Doc object with merged noun chunks.

    DOCS: https://spacy.io/api/pipeline-functions#merge_noun_chunks
    """
    if not doc.has_annotation("DEP"):
        return doc
    with doc.retokenize() as retokenizer:
        for np in doc.noun_chunks:
            attrs = {"tag": np.root.tag, "dep": np.root.dep}
            retokenizer.merge(np, attrs=attrs)
    return doc


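# Illustrative usage sketch, not part of the original module: how the
# "merge_noun_chunks" component can be plugged into a trained pipeline. It
# assumes the "en_core_web_sm" package is installed; any pipeline with a
# tagger and parser works the same way, and the exact tokens depend on the
# model's predictions.
def _example_merge_noun_chunks():
    import spacy

    nlp = spacy.load("en_core_web_sm")
    nlp.add_pipe("merge_noun_chunks")
    doc = nlp("The quick brown fox jumps over the lazy dog.")
    # Noun chunks such as "The quick brown fox" now appear as single tokens.
    print([token.text for token in doc])

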
@Language.component(
    "merge_entities",
    requires=["doc.ents", "token.ent_iob", "token.ent_type"],
    retokenizes=True,
)
def merge_entities(doc: Doc) -> Doc:
    """Merge entities into a single token.

    doc (Doc): The Doc object.
    RETURNS (Doc): The Doc object with merged entities.

    DOCS: https://spacy.io/api/pipeline-functions#merge_entities
    """
    with doc.retokenize() as retokenizer:
        for ent in doc.ents:
            attrs = {"tag": ent.root.tag, "dep": ent.root.dep, "ent_type": ent.label}
            retokenizer.merge(ent, attrs=attrs)
    return doc


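# Illustrative usage sketch, not part of the original module: after adding
# "merge_entities", each named entity becomes a single token that keeps its
# entity label. Assumes the "en_core_web_sm" package is installed; the
# entities found depend on the model.
def _example_merge_entities():
    import spacy

    nlp = spacy.load("en_core_web_sm")
    nlp.add_pipe("merge_entities")
    doc = nlp("Apple was founded by Steve Jobs in Cupertino.")
    # Multi-word entities such as "Steve Jobs" are now one token each, with
    # ent_type_ carried over from the original span.
    print([(token.text, token.ent_type_) for token in doc])

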
@Language.component("merge_subtokens", requires=["token.dep"], retokenizes=True)
def merge_subtokens(doc: Doc, label: str = "subtok") -> Doc:
    """Merge subtokens into a single token.

    doc (Doc): The Doc object.
    label (str): The subtoken dependency label.
    RETURNS (Doc): The Doc object with merged subtokens.

    DOCS: https://spacy.io/api/pipeline-functions#merge_subtokens
    """
    # TODO: make stateful component with "label" config
    merger = Matcher(doc.vocab)
    merger.add("SUBTOK", [[{"DEP": label, "op": "+"}]])
    matches = merger(doc)
    spans = util.filter_spans([doc[start : end + 1] for _, start, end in matches])
    with doc.retokenize() as retokenizer:
        for span in spans:
            retokenizer.merge(span)
    return doc


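# Illustrative usage sketch, not part of the original module: "merge_subtokens"
# only has an effect after a parser that predicts the "subtok" dependency label
# (or whatever label is passed in). The package name below is a hypothetical
# placeholder, not a real pipeline.
def _example_merge_subtokens():
    import spacy

    nlp = spacy.load("xx_pipeline_with_subtok_parser")  # hypothetical package
    nlp.add_pipe("merge_subtokens")
    doc = nlp("Text whose parser marks over-segmented tokens with 'subtok'.")
    # Runs of tokens connected by "subtok" arcs are merged back together.
    print([token.text for token in doc])

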
@Language.factory(
    "token_splitter",
    default_config={"min_length": 25, "split_length": 10},
    retokenizes=True,
)
def make_token_splitter(
    nlp: Language, name: str, *, min_length: int = 0, split_length: int = 0
):
    return TokenSplitter(min_length=min_length, split_length=split_length)


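# Illustrative usage sketch, not part of the original module: the factory makes
# the splitting behaviour configurable when the component is added. Here tokens
# of 20 or more characters are split into pieces of at most 5 characters;
# without a config, the defaults above (25 and 10) apply.
def _example_token_splitter_pipe():
    import spacy

    nlp = spacy.blank("en")
    nlp.add_pipe("token_splitter", config={"min_length": 20, "split_length": 5})
    doc = nlp("Supercalifragilisticexpialidocious is quite a long word.")
    # The long token is now a sequence of short pieces.
    print([token.text for token in doc])

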
class TokenSplitter:
    """Split tokens that are at least `min_length` characters long into
    pieces of `split_length` characters."""

    def __init__(self, min_length: int = 0, split_length: int = 0):
        self.min_length = min_length
        self.split_length = split_length

    def __call__(self, doc: Doc) -> Doc:
        if self.min_length > 0 and self.split_length > 0:
            with doc.retokenize() as retokenizer:
                for t in doc:
                    if len(t.text) >= self.min_length:
                        orths = []
                        heads = []
                        attrs = {}
                        # Collect the split_length-sized pieces of the token
                        # text and the corresponding heads for the retokenizer.
                        for i in range(0, len(t.text), self.split_length):
                            orths.append(t.text[i : i + self.split_length])
                            heads.append((t, i / self.split_length))
                        retokenizer.split(t, orths, heads, attrs)
        return doc

    def _get_config(self) -> Dict[str, Any]:
        return {
            "min_length": self.min_length,
            "split_length": self.split_length,
        }

    def _set_config(self, config: Dict[str, Any] = {}) -> None:
        self.min_length = config.get("min_length", 0)
        self.split_length = config.get("split_length", 0)

    def to_bytes(self, **kwargs):
        serializers = {
            "cfg": lambda: srsly.json_dumps(self._get_config()),
        }
        return util.to_bytes(serializers, [])

    def from_bytes(self, data, **kwargs):
        deserializers = {
            "cfg": lambda b: self._set_config(srsly.json_loads(b)),
        }
        util.from_bytes(data, deserializers, [])
        return self

    def to_disk(self, path, **kwargs):
        path = util.ensure_path(path)
        serializers = {
            "cfg": lambda p: srsly.write_json(p, self._get_config()),
        }
        return util.to_disk(path, serializers, [])

    def from_disk(self, path, **kwargs):
        path = util.ensure_path(path)
        serializers = {
            "cfg": lambda p: self._set_config(srsly.read_json(p)),
        }
        util.from_disk(path, serializers, [])


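# Illustrative sketch, not part of the original module: the splitter can also
# be constructed directly, and its configuration round-trips through the
# to_bytes/from_bytes methods, which serialize only the "cfg" entry produced
# by _get_config.
def _example_token_splitter_serialization():
    splitter = TokenSplitter(min_length=20, split_length=5)
    data = splitter.to_bytes()
    restored = TokenSplitter().from_bytes(data)
    assert restored.min_length == 20
    assert restored.split_length == 5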