2020-07-22 14:42:59 +03:00
|
|
|
from ..language import Language
|
2019-02-10 14:14:51 +03:00
|
|
|
from ..matcher import Matcher
|
2020-07-22 14:42:59 +03:00
|
|
|
from ..tokens import Doc
|
2019-10-28 17:40:28 +03:00
|
|
|
from ..util import filter_spans
|
2019-02-10 14:14:51 +03:00
|
|
|
|
|
|
|
|
2020-07-22 14:42:59 +03:00
|
|
|
@Language.component(
    "merge_noun_chunks",
    requires=["token.dep", "token.tag", "token.pos"],
    retokenizes=True,
)
def merge_noun_chunks(doc: Doc) -> Doc:
    """Merge noun chunks into a single token.

    doc (Doc): The Doc object.
    RETURNS (Doc): The Doc object with merged noun chunks.

    DOCS: https://nightly.spacy.io/api/pipeline-functions#merge_noun_chunks
    """
    # Noun chunks are derived from the dependency parse; with no "DEP"
    # annotation there is nothing to merge, so pass the doc through untouched.
    if not doc.has_annotation("DEP"):
        return doc
    with doc.retokenize() as retokenizer:
        for chunk in doc.noun_chunks:
            # Carry the chunk root's fine-grained tag and dependency label
            # over onto the merged token.
            merged_attrs = {"tag": chunk.root.tag, "dep": chunk.root.dep}
            retokenizer.merge(chunk, attrs=merged_attrs)
    return doc
|
|
|
|
|
|
|
|
|
2020-07-22 14:42:59 +03:00
|
|
|
@Language.component(
    "merge_entities",
    requires=["doc.ents", "token.ent_iob", "token.ent_type"],
    retokenizes=True,
)
def merge_entities(doc: Doc) -> Doc:
    """Merge entities into a single token.

    doc (Doc): The Doc object.
    RETURNS (Doc): The Doc object with merged entities.

    DOCS: https://nightly.spacy.io/api/pipeline-functions#merge_entities
    """
    # Fix: add the missing `-> Doc` return annotation for consistency with
    # the sibling components merge_noun_chunks and merge_subtokens.
    with doc.retokenize() as retokenizer:
        for ent in doc.ents:
            # Keep the entity root's tag and dependency label on the merged
            # token, and propagate the entity label itself via "ent_type".
            attrs = {"tag": ent.root.tag, "dep": ent.root.dep, "ent_type": ent.label}
            retokenizer.merge(ent, attrs=attrs)
    return doc
|
|
|
|
|
|
|
|
|
2020-07-22 14:42:59 +03:00
|
|
|
@Language.component("merge_subtokens", requires=["token.dep"], retokenizes=True)
def merge_subtokens(doc: Doc, label: str = "subtok") -> Doc:
    """Merge subtokens into a single token.

    doc (Doc): The Doc object.
    label (str): The subtoken dependency label.
    RETURNS (Doc): The Doc object with merged subtokens.

    DOCS: https://nightly.spacy.io/api/pipeline-functions#merge_subtokens
    """
    # TODO: make stateful component with "label" config
    matcher = Matcher(doc.vocab)
    # Match one or more consecutive tokens carrying the subtoken dep label.
    matcher.add("SUBTOK", [[{"DEP": label, "op": "+"}]])
    # Matcher ends are inclusive token indices here, hence the `end + 1`;
    # filter_spans discards overlapping candidates before retokenizing.
    merged_spans = filter_spans(
        [doc[start : end + 1] for _, start, end in matcher(doc)]
    )
    with doc.retokenize() as retokenizer:
        for span in merged_spans:
            retokenizer.merge(span)
    return doc
|