Mirror of https://github.com/explosion/spaCy.git, synced 2024-11-14 21:57:15 +03:00
7e4cd7575c
* Refactor `Doc.is_` flags
* Add derived `Doc.has_annotation` method
  * `Doc.has_annotation(attr)` returns `True` for partial annotation
  * `Doc.has_annotation(attr, require_complete=True)` returns `True` for complete annotation
* Add deprecation warnings to `is_tagged`, `is_parsed`, `is_sentenced` and `is_nered`
* Add `Doc._get_array_attrs()`, which returns a full list of `Doc` attrs for use with `Doc.to_array`, `Doc.to_bytes` and `Doc.from_docs`. The list is the `DocBin` attributes list plus `SPACY` and `LENGTH`.

Notes on `Doc.has_annotation`:

* `HEAD` is converted to `DEP` because heads don't have an unset state
* Accept `IS_SENT_START` as a synonym of `SENT_START`

Additional changes:

* Add `NORM`, `ENT_ID` and `SENT_START` to the default attributes for `DocBin`
* In `Doc.from_array()` the presence of `DEP` causes `HEAD` to override `SENT_START`
* In `Doc.from_array()`, using `attrs` other than `Doc._get_array_attrs()` (i.e., a user's custom list rather than our default internal list) with both `HEAD` and `SENT_START` shows a warning that `HEAD` will override `SENT_START`
* `set_children_from_heads` does not require dependency labels to set sentence boundaries and sets `sent_start` to `-1` for all non-sentence starts
* Fix call to `set_children_from_heads`

Co-authored-by: Matthew Honnibal <honnibal+gh@gmail.com>
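A minimal sketch of the `Doc.has_annotation` behaviour described in the commit message (the example sentence is illustrative, and the sketch assumes a trained pipeline such as `en_core_web_sm` is installed):

    import spacy

    nlp = spacy.load("en_core_web_sm")
    doc = nlp("Autonomous cars shift insurance liability toward manufacturers.")
    print(doc.has_annotation("DEP"))                         # True if any token has dependency annotation
    print(doc.has_annotation("DEP", require_complete=True))  # True only if every token is annotated
    print(doc.has_annotation("IS_SENT_START"))               # accepted as a synonym of SENT_START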
68 lines
2.1 KiB
Python
from ..language import Language
from ..matcher import Matcher
from ..tokens import Doc
from ..util import filter_spans


@Language.component(
    "merge_noun_chunks",
    requires=["token.dep", "token.tag", "token.pos"],
    retokenizes=True,
)
def merge_noun_chunks(doc: Doc) -> Doc:
    """Merge noun chunks into a single token.

    doc (Doc): The Doc object.
    RETURNS (Doc): The Doc object with merged noun chunks.

    DOCS: https://nightly.spacy.io/api/pipeline-functions#merge_noun_chunks
    """
    if not doc.has_annotation("DEP"):
        return doc
    with doc.retokenize() as retokenizer:
        for np in doc.noun_chunks:
            attrs = {"tag": np.root.tag, "dep": np.root.dep}
            retokenizer.merge(np, attrs=attrs)
    return doc
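
# Usage sketch (illustrative, not part of the original file): once registered
# via @Language.component, the component can be added to a pipeline by name.
# Assumes the "en_core_web_sm" model is installed.
#
#     nlp = spacy.load("en_core_web_sm")
#     nlp.add_pipe("merge_noun_chunks")
#     doc = nlp("The quick brown fox jumped over the lazy dog.")
#     # "The quick brown fox" and "the lazy dog" are now single tokens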


@Language.component(
    "merge_entities",
    requires=["doc.ents", "token.ent_iob", "token.ent_type"],
    retokenizes=True,
)
def merge_entities(doc: Doc) -> Doc:
    """Merge entities into a single token.

    doc (Doc): The Doc object.
    RETURNS (Doc): The Doc object with merged entities.

    DOCS: https://nightly.spacy.io/api/pipeline-functions#merge_entities
    """
    with doc.retokenize() as retokenizer:
        for ent in doc.ents:
            attrs = {"tag": ent.root.tag, "dep": ent.root.dep, "ent_type": ent.label}
            retokenizer.merge(ent, attrs=attrs)
    return doc
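
# Usage sketch (illustrative, not part of the original file): with this
# component in the pipeline, each named entity span becomes one token and
# keeps its entity label via the ent_type attribute.
#
#     nlp.add_pipe("merge_entities")
#     doc = nlp("Barack Obama visited Berlin.")
#     # doc[0].text == "Barack Obama"; doc[0].ent_type_ == "PERSON"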


@Language.component("merge_subtokens", requires=["token.dep"], retokenizes=True)
def merge_subtokens(doc: Doc, label: str = "subtok") -> Doc:
    """Merge subtokens into a single token.

    doc (Doc): The Doc object.
    label (str): The subtoken dependency label.
    RETURNS (Doc): The Doc object with merged subtokens.

    DOCS: https://nightly.spacy.io/api/pipeline-functions#merge_subtokens
    """
    # TODO: make stateful component with "label" config
    merger = Matcher(doc.vocab)
    merger.add("SUBTOK", [[{"DEP": label, "op": "+"}]])
    matches = merger(doc)
    spans = filter_spans([doc[start : end + 1] for _, start, end in matches])
    with doc.retokenize() as retokenizer:
        for span in spans:
            retokenizer.merge(span)
    return doc
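
# Usage sketch (illustrative, not part of the original file): the "label"
# argument is not yet exposed through the pipeline config (see the TODO
# above), so a non-default label requires calling the function directly.
# Assumes this module lives at spacy/pipeline/functions.py.
#
#     from spacy.pipeline.functions import merge_subtokens
#     doc = merge_subtokens(doc, label="subtok")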