In order to support Python 3.13, we had to migrate to Cython 3.0. This caused some tricky interactions with our Pydantic usage, because Cython 3 adopts the from __future__ import annotations semantics, under which type annotations are stored as strings. The end result is that we can't have Language.factory decorated functions in Cython modules anymore: the Language.factory decorator inspects the signature of the function to build a Pydantic model, and if the function is implemented in Cython, an error is raised because the annotation cannot be resolved to a type.

To address this, I've moved the factory functions into a new module, spacy.pipeline.factories, and added __getattr__ importlib hooks at the previous locations, in case anyone was importing these functions directly. The change should have no backwards compatibility implications.

Along the way I've also refactored the registration of functions for the config. Previously these registrations ran as import-time side-effects, using the registry decorator. Instead, I've created a new module, spacy.registrations: when the registry is accessed, it calls a function ensure_populated(), which causes the registrations to occur. I've made a similar change to the Language.factory registrations in the new spacy.pipeline.factories module. I want to remove these import-time side-effects so that we can speed up the loading time of the library, which can be especially painful on the CLI. I also often find myself tracking down the implementations of functions referenced by strings in the config; having all the registrations happen in one place will make this easier.

With these changes I've fortunately avoided the need to migrate to Pydantic v2 properly: we're still using the v1 compatibility shim. We might not be able to hold out forever, though, as Pydantic (reasonably) isn't actively supporting the v1 shims. I put a lot of work into the v2 migration when investigating Python 3.13 support, and it's definitely challenging. In any case, it's a relief that we don't have to do the v2 migration at the same time as the Cython 3.0/Python 3.13 support.
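The lazy-registration pattern described above might look roughly like this. This is a minimal sketch, not the actual implementation: only the module name spacy.registrations and the function ensure_populated() come from the description; the helper import and the registered example are illustrative.

# spacy/registrations.py (sketch) -- all config registrations live here
# and run on first use of the registry, not at import time.
REGISTRY_POPULATED = False


def ensure_populated() -> None:
    # Run every registration exactly once, the first time the registry
    # is accessed.
    global REGISTRY_POPULATED
    if REGISTRY_POPULATED:
        return
    REGISTRY_POPULATED = True
    # Deferring these imports keeps "import spacy" fast and gathers all
    # string-name -> implementation mappings in one place.
    from .util import registry
    from .pipeline.senter import senter_score  # illustrative registration

    registry.scorers.register("spacy.senter_scorer.v1", func=senter_score)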
178 lines
6.4 KiB
Cython
# cython: infer_types=True, binding=True
import importlib
import sys
from typing import Callable, List, Optional

import srsly

from ..tokens.doc cimport Doc

from .. import util
from ..language import Language
from .pipe import Pipe
from .senter import senter_score

# see #9050
BACKWARD_OVERWRITE = False


class Sentencizer(Pipe):
    """Segment the Doc into sentences using a rule-based strategy.

    DOCS: https://spacy.io/api/sentencizer
    """

    default_punct_chars = [
        '!', '.', '?', '։', '؟', '۔', '܀', '܁', '܂', '߹',
        '।', '॥', '၊', '။', '።', '፧', '፨', '᙮', '᜵', '᜶', '᠃', '᠉', '᥄',
        '᥅', '᪨', '᪩', '᪪', '᪫', '᭚', '᭛', '᭞', '᭟', '᰻', '᰼', '᱾', '᱿',
        '‼', '‽', '⁇', '⁈', '⁉', '⸮', '⸼', '꓿', '꘎', '꘏', '꛳', '꛷', '꡶',
        '꡷', '꣎', '꣏', '꤯', '꧈', '꧉', '꩝', '꩞', '꩟', '꫰', '꫱', '꯫', '﹒',
        '﹖', '﹗', '!', '.', '?', '𐩖', '𐩗', '𑁇', '𑁈', '𑂾', '𑂿', '𑃀',
        '𑃁', '𑅁', '𑅂', '𑅃', '𑇅', '𑇆', '𑇍', '𑇞', '𑇟', '𑈸', '𑈹', '𑈻', '𑈼',
        '𑊩', '𑑋', '𑑌', '𑗂', '𑗃', '𑗉', '𑗊', '𑗋', '𑗌', '𑗍', '𑗎', '𑗏', '𑗐',
        '𑗑', '𑗒', '𑗓', '𑗔', '𑗕', '𑗖', '𑗗', '𑙁', '𑙂', '𑜼', '𑜽', '𑜾', '𑩂',
        '𑩃', '𑪛', '𑪜', '𑱁', '𑱂', '𖩮', '𖩯', '𖫵', '𖬷', '𖬸', '𖭄', '𛲟', '𝪈',
        '。', '。'
    ]

    def __init__(
        self,
        name="sentencizer",
        *,
        punct_chars=None,
        overwrite=BACKWARD_OVERWRITE,
        scorer=senter_score,
    ):
        """Initialize the sentencizer.

        punct_chars (list): Punctuation characters to split on. Will be
            serialized with the nlp object.
        scorer (Optional[Callable]): The scoring method. Defaults to
            Scorer.score_spans for the attribute "sents".

        DOCS: https://spacy.io/api/sentencizer#init
        """
        self.name = name
        if punct_chars:
            self.punct_chars = set(punct_chars)
        else:
            self.punct_chars = set(self.default_punct_chars)
        self.overwrite = overwrite
        self.scorer = scorer

    def __call__(self, doc):
        """Apply the sentencizer to a Doc and set Token.is_sent_start.

        doc (Doc): The document to process.
        RETURNS (Doc): The processed Doc.

        DOCS: https://spacy.io/api/sentencizer#call
        """
        error_handler = self.get_error_handler()
        try:
            tags = self.predict([doc])
            self.set_annotations([doc], tags)
            return doc
        except Exception as e:
            error_handler(self.name, self, [doc], e)

    def predict(self, docs):
        """Apply the pipe to a batch of docs, without modifying them.

        docs (Iterable[Doc]): The documents to predict.
        RETURNS: The predictions for each document.
        """
        if not any(len(doc) for doc in docs):
            # Handle cases where there are no tokens in any docs.
            guesses = [[] for doc in docs]
            return guesses
        guesses = []
        for doc in docs:
            doc_guesses = [False] * len(doc)
            if len(doc) > 0:
                start = 0
                seen_period = False
                # The first token always starts a sentence.
                doc_guesses[0] = True
                for i, token in enumerate(doc):
                    is_in_punct_chars = token.text in self.punct_chars
                    if seen_period and not token.is_punct and not is_in_punct_chars:
                        # First non-punctuation token after sentence-final
                        # punctuation: mark the start of the sentence just
                        # finished and begin a new sentence at this token.
                        doc_guesses[start] = True
                        start = token.i
                        seen_period = False
                    elif is_in_punct_chars:
                        seen_period = True
                if start < len(doc):
                    # Mark the start of the final sentence.
                    doc_guesses[start] = True
            guesses.append(doc_guesses)
        return guesses

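    # Worked example (added for illustration): for a doc with the tokens
    # ["Hello", ".", "World", "."], predict() returns
    # [[True, False, True, False]]: token 0 always opens a sentence, and
    # "World", the first non-punctuation token after ".", opens the next.
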
    def set_annotations(self, docs, batch_tag_ids):
        """Modify a batch of documents, using pre-computed scores.

        docs (Iterable[Doc]): The documents to modify.
        batch_tag_ids: The tag IDs produced by Sentencizer.predict.
        """
        if isinstance(docs, Doc):
            docs = [docs]
        cdef Doc doc
        for i, doc in enumerate(docs):
            doc_tag_ids = batch_tag_ids[i]
            for j, tag_id in enumerate(doc_tag_ids):
                # Only write boundaries that are still unset (0), unless
                # overwriting is enabled.
                if doc.c[j].sent_start == 0 or self.overwrite:
                    if tag_id:
                        doc.c[j].sent_start = 1
                    else:
                        doc.c[j].sent_start = -1

    def to_bytes(self, *, exclude=tuple()):
        """Serialize the sentencizer to a bytestring.

        RETURNS (bytes): The serialized object.

        DOCS: https://spacy.io/api/sentencizer#to_bytes
        """
        return srsly.msgpack_dumps({"punct_chars": list(self.punct_chars), "overwrite": self.overwrite})

    def from_bytes(self, bytes_data, *, exclude=tuple()):
        """Load the sentencizer from a bytestring.

        bytes_data (bytes): The data to load.
        RETURNS (Sentencizer): The loaded object.

        DOCS: https://spacy.io/api/sentencizer#from_bytes
        """
        cfg = srsly.msgpack_loads(bytes_data)
        self.punct_chars = set(cfg.get("punct_chars", self.default_punct_chars))
        self.overwrite = cfg.get("overwrite", self.overwrite)
        return self

    def to_disk(self, path, *, exclude=tuple()):
        """Serialize the sentencizer to disk.

        DOCS: https://spacy.io/api/sentencizer#to_disk
        """
        path = util.ensure_path(path)
        path = path.with_suffix(".json")
        srsly.write_json(path, {"punct_chars": list(self.punct_chars), "overwrite": self.overwrite})

    def from_disk(self, path, *, exclude=tuple()):
        """Load the sentencizer from disk.

        DOCS: https://spacy.io/api/sentencizer#from_disk
        """
        path = util.ensure_path(path)
        path = path.with_suffix(".json")
        cfg = srsly.read_json(path)
        self.punct_chars = set(cfg.get("punct_chars", self.default_punct_chars))
        self.overwrite = cfg.get("overwrite", self.overwrite)
        return self


# Setup backwards compatibility hook for factories
def __getattr__(name):
    if name == "make_sentencizer":
        module = importlib.import_module("spacy.pipeline.factories")
        return module.make_sentencizer
    raise AttributeError(f"module {__name__} has no attribute {name}")
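For reference, this is how the component and the compatibility hook behave in practice. A short sketch against the public spaCy API; the commented output is what the rule-based splitter produces for this input:

import spacy

nlp = spacy.blank("en")
nlp.add_pipe("sentencizer")
doc = nlp("This is a sentence. This is another.")
print([sent.text for sent in doc.sents])
# ['This is a sentence.', 'This is another.']

# Old-style direct imports still resolve: the module-level __getattr__
# above lazily forwards make_sentencizer to spacy.pipeline.factories.
from spacy.pipeline.sentencizer import make_sentencizer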