In order to support Python 3.13, we had to migrate to Cython 3.0. This caused some tricky interactions with our Pydantic usage, because Cython 3 uses the from __future__ import annotations semantics, which causes type annotations to be stored as strings. The end result is that we can't have Language.factory-decorated functions in Cython modules anymore: the Language.factory decorator expects to inspect the signature of the function and build a Pydantic model, and if the function is implemented in Cython, an error is raised because the annotations can't be resolved.

To address this I've moved the factory functions into a new module, spacy.pipeline.factories. I've added __getattr__ importlib hooks to the previous locations, in case anyone was importing these functions directly, so the change should have no backwards compatibility implications.

Along the way I've also refactored how functions are registered for the config. Previously these registrations ran as import-time side-effects, using the registry decorator. Instead, I've created a new module, spacy.registrations. When the registry is accessed, it calls a function ensure_populated(), which causes the registrations to occur (see the sketch below). I've made a similar change to the Language.factory registrations in the new spacy.pipeline.factories module. I want to remove these import-time side-effects so that we can speed up the loading time of the library, which can be especially painful on the CLI. I also find that I'm often working to track down the implementations of functions referenced by strings in the config; having the registrations all happen in one place will make this easier.

With these changes I've fortunately avoided the need to migrate to Pydantic v2 properly: we're still using the v1 compatibility shim. We might not be able to hold out forever, though: Pydantic (reasonably) aren't actively supporting the v1 shims. I put a lot of work into v2 migration when investigating the 3.13 support, and it's definitely challenging. In any case, it's a relief that we don't have to do the v2 migration at the same time as the Cython 3.0/Python 3.13 support.
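To make the lazy-registration idea concrete, here is a minimal sketch of the pattern, not spaCy's actual code: the registry defers its population until first access, so importing the library stays cheap. All names here (populate_registry, get, _REGISTRY) are illustrative; only ensure_populated() mirrors the function named above.

# registrations.py -- illustrative sketch, not spaCy's real module
_REGISTRY = {}
_populated = False


def populate_registry():
    # All registrations happen here, in one place, so the implementations
    # of functions referenced by strings in the config are easy to find.
    _REGISTRY["my_scorer.v1"] = lambda: "scorer implementation"


def ensure_populated():
    # Called whenever the registry is accessed. Idempotent: the work runs
    # once, on first access, rather than as an import-time side-effect.
    global _populated
    if not _populated:
        populate_registry()
        _populated = True


def get(name):
    ensure_populated()
    return _REGISTRY[name]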
179 lines
5.6 KiB
Cython
# cython: infer_types=True, binding=True
import importlib
import sys
from collections import defaultdict
from typing import Callable, Optional

from thinc.api import Config, Model

from ._parser_internals.transition_system import TransitionSystem

from ._parser_internals.arc_eager cimport ArcEager
from .transition_parser cimport Parser

from ..language import Language
from ..scorer import Scorer
from ..training import remove_bilu_prefix
from ..util import registry
from ._parser_internals import nonproj
from ._parser_internals.nonproj import DELIMITER
from .functions import merge_subtokens

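# Default transition-based parser architecture, expressed as a config string
# and parsed below with Config().from_str; the [model] section becomes the
# component's default model.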
default_model_config = """
[model]
@architectures = "spacy.TransitionBasedParser.v2"
state_type = "parser"
extra_state_tokens = false
hidden_width = 64
maxout_pieces = 2
use_upper = true

[model.tok2vec]
@architectures = "spacy.HashEmbedCNN.v2"
pretrained_vectors = null
width = 96
depth = 4
embed_size = 2000
window_size = 1
maxout_pieces = 3
subword_features = true
"""
DEFAULT_PARSER_MODEL = Config().from_str(default_model_config)["model"]


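# parser_score is the default scorer for this component; make_parser_scorer
# below returns it so the config system can resolve the scorer by name (its
# registration now lives centrally in spacy.registrations, as described in
# the note above).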
def parser_score(examples, **kwargs):
    """Score a batch of examples.

    examples (Iterable[Example]): The examples to score.
    RETURNS (Dict[str, Any]): The scores, produced by Scorer.score_spans
        and Scorer.score_deps.

    DOCS: https://spacy.io/api/dependencyparser#score
    """
    def has_sents(doc):
        return doc.has_annotation("SENT_START")

    def dep_getter(token, attr):
        dep = getattr(token, attr)
        dep = token.vocab.strings.as_string(dep).lower()
        return dep

    results = {}
    results.update(Scorer.score_spans(examples, "sents", has_annotation=has_sents, **kwargs))
    kwargs.setdefault("getter", dep_getter)
    kwargs.setdefault("ignore_labels", ("p", "punct"))
    results.update(Scorer.score_deps(examples, "dep", **kwargs))
    del results["sents_per_type"]
    return results


def make_parser_scorer():
    return parser_score


cdef class DependencyParser(Parser):
    """Pipeline component for dependency parsing.

    DOCS: https://spacy.io/api/dependencyparser
    """
    TransitionSystem = ArcEager

    def __init__(
        self,
        vocab,
        model,
        name="parser",
        moves=None,
        *,
        update_with_oracle_cut_size=100,
        min_action_freq=30,
        learn_tokens=False,
        beam_width=1,
        beam_density=0.0,
        beam_update_prob=0.0,
        multitasks=tuple(),
        incorrect_spans_key=None,
        scorer=parser_score,
    ):
        """Create a DependencyParser."""
        super().__init__(
            vocab,
            model,
            name,
            moves,
            update_with_oracle_cut_size=update_with_oracle_cut_size,
            min_action_freq=min_action_freq,
            learn_tokens=learn_tokens,
            beam_width=beam_width,
            beam_density=beam_density,
            beam_update_prob=beam_update_prob,
            multitasks=multitasks,
            incorrect_spans_key=incorrect_spans_key,
            scorer=scorer,
        )

    @property
    def postprocesses(self):
        output = [nonproj.deprojectivize]
        if self.cfg.get("learn_tokens") is True:
            output.append(merge_subtokens)
        return tuple(output)

    def add_multitask_objective(self, mt_component):
        self._multitasks.append(mt_component)

    def init_multitask_objectives(self, get_examples, nlp=None, **cfg):
        # TODO: transfer self.model.get_ref("tok2vec") to the multitask's model?
        for labeller in self._multitasks:
            labeller.model.set_dim("nO", len(self.labels))
            if labeller.model.has_ref("output_layer"):
                labeller.model.get_ref("output_layer").set_dim("nO", len(self.labels))
            labeller.initialize(get_examples, nlp=nlp)

    @property
    def labels(self):
        labels = set()
        # Get the labels from the model by looking at the available moves.
        # Move names carry a transition prefix (e.g. "L-nsubj"), which is
        # stripped; for labels joined with DELIMITER during pseudo-projective
        # preprocessing, only the part after the delimiter is kept.
        for move in self.move_names:
            if "-" in move:
                label = remove_bilu_prefix(move)
                if DELIMITER in label:
                    label = label.split(DELIMITER)[1]
                labels.add(label)
        return tuple(sorted(labels))

    def scored_parses(self, beams):
        """Return two dictionaries with scores for each beam/doc that was processed:
        one containing (i, head) keys, and another containing (i, label) keys.
        """
        head_scores = []
        label_scores = []
        for beam in beams:
            score_head_dict = defaultdict(float)
            score_label_dict = defaultdict(float)
            for score, parses in self.moves.get_beam_parses(beam):
                for head, i, label in parses:
                    score_head_dict[(i, head)] += score
                    score_label_dict[(i, label)] += score
            head_scores.append(score_head_dict)
            label_scores.append(score_label_dict)
        return head_scores, label_scores

    def _ensure_labels_are_added(self, docs):
        # This gives the parser a chance to add labels it's missing for a batch
        # of documents. However, this isn't desirable for the dependency parser,
        # because we instead have a label frequency cut-off and back off rare
        # labels to 'dep'.
        pass


# Setup backwards compatibility hook for factories
def __getattr__(name):
    if name == "make_parser":
        module = importlib.import_module("spacy.pipeline.factories")
        return module.make_parser
    elif name == "make_beam_parser":
        module = importlib.import_module("spacy.pipeline.factories")
        return module.make_beam_parser
    raise AttributeError(f"module {__name__} has no attribute {name}")
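As a usage note: the hook above is the module-level __getattr__ from PEP 562, so old import paths keep resolving even though the factories now live in spacy.pipeline.factories. Assuming this module is spacy.pipeline.dep_parser, both of the following imports yield the same function:

# Old location: resolved lazily through the __getattr__ hook above
from spacy.pipeline.dep_parser import make_parser

# New location: imports the factory directly
from spacy.pipeline.factories import make_parser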