mirror of
https://github.com/explosion/spaCy.git
synced 2024-11-13 13:17:06 +03:00
333b1a308b
* Draft layer for BILUO actions * Fixes to biluo layer * WIP on BILUO layer * Add tests for BILUO layer * Format * Fix transitions * Update test * Link in the simple_ner * Update BILUO tagger * Update __init__ * Import simple_ner * Update test * Import * Add files * Add config * Fix label passing for BILUO and tagger * Fix label handling for simple_ner component * Update simple NER test * Update config * Hack train script * Update BILUO layer * Fix SimpleNER component * Update train_from_config * Add biluo_to_iob helper * Add IOB layer * Add IOBTagger model * Update biluo layer * Update SimpleNER tagger * Update BILUO * Read random seed in train-from-config * Update use of normal_init * Fix normalization of gradient in SimpleNER * Update IOBTagger * Remove print * Tweak masking in BILUO * Add dropout in SimpleNER * Update thinc * Tidy up simple_ner * Fix biluo model * Unhack train-from-config * Update setup.cfg and requirements * Add tb_framework.py for parser model * Try to avoid memory leak in BILUO * Move ParserModel into spacy.ml, avoid need for subclass. * Use updated parser model * Remove incorrect call to model.initializre in PrecomputableAffine * Update parser model * Avoid divide by zero in tagger * Add extra dropout layer in tagger * Refine minibatch_by_words function to avoid oom * Fix parser model after refactor * Try to avoid div-by-zero in SimpleNER * Fix infinite loop in minibatch_by_words * Use SequenceCategoricalCrossentropy in Tagger * Fix parser model when hidden layer * Remove extra dropout from tagger * Add extra nan check in tagger * Fix thinc version * Update tests and imports * Fix test * Update test * Update tests * Fix tests * Fix test Co-authored-by: Ines Montani <ines@ines.io>
39 lines
1.1 KiB
Python
39 lines
1.1 KiB
Python
from pydantic import StrictInt
|
|
from thinc.api import Model, chain, list2array, Linear, zero_init, use_ops, with_array
|
|
|
|
from ...util import registry
|
|
from .._precomputable_affine import PrecomputableAffine
|
|
from ..tb_framework import TransitionModel
|
|
|
|
|
|
@registry.architectures.register("spacy.TransitionBasedParser.v1")
def build_tb_parser_model(
    tok2vec: Model,
    nr_feature_tokens: StrictInt,
    hidden_width: StrictInt,
    maxout_pieces: StrictInt,
    use_upper=True,
    nO=None,
):
    """Construct the transition-based parser model and register it under
    "spacy.TransitionBasedParser.v1".

    tok2vec: the embedding/encoding layer producing token vectors.
    nr_feature_tokens: number of state tokens fed to the lower layer (nF).
    hidden_width: width of the hidden representation.
    maxout_pieces: number of maxout pieces for the lower layer (nP).
    use_upper: whether to add a zero-initialized Linear output layer on top;
        if False, the lower layer produces the output scores directly.
    nO: number of output classes (may be None and set later by the pipeline —
        TODO confirm against caller).

    Returns a TransitionModel wrapping the tok2vec, lower, and optional
    upper layers.
    """
    # Read the incoming vector width before wrapping tok2vec in the chain.
    t2v_width = tok2vec.get_dim("nO")
    # Project token vectors down to hidden_width, then flatten the per-doc
    # list of arrays into a single array.
    tok2vec = chain(
        tok2vec,
        with_array(Linear(hidden_width, t2v_width)),
        list2array(),
    )
    tok2vec.set_dim("nO", hidden_width)
    # With an upper layer the lower layer emits the hidden state; without
    # one it must emit the class scores (nO) itself.
    if use_upper:
        lower_nO = hidden_width
    else:
        lower_nO = nO
    lower = PrecomputableAffine(
        nO=lower_nO,
        nF=nr_feature_tokens,
        nI=tok2vec.get_dim("nO"),
        nP=maxout_pieces,
    )
    if use_upper:
        with use_ops("numpy"):
            # Initialize weights at zero, as it's a classification layer.
            upper = Linear(nO=nO, init_W=zero_init)
    else:
        upper = None
    return TransitionModel(tok2vec, lower, upper)
|