Mirror of https://github.com/explosion/spaCy.git (synced 2025-10-31 07:57:35 +03:00)
* Update with WIP
* Update with WIP
* Update with pipeline serialization
* Update types and pipe factories
* Add deep merge, tidy up and add tests
* Fix pipe creation from config
* Don't validate default configs on load
* Update spacy/language.py
  Co-authored-by: Ines Montani <ines@ines.io>
* Adjust factory/component meta error
* Clean up factory args and remove defaults
* Add test for failing empty dict defaults
* Update pipeline handling and methods
* provide KB as registry function instead of as object
* small change in test to make functionality more clear
* update example script for EL configuration
* Fix typo
* Simplify test
* Simplify test
* splitting pipes.pyx into separate files
* moving default configs to each component file
* fix batch_size type
* removing default values from component constructors where possible (TODO: test 4725)
* skip instead of xfail
* Add test for config -> nlp with multiple instances
* pipeline.pipes -> pipeline.pipe
* Tidy up, document, remove kwargs
* small cleanup/generalization for Tok2VecListener
* use DEFAULT_UPSTREAM field
* revert to avoid circular imports
* Fix tests
* Replace deprecated arg
* Make model dirs require config
* fix pickling of keyword-only arguments in constructor
* WIP: clean up and integrate full config
* Add helper to handle function args more reliably
  Now also includes keyword-only args
* Fix config composition and serialization
* Improve config debugging and add visual diff
* Remove unused defaults and fix type
* Remove pipeline and factories from meta
* Update spacy/default_config.cfg
  Co-authored-by: Sofie Van Landeghem <svlandeg@users.noreply.github.com>
* Update spacy/default_config.cfg
* small UX edits
* avoid printing stack trace for debug CLI commands
* Add support for language-specific factories
* specify the section of the config which holds the model to debug
* WIP: add Language.from_config
* Update with language data refactor WIP
* Auto-format
* Add backwards-compat handling for Language.factories
* Update morphologizer.pyx
* Fix morphologizer
* Update and simplify lemmatizers
* Fix Japanese tests
* Port over tagger changes
* Fix Chinese and tests
* Update to latest Thinc
* WIP: xfail first Russian lemmatizer test
* Fix component-specific overrides
* fix nO for output layers in debug_model
* Fix default value
* Fix tests and don't pass objects in config
* Fix deep merging
* Fix lemma lookup data registry
  Only load the lookups if an entry is available in the registry (and if spacy-lookups-data is installed)
* Add types
* Add Vocab.from_config
* Fix typo
* Fix tests
* Make config copying more elegant
* Fix pipe analysis
* Fix lemmatizers and is_base_form
* WIP: move language defaults to config
* Fix morphology type
* Fix vocab
* Remove comment
* Update to latest Thinc
* Add morph rules to config
* Tidy up
* Remove set_morphology option from tagger factory
* Hack use_gpu
* Move [pipeline] to top-level block and make [nlp.pipeline] list
  Allows separating component blocks from component order – otherwise, ordering the config would mean a changed component order, which is bad. Also allows initial config to define more components and not use all of them
* Fix use_gpu and resume in CLI
* Auto-format
* Remove resume from config
* Fix formatting and error
* [pipeline] -> [components]
* Fix types
* Fix tagger test: requires set_morphology?

Co-authored-by: Sofie Van Landeghem <svlandeg@users.noreply.github.com>
Co-authored-by: svlandeg <sofie.vanlandeghem@gmail.com>
Co-authored-by: Matthew Honnibal <honnibal+gh@gmail.com>
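The refactor summarized above moves pipeline construction into the config system: components are defined in [components.*] blocks, the component order is a plain list under [nlp], and model architectures such as the ones registered in this file are referenced by their registry names. A minimal sketch of what that enables, assuming the v3-style Language.from_config API this work introduces (exact factory and option names may differ in this in-progress snapshot):

from thinc.api import Config
from spacy.lang.en import English

CONFIG_STR = """
[nlp]
lang = "en"
pipeline = ["textcat"]

[components.textcat]
factory = "textcat"

[components.textcat.model]
@architectures = "spacy.TextCatBOW.v1"
exclusive_classes = false
ngram_size = 1
no_output_layer = false
"""

config = Config().from_str(CONFIG_STR)
# Build the pipeline from the config; the textcat component and its model are
# created via the registered factory and architecture functions.
nlp = English.from_config(config)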
		
			
				
	
	
		
181 lines · 6.4 KiB · Python
from typing import Optional
from thinc.api import Model, reduce_mean, Linear, list2ragged, Logistic
from thinc.api import chain, concatenate, clone, Dropout, ParametricAttention
from thinc.api import SparseLinear, Softmax, softmax_activation, Maxout, reduce_sum
from thinc.api import HashEmbed, with_ragged, with_array, with_cpu, uniqued
from thinc.api import Relu, residual, expand_window, FeatureExtractor

from ..spacy_vectors import SpacyVectors
from ... import util
from ...attrs import ID, ORTH, PREFIX, SUFFIX, SHAPE, LOWER
from ...util import registry
from ..extract_ngrams import extract_ngrams


| @registry.architectures.register("spacy.TextCatCNN.v1")
 | |
| def build_simple_cnn_text_classifier(
 | |
|     tok2vec: Model, exclusive_classes: bool, nO: Optional[int] = None
 | |
| ) -> Model:
 | |
|     """
 | |
|     Build a simple CNN text classifier, given a token-to-vector model as inputs.
 | |
|     If exclusive_classes=True, a softmax non-linearity is applied, so that the
 | |
|     outputs sum to 1. If exclusive_classes=False, a logistic non-linearity
 | |
|     is applied instead, so that outputs are in the range [0, 1].
 | |
|     """
 | |
|     with Model.define_operators({">>": chain}):
 | |
|         if exclusive_classes:
 | |
|             output_layer = Softmax(nO=nO, nI=tok2vec.get_dim("nO"))
 | |
|             model = tok2vec >> list2ragged() >> reduce_mean() >> output_layer
 | |
|             model.set_ref("output_layer", output_layer)
 | |
|         else:
 | |
|             linear_layer = Linear(nO=nO, nI=tok2vec.get_dim("nO"))
 | |
|             model = (
 | |
|                 tok2vec >> list2ragged() >> reduce_mean() >> linear_layer >> Logistic()
 | |
|             )
 | |
|             model.set_ref("output_layer", linear_layer)
 | |
|     model.set_ref("tok2vec", tok2vec)
 | |
|     model.set_dim("nO", nO)
 | |
|     model.attrs["multi_label"] = not exclusive_classes
 | |
|     return model
 | |
| 
 | |
| 
 | |
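# Usage sketch (hypothetical, not part of this file): a registered architecture
# is normally referenced by name from a [components.*.model] config block, but
# it can also be resolved and called directly. "my_tok2vec" stands in for any
# Thinc model that maps docs to per-token vectors and already has its output
# dimension ("nO") set:
#
#     build_cnn = registry.architectures.get("spacy.TextCatCNN.v1")
#     textcat_model = build_cnn(tok2vec=my_tok2vec, exclusive_classes=True, nO=4)
#     textcat_model.initialize()
#     scores = textcat_model.predict(docs)  # one row per doc; rows sum to 1.0

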
| @registry.architectures.register("spacy.TextCatBOW.v1")
 | |
| def build_bow_text_classifier(exclusive_classes, ngram_size, no_output_layer, nO=None):
 | |
|     with Model.define_operators({">>": chain}):
 | |
|         sparse_linear = SparseLinear(nO)
 | |
|         model = extract_ngrams(ngram_size, attr=ORTH) >> sparse_linear
 | |
|         model = with_cpu(model, model.ops)
 | |
|         if not no_output_layer:
 | |
|             output_layer = softmax_activation() if exclusive_classes else Logistic()
 | |
|             model = model >> with_cpu(output_layer, output_layer.ops)
 | |
|     model.set_ref("output_layer", sparse_linear)
 | |
|     model.attrs["multi_label"] = not exclusive_classes
 | |
|     return model
 | |
| 
 | |
| 
 | |
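# Usage sketch (hypothetical): unlike the CNN and ensemble models, the
# bag-of-words model needs no tok2vec layer or pretrained vectors, so it can be
# built standalone; extract_ngrams hashes the n-gram features and SparseLinear
# scores them:
#
#     bow = build_bow_text_classifier(
#         exclusive_classes=False, ngram_size=1, no_output_layer=False, nO=3
#     )
#     bow.initialize()  # or let the textcat component initialize it with sample data

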
| @registry.architectures.register("spacy.TextCat.v1")
 | |
| def build_text_classifier(
 | |
|     width,
 | |
|     embed_size,
 | |
|     pretrained_vectors,
 | |
|     exclusive_classes,
 | |
|     ngram_size,
 | |
|     window_size,
 | |
|     conv_depth,
 | |
|     dropout,
 | |
|     nO=None,
 | |
| ):
 | |
|     cols = [ORTH, LOWER, PREFIX, SUFFIX, SHAPE, ID]
 | |
|     with Model.define_operators({">>": chain, "|": concatenate, "**": clone}):
 | |
|         lower = HashEmbed(
 | |
|             nO=width, nV=embed_size, column=cols.index(LOWER), dropout=dropout, seed=10
 | |
|         )
 | |
|         prefix = HashEmbed(
 | |
|             nO=width // 2,
 | |
|             nV=embed_size,
 | |
|             column=cols.index(PREFIX),
 | |
|             dropout=dropout,
 | |
|             seed=11,
 | |
|         )
 | |
|         suffix = HashEmbed(
 | |
|             nO=width // 2,
 | |
|             nV=embed_size,
 | |
|             column=cols.index(SUFFIX),
 | |
|             dropout=dropout,
 | |
|             seed=12,
 | |
|         )
 | |
|         shape = HashEmbed(
 | |
|             nO=width // 2,
 | |
|             nV=embed_size,
 | |
|             column=cols.index(SHAPE),
 | |
|             dropout=dropout,
 | |
|             seed=13,
 | |
|         )
 | |
| 
 | |
|         width_nI = sum(layer.get_dim("nO") for layer in [lower, prefix, suffix, shape])
 | |
|         trained_vectors = FeatureExtractor(cols) >> with_array(
 | |
|             uniqued(
 | |
|                 (lower | prefix | suffix | shape)
 | |
|                 >> Maxout(nO=width, nI=width_nI, normalize=True),
 | |
|                 column=cols.index(ORTH),
 | |
|             )
 | |
|         )
 | |
| 
 | |
|         if pretrained_vectors:
 | |
|             nlp = util.load_model(pretrained_vectors)
 | |
|             vectors = nlp.vocab.vectors
 | |
|             vector_dim = vectors.data.shape[1]
 | |
| 
 | |
|             static_vectors = SpacyVectors(vectors) >> with_array(
 | |
|                 Linear(width, vector_dim)
 | |
|             )
 | |
|             vector_layer = trained_vectors | static_vectors
 | |
|             vectors_width = width * 2
 | |
|         else:
 | |
|             vector_layer = trained_vectors
 | |
|             vectors_width = width
 | |
|         tok2vec = vector_layer >> with_array(
 | |
|             Maxout(width, vectors_width, normalize=True)
 | |
|             >> residual(
 | |
|                 (
 | |
|                     expand_window(window_size=window_size)
 | |
|                     >> Maxout(
 | |
|                         nO=width, nI=width * ((window_size * 2) + 1), normalize=True
 | |
|                     )
 | |
|                 )
 | |
|             )
 | |
|             ** conv_depth,
 | |
|             pad=conv_depth,
 | |
|         )
 | |
|         cnn_model = (
 | |
|             tok2vec
 | |
|             >> list2ragged()
 | |
|             >> ParametricAttention(width)
 | |
|             >> reduce_sum()
 | |
|             >> residual(Maxout(nO=width, nI=width))
 | |
|             >> Linear(nO=nO, nI=width)
 | |
|             >> Dropout(0.0)
 | |
|         )
 | |
| 
 | |
|         linear_model = build_bow_text_classifier(
 | |
|             nO=nO,
 | |
|             ngram_size=ngram_size,
 | |
|             exclusive_classes=exclusive_classes,
 | |
|             no_output_layer=False,
 | |
|         )
 | |
|         nO_double = nO * 2 if nO else None
 | |
|         if exclusive_classes:
 | |
|             output_layer = Softmax(nO=nO, nI=nO_double)
 | |
|         else:
 | |
|             output_layer = Linear(nO=nO, nI=nO_double) >> Dropout(0.0) >> Logistic()
 | |
|         model = (linear_model | cnn_model) >> output_layer
 | |
|         model.set_ref("tok2vec", tok2vec)
 | |
|     if model.has_dim("nO") is not False:
 | |
|         model.set_dim("nO", nO)
 | |
|     model.set_ref("output_layer", linear_model.get_ref("output_layer"))
 | |
|     model.attrs["multi_label"] = not exclusive_classes
 | |
|     return model
 | |
| 
 | |
| 
 | |
| @registry.architectures.register("spacy.TextCatLowData.v1")
 | |
| def build_text_classifier_lowdata(width, pretrained_vectors, dropout, nO=None):
 | |
|     nlp = util.load_model(pretrained_vectors)
 | |
|     vectors = nlp.vocab.vectors
 | |
|     vector_dim = vectors.data.shape[1]
 | |
| 
 | |
|     # Note, before v.3, this was the default if setting "low_data" and "pretrained_dims"
 | |
|     with Model.define_operators({">>": chain, "**": clone}):
 | |
|         model = (
 | |
|             SpacyVectors(vectors)
 | |
|             >> list2ragged()
 | |
|             >> with_ragged(0, Linear(width, vector_dim))
 | |
|             >> ParametricAttention(width)
 | |
|             >> reduce_sum()
 | |
|             >> residual(Relu(width, width)) ** 2
 | |
|             >> Linear(nO, width)
 | |
|         )
 | |
|         if dropout:
 | |
|             model = model >> Dropout(dropout)
 | |
|         model = model >> Logistic()
 | |
|     return model
 |