mirror of
				https://github.com/explosion/spaCy.git
				synced 2025-11-01 00:17:44 +03:00 
			
		
		
		
	* fix grad_clip naming * cleaning up pretrained_vectors out of cfg * further refactoring Model init's * move Model building out of pipes * further refactor to require a model config when creating a pipe * small fixes * making cfg in nn_parser more consistent * fixing nr_class for parser * fixing nn_parser's nO * fix printing of loss * architectures in own file per type, consistent naming * convenience methods default_tagger_config and default_tok2vec_config * let create_pipe access default config if available for that component * default_parser_config * move defaults to separate folder * allow reading nlp from package or dir with argument 'name' * architecture spacy.VocabVectors.v1 to read static vectors from file * cleanup * default configs for nel, textcat, morphologizer, tensorizer * fix imports * fixing unit tests * fixes and clean up * fixing defaults, nO, fix unit tests * restore parser IO * fix IO * 'fix' serialization test * add *.cfg to manifest * fix example configs with additional arguments * replace Morpohologizer with Tagger * add IO bit when testing overfitting of tagger (currently failing) * fix IO - don't initialize when reading from disk * expand overfitting tests to also check IO goes OK * remove dropout from HashEmbed to fix Tagger performance * add defaults for sentrec * update thinc * always pass a Model instance to a Pipe * fix piped_added statement * remove obsolete W029 * remove obsolete errors * restore byte checking tests (work again) * clean up test * further test cleanup * convert from config to Model in create_pipe * bring back error when component is not initialized * cleanup * remove calls for nlp2.begin_training * use thinc.api in imports * allow setting charembed's nM and nC * fix for hardcoded nM/nC + unit test * formatting fixes * trigger build
		
			
				
	
	
		
			67 lines
		
	
	
		
			1.3 KiB
		
	
	
	
		
			INI
		
	
	
	
	
	
			
		
		
	
	
			67 lines
		
	
	
		
			1.3 KiB
		
	
	
	
		
			INI
		
	
	
	
	
	
# spaCy training config (Thinc extended-INI format, not TOML):
# `@schedules` / `@optimizers` / `@architectures` keys resolve registered
# functions, and ${section:key} performs cross-section interpolation.
[training]
patience = 10000
eval_frequency = 200
dropout = 0.2
init_tok2vec = null
vectors = null
max_epochs = 100
orth_variant_level = 0.0
gold_preproc = true
max_length = 0
use_gpu = -1
scores = ["tags_acc", "uas", "las"]
score_weights = {"las": 0.8, "tags_acc": 0.2}
limit = 0

# Batch size grows from 100 toward 1000 by a factor of 1.001 per step.
[training.batch_size]
@schedules = "compounding.v1"
start = 100
stop = 1000
compound = 1.001

[optimizer]
@optimizers = "Adam.v1"
learn_rate = 0.001
beta1 = 0.9
beta2 = 0.999

[nlp]
lang = "en"
vectors = ${training:vectors}

[nlp.pipeline.tok2vec]
factory = "tok2vec"

[nlp.pipeline.tagger]
factory = "tagger"

[nlp.pipeline.parser]
factory = "parser"

[nlp.pipeline.tagger.model]
@architectures = "spacy.Tagger.v1"

# Tagger listens to the shared tok2vec component's output tensors;
# width must match the tok2vec model's width.
[nlp.pipeline.tagger.model.tok2vec]
@architectures = "spacy.Tok2VecTensors.v1"
width = ${nlp.pipeline.tok2vec.model:width}

[nlp.pipeline.parser.model]
@architectures = "spacy.TransitionBasedParser.v1"
nr_feature_tokens = 8
hidden_width = 64
maxout_pieces = 3

# Parser likewise reuses the shared tok2vec tensors.
[nlp.pipeline.parser.model.tok2vec]
@architectures = "spacy.Tok2VecTensors.v1"
width = ${nlp.pipeline.tok2vec.model:width}

[nlp.pipeline.tok2vec.model]
@architectures = "spacy.HashEmbedCNN.v1"
pretrained_vectors = ${nlp:vectors}
width = 96
depth = 4
window_size = 1
embed_size = 2000
maxout_pieces = 3
subword_features = true