Mirror of https://github.com/explosion/spaCy.git (synced 2025-10-31 07:57:35 +03:00)
* Update with WIP
* Update with WIP
* Update with pipeline serialization
* Update types and pipe factories
* Add deep merge, tidy up and add tests
* Fix pipe creation from config
* Don't validate default configs on load
* Update spacy/language.py
  Co-authored-by: Ines Montani <ines@ines.io>
* Adjust factory/component meta error
* Clean up factory args and remove defaults
* Add test for failing empty dict defaults
* Update pipeline handling and methods
* provide KB as registry function instead of as object
* small change in test to make functionality more clear
* update example script for EL configuration
* Fix typo
* Simplify test
* Simplify test
* splitting pipes.pyx into separate files
* moving default configs to each component file
* fix batch_size type
* removing default values from component constructors where possible (TODO: test 4725)
* skip instead of xfail
* Add test for config -> nlp with multiple instances
* pipeline.pipes -> pipeline.pipe
* Tidy up, document, remove kwargs
* small cleanup/generalization for Tok2VecListener
* use DEFAULT_UPSTREAM field
* revert to avoid circular imports
* Fix tests
* Replace deprecated arg
* Make model dirs require config
* fix pickling of keyword-only arguments in constructor
* WIP: clean up and integrate full config
* Add helper to handle function args more reliably
  Now also includes keyword-only args
* Fix config composition and serialization
* Improve config debugging and add visual diff
* Remove unused defaults and fix type
* Remove pipeline and factories from meta
* Update spacy/default_config.cfg
  Co-authored-by: Sofie Van Landeghem <svlandeg@users.noreply.github.com>
* Update spacy/default_config.cfg
* small UX edits
* avoid printing stack trace for debug CLI commands
* Add support for language-specific factories
* specify the section of the config which holds the model to debug
* WIP: add Language.from_config
* Update with language data refactor WIP
* Auto-format
* Add backwards-compat handling for Language.factories
* Update morphologizer.pyx
* Fix morphologizer
* Update and simplify lemmatizers
* Fix Japanese tests
* Port over tagger changes
* Fix Chinese and tests
* Update to latest Thinc
* WIP: xfail first Russian lemmatizer test
* Fix component-specific overrides
* fix nO for output layers in debug_model
* Fix default value
* Fix tests and don't pass objects in config
* Fix deep merging
* Fix lemma lookup data registry
  Only load the lookups if an entry is available in the registry (and if spacy-lookups-data is installed)
* Add types
* Add Vocab.from_config
* Fix typo
* Fix tests
* Make config copying more elegant
* Fix pipe analysis
* Fix lemmatizers and is_base_form
* WIP: move language defaults to config
* Fix morphology type
* Fix vocab
* Remove comment
* Update to latest Thinc
* Add morph rules to config
* Tidy up
* Remove set_morphology option from tagger factory
* Hack use_gpu
* Move [pipeline] to top-level block and make [nlp.pipeline] list
  Allows separating component blocks from component order – otherwise, ordering the config would mean a changed component order, which is bad. Also allows initial config to define more components and not use all of them
* Fix use_gpu and resume in CLI
* Auto-format
* Remove resume from config
* Fix formatting and error
* [pipeline] -> [components]
* Fix types
* Fix tagger test: requires set_morphology?

Co-authored-by: Sofie Van Landeghem <svlandeg@users.noreply.github.com>
Co-authored-by: svlandeg <sofie.vanlandeghem@gmail.com>
Co-authored-by: Matthew Honnibal <honnibal+gh@gmail.com>
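The config layout change near the end of the log is the one structural decision worth illustrating: component definitions live in their own [components] blocks, while component order is just a list of names under [nlp], so reordering blocks in the config file can never silently reorder the pipeline, and a config may define components it doesn't use. Below is a minimal sketch of the resulting layout, assuming the post-refactor v3-style Language.from_config API and the final [components] naming from the log; the factory names are illustrative, and defaults are assumed to be filled in for each factory's model.

from spacy.language import Language

# Order lives only in the "pipeline" list; each block under "components"
# merely *defines* a component. "ner" is defined but not listed, so it is
# not created (the log: "define more components and not use all of them").
config = {
    "nlp": {
        "lang": "en",
        "pipeline": ["tagger", "parser"],
    },
    "components": {
        "parser": {"factory": "parser"},
        "tagger": {"factory": "tagger"},
        "ner": {"factory": "ner"},  # defined here, unused by the pipeline
    },
}

nlp = Language.from_config(config)
assert nlp.pipe_names == ["tagger", "parser"]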
77 lines · 2.1 KiB · Python
import pytest
from spacy.pipeline.functions import merge_subtokens
from spacy.language import Language
from spacy.tokens import Span

from ..util import get_doc


@pytest.fixture
def doc(en_tokenizer):
    # Heads are relative offsets to each token's syntactic head; the "subtok"
    # dependencies mark the spans that merge_subtokens should merge.
    # fmt: off
    text = "This is a sentence. This is another sentence. And a third."
    heads = [1, 0, 1, -2, -3, 1, 0, 1, -2, -3, 1, 1, 1, 0]
    deps = ["nsubj", "ROOT", "subtok", "attr", "punct", "nsubj", "ROOT",
            "subtok", "attr", "punct", "subtok", "subtok", "subtok", "ROOT"]
    # fmt: on
    tokens = en_tokenizer(text)
    return get_doc(tokens.vocab, words=[t.text for t in tokens], heads=heads, deps=deps)


@pytest.fixture
def doc2(en_tokenizer):
    text = "I like New York in Autumn."
    heads = [1, 0, 1, -2, -3, -1, -5]
    tags = ["PRP", "IN", "NNP", "NNP", "IN", "NNP", "."]
    pos = ["PRON", "VERB", "PROPN", "PROPN", "ADP", "PROPN", "PUNCT"]
    deps = ["ROOT", "prep", "compound", "pobj", "prep", "pobj", "punct"]
    tokens = en_tokenizer(text)
    doc = get_doc(
        tokens.vocab,
        words=[t.text for t in tokens],
        heads=heads,
        tags=tags,
        pos=pos,
        deps=deps,
    )
    doc.ents = [Span(doc, 2, 4, doc.vocab.strings["GPE"])]
    # Mark the doc as parsed and tagged so noun chunks can be computed.
    doc.is_parsed = True
    doc.is_tagged = True
    return doc


def test_merge_subtokens(doc):
    doc = merge_subtokens(doc)
    # get_doc() doesn't set spaces, so the result is "And a third ."
    assert [t.text for t in doc] == [
        "This",
        "is",
        "a sentence",
        ".",
        "This",
        "is",
        "another sentence",
        ".",
        "And a third .",
    ]


def test_factories_merge_noun_chunks(doc2):
    assert len(doc2) == 7
    nlp = Language()
    merge_noun_chunks = nlp.create_pipe("merge_noun_chunks")
    merge_noun_chunks(doc2)
    # The noun chunk "New York" is retokenized into a single token.
    assert len(doc2) == 6
    assert doc2[2].text == "New York"


def test_factories_merge_ents(doc2):
    assert len(doc2) == 7
    assert len(list(doc2.ents)) == 1
    nlp = Language()
    merge_entities = nlp.create_pipe("merge_entities")
    merge_entities(doc2)
    # The GPE entity "New York" becomes one token; the entity is preserved.
    assert len(doc2) == 6
    assert len(list(doc2.ents)) == 1
    assert doc2[2].text == "New York"
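For context, the merge factories exercised above are built-in pipeline components, so in normal use they are added to a pipeline rather than called on a doc directly. A minimal usage sketch, assuming the v2-style create_pipe/add_pipe API these tests use:

from spacy.language import Language

# Add merge_entities as the last component, so it runs after an entity
# recognizer has set doc.ents and merges each entity span into one token.
nlp = Language()
merge_entities = nlp.create_pipe("merge_entities")
nlp.add_pipe(merge_entities, last=True)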