Mirror of https://github.com/explosion/spaCy.git — synced 2025-10-31 07:57:35 +03:00
			
		
		
		
	* OrigAnnot class instead of gold.orig_annot list of zipped tuples * from_orig to replace from_annot_tuples * rename to RawAnnot * some unit tests for GoldParse creation and internal format * removing orig_annot and switching to lists instead of tuple * rewriting tuples to use RawAnnot (+ debug statements, WIP) * fix pop() changing the data * small fixes * pop-append fixes * return RawAnnot for existing GoldParse to have uniform interface * clean up imports * fix merge_sents * add unit test for 4402 with new structure (not working yet) * introduce DocAnnot * typo fixes * add unit test for merge_sents * rename from_orig to from_raw * fixing unit tests * fix nn parser * read_annots to produce text, doc_annot pairs * _make_golds fix * rename golds_to_gold_annots * small fixes * fix encoding * have golds_to_gold_annots use DocAnnot * missed a spot * merge_sents as function in DocAnnot * allow specifying only part of the token-level annotations * refactor with Example class + underlying dicts * pipeline components to work with Example objects (wip) * input checking * fix yielding * fix calls to update * small fixes * fix scorer unit test with new format * fix kwargs order * fixes for ud and conllu scripts * fix reading data for conllu script * add in proper errors (not fixed numbering yet to avoid merge conflicts) * fixing few more small bugs * fix EL script
		
			
				
	
	
		
			76 lines
		
	
	
		
			2.0 KiB
		
	
	
	
		
			Python
		
	
	
	
	
	
			
		
		
	
	
			76 lines
		
	
	
		
			2.0 KiB
		
	
	
	
		
			Python
		
	
	
	
	
	
| # coding: utf8
 | |
| from __future__ import unicode_literals
 | |
| 
 | |
| import pytest
 | |
| from thinc.neural.optimizers import Adam
 | |
| from thinc.neural.ops import NumpyOps
 | |
| from spacy.attrs import NORM
 | |
| from spacy.gold import GoldParse
 | |
| from spacy.vocab import Vocab
 | |
| from spacy.tokens import Doc
 | |
| from spacy.pipeline import DependencyParser
 | |
| 
 | |
| 
 | |
| @pytest.fixture
 | |
| def vocab():
 | |
|     return Vocab(lex_attr_getters={NORM: lambda s: s})
 | |
| 
 | |
| 
 | |
| @pytest.fixture
 | |
| def parser(vocab):
 | |
|     parser = DependencyParser(vocab)
 | |
|     parser.cfg["token_vector_width"] = 4
 | |
|     parser.cfg["hidden_width"] = 32
 | |
|     # parser.add_label('right')
 | |
|     parser.add_label("left")
 | |
|     parser.begin_training([], **parser.cfg)
 | |
|     sgd = Adam(NumpyOps(), 0.001)
 | |
| 
 | |
|     for i in range(10):
 | |
|         losses = {}
 | |
|         doc = Doc(vocab, words=["a", "b", "c", "d"])
 | |
|         gold = GoldParse(doc, heads=[1, 1, 3, 3], deps=["left", "ROOT", "left", "ROOT"])
 | |
|         parser.update((doc, gold), sgd=sgd, losses=losses)
 | |
|     return parser
 | |
| 
 | |
| 
 | |
| def test_no_sentences(parser):
 | |
|     doc = Doc(parser.vocab, words=["a", "b", "c", "d"])
 | |
|     doc = parser(doc)
 | |
|     assert len(list(doc.sents)) >= 1
 | |
| 
 | |
| 
 | |
| def test_sents_1(parser):
 | |
|     doc = Doc(parser.vocab, words=["a", "b", "c", "d"])
 | |
|     doc[2].sent_start = True
 | |
|     doc = parser(doc)
 | |
|     assert len(list(doc.sents)) >= 2
 | |
|     doc = Doc(parser.vocab, words=["a", "b", "c", "d"])
 | |
|     doc[1].sent_start = False
 | |
|     doc[2].sent_start = True
 | |
|     doc[3].sent_start = False
 | |
|     doc = parser(doc)
 | |
|     assert len(list(doc.sents)) == 2
 | |
| 
 | |
| 
 | |
| def test_sents_1_2(parser):
 | |
|     doc = Doc(parser.vocab, words=["a", "b", "c", "d"])
 | |
|     doc[1].sent_start = True
 | |
|     doc[2].sent_start = True
 | |
|     doc = parser(doc)
 | |
|     assert len(list(doc.sents)) >= 3
 | |
| 
 | |
| 
 | |
| def test_sents_1_3(parser):
 | |
|     doc = Doc(parser.vocab, words=["a", "b", "c", "d"])
 | |
|     doc[1].sent_start = True
 | |
|     doc[3].sent_start = True
 | |
|     doc = parser(doc)
 | |
|     assert len(list(doc.sents)) >= 3
 | |
|     doc = Doc(parser.vocab, words=["a", "b", "c", "d"])
 | |
|     doc[1].sent_start = True
 | |
|     doc[2].sent_start = False
 | |
|     doc[3].sent_start = True
 | |
|     doc = parser(doc)
 | |
|     assert len(list(doc.sents)) == 3
 |