mirror of https://github.com/explosion/spaCy.git
remove debug output from tests

commit 1003e7ccec
parent f57f843e85
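
The diff below drops three debug print statements from the parser tests. Each one dumped a dependency_label/head_index pair for every token after a transition sequence had been applied; none of them fed into the assertions that follow.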
					
@@ -32,7 +32,6 @@ def test_arc_eager_finalize_state(EN):
     # right branching
     example = EN.tokenizer.tokens_from_list(u"a b c d e".split(' '))
     apply_transition_sequence(EN, example, ['R-nsubj','D','R-nsubj','R-nsubj','D','R-ROOT'])
-    print [ '%s/%s' % (t.dep_,t.head.i) for t in example ]
 
     assert example[0].n_lefts == 0
     assert example[0].n_rights == 2
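
For context, the deleted line built one 'dependency_label/head_index' string per token. A minimal standalone sketch of that formatting, using a stand-in token class rather than real spaCy tokens (FakeToken is illustrative, not spaCy's API):

    class FakeToken(object):
        """Stand-in with only the attributes the debug line used."""
        def __init__(self, i, dep_):
            self.i = i            # position of the token in the sentence
            self.dep_ = dep_      # dependency label, e.g. 'nsubj'
            self.head = self      # head token; a root points at itself

    a, b = FakeToken(0, 'ROOT'), FakeToken(1, 'nsubj')
    b.head = a  # attach token 1 to token 0

    # Same expression as the deleted debug line:
    print(['%s/%s' % (t.dep_, t.head.i) for t in [a, b]])
    # -> ['ROOT/0', 'nsubj/0']
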
@@ -67,7 +66,6 @@ def test_arc_eager_finalize_state(EN):
     # left branching
     example = EN.tokenizer.tokens_from_list(u"a b c d e".split(' '))
     apply_transition_sequence(EN, example, ['S','L-nsubj','L-ROOT','S','L-nsubj','L-nsubj'])
-    print [ '%s/%s' % (t.dep_,t.head.i) for t in example ]
 
     assert example[0].n_lefts == 0
     assert example[0].n_rights == 0
@@ -138,7 +138,6 @@ def test_sbd_for_root_label_dependents(EN):
     example = EN.tokenizer.tokens_from_list(u"I saw a firefly It glowed".split(' '))
     EN.tagger(example)
     apply_transition_sequence(EN, example, ['L-nsubj','S','L-det','R-dobj','D','S','L-nsubj','R-ROOT'])
-    print ['%s/%s' % (t.dep_,t.head.i) for t in example]
 
     assert example[1].head.i == 1
     assert example[5].head.i == 5
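
These look like pytest-style tests (the EN argument looks like a fixture), in which case permanent prints are redundant anyway: pytest captures stdout by default and replays it in the report only when a test fails. A sketch of that pattern, dropping into the same test module and reusing its EN fixture and apply_transition_sequence helper (the debug_arcs name is hypothetical):

    def debug_arcs(tokens):
        # Same 'label/head_index' formatting as the deleted lines. Under
        # pytest's default capturing this stays silent unless the test fails.
        print(['%s/%s' % (t.dep_, t.head.i) for t in tokens])

    def test_right_branching(EN):
        example = EN.tokenizer.tokens_from_list(u"a b c d e".split(' '))
        apply_transition_sequence(EN, example,
                                  ['R-nsubj','D','R-nsubj','R-nsubj','D','R-ROOT'])
        debug_arcs(example)  # silent on success, replayed on failure
        assert example[0].n_lefts == 0
        assert example[0].n_rights == 2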