mirror of https://github.com/explosion/spaCy.git
* Update tests, avoiding unnecessary processing to make testing faster
commit 35a18250cc
parent 9e78d673d5
@@ -9,7 +9,7 @@ def EN():
 
 
 def test_possess(EN):
-    tokens = EN("Mike's")
+    tokens = EN("Mike's", parse=False)
     assert EN.vocab.strings[tokens[0].orth] == "Mike"
     assert EN.vocab.strings[tokens[1].orth] == "'s"
     assert len(tokens) == 2
@@ -23,7 +23,7 @@ def test_apostrophe(EN):
 
 
 def test_LL(EN):
-    tokens = EN("we'll")
+    tokens = EN("we'll", parse=False)
     assert len(tokens) == 2
     assert tokens[1].orth_ == "'ll"
     assert tokens[1].lemma_ == "will"
@@ -31,7 +31,7 @@ def test_LL(EN):
 
 
 def test_aint(EN):
-    tokens = EN("ain't")
+    tokens = EN("ain't", parse=False)
     assert len(tokens) == 2
     assert tokens[0].orth_ == "ai"
     assert tokens[0].lemma_ == "be"
@@ -40,18 +40,18 @@ def test_aint(EN):
 
 
 def test_capitalized(EN):
-    tokens = EN("can't")
+    tokens = EN("can't", parse=False)
     assert len(tokens) == 2
-    tokens = EN("Can't")
+    tokens = EN("Can't", parse=False)
     assert len(tokens) == 2
-    tokens = EN("Ain't")
+    tokens = EN("Ain't", parse=False)
     assert len(tokens) == 2
     assert tokens[0].orth_ == "Ai"
     assert tokens[0].lemma_ == "be"
 
 
 def test_punct(EN):
-    tokens = EN("We've")
+    tokens = EN("We've", parse=False)
     assert len(tokens) == 2
-    tokens = EN("``We've")
+    tokens = EN("``We've", parse=False)
     assert len(tokens) == 3
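For context, the pattern applied in the hunks above: these tests only inspect token boundaries, orth forms, and lemmas, so running the dependency parser on every input is wasted work. Below is a minimal sketch of the resulting test shape, assuming the historical spaCy 0.x API in which calling the English pipeline accepts a parse keyword; the fixture and test mirror the diff, not a current spaCy API.

import pytest
from spacy.en import English  # spaCy 0.x import path, as used by these tests


@pytest.fixture
def EN():
    # Full pipeline; the tests below opt out of parsing per call.
    return English()


def test_possess(EN):
    # parse=False skips dependency parsing (a spaCy 0.x call flag);
    # the assertions only need the tokenizer's output.
    tokens = EN("Mike's", parse=False)
    assert len(tokens) == 2
    assert tokens[0].orth_ == "Mike"
    assert tokens[1].orth_ == "'s"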
@@ -7,7 +7,7 @@ from spacy.en import English
 
 @pytest.fixture
 def EN():
-    return English()
+    return English().tokenizer
 
 
 def test_no_special(EN):
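The second file's fixture change goes further: tests that only check tokenization can bypass the tagger and parser entirely by using the bare tokenizer. A sketch under the same spaCy 0.x assumptions; the fixture matches the diff, while the example test body is illustrative.

import pytest
from spacy.en import English  # spaCy 0.x import path


@pytest.fixture
def EN():
    # The bare tokenizer splits text without invoking the tagger or
    # parser, so no downstream processing runs at all.
    return English().tokenizer


def test_no_special(EN):
    # Illustrative assertion: a single ordinary word yields one token.
    assert len(EN("hello")) == 1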