Mirror of https://github.com/explosion/spaCy.git
	Remove old unused tests and conftest files
commit f8803808ce
parent 4d0bfebcd9
 setup.py | 1 -
--- a/setup.py
+++ b/setup.py
@@ -37,7 +37,6 @@ PACKAGES = [
     'spacy.munge',
     'spacy.tests',
     'spacy.tests.matcher',
-    'spacy.tests.morphology',
     'spacy.tests.munge',
     'spacy.tests.parser',
     'spacy.tests.serialize',
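The setup.py change is the bookkeeping half of the commit: with the morphology test directory deleted, its entry in the hand-maintained PACKAGES list has to go too, or packaging would reference a directory that no longer exists. A minimal sketch, not this repository's actual setup.py, of how setuptools' find_packages() avoids that manual list entirely:

# Hypothetical alternative shown for illustration only: find_packages()
# discovers packages from the directory tree, so deleting a test package
# needs no matching setup.py edit. The exclude patterns are an assumption
# for this sketch, not spaCy's real configuration.
from setuptools import setup, find_packages

setup(
    name='spacy',
    packages=find_packages(exclude=['spacy.tests', 'spacy.tests.*']),
)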
@@ -1,17 +0,0 @@
-import pytest
-
-import pickle
-import io
-
-
-from spacy.morphology import Morphology
-from spacy.lemmatizer import Lemmatizer
-from spacy.strings import StringStore
-
-@pytest.mark.xfail
-def test_pickle():
-    morphology = Morphology(StringStore(), {}, Lemmatizer({}, {}, {}))
-
-    file_ = io.BytesIO()
-    pickle.dump(morphology, file_)
-
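The deleted file tested pickling of the Morphology object and was marked @pytest.mark.xfail, i.e. it documented a known gap rather than guarding working behaviour, which makes it a natural candidate for removal. A self-contained sketch of the same xfail pattern, using a hypothetical stand-in class instead of spaCy's Morphology:

import io
import pickle
import threading

import pytest


class Unpicklable(object):
    # Hypothetical stand-in for an object that cannot be pickled: thread
    # locks are rejected by pickle, mirroring how Morphology could not be
    # pickled at the time of this commit.
    def __init__(self):
        self.lock = threading.Lock()


@pytest.mark.xfail
def test_pickle():
    # pickle.dump raises TypeError here; pytest records the failure as
    # "xfail" (expected) instead of failing the suite, and would report an
    # unexpected pass as "xpass" once pickling starts working.
    obj = Unpicklable()
    file_ = io.BytesIO()
    pickle.dump(obj, file_)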
				
			
@@ -92,22 +92,3 @@ def test_phrase_matcher():
     matcher = PhraseMatcher(vocab, [Doc(vocab, words='Google Now'.split())])
     doc = Doc(vocab, words=['I', 'like', 'Google', 'Now', 'best'])
     assert len(matcher(doc)) == 1
-
-
-#@pytest.mark.models
-#def test_match_preserved(EN):
-#    patterns = {
-#        'JS': ['PRODUCT', {}, [[{'ORTH': 'JavaScript'}]]],
-#        'GoogleNow':  ['PRODUCT', {}, [[{'ORTH': 'Google'}, {'ORTH': 'Now'}]]],
-#        'Java':       ['PRODUCT', {}, [[{'LOWER': 'java'}]]],
-#    }
-#    matcher = Matcher(EN.vocab, patterns)
-#    doc = EN.tokenizer('I like java.')
-#    EN.tagger(doc)
-#    assert len(doc.ents) == 0
-#    doc = EN.tokenizer('I like java.')
-#    doc.ents += tuple(matcher(doc))
-#    assert len(doc.ents) == 1
-#    EN.tagger(doc)
-#    EN.entity(doc)
-#    assert len(doc.ents) == 1
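The context lines kept here show the era's constructor-style API, where PhraseMatcher received its pattern Docs at construction time; the deleted block was an already commented-out test against the even older Matcher patterns dict. For comparison, a hedged sketch of the same assertion against the later add()-based API (spaCy v3-style, not the API this commit's code uses):

from spacy.vocab import Vocab
from spacy.tokens import Doc
from spacy.matcher import PhraseMatcher

vocab = Vocab()
matcher = PhraseMatcher(vocab)
# Patterns are registered under a key instead of passed to the constructor.
matcher.add('GoogleNow', [Doc(vocab, words=['Google', 'Now'])])
doc = Doc(vocab, words=['I', 'like', 'Google', 'Now', 'best'])
assert len(matcher(doc)) == 1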
				
			
			
@@ -1,6 +0,0 @@
-import pytest
-
-
-@pytest.fixture(scope="session")
-def en_vocab(EN):
-    return EN.vocab
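The deleted conftest.py only re-exported the loaded pipeline's vocab as a session-scoped en_vocab fixture, with the EN fixture itself defined in a higher-level conftest. A generic sketch of that fixture-chaining pattern; the EN body below is a hypothetical stub, whereas the original suite built the English pipeline elsewhere:

import pytest


@pytest.fixture(scope="session")
def EN():
    # Built once per test session, so the expensive load happens only once.
    # Hypothetical stand-in for however the suite constructs its pipeline.
    import spacy
    return spacy.load('en_core_web_sm')


@pytest.fixture(scope="session")
def en_vocab(EN):
    # Cheap derived fixture: pytest injects the session-wide EN above.
    return EN.vocab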