Mirror of https://github.com/explosion/spaCy.git
Commit 467c82439e: Always use tqdm with disable=None
Parent: b4990395f9

`tqdm` can cause deadlocks in the test suite if enabled.
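For context on the pattern this commit applies: tqdm's `disable` argument accepts `None`, which auto-disables the progress bar when the output stream is not attached to a TTY (for example, when a test runner captures output), whereas the default `disable=False` always renders the bar. Below is a minimal sketch of the same call pattern; `count_tokens` is a hypothetical helper used only for illustration and is not part of spaCy.

# Minimal sketch of the disable=None pattern (illustrative only).
# With disable=None, tqdm suppresses the progress bar automatically
# when stderr is not a TTY, e.g. under captured test output.
import tqdm


def count_tokens(texts):
    total = 0
    for text in tqdm.tqdm(texts, disable=None):
        total += len(text.split())
    return total


if __name__ == "__main__":
    print(count_tokens(["one two three", "four five"]))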
@@ -133,7 +133,9 @@ def apply(
     if len(text_files) > 0:
         streams.append(_stream_texts(text_files))
     datagen = cast(DocOrStrStream, chain(*streams))
-    for doc in tqdm.tqdm(nlp.pipe(datagen, batch_size=batch_size, n_process=n_process)):
+    for doc in tqdm.tqdm(
+        nlp.pipe(datagen, batch_size=batch_size, n_process=n_process), disable=None
+    ):
         docbin.add(doc)
     if output_file.suffix == "":
         output_file = output_file.with_suffix(".spacy")
@@ -89,7 +89,7 @@ class Quartiles:
 def annotate(
     nlp: Language, docs: List[Doc], batch_size: Optional[int]
 ) -> numpy.ndarray:
-    docs = nlp.pipe(tqdm(docs, unit="doc"), batch_size=batch_size)
+    docs = nlp.pipe(tqdm(docs, unit="doc", disable=None), batch_size=batch_size)
     wps = []
     while True:
         with time_context() as elapsed:
@@ -71,7 +71,7 @@ def profile(model: str, inputs: Optional[Path] = None, n_texts: int = 10000) ->
 
 
 def parse_texts(nlp: Language, texts: Sequence[str]) -> None:
-    for doc in nlp.pipe(tqdm.tqdm(texts), batch_size=16):
+    for doc in nlp.pipe(tqdm.tqdm(texts, disable=None), batch_size=16):
         pass
 
 
@@ -302,7 +302,7 @@ def read_vectors(
             shape = (truncate_vectors, shape[1])
     vectors_data = numpy.zeros(shape=shape, dtype="f")
     vectors_keys = []
-    for i, line in enumerate(tqdm.tqdm(f)):
+    for i, line in enumerate(tqdm.tqdm(f, disable=None)):
         line = line.rstrip()
         pieces = line.rsplit(" ", vectors_data.shape[1])
         word = pieces.pop(0)