diff --git a/spacy/errors.py b/spacy/errors.py
index fc44f6ba3..e6912a263 100644
--- a/spacy/errors.py
+++ b/spacy/errors.py
@@ -190,6 +190,8 @@ class Warnings:
             "vectors. This is almost certainly a mistake.")
     W113 = ("Sourced component '{name}' may not work as expected: source "
             "vectors are not identical to current pipeline vectors.")
+    W114 = ("Using multiprocessing with GPU models is not recommended and may "
+            "lead to errors.")
 
 
 @add_codes
diff --git a/spacy/language.py b/spacy/language.py
index 2dfb43b73..80703259d 100644
--- a/spacy/language.py
+++ b/spacy/language.py
@@ -10,7 +10,7 @@ from contextlib import contextmanager
 from copy import deepcopy
 from pathlib import Path
 import warnings
-from thinc.api import get_current_ops, Config, Optimizer
+from thinc.api import get_current_ops, Config, CupyOps, Optimizer
 import srsly
 import multiprocessing as mp
 from itertools import chain, cycle
@@ -1545,6 +1545,9 @@ class Language:
             pipes.append(f)
 
         if n_process != 1:
+            if self._has_gpu_model(disable):
+                warnings.warn(Warnings.W114)
+
             docs = self._multiprocessing_pipe(texts, pipes, n_process, batch_size)
         else:
             # if n_process == 1, no processes are forked.
@@ -1554,6 +1557,17 @@ class Language:
         for doc in docs:
             yield doc
 
+    def _has_gpu_model(self, disable: Iterable[str]):
+        for name, proc in self.pipeline:
+            is_trainable = hasattr(proc, "is_trainable") and proc.is_trainable  # type: ignore
+            if name in disable or not is_trainable:
+                continue
+
+            if hasattr(proc, "model") and hasattr(proc.model, "ops") and isinstance(proc.model.ops, CupyOps):  # type: ignore
+                return True
+
+        return False
+
     def _multiprocessing_pipe(
         self,
         texts: Iterable[str],
diff --git a/spacy/tests/test_language.py b/spacy/tests/test_language.py
index c911b8d81..8dbb6fd75 100644
--- a/spacy/tests/test_language.py
+++ b/spacy/tests/test_language.py
@@ -10,11 +10,21 @@ from spacy.lang.en import English
 from spacy.lang.de import German
 from spacy.util import registry, ignore_error, raise_error
 import spacy
-from thinc.api import NumpyOps, get_current_ops
+from thinc.api import CupyOps, NumpyOps, get_current_ops
 
 from .util import add_vecs_to_vocab, assert_docs_equal
 
 
+try:
+    import torch
+
+    # Ensure that we don't deadlock in multiprocessing tests.
+    torch.set_num_threads(1)
+    torch.set_num_interop_threads(1)
+except ImportError:
+    pass
+
+
 def evil_component(doc):
     if "2" in doc.text:
         raise ValueError("no dice")
@@ -528,3 +538,17 @@ def test_language_source_and_vectors(nlp2):
     assert long_string in nlp2.vocab.strings
     # vectors should remain unmodified
     assert nlp.vocab.vectors.to_bytes() == vectors_bytes
+
+
+@pytest.mark.skipif(
+    not isinstance(get_current_ops(), CupyOps), reason="test requires GPU"
+)
+def test_multiprocessing_gpu_warning(nlp2, texts):
+    texts = texts * 10
+    docs = nlp2.pipe(texts, n_process=2, batch_size=2)
+
+    with pytest.warns(UserWarning, match="multiprocessing with GPU models"):
+        with pytest.raises(ValueError):
+            # Trigger multi-processing.
+            for _ in docs:
+                pass
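
A minimal sketch of how the new W114 warning surfaces from user code, assuming a CUDA device with CuPy is available and some trained pipeline is installed; the pipeline name "en_core_web_sm" is illustrative, not part of the patch:

import warnings

import spacy

# Assumes CuPy and a CUDA device are present; after require_gpu(),
# trained components run on CupyOps, which is what _has_gpu_model() checks.
spacy.require_gpu()
nlp = spacy.load("en_core_web_sm")  # pipeline name is illustrative

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    try:
        # Language.pipe is a generator, so W114 is only emitted once
        # iteration starts and worker processes are about to be forked.
        for _ in nlp.pipe(["First text.", "Second text."], n_process=2):
            pass
    except Exception:
        # Sharing a GPU model across forked workers can fail outright;
        # that failure mode is exactly what the warning points at.
        pass

assert any("multiprocessing with GPU models" in str(w.message) for w in caught)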