diff --git a/spacy/tests/doc/test_token_api.py b/spacy/tests/doc/test_token_api.py
index 8338a7290..5ea0bcff0 100644
--- a/spacy/tests/doc/test_token_api.py
+++ b/spacy/tests/doc/test_token_api.py
@@ -255,7 +255,7 @@ def test_token_api_non_conjuncts(en_vocab):
 
 
 def test_missing_head_dep(en_vocab):
-    """ Check that the Doc constructor and Example.from_dict parse missing information the same"""
+    """Check that the Doc constructor and Example.from_dict parse missing information the same"""
     heads = [1, 1, 1, 1, 2, None]  # element 5 is missing
     deps = ["", "ROOT", "dobj", "cc", "conj", None]  # element 0 and 5 are missing
     words = ["I", "like", "London", "and", "Berlin", "."]
diff --git a/spacy/tests/lang/ca/test_prefix_suffix_infix.py b/spacy/tests/lang/ca/test_prefix_suffix_infix.py
index 83a75f056..a3c76ab5b 100644
--- a/spacy/tests/lang/ca/test_prefix_suffix_infix.py
+++ b/spacy/tests/lang/ca/test_prefix_suffix_infix.py
@@ -5,7 +5,7 @@ import pytest
     "text,expected_tokens", [("d'un", ["d'", "un"]), ("s'ha", ["s'", "ha"])]
 )
 def test_contractions(ca_tokenizer, text, expected_tokens):
-    """ Test that the contractions are split into two tokens"""
+    """Test that the contractions are split into two tokens"""
     tokens = ca_tokenizer(text)
     assert len(tokens) == 2
     assert [t.text for t in tokens] == expected_tokens
diff --git a/spacy/tests/lang/it/test_prefix_suffix_infix.py b/spacy/tests/lang/it/test_prefix_suffix_infix.py
index 46f66b5e6..5834f9695 100644
--- a/spacy/tests/lang/it/test_prefix_suffix_infix.py
+++ b/spacy/tests/lang/it/test_prefix_suffix_infix.py
@@ -5,7 +5,7 @@ import pytest
     "text,expected_tokens", [("c'è", ["c'", "è"]), ("l'ha", ["l'", "ha"])]
 )
 def test_contractions(it_tokenizer, text, expected_tokens):
-    """ Test that the contractions are split into two tokens"""
+    """Test that the contractions are split into two tokens"""
     tokens = it_tokenizer(text)
     assert len(tokens) == 2
     assert [t.text for t in tokens] == expected_tokens
diff --git a/spacy/tests/parser/test_ner.py b/spacy/tests/parser/test_ner.py
index 00617df56..ee9b6bf01 100644
--- a/spacy/tests/parser/test_ner.py
+++ b/spacy/tests/parser/test_ner.py
@@ -304,7 +304,7 @@ def test_empty_ner():
 
 
 def test_ruler_before_ner():
-    """ Test that an NER works after an entity_ruler: the second can add annotations """
+    """Test that an NER works after an entity_ruler: the second can add annotations"""
     nlp = English()
 
     # 1 : Entity Ruler - should set "this" to B and everything else to empty
@@ -334,7 +334,7 @@ def test_ner_constructor(en_vocab):
 
 
 def test_ner_before_ruler():
-    """ Test that an entity_ruler works after an NER: the second can overwrite O annotations """
+    """Test that an entity_ruler works after an NER: the second can overwrite O annotations"""
     nlp = English()
 
     # 1: untrained NER - should set everything to O
@@ -355,7 +355,7 @@
 
 
 def test_block_ner():
-    """ Test functionality for blocking tokens so they can't be in a named entity """
+    """Test functionality for blocking tokens so they can't be in a named entity"""
     # block "Antti L Korhonen" from being a named entity
     nlp = English()
     nlp.add_pipe("blocker", config={"start": 2, "end": 5})
diff --git a/spacy/tests/regression/test_issue3501-4000.py b/spacy/tests/regression/test_issue3501-4000.py
index 9d3a27435..71c3768dd 100644
--- a/spacy/tests/regression/test_issue3501-4000.py
+++ b/spacy/tests/regression/test_issue3501-4000.py
@@ -197,7 +197,7 @@ def test_issue3555(en_vocab):
 
 
 def test_issue3611():
-    """ Test whether adding n-grams in the textcat works even when n > token length of some docs """
+    """Test whether adding n-grams in the textcat works even when n > token length of some docs"""
     unique_classes = ["offensive", "inoffensive"]
     x_train = [
         "This is an offensive text",
@@ -282,7 +282,7 @@ def test_issue3830_with_subtok():
 
 
 def test_issue3839(en_vocab):
-    """Test that match IDs returned by the matcher are correct, are in the string """
+    """Test that match IDs returned by the matcher are correct, are in the string"""
     doc = Doc(en_vocab, words=["terrific", "group", "of", "people"])
     matcher = Matcher(en_vocab)
     match_id = "PATTERN"
@@ -366,7 +366,7 @@ def test_issue3951(en_vocab):
 
 
 def test_issue3959():
-    """ Ensure that a modified pos attribute is serialized correctly."""
+    """Ensure that a modified pos attribute is serialized correctly."""
     nlp = English()
     doc = nlp(
         "displaCy uses JavaScript, SVG and CSS to show you how computers understand language"
diff --git a/spacy/tests/regression/test_issue4001-4500.py b/spacy/tests/regression/test_issue4001-4500.py
index a4c15dac2..4410e6236 100644
--- a/spacy/tests/regression/test_issue4001-4500.py
+++ b/spacy/tests/regression/test_issue4001-4500.py
@@ -38,7 +38,7 @@ def test_issue4002(en_vocab):
 
 
 def test_issue4030():
-    """ Test whether textcat works fine with empty doc """
+    """Test whether textcat works fine with empty doc"""
     unique_classes = ["offensive", "inoffensive"]
     x_train = [
         "This is an offensive text",
@@ -237,7 +237,7 @@ def test_issue4190():
 
 
 def test_issue4267():
-    """ Test that running an entity_ruler after ner gives consistent results"""
+    """Test that running an entity_ruler after ner gives consistent results"""
     nlp = English()
     ner = nlp.add_pipe("ner")
     ner.add_label("PEOPLE")
@@ -288,7 +288,7 @@ def test_multiple_predictions():
 
 
 def test_issue4313():
-    """ This should not crash or exit with some strange error code """
+    """This should not crash or exit with some strange error code"""
     beam_width = 16
     beam_density = 0.0001
     nlp = English()
diff --git a/spacy/tests/regression/test_issue4501-5000.py b/spacy/tests/regression/test_issue4501-5000.py
index f5fcb53fd..effd67306 100644
--- a/spacy/tests/regression/test_issue4501-5000.py
+++ b/spacy/tests/regression/test_issue4501-5000.py
@@ -152,7 +152,7 @@ def test_issue4707():
 
 
 def test_issue4725_1():
-    """ Ensure the pickling of the NER goes well"""
+    """Ensure the pickling of the NER goes well"""
     vocab = Vocab(vectors_name="test_vocab_add_vector")
     nlp = English(vocab=vocab)
     config = {
diff --git a/spacy/tests/regression/test_issue5001-5500.py b/spacy/tests/regression/test_issue5001-5500.py
index 0575c8270..9eefef2e5 100644
--- a/spacy/tests/regression/test_issue5001-5500.py
+++ b/spacy/tests/regression/test_issue5001-5500.py
@@ -96,7 +96,7 @@ def test_issue5137():
 
 
 def test_issue5141(en_vocab):
-    """ Ensure an empty DocBin does not crash on serialization """
+    """Ensure an empty DocBin does not crash on serialization"""
     doc_bin = DocBin(attrs=["DEP", "HEAD"])
     assert list(doc_bin.get_docs(en_vocab)) == []
     doc_bin_bytes = doc_bin.to_bytes()
diff --git a/spacy/tests/serialize/test_serialize_config.py b/spacy/tests/serialize/test_serialize_config.py
index 114d4865c..102989705 100644
--- a/spacy/tests/serialize/test_serialize_config.py
+++ b/spacy/tests/serialize/test_serialize_config.py
@@ -238,7 +238,7 @@ def test_create_nlp_from_config_multiple_instances():
 
 
 def test_serialize_nlp():
-    """ Create a custom nlp pipeline from config and ensure it serializes it correctly """
+    """Create a custom nlp pipeline from config and ensure it serializes it correctly"""
     nlp_config = Config().from_str(nlp_config_string)
     nlp = load_model_from_config(nlp_config, auto_fill=True)
     nlp.get_pipe("tagger").add_label("A")
@@ -258,7 +258,7 @@ def test_serialize_nlp():
 
 
 def test_serialize_custom_nlp():
-    """ Create a custom nlp pipeline and ensure it serializes it correctly"""
+    """Create a custom nlp pipeline and ensure it serializes it correctly"""
     nlp = English()
     parser_cfg = dict()
     parser_cfg["model"] = {"@architectures": "my_test_parser"}
@@ -279,7 +279,7 @@
     "parser_config_string", [parser_config_string_upper, parser_config_string_no_upper]
 )
 def test_serialize_parser(parser_config_string):
-    """ Create a non-default parser config to check nlp serializes it correctly """
+    """Create a non-default parser config to check nlp serializes it correctly"""
     nlp = English()
     model_config = Config().from_str(parser_config_string)
     parser = nlp.add_pipe("parser", config=model_config)
diff --git a/spacy/tests/test_misc.py b/spacy/tests/test_misc.py
index 1cefeb9a3..45cbdf45b 100644
--- a/spacy/tests/test_misc.py
+++ b/spacy/tests/test_misc.py
@@ -275,7 +275,7 @@ def test_util_minibatch(doc_sizes, expected_batches):
     ],
 )
 def test_util_minibatch_oversize(doc_sizes, expected_batches):
-    """ Test that oversized documents are returned in their own batch"""
+    """Test that oversized documents are returned in their own batch"""
     docs = [get_random_doc(doc_size) for doc_size in doc_sizes]
     tol = 0.2
     batch_size = 1000
diff --git a/spacy/training/converters/conllu_to_docs.py b/spacy/training/converters/conllu_to_docs.py
index 356021a1d..66156b6e5 100644
--- a/spacy/training/converters/conllu_to_docs.py
+++ b/spacy/training/converters/conllu_to_docs.py
@@ -69,7 +69,7 @@ def read_conllx(
     ner_tag_pattern="",
     ner_map=None,
 ):
-    """ Yield docs, one for each sentence """
+    """Yield docs, one for each sentence"""
     vocab = Vocab()  # need vocab to make a minimal Doc
     for sent in input_data.strip().split("\n\n"):
         lines = sent.strip().split("\n")
diff --git a/spacy/training/corpus.py b/spacy/training/corpus.py
index 1edc4329b..606dbfb4a 100644
--- a/spacy/training/corpus.py
+++ b/spacy/training/corpus.py
@@ -186,7 +186,7 @@ class Corpus:
     def read_docbin(
         self, vocab: Vocab, locs: Iterable[Union[str, Path]]
     ) -> Iterator[Doc]:
-        """ Yield training examples as example dicts """
+        """Yield training examples as example dicts"""
         i = 0
         for loc in locs:
             loc = util.ensure_path(loc)
diff --git a/spacy/training/loggers.py b/spacy/training/loggers.py
index 7a1cc8133..f7f70226d 100644
--- a/spacy/training/loggers.py
+++ b/spacy/training/loggers.py
@@ -110,6 +110,7 @@ def wandb_logger(
 ):
     try:
         import wandb
+
         # test that these are available
         from wandb import init, log, join  # noqa: F401
     except ImportError: