diff --git a/spacy/tests/lang/en/test_exceptions.py b/spacy/tests/lang/en/test_exceptions.py
index 1b56a3b0f..6229b96eb 100644
--- a/spacy/tests/lang/en/test_exceptions.py
+++ b/spacy/tests/lang/en/test_exceptions.py
@@ -113,9 +113,8 @@ def test_en_tokenizer_norm_exceptions(en_tokenizer, text, norms):
     assert [token.norm_ for token in tokens] == norms
 
 
-@pytest.mark.skip
 @pytest.mark.parametrize(
-    "text,norm", [("radicalised", "radicalized"), ("cuz", "because")]
+    "text,norm", [("Jan.", "January"), ("'cuz", "because")]
 )
 def test_en_lex_attrs_norm_exceptions(en_tokenizer, text, norm):
     tokens = en_tokenizer(text)
diff --git a/spacy/tests/lang/uk/test_tokenizer.py b/spacy/tests/lang/uk/test_tokenizer.py
index 91ae057f8..3d6e87301 100644
--- a/spacy/tests/lang/uk/test_tokenizer.py
+++ b/spacy/tests/lang/uk/test_tokenizer.py
@@ -89,7 +89,6 @@ def test_uk_tokenizer_splits_open_appostrophe(uk_tokenizer, text):
     assert tokens[0].text == "'"
 
 
-@pytest.mark.skip(reason="See Issue #3327 and PR #3329")
 @pytest.mark.parametrize("text", ["Тест''"])
 def test_uk_tokenizer_splits_double_end_quote(uk_tokenizer, text):
     tokens = uk_tokenizer(text)
diff --git a/spacy/tests/serialize/test_serialize_pipeline.py b/spacy/tests/serialize/test_serialize_pipeline.py
index 2deaa180d..a26dceed0 100644
--- a/spacy/tests/serialize/test_serialize_pipeline.py
+++ b/spacy/tests/serialize/test_serialize_pipeline.py
@@ -140,9 +140,6 @@ def test_to_from_bytes(parser, blank_parser):
     assert blank_parser.moves.n_moves == parser.moves.n_moves
 
 
-@pytest.mark.skip(
-    reason="This seems to be a dict ordering bug somewhere. Only failing on some platforms."
-)
 def test_serialize_tagger_roundtrip_bytes(en_vocab, taggers):
     tagger1 = taggers[0]
     tagger1_b = tagger1.to_bytes()
diff --git a/spacy/tests/serialize/test_serialize_tokenizer.py b/spacy/tests/serialize/test_serialize_tokenizer.py
index 00a88ec38..035561606 100644
--- a/spacy/tests/serialize/test_serialize_tokenizer.py
+++ b/spacy/tests/serialize/test_serialize_tokenizer.py
@@ -26,7 +26,6 @@ def test_serialize_custom_tokenizer(en_vocab, en_tokenizer):
     assert tokenizer_reloaded.rules == {}
 
 
-@pytest.mark.skip(reason="Currently unreliable across platforms")
 @pytest.mark.parametrize("text", ["I💜you", "they’re", "“hello”"])
 def test_serialize_tokenizer_roundtrip_bytes(en_tokenizer, text):
     tokenizer = en_tokenizer
@@ -38,7 +37,6 @@ def test_serialize_tokenizer_roundtrip_bytes(en_tokenizer, text):
     assert [token.text for token in doc1] == [token.text for token in doc2]
 
 
-@pytest.mark.skip(reason="Currently unreliable across platforms")
 def test_serialize_tokenizer_roundtrip_disk(en_tokenizer):
     tokenizer = en_tokenizer
     with make_tempdir() as d:
diff --git a/spacy/tests/vocab_vectors/test_lookups.py b/spacy/tests/vocab_vectors/test_lookups.py
index d8c7651e4..94e31a072 100644
--- a/spacy/tests/vocab_vectors/test_lookups.py
+++ b/spacy/tests/vocab_vectors/test_lookups.py
@@ -71,7 +71,6 @@ def test_table_api_to_from_bytes():
     assert "def" not in new_table2
 
 
-@pytest.mark.skip(reason="This fails on Python 3.5")
 def test_lookups_to_from_bytes():
     lookups = Lookups()
     lookups.add_table("table1", {"foo": "bar", "hello": "world"})
@@ -91,7 +90,6 @@
     assert new_lookups.to_bytes() == lookups_bytes
 
 
-@pytest.mark.skip(reason="This fails on Python 3.5")
 def test_lookups_to_from_disk():
     lookups = Lookups()
     lookups.add_table("table1", {"foo": "bar", "hello": "world"})
@@ -111,7 +109,6 @@
     assert table2["b"] == 2
 
 
-@pytest.mark.skip(reason="This fails on Python 3.5")
 def test_lookups_to_from_bytes_via_vocab():
     table_name = "test"
     vocab = Vocab()
@@ -128,7 +125,6 @@
     assert new_vocab.to_bytes() == vocab_bytes
 
 
-@pytest.mark.skip(reason="This fails on Python 3.5")
 def test_lookups_to_from_disk_via_vocab():
     table_name = "test"
     vocab = Vocab()