From c1b4b6d5c6584b0f0a9d5e8fc6f5285143326c12 Mon Sep 17 00:00:00 2001
From: sani
Date: Mon, 8 May 2023 01:04:03 +0800
Subject: [PATCH] black format

---
 spacy/lang/ms/examples.py                       | 3 +--
 spacy/lang/ms/lex_attrs.py                      | 2 +-
 spacy/tests/lang/ms/test_prefix_suffix_infix.py | 4 +++-
 3 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/spacy/lang/ms/examples.py b/spacy/lang/ms/examples.py
index 17080ff45..97ab19b6e 100644
--- a/spacy/lang/ms/examples.py
+++ b/spacy/lang/ms/examples.py
@@ -11,8 +11,7 @@ sentences = [
     "Berapa banyak pelajar yang akan menghadiri majlis perpisahan sekolah?",
     "Pengeluaran makanan berasal dari beberapa lokasi termasuk Cameron Highlands, Johor Bahru, dan Kuching.",
     "Syarikat XYZ telah menghasilkan 20,000 unit produk baharu dalam setahun terakhir",
-    "Kuala Lumpur merupakan ibu negara Malaysia."
-    "Kau berada di mana semalam?",
+    "Kuala Lumpur merupakan ibu negara Malaysia." "Kau berada di mana semalam?",
     "Siapa yang akan memimpin projek itu?",
     "Siapa perdana menteri Malaysia sekarang?",
 ]
diff --git a/spacy/lang/ms/lex_attrs.py b/spacy/lang/ms/lex_attrs.py
index d10100b60..42759fa4f 100644
--- a/spacy/lang/ms/lex_attrs.py
+++ b/spacy/lang/ms/lex_attrs.py
@@ -30,7 +30,7 @@ _num_words = [
     "septilion",
     "oktilion",
     "nonilion",
-    "desilion"
+    "desilion",
 ]
 
 
diff --git a/spacy/tests/lang/ms/test_prefix_suffix_infix.py b/spacy/tests/lang/ms/test_prefix_suffix_infix.py
index eee561345..0d2b2c507 100644
--- a/spacy/tests/lang/ms/test_prefix_suffix_infix.py
+++ b/spacy/tests/lang/ms/test_prefix_suffix_infix.py
@@ -1,5 +1,6 @@
 import pytest
 
+
 @pytest.mark.parametrize("text", ["(Ma'arif)"])
 def test_ms_tokenizer_splits_no_special(id_tokenizer, text):
     tokens = id_tokenizer(text)
@@ -61,7 +62,8 @@ def test_ms_tokenizer_splits_uneven_wrap_interact(id_tokenizer, text):
 
 
 @pytest.mark.parametrize(
-    "text,length", [("kerana", 1), ("Mahathir-Anwar", 3), ("Tun Dr. Ismail-Abdul Rahman", 6)]
+    "text,length",
+    [("kerana", 1), ("Mahathir-Anwar", 3), ("Tun Dr. Ismail-Abdul Rahman", 6)],
 )
 def test_my_tokenizer_splits_hyphens(ms_tokenizer, text, length):
     tokens = ms_tokenizer(text)
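
Note on the first hunk: the two string literals that black joined onto one line in
spacy/lang/ms/examples.py are adjacent literals with no comma between them, so Python
concatenates them into a single element of the sentences list. A minimal standalone
sketch (illustrative only, not part of the patch) showing that behavior:

    # Adjacent string literals are concatenated at compile time, so this
    # list holds ONE sentence, not two; black only made the join visible.
    sentences = [
        "Kuala Lumpur merupakan ibu negara Malaysia." "Kau berada di mana semalam?",
    ]
    assert len(sentences) == 1
    # The joined value runs the two sentences together with no separator:
    assert sentences[0].endswith("Malaysia.Kau berada di mana semalam?")

If two separate example sentences were intended, a comma after the first literal would
split them; this patch is formatting-only, so the runtime value is unchanged.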