diff --git a/spacy/lang/da/tokenizer_exceptions.py b/spacy/lang/da/tokenizer_exceptions.py
index 773bf1512..21ab27d7f 100644
--- a/spacy/lang/da/tokenizer_exceptions.py
+++ b/spacy/lang/da/tokenizer_exceptions.py
@@ -117,6 +117,12 @@ for orth in [
     "øv.", "øvr.", "årg.", "årh.", ""]:
     _exc[orth] = [{ORTH: orth}]
 
+# Dates
+for h in range(1, 31 + 1):
+    for period in ["."]:
+        _exc["%d%s" % (h, period)] = [
+            {ORTH: "%d." % h}]
+
 _custom_base_exc = {
     "i.": [
         {ORTH: "i", LEMMA: "i", NORM: "i"},
diff --git a/spacy/tests/lang/da/test_exceptions.py b/spacy/tests/lang/da/test_exceptions.py
index d836a6b5c..71e34fc5c 100644
--- a/spacy/tests/lang/da/test_exceptions.py
+++ b/spacy/tests/lang/da/test_exceptions.py
@@ -14,6 +14,11 @@ def test_da_tokenizer_handles_ambiguous_abbr(da_tokenizer, text):
     tokens = da_tokenizer(text)
     assert len(tokens) == 2
 
+@pytest.mark.parametrize('text', ["1.", "10.", "31."])
+def test_da_tokenizer_handles_dates(da_tokenizer, text):
+    tokens = da_tokenizer(text)
+    assert len(tokens) == 1
+
 def test_da_tokenizer_handles_exc_in_text(da_tokenizer):
     text = "Det er bl.a. ikke meningen"
     tokens = da_tokenizer(text)
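
For reference, a minimal sketch of how the new exceptions behave once the patch is applied (this snippet is not part of the patch; it assumes a spaCy installation that ships the Danish language data and uses spacy.blank to build a bare Danish pipeline):

    import spacy

    # Build a bare Danish pipeline; only the tokenizer is needed here.
    nlp = spacy.blank("da")

    # With the ordinal exceptions added above, day numbers 1. through 31.
    # are kept as single tokens instead of being split into a number
    # followed by a period.
    doc = nlp("Mødet er den 3. i næste måned")
    print([t.text for t in doc])
    # expected: ['Mødet', 'er', 'den', '3.', 'i', 'næste', 'måned']

Since the inner loop only iterates over the single "." suffix, the patch effectively registers the 31 ordinal forms "1." through "31." as one-token exceptions, which is what the parametrized test cases "1.", "10." and "31." exercise.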