From 15d13efafdabc17a95d7cb463a5de5ccb123bd0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=B8ren=20Lind=20Kristiansen?= Date: Wed, 20 Dec 2017 17:36:52 +0100 Subject: [PATCH] Tune Danish tokenizer to more closely match tokenization in Universal Dependencies. --- spacy/lang/da/__init__.py | 3 + spacy/lang/da/punctuation.py | 24 +++ spacy/lang/da/tokenizer_exceptions.py | 154 ++++++++++------- .../tests/lang/da/test_prefix_suffix_infix.py | 160 ++++++++++++++++++ 4 files changed, 278 insertions(+), 63 deletions(-) create mode 100644 spacy/lang/da/punctuation.py create mode 100644 spacy/tests/lang/da/test_prefix_suffix_infix.py diff --git a/spacy/lang/da/__init__.py b/spacy/lang/da/__init__.py index 8b3fe1e05..d18576fba 100644 --- a/spacy/lang/da/__init__.py +++ b/spacy/lang/da/__init__.py @@ -3,6 +3,7 @@ from __future__ import unicode_literals from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS from .norm_exceptions import NORM_EXCEPTIONS +from .punctuation import TOKENIZER_INFIXES, TOKENIZER_SUFFIXES from .stop_words import STOP_WORDS from .lex_attrs import LEX_ATTRS from .morph_rules import MORPH_RULES @@ -23,6 +24,8 @@ class DanishDefaults(Language.Defaults): BASE_NORMS, NORM_EXCEPTIONS) tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS) morph_rules = MORPH_RULES + infixes = TOKENIZER_INFIXES + suffixes = TOKENIZER_SUFFIXES tag_map = TAG_MAP stop_words = STOP_WORDS diff --git a/spacy/lang/da/punctuation.py b/spacy/lang/da/punctuation.py new file mode 100644 index 000000000..1802f6a69 --- /dev/null +++ b/spacy/lang/da/punctuation.py @@ -0,0 +1,24 @@ +# coding: utf8 +from __future__ import unicode_literals + +from ..char_classes import LIST_ELLIPSES, LIST_ICONS +from ..char_classes import QUOTES, ALPHA, ALPHA_LOWER, ALPHA_UPPER +from ..punctuation import TOKENIZER_SUFFIXES + + +_quotes = QUOTES.replace("'", '') + +_infixes = (LIST_ELLIPSES + LIST_ICONS + + [r'(?<=[{}])\.(?=[{}])'.format(ALPHA_LOWER, ALPHA_UPPER), + r'(?<=[{a}])[,!?](?=[{a}])'.format(a=ALPHA), + r'(?<=[{a}"])[:<>=](?=[{a}])'.format(a=ALPHA), + r'(?<=[{a}]),(?=[{a}])'.format(a=ALPHA), + r'(?<=[{a}])([{q}\)\]\(\[])(?=[\{a}])'.format(a=ALPHA, q=_quotes), + r'(?<=[{a}])--(?=[{a}])'.format(a=ALPHA)]) + +_suffixes = [suffix for suffix in TOKENIZER_SUFFIXES if suffix not in ["'s", "'S", "’s", "’S", r"\'"]] +_suffixes += [r"(?<=[^sSxXzZ])\'"] + + +TOKENIZER_INFIXES = _infixes +TOKENIZER_SUFFIXES = _suffixes diff --git a/spacy/lang/da/tokenizer_exceptions.py b/spacy/lang/da/tokenizer_exceptions.py index 584ccf6f9..a015079fa 100644 --- a/spacy/lang/da/tokenizer_exceptions.py +++ b/spacy/lang/da/tokenizer_exceptions.py @@ -54,81 +54,109 @@ for exc_data in [ {ORTH: "Lør.", LEMMA: "lørdag"}]: _exc[exc_data[ORTH]] = [exc_data] + +# Specified case only for orth in [ - "A.D.", "A/S", "aarh.", "ac.", "adj.", "adr.", "adsk.", "adv.", "afb.", - "afd.", "afg.", "afk.", "afs.", "aht.", "alg.", "alk.", "alm.", "amer.", - "ang.", "ank.", "anl.", "anv.", "arb.", "arr.", "att.", "B.C.", "bd.", - "bdt.", "beg.", "begr.", "beh.", "bet.", "bev.", "bhk.", "bib.", - "bibl.", "bidr.", "bildl.", "bill.", "bio.", "biol.", "bk.", "BK.", - "bl.", "bl.a.", "borgm.", "bot.", "Boul.", "br.", "brolægn.", "bto.", - "bygn.", "ca.", "cand.", "Chr.", "d.", "d.d.", "d.m.", "d.s.", "d.s.s.", - "d.y.", "d.å.", "d.æ.", "da.", "dagl.", "dat.", "dav.", "def.", "dek.", - "dep.", "desl.", "diam.", "dir.", "disp.", "distr.", "div.", "dkr.", - "dl.", "do.", "dobb.", "Dr.", "dr.h.c", "Dronn.", "ds.", "dvs.", "e.b.", - "e.l.", "e.o.", "e.v.t.", "eftf.", 
"eftm.", "eg.", "egl.", "eks.", - "eksam.", "ekskl.", "eksp.", "ekspl.", "el.", "el.lign.", "emer.", - "endv.", "eng.", "enk.", "etc.", "etym.", "eur.", "evt.", "exam.", "f.", - "f.eks.", "f.m.", "f.n.", "f.o.", "f.o.m.", "f.s.v.", "f.t.", "f.v.t.", - "f.å.", "fa.", "fakt.", "fam.", "fem.", "ff.", "fg.", "fhv.", "fig.", - "filol.", "filos.", "fl.", "flg.", "fm.", "fmd.", "fol.", "forb.", - "foreg.", "foren.", "forf.", "fork.", "form.", "forr.", "fors.", + "diam.", "ib.", "mia.", "mik.", "pers.", "A.D.", "A/S", "B.C.", "BK.", + "Dr.", "Boul.", "Chr.", "Dronn.", "H.K.H.", "H.M.", "Hf.", "i/s", "I/S", + "Kprs.", "L.A.", "Ll.", "m/s", "M/S", "Mag.", "Mr.", "Ndr.", "Ph.d.", + "Prs.", "Rcp.", "Sdr.", "Skt.", "Spl.", "Vg."]: + _exc[orth] = [{ORTH: orth}] + + +for orth in [ + "aarh.", "ac.", "adj.", "adr.", "adsk.", "adv.", "afb.", "afd.", "afg.", + "afk.", "afs.", "aht.", "alg.", "alk.", "alm.", "amer.", "ang.", "ank.", + "anl.", "anv.", "arb.", "arr.", "att.", "bd.", "bdt.", "beg.", "begr.", + "beh.", "bet.", "bev.", "bhk.", "bib.", "bibl.", "bidr.", "bildl.", + "bill.", "biol.", "bk.", "bl.", "bl.a.", "borgm.", "br.", "brolægn.", + "bto.", "bygn.", "ca.", "cand.", "d.d.", "d.m.", "d.s.", "d.s.s.", + "d.y.", "d.å.", "d.æ.", "dagl.", "dat.", "dav.", "def.", "dek.", "dep.", + "desl.", "dir.", "disp.", "distr.", "div.", "dkr.", "dl.", "do.", + "dobb.", "dr.h.c", "dr.phil.", "ds.", "dvs.", "e.b.", "e.l.", "e.o.", + "e.v.t.", "eftf.", "eftm.", "egl.", "eks.", "eksam.", "ekskl.", "eksp.", + "ekspl.", "el.lign.", "emer.", "endv.", "eng.", "enk.", "etc.", "etym.", + "eur.", "evt.", "exam.", "f.eks.", "f.m.", "f.n.", "f.o.", "f.o.m.", + "f.s.v.", "f.t.", "f.v.t.", "f.å.", "fa.", "fakt.", "fam.", "ff.", + "fg.", "fhv.", "fig.", "filol.", "filos.", "fl.", "flg.", "fm.", "fmd.", + "fol.", "forb.", "foreg.", "foren.", "forf.", "fork.", "forr.", "fors.", "forsk.", "forts.", "fr.", "fr.u.", "frk.", "fsva.", "fuldm.", "fung.", "fx.", "fys.", "fær.", "g.d.", "g.m.", "gd.", "gdr.", "genuds.", "gl.", - "gn.", "gns.", "gr.", "grdl.", "gross.", "h.a.", "h.c.", "H.K.H.", - "H.M.", "hdl.", "henv.", "Hf.", "hhv.", "hj.hj.", "hj.spl.", "hort.", - "hosp.", "hpl.", "Hr.", "hr.", "hrs.", "hum.", "hvp.", "i/s", "I/S", - "i.e.", "ib.", "id.", "if.", "iflg.", "ifm.", "ift.", "iht.", "ill.", - "indb.", "indreg.", "inf.", "ing.", "inh.", "inj.", "inkl.", "insp.", - "instr.", "isl.", "istf.", "it.", "ital.", "iv.", "jap.", "jf.", "jfr.", - "jnr.", "j.nr.", "jr.", "jur.", "jvf.", "K.", "kap.", "kat.", "kbh.", - "kem.", "kgl.", "kl.", "kld.", "knsp.", "komm.", "kons.", "korr.", - "kp.", "Kprs.", "kr.", "kst.", "kt.", "ktr.", "kv.", "kvt.", "l.", - "L.A.", "l.c.", "lab.", "lat.", "lb.m.", "lb.nr.", "lejl.", "lgd.", - "lic.", "lign.", "lin.", "ling.merc.", "litt.", "Ll.", "loc.cit.", - "lok.", "lrs.", "ltr.", "m/s", "M/S", "m.a.o.", "m.fl.", "m.m.", "m.v.", - "m.v.h.", "Mag.", "maks.", "md.", "mdr.", "mdtl.", "mezz.", "mfl.", - "m.h.p.", "m.h.t", "mht.", "mik.", "min.", "mio.", "modt.", "Mr.", - "mrk.", "mul.", "mv.", "n.br.", "n.f.", "nat.", "nb.", "Ndr.", - "nedenst.", "nl.", "nr.", "Nr.", "nto.", "nuv.", "o/m", "o.a.", "o.fl.", - "o.h.", "o.l.", "o.lign.", "o.m.a.", "o.s.fr.", "obl.", "obs.", - "odont.", "oecon.", "off.", "ofl.", "omg.", "omkr.", "omr.", "omtr.", - "opg.", "opl.", "opr.", "org.", "orig.", "osv.", "ovenst.", "overs.", - "ovf.", "p.", "p.a.", "p.b.a", "p.b.v", "p.c.", "p.m.", "p.m.v.", - "p.n.", "p.p.", "p.p.s.", "p.s.", "p.t.", "p.v.a.", "p.v.c.", "pag.", - "par.", "Pas.", "pass.", "pcs.", "pct.", "pd.", "pens.", 
"pers.", - "pft.", "pg.", "pga.", "pgl.", "Ph.d.", "pinx.", "pk.", "pkt.", + "gn.", "gns.", "gr.", "grdl.", "gross.", "h.a.", "h.c.", "hdl.", + "henv.", "hhv.", "hj.hj.", "hj.spl.", "hort.", "hosp.", "hpl.", "hr.", + "hrs.", "hum.", "hvp.", "i.e.", "id.", "if.", "iflg.", "ifm.", "ift.", + "iht.", "ill.", "indb.", "indreg.", "inf.", "ing.", "inh.", "inj.", + "inkl.", "insp.", "instr.", "isl.", "istf.", "it.", "ital.", "iv.", + "jap.", "jf.", "jfr.", "jnr.", "j.nr.", "jr.", "jur.", "jvf.", "kap.", + "kbh.", "kem.", "kgl.", "kl.", "kld.", "knsp.", "komm.", "kons.", + "korr.", "kp.", "kr.", "kst.", "kt.", "ktr.", "kv.", "kvt.", "l.c.", + "lab.", "lat.", "lb.m.", "lb.nr.", "lejl.", "lgd.", "lic.", "lign.", + "lin.", "ling.merc.", "litt.", "loc.cit.", "lok.", "lrs.", "ltr.", + "m.a.o.", "m.fl.", "m.m.", "m.v.", "m.v.h.", "maks.", "md.", "mdr.", + "mdtl.", "mezz.", "mfl.", "m.h.p.", "m.h.t", "mht.", "mill.", "mio.", + "modt.", "mrk.", "mul.", "mv.", "n.br.", "n.f.", "nb.", "nedenst.", + "nl.", "nr.", "nto.", "nuv.", "o/m", "o.a.", "o.fl.", "o.h.", "o.l.", + "o.lign.", "o.m.a.", "o.s.fr.", "obl.", "obs.", "odont.", "oecon.", + "off.", "ofl.", "omg.", "omkr.", "omr.", "omtr.", "opg.", "opl.", + "opr.", "org.", "orig.", "osv.", "ovenst.", "overs.", "ovf.", "p.a.", + "p.b.a", "p.b.v", "p.c.", "p.m.", "p.m.v.", "p.n.", "p.p.", "p.p.s.", + "p.s.", "p.t.", "p.v.a.", "p.v.c.", "pag.", "pass.", "pcs.", "pct.", + "pd.", "pens.", "pft.", "pg.", "pga.", "pgl.", "pinx.", "pk.", "pkt.", "polit.", "polyt.", "pos.", "pp.", "ppm.", "pr.", "prc.", "priv.", - "prod.", "prof.", "pron.", "Prs.", "præd.", "præf.", "præt.", "psych.", - "pt.", "pæd.", "q.e.d.", "rad.", "Rcp.", "red.", "ref.", "reg.", - "regn.", "rel.", "rep.", "repr.", "resp.", "rest.", "rm.", "rtg.", - "russ.", "s.", "s.br.", "s.d.", "s.f.", "s.m.b.a.", "s.u.", "s.å.", - "sa.", "sb.", "sc.", "scient.", "scil.", "Sdr.", "sek.", "sekr.", - "self.", "sem.", "sen.", "shj.", "sign.", "sing.", "sj.", "skr.", - "Skt.", "slutn.", "sml.", "smp.", "sms.", "snr.", "soc.", "soc.dem.", - "sort.", "sp.", "spec.", "Spl.", "spm.", "spr.", "spsk.", "statsaut.", - "st.", "stk.", "str.", "stud.", "subj.", "subst.", "suff.", "sup.", - "suppl.", "sv.", "såk.", "sædv.", "sø.", "t/r", "t.", "t.h.", "t.o.", - "t.o.m.", "t.v.", "tab.", "tbl.", "tcp/ip", "td.", "tdl.", "tdr.", - "techn.", "tekn.", "temp.", "th.", "theol.", "ti.", "tidl.", "tilf.", - "tilh.", "till.", "tilsv.", "tjg.", "tkr.", "tlf.", "tlgr.", "to.", - "tr.", "trp.", "tsk.", "tv.", "ty.", "u/b", "udb.", "udbet.", "ugtl.", - "undt.", "v.", "v.f.", "var.", "vb.", "vedk.", "vedl.", "vedr.", - "vejl.", "Vg.", "vh.", "vha.", "vs.", "vsa.", "vær.", "zool.", "ø.lgd.", - "øv.", "øvr.", "årg.", "årh.", ""]: + "prod.", "prof.", "pron.", "præd.", "præf.", "præt.", "psych.", "pt.", + "pæd.", "q.e.d.", "rad.", "red.", "ref.", "reg.", "regn.", "rel.", + "rep.", "repr.", "resp.", "rest.", "rm.", "rtg.", "russ.", "s.br.", + "s.d.", "s.f.", "s.m.b.a.", "s.u.", "s.å.", "sa.", "sb.", "sc.", + "scient.", "scil.", "sek.", "sekr.", "self.", "sem.", "shj.", "sign.", + "sing.", "sj.", "skr.", "slutn.", "sml.", "smp.", "snr.", "soc.", + "soc.dem.", "sp.", "spec.", "spm.", "spr.", "spsk.", "statsaut.", "st.", + "stk.", "str.", "stud.", "subj.", "subst.", "suff.", "sup.", "suppl.", + "sv.", "såk.", "sædv.", "t/r", "t.h.", "t.o.", "t.o.m.", "t.v.", "tbl.", + "tcp/ip", "td.", "tdl.", "tdr.", "techn.", "tekn.", "temp.", "th.", + "theol.", "tidl.", "tilf.", "tilh.", "till.", "tilsv.", "tjg.", "tkr.", + "tlf.", "tlgr.", "tr.", "trp.", "tsk.", 
"tv.", "ty.", "u/b", "udb.", + "udbet.", "ugtl.", "undt.", "v.f.", "vb.", "vedk.", "vedl.", "vedr.", + "vejl.", "vh.", "vha.", "vs.", "vsa.", "vær.", "zool.", "ø.lgd.", + "øvr.", "årg.", "årh."]: _exc[orth] = [{ORTH: orth}] + capitalized = orth.capitalize() + _exc[capitalized] = [{ORTH: capitalized}] + +for exc_data in [ + {ORTH: "s'gu", LEMMA: "s'gu", NORM: "s'gu"}, + {ORTH: "S'gu", LEMMA: "s'gu", NORM: "s'gu"}, + {ORTH: "sgu'", LEMMA: "s'gu", NORM: "s'gu"}, + {ORTH: "Sgu'", LEMMA: "s'gu", NORM: "s'gu"}, + {ORTH: "sku'", LEMMA: "skal", NORM: "skulle"}, + {ORTH: "ku'", LEMMA: "kan", NORM: "kunne"}, + {ORTH: "Ku'", LEMMA: "kan", NORM: "kunne"}, + {ORTH: "ka'", LEMMA: "kan", NORM: "kan"}, + {ORTH: "Ka'", LEMMA: "kan", NORM: "kan"}, + {ORTH: "gi'", LEMMA: "give", NORM: "giv"}, + {ORTH: "Gi'", LEMMA: "give", NORM: "giv"}, + {ORTH: "li'", LEMMA: "lide", NORM: "lide"}, + {ORTH: "ha'", LEMMA: "have", NORM: "have"}, + {ORTH: "Ha'", LEMMA: "have", NORM: "have"}, + {ORTH: "ik'", LEMMA: "ikke", NORM: "ikke"}, + {ORTH: "Ik'", LEMMA: "ikke", NORM: "ikke"}]: + _exc[exc_data[ORTH]] = [exc_data] + # Dates for h in range(1, 31 + 1): for period in ["."]: - _exc["%d%s" % (h, period)] = [ - {ORTH: "%d." % h}] + _exc["%d%s" % (h, period)] = [{ORTH: "%d." % h}] _custom_base_exc = { - "i.": [ - {ORTH: "i", LEMMA: "i", NORM: "i"}, - {ORTH: ".", TAG: PUNCT}] + "i.": [{ + ORTH: "i", + LEMMA: "i", + NORM: "i" + }, { + ORTH: ".", + TAG: PUNCT + }] } _exc.update(_custom_base_exc) - TOKENIZER_EXCEPTIONS = _exc diff --git a/spacy/tests/lang/da/test_prefix_suffix_infix.py b/spacy/tests/lang/da/test_prefix_suffix_infix.py new file mode 100644 index 000000000..4cf0719c9 --- /dev/null +++ b/spacy/tests/lang/da/test_prefix_suffix_infix.py @@ -0,0 +1,160 @@ +# coding: utf-8 +"""Test that tokenizer prefixes, suffixes and infixes are handled correctly.""" +from __future__ import unicode_literals + +import pytest + + +@pytest.mark.parametrize('text', ["(under)"]) +def test_tokenizer_splits_no_special(da_tokenizer, text): + tokens = da_tokenizer(text) + assert len(tokens) == 3 + + +@pytest.mark.parametrize('text', ["ta'r", "Søren's", "Lars'"]) +def test_tokenizer_handles_no_punct(da_tokenizer, text): + tokens = da_tokenizer(text) + assert len(tokens) == 1 + + +@pytest.mark.parametrize('text', ["(ta'r"]) +def test_tokenizer_splits_prefix_punct(da_tokenizer, text): + tokens = da_tokenizer(text) + assert len(tokens) == 2 + assert tokens[0].text == "(" + assert tokens[1].text == "ta'r" + + +@pytest.mark.parametrize('text', ["ta'r)"]) +def test_tokenizer_splits_suffix_punct(da_tokenizer, text): + tokens = da_tokenizer(text) + assert len(tokens) == 2 + assert tokens[0].text == "ta'r" + assert tokens[1].text == ")" + + +@pytest.mark.parametrize('text,expected', [("(ta'r)", ["(", "ta'r", ")"]), ("'ta'r'", ["'", "ta'r", "'"])]) +def test_tokenizer_splits_even_wrap(da_tokenizer, text, expected): + tokens = da_tokenizer(text) + assert len(tokens) == len(expected) + assert [t.text for t in tokens] == expected + + +@pytest.mark.parametrize('text', ["(ta'r?)"]) +def test_tokenizer_splits_uneven_wrap(da_tokenizer, text): + tokens = da_tokenizer(text) + assert len(tokens) == 4 + assert tokens[0].text == "(" + assert tokens[1].text == "ta'r" + assert tokens[2].text == "?" 
+ assert tokens[3].text == ")" + + +@pytest.mark.parametrize('text,expected', [("f.eks.", ["f.eks."]), ("fe.", ["fe", "."]), ("(f.eks.", ["(", "f.eks."])]) +def test_tokenizer_splits_prefix_interact(da_tokenizer, text, expected): + tokens = da_tokenizer(text) + assert len(tokens) == len(expected) + assert [t.text for t in tokens] == expected + + +@pytest.mark.parametrize('text', ["f.eks.)"]) +def test_tokenizer_splits_suffix_interact(da_tokenizer, text): + tokens = da_tokenizer(text) + assert len(tokens) == 2 + assert tokens[0].text == "f.eks." + assert tokens[1].text == ")" + + +@pytest.mark.parametrize('text', ["(f.eks.)"]) +def test_tokenizer_splits_even_wrap_interact(da_tokenizer, text): + tokens = da_tokenizer(text) + assert len(tokens) == 3 + assert tokens[0].text == "(" + assert tokens[1].text == "f.eks." + assert tokens[2].text == ")" + + +@pytest.mark.parametrize('text', ["(f.eks.?)"]) +def test_tokenizer_splits_uneven_wrap_interact(da_tokenizer, text): + tokens = da_tokenizer(text) + assert len(tokens) == 4 + assert tokens[0].text == "(" + assert tokens[1].text == "f.eks." + assert tokens[2].text == "?" + assert tokens[3].text == ")" + + +@pytest.mark.parametrize('text', ["0,1-13,5", "0,0-0,1", "103,27-300", "1/2-3/4"]) +def test_tokenizer_handles_numeric_range(da_tokenizer, text): + tokens = da_tokenizer(text) + assert len(tokens) == 1 + + +@pytest.mark.parametrize('text', ["sort.Gul", "Hej.Verden"]) +def test_tokenizer_splits_period_infix(da_tokenizer, text): + tokens = da_tokenizer(text) + assert len(tokens) == 3 + + +@pytest.mark.parametrize('text', ["Hej,Verden", "en,to"]) +def test_tokenizer_splits_comma_infix(da_tokenizer, text): + tokens = da_tokenizer(text) + assert len(tokens) == 3 + assert tokens[0].text == text.split(",")[0] + assert tokens[1].text == "," + assert tokens[2].text == text.split(",")[1] + + +@pytest.mark.parametrize('text', ["sort...Gul", "sort...gul"]) +def test_tokenizer_splits_ellipsis_infix(da_tokenizer, text): + tokens = da_tokenizer(text) + assert len(tokens) == 3 + + +@pytest.mark.parametrize('text', ['gå-på-mod', '4-hjulstræk', '100-Pfennig-frimærke', 'TV-2-spots', 'trofæ-vaeggen']) +def test_tokenizer_keeps_hyphens(da_tokenizer, text): + tokens = da_tokenizer(text) + assert len(tokens) == 1 + + +def test_tokenizer_splits_double_hyphen_infix(da_tokenizer): + tokens = da_tokenizer("Mange regler--eksempelvis bindestregs-reglerne--er komplicerede.") + assert len(tokens) == 9 + assert tokens[0].text == "Mange" + assert tokens[1].text == "regler" + assert tokens[2].text == "--" + assert tokens[3].text == "eksempelvis" + assert tokens[4].text == "bindestregs-reglerne" + assert tokens[5].text == "--" + assert tokens[6].text == "er" + assert tokens[7].text == "komplicerede" + + +def test_tokenizer_handles_posessives_and_contractions(da_tokenizer): + tokens = da_tokenizer("'DBA's, Lars' og Liz' bil sku' sgu' ik' ha' en bule, det ka' han ik' li' mere', sagde hun.") + assert len(tokens) == 25 + assert tokens[0].text == "'" + assert tokens[1].text == "DBA's" + assert tokens[2].text == "," + assert tokens[3].text == "Lars'" + assert tokens[4].text == "og" + assert tokens[5].text == "Liz'" + assert tokens[6].text == "bil" + assert tokens[7].text == "sku'" + assert tokens[8].text == "sgu'" + assert tokens[9].text == "ik'" + assert tokens[10].text == "ha'" + assert tokens[11].text == "en" + assert tokens[12].text == "bule" + assert tokens[13].text == "," + assert tokens[14].text == "det" + assert tokens[15].text == "ka'" + assert tokens[16].text == "han" + 
+    assert tokens[17].text == "ik'"
+    assert tokens[18].text == "li'"
+    assert tokens[19].text == "mere"
+    assert tokens[20].text == "'"
+    assert tokens[21].text == ","
+    assert tokens[22].text == "sagde"
+    assert tokens[23].text == "hun"
+    assert tokens[24].text == "."
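
For reference, a minimal usage sketch (not part of the patch): it assumes the change above has been applied to a spaCy checkout, and it simply mirrors a few of the assertions from the new test file.

    # Sketch only: exercise the tuned Danish tokenizer after applying this patch.
    from spacy.lang.da import Danish

    nlp = Danish()

    # Abbreviations from the exception list survive as single tokens.
    print([t.text for t in nlp("f.eks.")])   # ['f.eks.']

    # Contractions and possessives are kept together; the new suffix rule no
    # longer splits a trailing apostrophe after s/S/x/X/z/Z (e.g. Lars').
    print([t.text for t in nlp("ta'r")])     # ["ta'r"]
    print([t.text for t in nlp("Lars'")])    # ["Lars'"]

    # Ordinary wrapping punctuation is still split off.
    print([t.text for t in nlp("(under)")])  # ['(', 'under', ')']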