Norwegian language basics

Leif Uwe Vogelsang 2017-03-23 11:10:22 +01:00 committed by luvogels
parent 2bd89e7ade
commit bc9557b21f
5 changed files with 336 additions and 0 deletions

26  spacy/nb/__init__.py  Normal file

@@ -0,0 +1,26 @@
# encoding: utf8
from __future__ import unicode_literals, print_function

from os import path

from ..language import Language
from ..attrs import LANG

# Import language-specific data
from .language_data import *


# Create Language subclass
class NorwegianBokmal(Language):
    lang = 'nb'  # ISO 639-1 code for Norwegian Bokmål

    class Defaults(Language.Defaults):
        lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
        lex_attr_getters[LANG] = lambda text: 'nb'

        # Override defaults with the Norwegian data imported above
        tokenizer_exceptions = TOKENIZER_EXCEPTIONS
        # tag_map = TAG_MAP
        stop_words = STOP_WORDS
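
For reference, a minimal sanity check of the new subclass. This is a sketch under the assumption that the files from this commit are importable as spacy.nb; it only inspects class-level defaults, so no trained model is required:

    # Hypothetical snippet: verify the language code and the merged defaults.
    from spacy.nb import NorwegianBokmal

    print(NorwegianBokmal.lang)                                         # 'nb'
    print("f.eks." in NorwegianBokmal.Defaults.tokenizer_exceptions)    # True (merged in from ORTH_ONLY)
    print("ikke" in NorwegianBokmal.Defaults.stop_words)                # True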

28  spacy/nb/language_data.py  Normal file

@@ -0,0 +1,28 @@
# encoding: utf8
from __future__ import unicode_literals

# Import base language data
from .. import language_data as base

# Import util functions
from ..language_data import update_exc, strings_to_exc, expand_exc

# Import language-specific data from files
# from .tag_map import TAG_MAP
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS, ORTH_ONLY
from .morph_rules import MORPH_RULES


TOKENIZER_EXCEPTIONS = dict(TOKENIZER_EXCEPTIONS)
# TAG_MAP = dict(TAG_MAP)
STOP_WORDS = set(STOP_WORDS)

# Customize tokenizer exceptions
update_exc(TOKENIZER_EXCEPTIONS, strings_to_exc(ORTH_ONLY))
update_exc(TOKENIZER_EXCEPTIONS, expand_exc(TOKENIZER_EXCEPTIONS, "'", ""))
update_exc(TOKENIZER_EXCEPTIONS, strings_to_exc(base.EMOTICONS))

# Export
__all__ = ["TOKENIZER_EXCEPTIONS", "STOP_WORDS", "MORPH_RULES"]
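
For orientation, a rough sketch of what the exception merging above amounts to. This approximates the behaviour of the spaCy 1.x strings_to_exc/update_exc helpers rather than reproducing their exact implementation, and ORTH/LEMMA below are plain-string stand-ins for the real symbol constants:

    # Illustration only: each bare string from ORTH_ONLY becomes a one-token
    # exception with no extra attributes, then gets merged into the dict of
    # richer exceptions that already carry a lemma.
    ORTH, LEMMA = "orth", "lemma"   # stand-ins for spacy.symbols values

    def strings_to_exc(orths):
        return {orth: [{ORTH: orth}] for orth in orths}

    exceptions = {"jan.": [{ORTH: "jan.", LEMMA: "januar"}]}
    exceptions.update(strings_to_exc(["f.eks.", "bl.a."]))

    print(sorted(exceptions))   # ['bl.a.', 'f.eks.', 'jan.']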

67  spacy/nb/morph_rules.py  Normal file

@@ -0,0 +1,67 @@
# encoding: utf8
# Norwegian Bokmål
from __future__ import unicode_literals

from ..symbols import *
from ..language_data import PRON_LEMMA

# Based on the table of pronouns at https://no.wiktionary.org/wiki/Tillegg:Pronomen_i_norsk
MORPH_RULES = {
    "PRP": {
        "jeg": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Sing", "Case": "Nom"},
        "meg": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Sing", "Case": "Acc"},
        "du": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Case": "Nom"},
        "deg": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Case": "Acc"},
        # "han" also occurs as an object form in Bokmål, but a dict can only
        # hold one entry per form, so only the nominative reading is kept here.
        "han": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Masc", "Case": "Nom"},
        "ham": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Masc", "Case": "Acc"},
        "hun": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Fem", "Case": "Nom"},
        "henne": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Fem", "Case": "Acc"},
        "den": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Neut"},
        "det": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Neut"},
        # "seg" is the reflexive form for both singular and plural subjects,
        # so it gets a single entry without a Number feature.
        "seg": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Reflex": "Yes"},
        "vi": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Case": "Nom"},
        "oss": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Case": "Acc"},
        "dere": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Case": "Nom"},
        "de": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Plur", "Case": "Nom"},
        "dem": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Plur", "Case": "Acc"},

        # Possessives
        "min": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Sing", "Poss": "Yes", "Gender": "Masc"},
        "mi": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Sing", "Poss": "Yes", "Gender": "Fem"},
        "mitt": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Sing", "Poss": "Yes", "Gender": "Neut"},
        "mine": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Poss": "Yes"},
        "din": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Poss": "Yes", "Gender": "Masc"},
        "di": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Poss": "Yes", "Gender": "Fem"},
        "ditt": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Poss": "Yes", "Gender": "Neut"},
        "dine": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Poss": "Yes"},
        "hans": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Masc"},
        "hennes": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Fem"},
        "dens": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Neut"},
        "dets": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Neut"},
        "vår": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Poss": "Yes"},
        "vårt": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Poss": "Yes"},
        "våre": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Poss": "Yes"},
        # "deres" is the non-reflexive second/third person plural possessive
        # ("your"/"their"); the reflexive possessive is "sin"/"si"/"sitt"/"sine".
        "deres": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Plur", "Poss": "Yes"},
        "sin": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Masc", "Reflex": "Yes"},
        "si": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Fem", "Reflex": "Yes"},
        "sitt": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Poss": "Yes", "Gender": "Neut", "Reflex": "Yes"},
        "sine": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"}
    },

    # Finite forms of "være" ("to be")
    "VBZ": {
        # "er" has the same form for all persons; a single entry is enough.
        "er": {LEMMA: "være", "VerbForm": "Fin", "Person": "Three", "Tense": "Pres", "Mood": "Ind"}
    },

    "VBP": {
        "er": {LEMMA: "være", "VerbForm": "Fin", "Tense": "Pres", "Mood": "Ind"}
    },

    "VBD": {
        # "var" covers both singular and plural subjects
        "var": {LEMMA: "være", "VerbForm": "Fin", "Tense": "Past"},
        # "vært" is the past participle of "være"
        "vært": {LEMMA: "være", "VerbForm": "Part", "Tense": "Past"}
    }
}
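
As an illustration of how a table like this is meant to be read (the lookup itself happens inside spaCy's morphology machinery; this hypothetical helper only shows the access pattern, assuming MORPH_RULES from the module above is in scope):

    # Hypothetical lookup: coarse tag first, then the lower-cased word form.
    def lookup_morph(rules, tag, word):
        return rules.get(tag, {}).get(word.lower())

    features = lookup_morph(MORPH_RULES, "PRP", "Jeg")
    # -> the entry for "jeg": personal pronoun, first person singular, nominative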

40  spacy/nb/stop_words.py  Normal file

@@ -0,0 +1,40 @@
# encoding: utf8
from __future__ import unicode_literals
STOP_WORDS = set("""alle at av
bare begge ble blei bli blir blitt både båe
da de deg dei deim deira deires dem den denne der dere deres det dette di din disse ditt du dykk dykkar
eg ein eit eitt eller elles en enn er et ett etter
for fordi fra før
ha hadde han hans har hennar henne hennes her hjå ho hoe honom hoss hossen hun hva hvem hver hvilke hvilken hvis hvor hvordan hvorfor
i ikke ikkje ingen ingi inkje inn inni
ja jeg
kan kom korleis korso kun kunne kva kvar kvarhelst kven kvi kvifor
man mange me med medan meg meget mellom men mi min mine mitt mot mykje
ned no noe noen noka noko nokon nokor nokre når
og også om opp oss over
samme seg selv si sia sidan siden sin sine sitt sjøl skal skulle slik so som somme somt sånn
til
um upp ut uten
var vart varte ved vere verte vi vil ville vore vors vort vår være vært
å
""".split())

175  spacy/nb/tokenizer_exceptions.py  Normal file

@@ -0,0 +1,175 @@
# encoding: utf8
# Norwegian Bokmål
from __future__ import unicode_literals

from ..symbols import *
from ..language_data import PRON_LEMMA


TOKENIZER_EXCEPTIONS = {
    "jan.": [
        {ORTH: "jan.", LEMMA: "januar"}
    ],

    "feb.": [
        {ORTH: "feb.", LEMMA: "februar"}
    ],

    "jul.": [
        {ORTH: "jul.", LEMMA: "juli"}
    ]
}

ORTH_ONLY = ["adm.dir.",
"a.m.",
"Aq.",
"b.c.",
"bl.a.",
"bla.",
"bm.",
"bto.",
"ca.",
"cand.mag.",
"c.c.",
"co.",
"d.d.",
"dept.",
"d.m.",
"dr.philos.",
"dvs.",
"d.y.",
"E. coli",
"eg.",
"ekskl.",
"e.Kr.",
"el.",
"e.l.",
"et.",
"etg.",
"ev.",
"evt.",
"f.",
"f.eks.",
"fhv.",
"fk.",
"f.Kr.",
"f.o.m.",
"foreg.",
"fork.",
"fv.",
"fvt.",
"g.",
"gt.",
"gl.",
"gno.",
"gnr.",
"grl.",
"hhv.",
"hoh.",
"hr.",
"h.r.adv.",
"ifb.",
"ifm.",
"iht.",
"inkl.",
"istf.",
"jf.",
"jr.",
"jun.",
"kfr.",
"kgl.res.",
"kl.",
"komm.",
"kst.",
"lø.",
"ma.",
"mag.art.",
"m.a.o.",
"md.",
"mfl.",
"mill.",
"min.",
"m.m.",
"mnd.",
"moh.",
"Mr.",
"muh.",
"mv.",
"mva.",
"ndf.",
"no.",
"nov.",
"nr.",
"nto.",
"nyno.",
"n.å.",
"o.a.",
"off.",
"ofl.",
"okt.",
"o.l.",
"on.",
"op.",
"osv.",
"ovf.",
"p.",
"p.a.",
"Pb.",
"pga.",
"ph.d.",
"pkt.",
"p.m.",
"pr.",
"pst.",
"p.t.",
"red.anm.",
"ref.",
"res.",
"res.kap.",
"resp.",
"rv.",
"s.",
"s.d.",
"sen.",
"sep.",
"siviling.",
"sms.",
"spm.",
"sr.",
"sst.",
"st.",
"stip.",
"stk.",
"st.meld.",
"st.prp.",
"stud.",
"s.u.",
"sv.",
"sø.",
"s.å.",
"såk.",
"temp.",
"ti.",
"tils.",
"tilsv.",
"tl;dr",
"tlf.",
"to.",
"t.o.m.",
"ult.",
"utg.",
"v.",
"vedk.",
"vedr.",
"vg.",
"vgs.",
"vha.",
"vit.ass.",
"vn.",
"vol.",
"vs.",
"vsa.",
"årg.",
"årh."
]
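
To make the split between the two tables concrete, a small hedged example (assuming the module path from this commit): TOKENIZER_EXCEPTIONS entries carry extra attributes such as the lemma, while ORTH_ONLY entries only keep an abbreviation together as a single token and are merged in by language_data.py.

    from spacy.nb.tokenizer_exceptions import TOKENIZER_EXCEPTIONS, ORTH_ONLY

    print(TOKENIZER_EXCEPTIONS["jan."])   # one token, with the lemma 'januar' attached
    print("f.eks." in ORTH_ONLY)          # True -- kept whole, no lemma specified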