2015-10-09 10:44:21 +03:00
|
|
|
from __future__ import unicode_literals, print_function
|
2015-01-04 22:01:32 +03:00
|
|
|
import codecs
|
2016-09-24 21:26:17 +03:00
|
|
|
import pathlib
|
2014-12-23 07:16:57 +03:00
|
|
|
|
2016-10-20 22:23:26 +03:00
|
|
|
import ujson as json
|
2015-08-27 10:16:11 +03:00
|
|
|
|
2015-10-09 10:44:21 +03:00
|
|
|
from .parts_of_speech import NOUN, VERB, ADJ, PUNCT
|
2015-08-27 10:16:11 +03:00
|
|
|
|
2014-12-23 07:16:57 +03:00
|
|
|
|
|
|
|
class Lemmatizer(object):
    """Look up lemmas using WordNet index/exception tables plus suffix rules."""

    @classmethod
    def load(cls, path):
        """Alternate constructor: read the lookup tables from *path*.

        Expects ``path / 'wordnet' / 'index.<pos>'`` and
        ``path / 'wordnet' / '<pos>.exc'`` files for adj/noun/verb, plus
        ``path / 'vocab' / 'lemma_rules.json'``. Missing wordnet files
        yield empty tables rather than raising.
        """
        index = {}
        exc = {}
        for pos in ['adj', 'noun', 'verb']:
            pos_index_path = path / 'wordnet' / 'index.{pos}'.format(pos=pos)
            if pos_index_path.exists():
                with pos_index_path.open() as file_:
                    index[pos] = read_index(file_)
            else:
                index[pos] = set()
            pos_exc_path = path / 'wordnet' / '{pos}.exc'.format(pos=pos)
            if pos_exc_path.exists():
                with pos_exc_path.open() as file_:
                    exc[pos] = read_exc(file_)
            else:
                exc[pos] = {}
        with (path / 'vocab' / 'lemma_rules.json').open('r', encoding='utf8') as file_:
            rules = json.load(file_)
        return cls(index, exc, rules)

    def __init__(self, index, exceptions, rules):
        # index: {pos: set of known base forms} (see read_index / load)
        # exceptions: {pos: {inflected: (lemma, ...)}} (see read_exc / load)
        # rules: {pos: [[old_suffix, new_suffix], ...]} (consumed by lemmatize)
        self.index = index
        self.exc = exceptions
        self.rules = rules

    def __call__(self, string, univ_pos, **morphology):
        """Return the set of candidate lemmas for *string* with the given POS.

        Accepts either the universal-POS integer constants or their
        lowercase string names.
        """
        if univ_pos == NOUN:
            univ_pos = 'noun'
        elif univ_pos == VERB:
            univ_pos = 'verb'
        elif univ_pos == ADJ:
            univ_pos = 'adj'
        elif univ_pos == PUNCT:
            univ_pos = 'punct'
        # See Issue #435 for example of where this logic is required.
        if self.is_base_form(univ_pos, **morphology):
            return {string.lower()}
        # Default to an empty set, matching the value type stored in
        # self.index, so lemmatize's membership test always works.
        lemmas = lemmatize(string, self.index.get(univ_pos, set()),
                           self.exc.get(univ_pos, {}),
                           self.rules.get(univ_pos, []))
        return lemmas

    def is_base_form(self, univ_pos, **morphology):
        '''Check whether we're dealing with an uninflected paradigm, so we can
        avoid lemmatization entirely.'''
        # Any morphological feature beyond these three forces lemmatization.
        others = [key for key in morphology
                  if key not in ('number', 'pos', 'verbform')]
        if univ_pos == 'noun' and morphology.get('number') == 'sing' and not others:
            return True
        elif univ_pos == 'verb' and morphology.get('verbform') == 'inf' and not others:
            return True
        else:
            return False

    def noun(self, string, **morphology):
        return self(string, 'noun', **morphology)

    def verb(self, string, **morphology):
        return self(string, 'verb', **morphology)

    def adj(self, string, **morphology):
        return self(string, 'adj', **morphology)

    def punct(self, string, **morphology):
        return self(string, 'punct', **morphology)
|
2015-10-09 10:44:21 +03:00
|
|
|
|
2014-12-23 07:16:57 +03:00
|
|
|
|
|
|
|
def lemmatize(string, index, exceptions, rules):
    """Return the set of candidate lemmas for *string*.

    Candidates come from the exception table plus every suffix rule whose
    result is either a known base form or a non-alphabetic string. Falls
    back to the lowercased input when nothing matches.
    """
    string = string.lower()
    # TODO: Is this correct? See discussion in Issue #435.
    #if string in index:
    #    forms.append(string)
    candidates = list(exceptions.get(string, []))
    for suffix, replacement in rules:
        if not string.endswith(suffix):
            continue
        candidate = string[:len(string) - len(suffix)] + replacement
        # Keep the rewritten form only if it is a known base form, or if
        # it is non-alphabetic (e.g. contains digits or punctuation).
        if candidate in index or not candidate.isalpha():
            candidates.append(candidate)
    return set(candidates) if candidates else {string}
|
|
|
|
|
|
|
|
|
2015-12-07 08:01:28 +03:00
|
|
|
def read_index(fileobj):
    """Collect the set of single-token headwords from an open index file.

    Skips lines that begin with a space (file preamble) and entries whose
    headword contains an underscore (multi-word entries).
    """
    words = set()
    for line in fileobj:
        if line.startswith(' '):
            continue
        headword = line.split()[0]
        if '_' not in headword:
            words.add(headword)
    return words
|
|
|
|
|
|
|
|
|
2015-12-07 08:01:28 +03:00
|
|
|
def read_exc(fileobj):
    """Parse an open exception file into ``{inflected: (lemma, ...)}``.

    Skips lines that begin with a space (file preamble); the first
    whitespace-separated field is the key, the rest form the value tuple.
    """
    mapping = {}
    for line in fileobj:
        if line.startswith(' '):
            continue
        fields = line.split()
        mapping[fields[0]] = tuple(fields[1:])
    return mapping
|