spaCy/spacy/lemmatizer.py

92 lines
2.4 KiB
Python
Raw Normal View History

2015-01-04 21:51:26 +03:00
from __future__ import unicode_literals
2014-12-23 07:16:57 +03:00
from os import path
2015-01-04 22:01:32 +03:00
import codecs
2014-12-23 07:16:57 +03:00
try:
import ujson as json
except ImportError:
import json
from .parts_of_speech import NOUN, VERB, ADJ
2014-12-23 07:16:57 +03:00
class Lemmatizer(object):
    """Look up lemmas for noun/verb/adjective strings using WordNet index and
    exception files, plus a table of suffix-rewrite rules."""

    @classmethod
    def from_dir(cls, data_dir):
        """Load lemmatization data from *data_dir*.

        Reads ``wordnet/index.{adj,noun,verb}`` and ``wordnet/{adj,noun,verb}.exc``,
        and optionally ``vocab/lemma_rules.json``. Missing files are tolerated
        (they yield empty tables).
        """
        index = {}
        exc = {}
        for pos in ['adj', 'noun', 'verb']:
            index[pos] = read_index(path.join(data_dir, 'wordnet', 'index.%s' % pos))
            exc[pos] = read_exc(path.join(data_dir, 'wordnet', '%s.exc' % pos))
        rules_loc = path.join(data_dir, 'vocab', 'lemma_rules.json')
        if path.exists(rules_loc):
            # Use a context manager so the file handle is closed promptly
            # (the previous bare open() leaked the handle).
            with open(rules_loc) as file_:
                rules = json.load(file_)
        else:
            rules = {}
        return cls(index, exc, rules)

    def __init__(self, index, exceptions, rules):
        self.index = index      # pos string -> set of known lemma forms
        self.exc = exceptions   # pos string -> {form: (lemma, ...)}
        self.rules = rules      # pos string -> [[old_suffix, new_suffix], ...]

    def __call__(self, string, pos):
        """Return the set of candidate lemmas for *string* with part-of-speech *pos*.

        *pos* may be one of the numeric tags NOUN/VERB/ADJ or the string keys
        'noun'/'verb'/'adj'.
        """
        # Map numeric part-of-speech IDs onto the string keys used in the tables.
        if pos == NOUN:
            pos = 'noun'
        elif pos == VERB:
            pos = 'verb'
        elif pos == ADJ:
            pos = 'adj'
        # Unknown pos values fall through with empty tables, so the input
        # string is returned unchanged (lemmatize's fallback).
        lemmas = lemmatize(string, self.index.get(pos, {}),
                           self.exc.get(pos, {}), self.rules.get(pos, []))
        return lemmas

    def noun(self, string):
        return self(string, 'noun')

    def verb(self, string):
        return self(string, 'verb')

    def adj(self, string):
        return self(string, 'adj')
2014-12-23 07:16:57 +03:00
def lemmatize(string, index, exceptions, rules):
    """Return the set of candidate lemmas for *string*.

    A candidate is: the lowercased string itself when it appears in *index*,
    any forms listed for it in *exceptions*, and any suffix-rule rewrite
    (old-suffix -> new-suffix) whose result appears in *index*. When nothing
    matches, the lowercased string is returned as the sole candidate.
    """
    string = string.lower()
    forms = [string] if string in index else []
    forms.extend(exceptions.get(string, []))
    for suffix, replacement in rules:
        if not string.endswith(suffix):
            continue
        candidate = string[:len(string) - len(suffix)] + replacement
        if candidate in index:
            forms.append(candidate)
    # Fall back to the input string when no known form was found.
    return set(forms) if forms else {string}
def read_index(loc):
    """Read a WordNet index file at *loc* into a set of single-word lemmas.

    Returns an empty set when the file does not exist. Lines starting with a
    space (the licence header WordNet files carry) and multi-word entries
    (joined with underscores) are skipped.
    """
    index = set()
    if not path.exists(loc):
        return index
    # Context manager ensures the handle is closed (the original leaked it).
    with codecs.open(loc, 'r', 'utf8') as file_:
        for line in file_:
            if line.startswith(' '):
                continue
            pieces = line.split()
            # Guard against blank lines, which split() turns into [].
            if not pieces:
                continue
            word = pieces[0]
            if word.count('_') == 0:
                index.add(word)
    return index
def read_exc(loc):
    """Read a WordNet exception file at *loc* into a dict of form -> lemmas.

    Each line maps an irregular form (first token) to a tuple of its lemmas
    (remaining tokens). Returns an empty dict when the file does not exist;
    licence-header lines (starting with a space) are skipped.
    """
    exceptions = {}
    if not path.exists(loc):
        return exceptions
    # Context manager ensures the handle is closed (the original leaked it).
    with codecs.open(loc, 'r', 'utf8') as file_:
        for line in file_:
            if line.startswith(' '):
                continue
            pieces = line.split()
            # Guard against blank lines, which split() turns into [].
            if not pieces:
                continue
            exceptions[pieces[0]] = tuple(pieces[1:])
    return exceptions