# encoding: utf8
from __future__ import unicode_literals, print_function

from os import path

from ..language import Language, BaseDefaults
from ..tokenizer import Tokenizer
from ..tagger import Tagger
from ..attrs import LANG
from ..tokens import Doc

from .language_data import *

import re
from collections import namedtuple

ShortUnitWord = namedtuple('ShortUnitWord', ['surface', 'base_form', 'part_of_speech'])


def try_mecab_import():
    """MeCab is required for Japanese support, so check for it.

    If it's not available, blow up and explain how to fix it."""
    try:
        import MeCab
        return MeCab
    except ImportError:
        raise ImportError("Japanese support requires MeCab: "
                          "https://github.com/SamuraiT/mecab-python3")
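
# The binding package linked above is mecab-python3; note that MeCab also
# needs a dictionary (e.g. ipadic or UniDic), which is installed separately.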

class JapaneseTokenizer(object):
    def __init__(self, cls, nlp=None):
        self.vocab = nlp.vocab if nlp is not None else cls.create_vocab(nlp)
        MeCab = try_mecab_import()
        self.tokenizer = MeCab.Tagger()

    def __call__(self, text):
        words = [x.surface for x in detailed_tokens(self.tokenizer, text)]
        return Doc(self.vocab, words=words, spaces=[False]*len(words))
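
# A minimal usage sketch (hypothetical; assumes MeCab plus a dictionary are
# installed and `nlp` is a Japanese pipeline object):
#
#     tokenizer = JapaneseDefaults.create_tokenizer(nlp)
#     doc = tokenizer('日本語だよ')
#     # each token is one MeCab surface form; spaces are all False, since
#     # Japanese text is written without whitespace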

def resolve_pos(token):
    """If necessary, add a field to the POS tag for UD mapping.

    Under Universal Dependencies, sometimes the same UniDic POS tag can
    be mapped differently depending on the literal token or its context
    in the sentence. This function adds information to the POS tag to
    resolve ambiguous mappings.
    """

    # NOTE: This is a first take. The rules here are crude approximations.
    # For many of these, full dependencies are needed to properly resolve
    # POS mappings.

    if token.part_of_speech == '連体詞,*,*,*':
        if re.match('^[こそあど此其彼]の', token.surface):
            return token.part_of_speech + ',DET'
        elif re.match('^[こそあど此其彼]', token.surface):
            return token.part_of_speech + ',PRON'
        else:
            return token.part_of_speech + ',ADJ'
    return token.part_of_speech
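
# Illustrative examples (a sketch, using the ShortUnitWord fields defined
# above; POS strings use the four comma-joined dictionary fields):
#
#     resolve_pos(ShortUnitWord('その', 'その', '連体詞,*,*,*'))    # -> '連体詞,*,*,*,DET'
#     resolve_pos(ShortUnitWord('こんな', 'こんな', '連体詞,*,*,*'))  # -> '連体詞,*,*,*,PRON'
#     resolve_pos(ShortUnitWord('大きな', '大きな', '連体詞,*,*,*'))  # -> '連体詞,*,*,*,ADJ'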

def detailed_tokens(tokenizer, text):
    """Format MeCab output into a nice data structure, based on Janome."""

    node = tokenizer.parseToNode(text)
    node = node.next  # first node is beginning of sentence and empty, skip it
    words = []
    while node.posid != 0:
        surface = node.surface
        base = surface  # a default value; overwritten below when available
        parts = node.feature.split(',')
        pos = ','.join(parts[0:4])

        if len(parts) > 7:
            # this information is only available for words in the tokenizer dictionary
            reading = parts[6]
            base = parts[7]

        words.append(ShortUnitWord(surface, base, pos))
        node = node.next
    return words
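
# For example (illustrative output; the exact POS fields depend on the
# installed MeCab dictionary):
#
#     detailed_tokens(MeCab.Tagger(), '走った')
#     # -> [ShortUnitWord(surface='走っ', base_form='走る', part_of_speech='動詞,一般,*,*'),
#     #     ShortUnitWord(surface='た', base_form='た', part_of_speech='助動詞,*,*,*')]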

class JapaneseTagger(object):
    def __init__(self, vocab):
        MeCab = try_mecab_import()
        self.tagger = Tagger(vocab)
        self.tokenizer = MeCab.Tagger()

    def __call__(self, tokens):
        # two parts to this:
        # 1. get raw JP tags
        # 2. add features to tags as necessary for UD
        dtokens = detailed_tokens(self.tokenizer, tokens.text)
        rawtags = list(map(resolve_pos, dtokens))
        self.tagger.tag_from_strings(tokens, rawtags)
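
# For a sentence like '日本語だよ', `rawtags` would hold one dictionary-style
# tag string per token (e.g. '名詞,普通名詞,一般,*'), which the TAG_MAP
# imported above then maps to Universal Dependencies tags.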

class JapaneseDefaults(BaseDefaults):
    tag_map = TAG_MAP

    @classmethod
    def create_tokenizer(cls, nlp=None):
        return JapaneseTokenizer(cls, nlp)

    @classmethod
    def create_tagger(cls, tokenizer):
        return JapaneseTagger(tokenizer.vocab)


class Japanese(Language):
    lang = 'ja'
    Defaults = JapaneseDefaults

    def make_doc(self, text):
        words = [t.text for t in self.tokenizer(text)]
        doc = Doc(self.vocab, words=words, spaces=[False]*len(words))
        tagger = JapaneseDefaults.create_tagger(self.tokenizer)
        tagger(doc)
        return doc
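
# A minimal end-to-end sketch (hypothetical usage; requires MeCab and a
# dictionary to be installed):
#
#     nlp = Japanese()
#     doc = nlp.make_doc('日本語だよ')
#     for token in doc:
#         print(token.text, token.tag_)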