Mirror of https://github.com/explosion/spaCy.git, synced 2024-12-26 09:56:28 +03:00
410fb7ee43
* Add more rules to deal with Japanese UD mappings

  Japanese UD rules sometimes give different UD tags to tokens with the same underlying POS tag. The UD spec indicates these cases should be disambiguated using the output of a tool called "comainu", but rules are enough to get the right result. These rules are taken from Ginza at time of writing, see #3756.

* Add new tags from GSD

  These are a few rare tags that aren't in Unidic but are in the GSD data.

* Add basic Japanese sentencization

  This code is taken from Ginza again.

* Add sentencizer quote handling

  Could probably add more paired characters, but this will do for now. Also includes some tests.

* Replace fugashi with SudachiPy

* Modify tag format to match GSD annotations

  Some of the tests still need to be updated, but I want to get this up for testing training.

* Deal with the case of closing punct without opening punct

* Refactor resolve_pos()

* Change the tag field separator from "," to "-"

* Add TAG_ORTH_MAP

* Add TAG_BIGRAM_MAP

* Revise rules for 連体詞 (adnominals)

* Improve POS accuracy by about 2%

* Add syntax_iterators.py (not mature yet)

* Improve syntax_iterators.py

* Add phrases including nouns and drop NPs consisting of STOP_WORDS

* First take at noun chunks

  This works in many situations but still has issues in others. If the start of a subtree has no noun, then nested phrases can be generated.

  また行きたい、そんな気持ちにさせてくれるお店です。
  [そんな気持ち, また行きたい、そんな気持ちにさせてくれるお店]

  For some reason て gets included sometimes; not sure why.

  ゲンに連れ添って円盤生物を調査するパートナーとなる。
  [て円盤生物, ...]

  Some phrases that look like they should be split are grouped together; not entirely sure that's wrong. This whole thing becomes one chunk:

  道の駅遠山郷北側からかぐら大橋南詰現道交点までの1.060kmのみ開通済み

* Use the new generic get_words_and_spaces

  The new get_words_and_spaces function is simpler than what was used for Japanese, so it's good to be able to switch to it. However, there was an issue: the new function works just on text, so POS info could get out of sync. Fixing this required a small change to the way dtokens (tokens with POS and lemma info) were generated. Specifically, multiple extraneous spaces now become a single token, so when generating dtokens, multiple space tokens should be created in a row. (See the sketch after this list.)

* Fix noun_chunks; should be working now

* Fix some tests, add naughty-strings tests

  Some of the existing tests changed because the tokenization mode of Sudachi changed to the more fine-grained A mode. Sudachi also has issues with some strings, so this adds a test against the naughty strings.

* Remove empty Sudachi tokens

  Not doing this creates zero-length tokens and causes errors in spaCy's internal processing.

* Add yield_bunsetu back in as a separate piece of code

Co-authored-by: Hiroshi Matsuda <40782025+hiroshi-matsuda-rit@users.noreply.github.com>
Co-authored-by: hiroshi <hiroshi_matsuda@megagon.ai>
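A minimal sketch of the whitespace behavior described in the get_words_and_spaces bullet, assuming spaCy's `spacy.util.get_words_and_spaces` helper; the strings are illustrative, not taken from the PR:

```python
from spacy.util import get_words_and_spaces

# Tokenizer output with no whitespace tokens, aligned against text that
# contains a run of extra spaces between the words.
text = "日本    語"
words = ["日本", "語"]

out_words, spaces = get_words_and_spaces(words, text)
print(out_words)  # ['日本', '   ', '語'] -- the leftover run is one token
print(spaces)     # [True, False, False]
```

Any per-token annotation list built alongside (the PR's dtokens) has to line up with this output one entry per token, including the merged space token.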
145 lines
4.4 KiB
Python
# coding: utf8
from __future__ import unicode_literals

from .stop_words import STOP_WORDS

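# map coarse UD POS tags to the type of phrase they can head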
POS_PHRASE_MAP = {
    "NOUN": "NP",
    "NUM": "NP",
    "PRON": "NP",
    "PROPN": "NP",

    "VERB": "VP",

    "ADJ": "ADJP",

    "ADV": "ADVP",

    "CCONJ": "CCONJP",
}


# Yields tuples of (bunsetu_tokens, phrase_type, phrase_tokens), where
# phrase_type is one of {"NP", "VP", "ADJP", "ADVP", "CCONJP"} or None.
def yield_bunsetu(doc, debug=False):
    bunsetu = []
    bunsetu_may_end = False
    phrase_type = None
    phrase = None
    prev = None
    prev_tag = None
    prev_dep = None
    prev_head = None
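    # Scan tokens left to right: `bunsetu` accumulates the current bunsetu,
    # `phrase` collects the head phrase inside it, and `bunsetu_may_end`
    # records that the head phrase is complete, so the next phrase-type
    # token should open a new bunsetu.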
    for t in doc:
        pos = t.pos_
        pos_type = POS_PHRASE_MAP.get(pos, None)
        tag = t.tag_
        dep = t.dep_
        head = t.head.i
        if debug:
            print(t.i, t.orth_, pos, pos_type, dep, head, bunsetu_may_end, phrase_type, phrase, bunsetu)

        # DET is always an individual bunsetu
        if pos == "DET":
            if bunsetu:
                yield bunsetu, phrase_type, phrase
            yield [t], None, None
            bunsetu = []
            bunsetu_may_end = False
            phrase_type = None
            phrase = None

        # opening punctuation (補助記号-括弧開, "open bracket") always splits
        # off a new bunsetu
        elif tag == "補助記号-括弧開":
            if bunsetu:
                yield bunsetu, phrase_type, phrase
            bunsetu = [t]
            bunsetu_may_end = True
            phrase_type = None
            phrase = None

        # the bunsetu head has not appeared yet
        elif phrase_type is None:
            if bunsetu and prev_tag == "補助記号-読点":  # after a comma (読点)
                yield bunsetu, phrase_type, phrase
                bunsetu = []
                bunsetu_may_end = False
                phrase_type = None
                phrase = None
            bunsetu.append(t)
            if pos_type:  # begin phrase
                phrase = [t]
                phrase_type = pos_type
                if pos_type in {"ADVP", "CCONJP"}:
                    bunsetu_may_end = True

        # entering a new bunsetu
        elif pos_type and (
            pos_type != phrase_type  # a different phrase type arises
            or bunsetu_may_end  # same phrase type, but the bunsetu already ended
        ):
            # exceptional case: NOUN to VERB
            if phrase_type == "NP" and pos_type == "VP" and prev_dep == "compound" and prev_head == t.i:
                bunsetu.append(t)
                phrase_type = "VP"
                phrase.append(t)
            # exceptional case: VERB to NOUN
            elif phrase_type == "VP" and pos_type == "NP" and (
                prev_dep == "compound" and prev_head == t.i
                or dep == "compound" and prev == head
                or prev_dep == "nmod" and prev_head == t.i
            ):
                bunsetu.append(t)
                phrase_type = "NP"
                phrase.append(t)
            else:
                yield bunsetu, phrase_type, phrase
                bunsetu = [t]
                bunsetu_may_end = False
                phrase_type = pos_type
                phrase = [t]

        # NOUN bunsetu
        elif phrase_type == "NP":
            bunsetu.append(t)
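            # keep extending the noun phrase through compound/nummod chains
            # and particles attached as `mark`; anything else means the head
            # phrase has ended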
            if not bunsetu_may_end and ((
                (pos_type == "NP" or pos == "SYM")
                and (prev_head == t.i or prev_head == head)
                and prev_dep in {"compound", "nummod"}
            ) or (
                pos == "PART" and (prev == head or prev_head == head) and dep == "mark"
            )):
                phrase.append(t)
            else:
                bunsetu_may_end = True

        # VERB bunsetu
        elif phrase_type == "VP":
            bunsetu.append(t)
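            # only a compound-verb chain keeps extending the phrase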
            if not bunsetu_may_end and pos == "VERB" and prev_head == t.i and prev_dep == "compound":
                phrase.append(t)
            else:
                bunsetu_may_end = True

        # ADJ bunsetu; 連体詞 (adnominals) fall through to the default branch
        elif phrase_type == "ADJP" and tag != "連体詞":
            bunsetu.append(t)
            if not bunsetu_may_end and ((
                pos == "NOUN"
                and (prev_head == t.i or prev_head == head)
                and prev_dep in {"amod", "compound"}
            ) or (
                pos == "PART" and (prev == head or prev_head == head) and dep == "mark"
            )):
                phrase.append(t)
            else:
                bunsetu_may_end = True

        # other bunsetu
        else:
            bunsetu.append(t)

        prev = t.i
        prev_tag = t.tag_
        prev_dep = t.dep_
        prev_head = head

    if bunsetu:
        yield bunsetu, phrase_type, phrase
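A minimal usage sketch, assuming a trained Japanese pipeline with a dependency parser (yield_bunsetu reads `pos_`, `tag_`, `dep_`, and `head`) and assuming this module is importable as `spacy.lang.ja.bunsetu`; the model name is illustrative:

```python
import spacy

from spacy.lang.ja.bunsetu import yield_bunsetu  # import path assumed

nlp = spacy.load("ja_example_model")  # hypothetical Japanese pipeline with a parser
doc = nlp("また行きたい、そんな気持ちにさせてくれるお店です。")

# Each item is (bunsetu_tokens, phrase_type, phrase_tokens).
for bunsetu, phrase_type, phrase in yield_bunsetu(doc):
    print(phrase_type, "".join(t.orth_ for t in bunsetu))
```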