#!/usr/bin/env python
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import codecs
import cProfile
import os
import pstats
import random
import re
import shutil
from os import path

import plac

import spacy.util
from spacy.en import English
from spacy.en.pos import POS_TEMPLATES, POS_TAGS, setup_model_dir
from spacy.syntax.parser import GreedyParser
from spacy.syntax.parser import OracleError
from spacy.syntax.util import Config
from spacy.syntax.conll import read_docparse_file, read_json_file
from spacy.syntax.conll import GoldParse
from spacy.scorer import Scorer


def train(Language, gold_tuples, model_dir, n_iter=15, feat_set=u'basic', seed=0,
          gold_preproc=False, n_sents=0):
    """Train the tagger, parser and entity recogniser for ``n_iter`` epochs.

    Arguments:
        Language: language class (e.g. English); instantiated from model_dir.
        gold_tuples: sequence of (raw_text, segmented_text, annot_tuples, ctnt)
            training examples.
        model_dir: output directory; the 'deps', 'pos' and 'ner' subdirectories
            are deleted and recreated from scratch.
        n_iter: number of passes over the training data.
        feat_set: feature-set name written into the parser config.
        seed: seed written into the parser/NER configs.
        gold_preproc: if True, train from the gold segmentation rather than
            re-tokenizing the raw text.
        n_sents: if > 0, truncate the training data to this many examples.
    """
    dep_model_dir = path.join(model_dir, 'deps')
    pos_model_dir = path.join(model_dir, 'pos')
    ner_model_dir = path.join(model_dir, 'ner')
    # Start from a clean slate: delete and recreate each component directory.
    for component_dir in (dep_model_dir, pos_model_dir, ner_model_dir):
        if path.exists(component_dir):
            shutil.rmtree(component_dir)
        os.mkdir(component_dir)

    setup_model_dir(sorted(POS_TAGS.keys()), POS_TAGS, POS_TEMPLATES,
                    pos_model_dir)

    Config.write(dep_model_dir, 'config', features=feat_set, seed=seed,
                 labels=Language.ParserTransitionSystem.get_labels(gold_tuples))
    Config.write(ner_model_dir, 'config', features='ner', seed=seed,
                 labels=Language.EntityTransitionSystem.get_labels(gold_tuples))

    if n_sents > 0:
        gold_tuples = gold_tuples[:n_sents]
    nlp = Language(data_dir=model_dir)

    print("Itn.\tUAS\tNER F.\tTag %\tToken %")
    for itn in range(n_iter):
        scorer = Scorer()
        for raw_text, segmented_text, annot_tuples, ctnt in gold_tuples:
            # Score the current model on this example *before* training on it,
            # so the per-epoch numbers are roughly held-out.
            tokens = nlp(raw_text, merge_mwes=False)
            gold = GoldParse(tokens, annot_tuples)
            scorer.score(tokens, gold, verbose=False)

            if gold_preproc:
                sents = [nlp.tokenizer.tokens_from_list(s)
                         for s in segmented_text]
            else:
                sents = [nlp.tokenizer(raw_text)]
            for tokens in sents:
                gold = GoldParse(tokens, annot_tuples)
                nlp.tagger(tokens)
                try:
                    nlp.parser.train(tokens, gold)
                except AssertionError:
                    # TODO: Do something about non-projective sentences.
                    # Skipping also skips NER/tagger updates for this sentence.
                    continue
                if gold.ents:
                    nlp.entity.train(tokens, gold)
                nlp.tagger.train(tokens, gold.tags)
        print('%d:\t%.3f\t%.3f\t%.3f\t%.3f' % (itn, scorer.uas, scorer.ents_f,
                                               scorer.tags_acc,
                                               scorer.token_acc))
        random.shuffle(gold_tuples)
    nlp.parser.model.end_training()
    nlp.entity.model.end_training()
    nlp.tagger.model.end_training()
    nlp.vocab.strings.dump(path.join(model_dir, 'vocab', 'strings.txt'))
|
2015-01-09 20:53:26 +03:00
|
|
|
|
|
|
|
|
2015-05-06 17:38:54 +03:00
|
|
|
def evaluate(Language, gold_tuples, model_dir, gold_preproc=False, verbose=True):
    """Score a trained model over ``gold_tuples`` and return the Scorer."""
    # Gold-standard pre-segmentation is not supported in evaluation.
    assert not gold_preproc
    nlp = Language(data_dir=model_dir)
    scorer = Scorer()
    for raw_text, segmented_text, annot_tuples, brackets in gold_tuples:
        parsed = nlp(raw_text, merge_mwes=False)
        scorer.score(parsed, GoldParse(parsed, annot_tuples), verbose=verbose)
    return scorer
|
2015-03-08 08:17:12 +03:00
|
|
|
|
|
|
|
|
2015-03-20 03:14:20 +03:00
|
|
|
def write_parses(Language, dev_loc, model_dir, out_loc):
    """Parse the docparse file at ``dev_loc`` and write one token per line
    (orth, tag, head orth, dep label, tab-separated) to ``out_loc``.

    Returns:
        A Scorer instance.  NOTE(review): the scorer is never updated here,
        so it is always empty -- confirm whether callers rely on it.
        NOTE(review): ``model_dir`` is accepted but unused; ``Language()`` is
        constructed with its default data directory -- confirm intent.
    """
    nlp = Language()
    gold_tuples = read_docparse_file(dev_loc)
    scorer = Scorer()
    # Use a context manager so the output file is closed even if parsing
    # raises (the previous version leaked the handle).
    with codecs.open(out_loc, 'w', 'utf8') as out_file:
        for raw_text, segmented_text, annot_tuples in gold_tuples:
            tokens = nlp(raw_text)
            for t in tokens:
                out_file.write(
                    '%s\t%s\t%s\t%s\n' % (t.orth_, t.tag_, t.head.orth_, t.dep_)
                )
    return scorer
|
|
|
|
|
|
|
|
|
2015-05-06 17:38:54 +03:00
|
|
|
def get_sents(json_dir, section):
    """Yield training sentences for ``section`` from ``json_dir``.

    If ``<json_dir>/<section>.json`` exists, sentences are read from that
    single file.  Otherwise the section name is mapped onto the conventional
    Penn Treebank section ranges (02-21 for 'train', 22 for 'dev') and each
    per-section file ``<NN>.json`` is read in turn.

    Raises:
        ValueError: if no ``<section>.json`` exists and section is neither
            'train' nor 'dev'.  (Previously this fell through to an obscure
            NameError on an undefined ``file_range``.)
    """
    section_loc = path.join(json_dir, section + '.json')
    if path.exists(section_loc):
        for sent in read_json_file(section_loc):
            yield sent
    else:
        if section == 'train':
            file_range = range(2, 22)
        elif section == 'dev':
            file_range = range(22, 23)
        else:
            raise ValueError(
                "Unknown section %r: expected 'train' or 'dev'" % section)
        for i in file_range:
            # Treebank section files are named with two-digit, zero-padded
            # section numbers, e.g. '02.json'.
            loc = path.join(json_dir, '%02d.json' % i)
            for sent in read_json_file(loc):
                yield sent
|
2015-05-06 17:38:54 +03:00
|
|
|
|
|
|
|
|
2015-02-23 22:05:04 +03:00
|
|
|
@plac.annotations(
    json_dir=("Annotated JSON files directory",),
    model_dir=("Location of output model directory",),
    out_loc=("Out location", "option", "o", str),
    n_sents=("Number of training sentences", "option", "n", int),
    verbose=("Verbose error reporting", "flag", "v", bool),
    debug=("Debug mode", "flag", "d", bool)
)
def main(json_dir, model_dir, n_sents=0, out_loc="", verbose=False,
         debug=False):
    """Train the English models from annotated JSON data, then evaluate on
    the dev section and print accuracy figures."""
    train(English, list(get_sents(json_dir, 'train')), model_dir,
          feat_set='basic' if not debug else 'debug',
          gold_preproc=False, n_sents=n_sents)
    if out_loc:
        # BUG FIX: ``dev_loc`` was referenced here without ever being
        # defined, so passing -o always raised NameError.  Assume the dev
        # data lives alongside the training JSON -- TODO confirm the
        # expected docparse location for write_parses().
        dev_loc = path.join(json_dir, 'dev.json')
        write_parses(English, dev_loc, model_dir, out_loc)
    scorer = evaluate(English, list(get_sents(json_dir, 'dev')),
                      model_dir, gold_preproc=False, verbose=verbose)
    # Single-argument print calls behave identically under Python 2 and 3.
    print('TOK %s' % (100 - scorer.token_acc))
    print('POS %s' % scorer.tags_acc)
    print('UAS %s' % scorer.uas)
    print('LAS %s' % scorer.las)
    print('NER P %s' % scorer.ents_p)
    print('NER R %s' % scorer.ents_r)
    print('NER F %s' % scorer.ents_f)
|
2015-04-19 11:31:31 +03:00
|
|
|
|
2015-01-09 20:53:26 +03:00
|
|
|
|
|
|
|
if __name__ == '__main__':
    # plac builds the command-line interface from main()'s annotations
    # and dispatches sys.argv to it.
    plac.call(main)
|