mirror of https://github.com/explosion/spaCy.git (synced 2024-11-10 19:57:17 +03:00)

Auto-format examples

commit 45798cc53e
parent 6f2d3c863a
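The diff below was produced by running an automated code formatter over the example scripts; the resulting style (double quotes, trailing commas, one argument per line in wrapped calls) appears to match Black's defaults, although the commit message does not name the tool. A minimal before/after sketch of the kind of change involved, using two lines that appear in the first file of the diff:

    # before auto-formatting
    nlp = spacy.load('en_vectors_web_lg')
    nlp.add_pipe(nlp.create_pipe('sentencizer'))

    # after auto-formatting (double quotes, behaviour unchanged)
    nlp = spacy.load("en_vectors_web_lg")
    nlp.add_pipe(nlp.create_pipe("sentencizer"))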
@@ -1,5 +1,12 @@
 """
-This example shows how to use an LSTM sentiment classification model trained using Keras in spaCy. spaCy splits the document into sentences, and each sentence is classified using the LSTM. The scores for the sentences are then aggregated to give the document score. This kind of hierarchical model is quite difficult in "pure" Keras or Tensorflow, but it's very effective. The Keras example on this dataset performs quite poorly, because it cuts off the documents so that they're a fixed size. This hurts review accuracy a lot, because people often summarise their rating in the final sentence
+This example shows how to use an LSTM sentiment classification model trained
+using Keras in spaCy. spaCy splits the document into sentences, and each
+sentence is classified using the LSTM. The scores for the sentences are then
+aggregated to give the document score. This kind of hierarchical model is quite
+difficult in "pure" Keras or Tensorflow, but it's very effective. The Keras
+example on this dataset performs quite poorly, because it cuts off the documents
+so that they're a fixed size. This hurts review accuracy a lot, because people
+often summarise their rating in the final sentence

 Prerequisites:
 spacy download en_vectors_web_lg
@@ -25,9 +32,9 @@ import spacy
 class SentimentAnalyser(object):
     @classmethod
     def load(cls, path, nlp, max_length=100):
-        with (path / 'config.json').open() as file_:
+        with (path / "config.json").open() as file_:
             model = model_from_json(file_.read())
-        with (path / 'model').open('rb') as file_:
+        with (path / "model").open("rb") as file_:
             lstm_weights = pickle.load(file_)
         embeddings = get_embeddings(nlp.vocab)
         model.set_weights([embeddings] + lstm_weights)
@@ -69,12 +76,12 @@ def get_labelled_sentences(docs, doc_labels):
         for sent in doc.sents:
             sentences.append(sent)
             labels.append(y)
-    return sentences, numpy.asarray(labels, dtype='int32')
+    return sentences, numpy.asarray(labels, dtype="int32")


 def get_features(docs, max_length):
     docs = list(docs)
-    Xs = numpy.zeros((len(docs), max_length), dtype='int32')
+    Xs = numpy.zeros((len(docs), max_length), dtype="int32")
     for i, doc in enumerate(docs):
         j = 0
         for token in doc:
@@ -89,16 +96,25 @@ def get_features(docs, max_length):
     return Xs


-def train(train_texts, train_labels, dev_texts, dev_labels,
-          lstm_shape, lstm_settings, lstm_optimizer, batch_size=100,
-          nb_epoch=5, by_sentence=True):
+def train(
+    train_texts,
+    train_labels,
+    dev_texts,
+    dev_labels,
+    lstm_shape,
+    lstm_settings,
+    lstm_optimizer,
+    batch_size=100,
+    nb_epoch=5,
+    by_sentence=True,
+):
     print("Loading spaCy")
-    nlp = spacy.load('en_vectors_web_lg')
-    nlp.add_pipe(nlp.create_pipe('sentencizer'))
+    nlp = spacy.load("en_vectors_web_lg")
+    nlp.add_pipe(nlp.create_pipe("sentencizer"))
     embeddings = get_embeddings(nlp.vocab)
     model = compile_lstm(embeddings, lstm_shape, lstm_settings)

     print("Parsing texts...")
     train_docs = list(nlp.pipe(train_texts))
     dev_docs = list(nlp.pipe(dev_texts))
@@ -106,10 +122,15 @@ def train(train_texts, train_labels, dev_texts, dev_labels,
         train_docs, train_labels = get_labelled_sentences(train_docs, train_labels)
         dev_docs, dev_labels = get_labelled_sentences(dev_docs, dev_labels)

-    train_X = get_features(train_docs, lstm_shape['max_length'])
-    dev_X = get_features(dev_docs, lstm_shape['max_length'])
-    model.fit(train_X, train_labels, validation_data=(dev_X, dev_labels),
-              epochs=nb_epoch, batch_size=batch_size)
+    train_X = get_features(train_docs, lstm_shape["max_length"])
+    dev_X = get_features(dev_docs, lstm_shape["max_length"])
+    model.fit(
+        train_X,
+        train_labels,
+        validation_data=(dev_X, dev_labels),
+        epochs=nb_epoch,
+        batch_size=batch_size,
+    )
     return model

@@ -119,19 +140,28 @@ def compile_lstm(embeddings, shape, settings):
         Embedding(
             embeddings.shape[0],
             embeddings.shape[1],
-            input_length=shape['max_length'],
+            input_length=shape["max_length"],
             trainable=False,
             weights=[embeddings],
-            mask_zero=True
+            mask_zero=True,
         )
     )
-    model.add(TimeDistributed(Dense(shape['nr_hidden'], use_bias=False)))
-    model.add(Bidirectional(LSTM(shape['nr_hidden'],
-                                 recurrent_dropout=settings['dropout'],
-                                 dropout=settings['dropout'])))
-    model.add(Dense(shape['nr_class'], activation='sigmoid'))
-    model.compile(optimizer=Adam(lr=settings['lr']), loss='binary_crossentropy',
-                  metrics=['accuracy'])
+    model.add(TimeDistributed(Dense(shape["nr_hidden"], use_bias=False)))
+    model.add(
+        Bidirectional(
+            LSTM(
+                shape["nr_hidden"],
+                recurrent_dropout=settings["dropout"],
+                dropout=settings["dropout"],
+            )
+        )
+    )
+    model.add(Dense(shape["nr_class"], activation="sigmoid"))
+    model.compile(
+        optimizer=Adam(lr=settings["lr"]),
+        loss="binary_crossentropy",
+        metrics=["accuracy"],
+    )
     return model

@@ -140,8 +170,8 @@ def get_embeddings(vocab):


 def evaluate(model_dir, texts, labels, max_length=100):
-    nlp = spacy.load('en_vectors_web_lg')
-    nlp.add_pipe(nlp.create_pipe('sentencizer'))
+    nlp = spacy.load("en_vectors_web_lg")
+    nlp.add_pipe(nlp.create_pipe("sentencizer"))
     nlp.add_pipe(SentimentAnalyser.load(model_dir, nlp, max_length=max_length))

     correct = 0
@@ -154,7 +184,7 @@ def evaluate(model_dir, texts, labels, max_length=100):

 def read_data(data_dir, limit=0):
     examples = []
-    for subdir, label in (('pos', 1), ('neg', 0)):
+    for subdir, label in (("pos", 1), ("neg", 0)):
         for filename in (data_dir / subdir).iterdir():
             with filename.open() as file_:
                 text = file_.read()
@@ -162,7 +192,7 @@ def read_data(data_dir, limit=0):
     random.shuffle(examples)
     if limit >= 1:
         examples = examples[:limit]
-    return zip(*examples) # Unzips into two lists
+    return zip(*examples)  # Unzips into two lists


 @plac.annotations(
@@ -176,13 +206,21 @@ def read_data(data_dir, limit=0):
     learn_rate=("Learn rate", "option", "e", float),
     nb_epoch=("Number of training epochs", "option", "i", int),
     batch_size=("Size of minibatches for training LSTM", "option", "b", int),
-    nr_examples=("Limit to N examples", "option", "n", int)
+    nr_examples=("Limit to N examples", "option", "n", int),
 )
-def main(model_dir=None, train_dir=None, dev_dir=None,
-         is_runtime=False,
-         nr_hidden=64, max_length=100, # Shape
-         dropout=0.5, learn_rate=0.001, # General NN config
-         nb_epoch=5, batch_size=256, nr_examples=-1): # Training params
+def main(
+    model_dir=None,
+    train_dir=None,
+    dev_dir=None,
+    is_runtime=False,
+    nr_hidden=64,
+    max_length=100,  # Shape
+    dropout=0.5,
+    learn_rate=0.001,  # General NN config
+    nb_epoch=5,
+    batch_size=256,
+    nr_examples=-1,
+):  # Training params
     if model_dir is not None:
         model_dir = pathlib.Path(model_dir)
     if train_dir is None or dev_dir is None:
@@ -204,20 +242,26 @@ def main(model_dir=None, train_dir=None, dev_dir=None,
         dev_texts, dev_labels = zip(*imdb_data[1])
     else:
         dev_texts, dev_labels = read_data(dev_dir, imdb_data, limit=nr_examples)
-    train_labels = numpy.asarray(train_labels, dtype='int32')
-    dev_labels = numpy.asarray(dev_labels, dtype='int32')
-    lstm = train(train_texts, train_labels, dev_texts, dev_labels,
-                 {'nr_hidden': nr_hidden, 'max_length': max_length, 'nr_class': 1},
-                 {'dropout': dropout, 'lr': learn_rate},
-                 {},
-                 nb_epoch=nb_epoch, batch_size=batch_size)
+    train_labels = numpy.asarray(train_labels, dtype="int32")
+    dev_labels = numpy.asarray(dev_labels, dtype="int32")
+    lstm = train(
+        train_texts,
+        train_labels,
+        dev_texts,
+        dev_labels,
+        {"nr_hidden": nr_hidden, "max_length": max_length, "nr_class": 1},
+        {"dropout": dropout, "lr": learn_rate},
+        {},
+        nb_epoch=nb_epoch,
+        batch_size=batch_size,
+    )
     weights = lstm.get_weights()
     if model_dir is not None:
-        with (model_dir / 'model').open('wb') as file_:
+        with (model_dir / "model").open("wb") as file_:
             pickle.dump(weights[1:], file_)
-        with (model_dir / 'config.json').open('w') as file_:
+        with (model_dir / "config.json").open("w") as file_:
             file_.write(lstm.to_json())


-if __name__ == '__main__':
+if __name__ == "__main__":
     plac.call(main)

@@ -15,14 +15,15 @@ import spacy


 TEXTS = [
-    'Net income was $9.4 million compared to the prior year of $2.7 million.',
-    'Revenue exceeded twelve billion dollars, with a loss of $1b.',
+    "Net income was $9.4 million compared to the prior year of $2.7 million.",
+    "Revenue exceeded twelve billion dollars, with a loss of $1b.",
 ]


 @plac.annotations(
-    model=("Model to load (needs parser and NER)", "positional", None, str))
-def main(model='en_core_web_sm'):
+    model=("Model to load (needs parser and NER)", "positional", None, str)
+)
+def main(model="en_core_web_sm"):
     nlp = spacy.load(model)
     print("Loaded model '%s'" % model)
     print("Processing %d texts" % len(TEXTS))
@@ -31,7 +32,7 @@ def main(model='en_core_web_sm'):
         doc = nlp(text)
         relations = extract_currency_relations(doc)
         for r1, r2 in relations:
-            print('{:<10}\t{}\t{}'.format(r1.text, r2.ent_type_, r2.text))
+            print("{:<10}\t{}\t{}".format(r1.text, r2.ent_type_, r2.text))


 def extract_currency_relations(doc):
@@ -41,18 +42,18 @@ def extract_currency_relations(doc):
         span.merge()

     relations = []
-    for money in filter(lambda w: w.ent_type_ == 'MONEY', doc):
-        if money.dep_ in ('attr', 'dobj'):
-            subject = [w for w in money.head.lefts if w.dep_ == 'nsubj']
+    for money in filter(lambda w: w.ent_type_ == "MONEY", doc):
+        if money.dep_ in ("attr", "dobj"):
+            subject = [w for w in money.head.lefts if w.dep_ == "nsubj"]
             if subject:
                 subject = subject[0]
                 relations.append((subject, money))
-        elif money.dep_ == 'pobj' and money.head.dep_ == 'prep':
+        elif money.dep_ == "pobj" and money.head.dep_ == "prep":
             relations.append((money.head.head, money))
     return relations


-if __name__ == '__main__':
+if __name__ == "__main__":
     plac.call(main)

 # Expected output:

@@ -24,37 +24,39 @@ import plac
 import spacy


-@plac.annotations(
-    model=("Model to load", "positional", None, str))
-def main(model='en_core_web_sm'):
+@plac.annotations(model=("Model to load", "positional", None, str))
+def main(model="en_core_web_sm"):
     nlp = spacy.load(model)
     print("Loaded model '%s'" % model)

-    doc = nlp("displaCy uses CSS and JavaScript to show you how computers "
-              "understand language")
+    doc = nlp(
+        "displaCy uses CSS and JavaScript to show you how computers "
+        "understand language"
+    )

     # The easiest way is to find the head of the subtree you want, and then use
     # the `.subtree`, `.children`, `.lefts` and `.rights` iterators. `.subtree`
     # is the one that does what you're asking for most directly:
     for word in doc:
-        if word.dep_ in ('xcomp', 'ccomp'):
-            print(''.join(w.text_with_ws for w in word.subtree))
+        if word.dep_ in ("xcomp", "ccomp"):
+            print("".join(w.text_with_ws for w in word.subtree))

     # It'd probably be better for `word.subtree` to return a `Span` object
     # instead of a generator over the tokens. If you want the `Span` you can
     # get it via the `.right_edge` and `.left_edge` properties. The `Span`
     # object is nice because you can easily get a vector, merge it, etc.
     for word in doc:
-        if word.dep_ in ('xcomp', 'ccomp'):
+        if word.dep_ in ("xcomp", "ccomp"):
             subtree_span = doc[word.left_edge.i : word.right_edge.i + 1]
-            print(subtree_span.text, '|', subtree_span.root.text)
+            print(subtree_span.text, "|", subtree_span.root.text)

     # You might also want to select a head, and then select a start and end
     # position by walking along its children. You could then take the
     # `.left_edge` and `.right_edge` of those tokens, and use it to calculate
     # a span.

-if __name__ == '__main__':
+
+if __name__ == "__main__":
     plac.call(main)

 # Expected output:

@ -32,7 +32,7 @@ def set_keras_backend(backend):
|
|||
K.set_session(K.tf.Session(config=cfg))
|
||||
K.clear_session()
|
||||
|
||||
set_keras_backend("tensorflow")
|
||||
set_keras_backend("tensorflow")
|
||||
|
||||
|
||||
def train(train_loc, dev_loc, shape, settings):
|
||||
|
@ -42,7 +42,7 @@ def train(train_loc, dev_loc, shape, settings):
|
|||
print("Loading spaCy")
|
||||
nlp = spacy.load('en_vectors_web_lg')
|
||||
assert nlp.path is not None
|
||||
|
||||
|
||||
print("Processing texts...")
|
||||
train_X = create_dataset(nlp, train_texts1, train_texts2, 100, shape[0])
|
||||
dev_X = create_dataset(nlp, dev_texts1, dev_texts2, 100, shape[0])
|
||||
|
@ -57,7 +57,7 @@ def train(train_loc, dev_loc, shape, settings):
|
|||
validation_data = (dev_X, dev_labels),
|
||||
epochs = settings['nr_epoch'],
|
||||
batch_size = settings['batch_size'])
|
||||
|
||||
|
||||
if not (nlp.path / 'similarity').exists():
|
||||
(nlp.path / 'similarity').mkdir()
|
||||
print("Saving to", nlp.path / 'similarity')
|
||||
|
@ -74,7 +74,7 @@ def evaluate(dev_loc, shape):
|
|||
dev_texts1, dev_texts2, dev_labels = read_snli(dev_loc)
|
||||
nlp = spacy.load('en_vectors_web_lg')
|
||||
nlp.add_pipe(KerasSimilarityShim.load(nlp.path / 'similarity', nlp, shape[0]))
|
||||
|
||||
|
||||
total = 0.
|
||||
correct = 0.
|
||||
for text1, text2, label in zip(dev_texts1, dev_texts2, dev_labels):
|
||||
|
@ -119,33 +119,33 @@ def read_snli(path):
|
|||
|
||||
def create_dataset(nlp, texts, hypotheses, num_unk, max_length):
|
||||
sents = texts + hypotheses
|
||||
|
||||
|
||||
sents_as_ids = []
|
||||
for sent in sents:
|
||||
doc = nlp(sent)
|
||||
word_ids = []
|
||||
|
||||
|
||||
for i, token in enumerate(doc):
|
||||
# skip odd spaces from tokenizer
|
||||
if token.has_vector and token.vector_norm == 0:
|
||||
continue
|
||||
|
||||
|
||||
if i > max_length:
|
||||
break
|
||||
|
||||
|
||||
if token.has_vector:
|
||||
word_ids.append(token.rank + num_unk + 1)
|
||||
else:
|
||||
# if we don't have a vector, pick an OOV entry
|
||||
word_ids.append(token.rank % num_unk + 1)
|
||||
|
||||
word_ids.append(token.rank % num_unk + 1)
|
||||
|
||||
# there must be a simpler way of generating padded arrays from lists...
|
||||
word_id_vec = np.zeros((max_length), dtype='int')
|
||||
clipped_len = min(max_length, len(word_ids))
|
||||
word_id_vec[:clipped_len] = word_ids[:clipped_len]
|
||||
sents_as_ids.append(word_id_vec)
|
||||
|
||||
|
||||
|
||||
|
||||
return [np.array(sents_as_ids[:len(texts)]), np.array(sents_as_ids[len(texts):])]
|
||||
|
||||
|
||||
|
@ -169,7 +169,7 @@ def main(mode, train_loc, dev_loc,
|
|||
batch_size = 1024,
|
||||
nr_epoch = 10,
|
||||
entail_dir="both"):
|
||||
|
||||
|
||||
shape = (max_length, nr_hidden, 3)
|
||||
settings = {
|
||||
'lr': learn_rate,
|
||||
|
|
|
@ -10,19 +10,19 @@ def build_model(vectors, shape, settings):
|
|||
|
||||
input1 = layers.Input(shape=(max_length,), dtype='int32', name='words1')
|
||||
input2 = layers.Input(shape=(max_length,), dtype='int32', name='words2')
|
||||
|
||||
|
||||
# embeddings (projected)
|
||||
embed = create_embedding(vectors, max_length, nr_hidden)
|
||||
|
||||
|
||||
a = embed(input1)
|
||||
b = embed(input2)
|
||||
|
||||
|
||||
# step 1: attend
|
||||
F = create_feedforward(nr_hidden)
|
||||
att_weights = layers.dot([F(a), F(b)], axes=-1)
|
||||
|
||||
|
||||
G = create_feedforward(nr_hidden)
|
||||
|
||||
|
||||
if settings['entail_dir'] == 'both':
|
||||
norm_weights_a = layers.Lambda(normalizer(1))(att_weights)
|
||||
norm_weights_b = layers.Lambda(normalizer(2))(att_weights)
|
||||
|
@ -55,18 +55,18 @@ def build_model(vectors, shape, settings):
|
|||
v1 = layers.TimeDistributed(G)(comp1)
|
||||
v1_sum = layers.Lambda(sum_word)(v1)
|
||||
concat = v1_sum
|
||||
|
||||
|
||||
H = create_feedforward(nr_hidden)
|
||||
out = H(concat)
|
||||
out = layers.Dense(nr_class, activation='softmax')(out)
|
||||
|
||||
|
||||
model = Model([input1, input2], out)
|
||||
|
||||
|
||||
model.compile(
|
||||
optimizer=optimizers.Adam(lr=settings['lr']),
|
||||
loss='categorical_crossentropy',
|
||||
metrics=['accuracy'])
|
||||
|
||||
|
||||
return model
|
||||
|
||||
|
||||
|
@ -78,7 +78,7 @@ def create_embedding(vectors, max_length, projected_dim):
|
|||
input_length=max_length,
|
||||
weights=[vectors],
|
||||
trainable=False),
|
||||
|
||||
|
||||
layers.TimeDistributed(
|
||||
layers.Dense(projected_dim,
|
||||
activation=None,
|
||||
|
|
|
@ -19,39 +19,40 @@ from pathlib import Path
|
|||
|
||||
|
||||
@plac.annotations(
|
||||
output_dir=("Output directory for saved HTML", "positional", None, Path))
|
||||
output_dir=("Output directory for saved HTML", "positional", None, Path)
|
||||
)
|
||||
def main(output_dir=None):
|
||||
nlp = English() # start off with blank English class
|
||||
|
||||
Doc.set_extension('overlap', method=overlap_tokens)
|
||||
doc1 = nlp(u"Peach emoji is where it has always been.")
|
||||
doc2 = nlp(u"Peach is the superior emoji.")
|
||||
Doc.set_extension("overlap", method=overlap_tokens)
|
||||
doc1 = nlp("Peach emoji is where it has always been.")
|
||||
doc2 = nlp("Peach is the superior emoji.")
|
||||
print("Text 1:", doc1.text)
|
||||
print("Text 2:", doc2.text)
|
||||
print("Overlapping tokens:", doc1._.overlap(doc2))
|
||||
|
||||
Doc.set_extension('to_html', method=to_html)
|
||||
doc = nlp(u"This is a sentence about Apple.")
|
||||
Doc.set_extension("to_html", method=to_html)
|
||||
doc = nlp("This is a sentence about Apple.")
|
||||
# add entity manually for demo purposes, to make it work without a model
|
||||
doc.ents = [Span(doc, 5, 6, label=nlp.vocab.strings['ORG'])]
|
||||
doc.ents = [Span(doc, 5, 6, label=nlp.vocab.strings["ORG"])]
|
||||
print("Text:", doc.text)
|
||||
doc._.to_html(output=output_dir, style='ent')
|
||||
doc._.to_html(output=output_dir, style="ent")
|
||||
|
||||
|
||||
def to_html(doc, output='/tmp', style='dep'):
|
||||
def to_html(doc, output="/tmp", style="dep"):
|
||||
"""Doc method extension for saving the current state as a displaCy
|
||||
visualization.
|
||||
"""
|
||||
# generate filename from first six non-punct tokens
|
||||
file_name = '-'.join([w.text for w in doc[:6] if not w.is_punct]) + '.html'
|
||||
file_name = "-".join([w.text for w in doc[:6] if not w.is_punct]) + ".html"
|
||||
html = displacy.render(doc, style=style, page=True) # render markup
|
||||
if output is not None:
|
||||
output_path = Path(output)
|
||||
if not output_path.exists():
|
||||
output_path.mkdir()
|
||||
output_file = Path(output) / file_name
|
||||
output_file.open('w', encoding='utf-8').write(html) # save to file
|
||||
print('Saved HTML to {}'.format(output_file))
|
||||
output_file.open("w", encoding="utf-8").write(html) # save to file
|
||||
print("Saved HTML to {}".format(output_file))
|
||||
else:
|
||||
print(html)
|
||||
|
||||
|
@ -67,7 +68,7 @@ def overlap_tokens(doc, other_doc):
|
|||
return overlap
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
plac.call(main)
|
||||
|
||||
# Expected output:
|
||||
|
|
|
@ -25,15 +25,19 @@ def main():
|
|||
# and no model or pre-defined pipeline loaded.
|
||||
nlp = English()
|
||||
rest_countries = RESTCountriesComponent(nlp) # initialise component
|
||||
nlp.add_pipe(rest_countries) # add it to the pipeline
|
||||
doc = nlp(u"Some text about Colombia and the Czech Republic")
|
||||
print('Pipeline', nlp.pipe_names) # pipeline contains component name
|
||||
print('Doc has countries', doc._.has_country) # Doc contains countries
|
||||
nlp.add_pipe(rest_countries) # add it to the pipeline
|
||||
doc = nlp("Some text about Colombia and the Czech Republic")
|
||||
print("Pipeline", nlp.pipe_names) # pipeline contains component name
|
||||
print("Doc has countries", doc._.has_country) # Doc contains countries
|
||||
for token in doc:
|
||||
if token._.is_country:
|
||||
print(token.text, token._.country_capital, token._.country_latlng,
|
||||
token._.country_flag) # country data
|
||||
print('Entities', [(e.text, e.label_) for e in doc.ents]) # entities
|
||||
print(
|
||||
token.text,
|
||||
token._.country_capital,
|
||||
token._.country_latlng,
|
||||
token._.country_flag,
|
||||
) # country data
|
||||
print("Entities", [(e.text, e.label_) for e in doc.ents]) # entities
|
||||
|
||||
|
||||
class RESTCountriesComponent(object):
|
||||
|
@ -41,42 +45,42 @@ class RESTCountriesComponent(object):
|
|||
the REST Countries API, merges country names into one token, assigns entity
|
||||
labels and sets attributes on country tokens.
|
||||
"""
|
||||
name = 'rest_countries' # component name, will show up in the pipeline
|
||||
|
||||
def __init__(self, nlp, label='GPE'):
|
||||
name = "rest_countries" # component name, will show up in the pipeline
|
||||
|
||||
def __init__(self, nlp, label="GPE"):
|
||||
"""Initialise the pipeline component. The shared nlp instance is used
|
||||
to initialise the matcher with the shared vocab, get the label ID and
|
||||
generate Doc objects as phrase match patterns.
|
||||
"""
|
||||
# Make request once on initialisation and store the data
|
||||
r = requests.get('https://restcountries.eu/rest/v2/all')
|
||||
r = requests.get("https://restcountries.eu/rest/v2/all")
|
||||
r.raise_for_status() # make sure requests raises an error if it fails
|
||||
countries = r.json()
|
||||
|
||||
# Convert API response to dict keyed by country name for easy lookup
|
||||
# This could also be extended using the alternative and foreign language
|
||||
# names provided by the API
|
||||
self.countries = {c['name']: c for c in countries}
|
||||
self.countries = {c["name"]: c for c in countries}
|
||||
self.label = nlp.vocab.strings[label] # get entity label ID
|
||||
|
||||
# Set up the PhraseMatcher with Doc patterns for each country name
|
||||
patterns = [nlp(c) for c in self.countries.keys()]
|
||||
self.matcher = PhraseMatcher(nlp.vocab)
|
||||
self.matcher.add('COUNTRIES', None, *patterns)
|
||||
self.matcher.add("COUNTRIES", None, *patterns)
|
||||
|
||||
# Register attribute on the Token. We'll be overwriting this based on
|
||||
# the matches, so we're only setting a default value, not a getter.
|
||||
# If no default value is set, it defaults to None.
|
||||
Token.set_extension('is_country', default=False)
|
||||
Token.set_extension('country_capital', default=False)
|
||||
Token.set_extension('country_latlng', default=False)
|
||||
Token.set_extension('country_flag', default=False)
|
||||
Token.set_extension("is_country", default=False)
|
||||
Token.set_extension("country_capital", default=False)
|
||||
Token.set_extension("country_latlng", default=False)
|
||||
Token.set_extension("country_flag", default=False)
|
||||
|
||||
# Register attributes on Doc and Span via a getter that checks if one of
|
||||
# the contained tokens is set to is_country == True.
|
||||
Doc.set_extension('has_country', getter=self.has_country)
|
||||
Span.set_extension('has_country', getter=self.has_country)
|
||||
|
||||
Doc.set_extension("has_country", getter=self.has_country)
|
||||
Span.set_extension("has_country", getter=self.has_country)
|
||||
|
||||
def __call__(self, doc):
|
||||
"""Apply the pipeline component on a Doc object and modify it if matches
|
||||
|
@ -93,10 +97,10 @@ class RESTCountriesComponent(object):
|
|||
# Can be extended with other data returned by the API, like
|
||||
# currencies, country code, flag, calling code etc.
|
||||
for token in entity:
|
||||
token._.set('is_country', True)
|
||||
token._.set('country_capital', self.countries[entity.text]['capital'])
|
||||
token._.set('country_latlng', self.countries[entity.text]['latlng'])
|
||||
token._.set('country_flag', self.countries[entity.text]['flag'])
|
||||
token._.set("is_country", True)
|
||||
token._.set("country_capital", self.countries[entity.text]["capital"])
|
||||
token._.set("country_latlng", self.countries[entity.text]["latlng"])
|
||||
token._.set("country_flag", self.countries[entity.text]["flag"])
|
||||
# Overwrite doc.ents and add entity – be careful not to replace!
|
||||
doc.ents = list(doc.ents) + [entity]
|
||||
for span in spans:
|
||||
|
@ -111,10 +115,10 @@ class RESTCountriesComponent(object):
|
|||
is a country. Since the getter is only called when we access the
|
||||
attribute, we can refer to the Token's 'is_country' attribute here,
|
||||
which is already set in the processing step."""
|
||||
return any([t._.get('is_country') for t in tokens])
|
||||
return any([t._.get("is_country") for t in tokens])
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
plac.call(main)
|
||||
|
||||
# Expected output:
|
||||
|
|
|
@ -20,23 +20,24 @@ from spacy.tokens import Doc, Span, Token
|
|||
|
||||
@plac.annotations(
|
||||
text=("Text to process", "positional", None, str),
|
||||
companies=("Names of technology companies", "positional", None, str))
|
||||
companies=("Names of technology companies", "positional", None, str),
|
||||
)
|
||||
def main(text="Alphabet Inc. is the company behind Google.", *companies):
|
||||
# For simplicity, we start off with only the blank English Language class
|
||||
# and no model or pre-defined pipeline loaded.
|
||||
nlp = English()
|
||||
if not companies: # set default companies if none are set via args
|
||||
companies = ['Alphabet Inc.', 'Google', 'Netflix', 'Apple'] # etc.
|
||||
companies = ["Alphabet Inc.", "Google", "Netflix", "Apple"] # etc.
|
||||
component = TechCompanyRecognizer(nlp, companies) # initialise component
|
||||
nlp.add_pipe(component, last=True) # add last to the pipeline
|
||||
|
||||
doc = nlp(text)
|
||||
print('Pipeline', nlp.pipe_names) # pipeline contains component name
|
||||
print('Tokens', [t.text for t in doc]) # company names from the list are merged
|
||||
print('Doc has_tech_org', doc._.has_tech_org) # Doc contains tech orgs
|
||||
print('Token 0 is_tech_org', doc[0]._.is_tech_org) # "Alphabet Inc." is a tech org
|
||||
print('Token 1 is_tech_org', doc[1]._.is_tech_org) # "is" is not
|
||||
print('Entities', [(e.text, e.label_) for e in doc.ents]) # all orgs are entities
|
||||
print("Pipeline", nlp.pipe_names) # pipeline contains component name
|
||||
print("Tokens", [t.text for t in doc]) # company names from the list are merged
|
||||
print("Doc has_tech_org", doc._.has_tech_org) # Doc contains tech orgs
|
||||
print("Token 0 is_tech_org", doc[0]._.is_tech_org) # "Alphabet Inc." is a tech org
|
||||
print("Token 1 is_tech_org", doc[1]._.is_tech_org) # "is" is not
|
||||
print("Entities", [(e.text, e.label_) for e in doc.ents]) # all orgs are entities
|
||||
|
||||
|
||||
class TechCompanyRecognizer(object):
|
||||
|
@ -45,9 +46,10 @@ class TechCompanyRecognizer(object):
|
|||
labelled as ORG and their spans are merged into one token. Additionally,
|
||||
._.has_tech_org and ._.is_tech_org is set on the Doc/Span and Token
|
||||
respectively."""
|
||||
name = 'tech_companies' # component name, will show up in the pipeline
|
||||
|
||||
def __init__(self, nlp, companies=tuple(), label='ORG'):
|
||||
name = "tech_companies" # component name, will show up in the pipeline
|
||||
|
||||
def __init__(self, nlp, companies=tuple(), label="ORG"):
|
||||
"""Initialise the pipeline component. The shared nlp instance is used
|
||||
to initialise the matcher with the shared vocab, get the label ID and
|
||||
generate Doc objects as phrase match patterns.
|
||||
|
@ -58,16 +60,16 @@ class TechCompanyRecognizer(object):
|
|||
# so even if the list of companies is long, it's very efficient
|
||||
patterns = [nlp(org) for org in companies]
|
||||
self.matcher = PhraseMatcher(nlp.vocab)
|
||||
self.matcher.add('TECH_ORGS', None, *patterns)
|
||||
self.matcher.add("TECH_ORGS", None, *patterns)
|
||||
|
||||
# Register attribute on the Token. We'll be overwriting this based on
|
||||
# the matches, so we're only setting a default value, not a getter.
|
||||
Token.set_extension('is_tech_org', default=False)
|
||||
Token.set_extension("is_tech_org", default=False)
|
||||
|
||||
# Register attributes on Doc and Span via a getter that checks if one of
|
||||
# the contained tokens is set to is_tech_org == True.
|
||||
Doc.set_extension('has_tech_org', getter=self.has_tech_org)
|
||||
Span.set_extension('has_tech_org', getter=self.has_tech_org)
|
||||
Doc.set_extension("has_tech_org", getter=self.has_tech_org)
|
||||
Span.set_extension("has_tech_org", getter=self.has_tech_org)
|
||||
|
||||
def __call__(self, doc):
|
||||
"""Apply the pipeline component on a Doc object and modify it if matches
|
||||
|
@ -82,7 +84,7 @@ class TechCompanyRecognizer(object):
|
|||
spans.append(entity)
|
||||
# Set custom attribute on each token of the entity
|
||||
for token in entity:
|
||||
token._.set('is_tech_org', True)
|
||||
token._.set("is_tech_org", True)
|
||||
# Overwrite doc.ents and add entity – be careful not to replace!
|
||||
doc.ents = list(doc.ents) + [entity]
|
||||
for span in spans:
|
||||
|
@ -97,10 +99,10 @@ class TechCompanyRecognizer(object):
|
|||
is a tech org. Since the getter is only called when we access the
|
||||
attribute, we can refer to the Token's 'is_tech_org' attribute here,
|
||||
which is already set in the processing step."""
|
||||
return any([t._.get('is_tech_org') for t in tokens])
|
||||
return any([t._.get("is_tech_org") for t in tokens])
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
plac.call(main)
|
||||
|
||||
# Expected output:
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
'''Example of adding a pipeline component to prohibit sentence boundaries
|
||||
"""Example of adding a pipeline component to prohibit sentence boundaries
|
||||
before certain tokens.
|
||||
|
||||
What we do is write to the token.is_sent_start attribute, which
|
||||
|
@ -10,16 +10,18 @@ should also improve the parse quality.
|
|||
The specific example here is drawn from https://github.com/explosion/spaCy/issues/2627
|
||||
Other versions of the model may not make the original mistake, so the specific
|
||||
example might not be apt for future versions.
|
||||
'''
|
||||
"""
|
||||
import plac
|
||||
import spacy
|
||||
|
||||
|
||||
def prevent_sentence_boundaries(doc):
|
||||
for token in doc:
|
||||
if not can_be_sentence_start(token):
|
||||
token.is_sent_start = False
|
||||
return doc
|
||||
|
||||
|
||||
def can_be_sentence_start(token):
|
||||
if token.i == 0:
|
||||
return True
|
||||
|
@ -32,17 +34,18 @@ def can_be_sentence_start(token):
|
|||
else:
|
||||
return False
|
||||
|
||||
|
||||
def main():
|
||||
nlp = spacy.load('en_core_web_lg')
|
||||
nlp = spacy.load("en_core_web_lg")
|
||||
raw_text = "Been here and I'm loving it."
|
||||
doc = nlp(raw_text)
|
||||
sentences = [sent.string.strip() for sent in doc.sents]
|
||||
print(sentences)
|
||||
nlp.add_pipe(prevent_sentence_boundaries, before='parser')
|
||||
nlp.add_pipe(prevent_sentence_boundaries, before="parser")
|
||||
doc = nlp(raw_text)
|
||||
sentences = [sent.string.strip() for sent in doc.sents]
|
||||
print(sentences)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
plac.call(main)
|
||||
|
|
|
@ -1,10 +1,11 @@
|
|||
'''Demonstrate adding a rule-based component that forces some tokens to not
|
||||
"""Demonstrate adding a rule-based component that forces some tokens to not
|
||||
be entities, before the NER tagger is applied. This is used to hotfix the issue
|
||||
in https://github.com/explosion/spaCy/issues/2870 , present as of spaCy v2.0.16.
|
||||
'''
|
||||
"""
|
||||
import spacy
|
||||
from spacy.attrs import ENT_IOB
|
||||
|
||||
|
||||
def fix_space_tags(doc):
|
||||
ent_iobs = doc.to_array([ENT_IOB])
|
||||
for i, token in enumerate(doc):
|
||||
|
@ -14,14 +15,16 @@ def fix_space_tags(doc):
|
|||
doc.from_array([ENT_IOB], ent_iobs.reshape((len(doc), 1)))
|
||||
return doc
|
||||
|
||||
def main():
|
||||
nlp = spacy.load('en_core_web_sm')
|
||||
text = u'''This is some crazy test where I dont need an Apple Watch to make things bug'''
|
||||
doc = nlp(text)
|
||||
print('Before', doc.ents)
|
||||
nlp.add_pipe(fix_space_tags, name='fix-ner', before='ner')
|
||||
doc = nlp(text)
|
||||
print('After', doc.ents)
|
||||
|
||||
if __name__ == '__main__':
|
||||
def main():
|
||||
nlp = spacy.load("en_core_web_sm")
|
||||
text = u"""This is some crazy test where I dont need an Apple Watch to make things bug"""
|
||||
doc = nlp(text)
|
||||
print("Before", doc.ents)
|
||||
nlp.add_pipe(fix_space_tags, name="fix-ner", before="ner")
|
||||
doc = nlp(text)
|
||||
print("After", doc.ents)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
|
@ -9,6 +9,7 @@ built-in dataset loader.
|
|||
Compatible with: spaCy v2.0.0+
|
||||
"""
|
||||
from __future__ import print_function, unicode_literals
|
||||
|
||||
from toolz import partition_all
|
||||
from pathlib import Path
|
||||
from joblib import Parallel, delayed
|
||||
|
@ -22,9 +23,9 @@ import spacy
|
|||
model=("Model name (needs tagger)", "positional", None, str),
|
||||
n_jobs=("Number of workers", "option", "n", int),
|
||||
batch_size=("Batch-size for each process", "option", "b", int),
|
||||
limit=("Limit of entries from the dataset", "option", "l", int))
|
||||
def main(output_dir, model='en_core_web_sm', n_jobs=4, batch_size=1000,
|
||||
limit=10000):
|
||||
limit=("Limit of entries from the dataset", "option", "l", int),
|
||||
)
|
||||
def main(output_dir, model="en_core_web_sm", n_jobs=4, batch_size=1000, limit=10000):
|
||||
nlp = spacy.load(model) # load spaCy model
|
||||
print("Loaded model '%s'" % model)
|
||||
if not output_dir.exists():
|
||||
|
@ -37,42 +38,44 @@ def main(output_dir, model='en_core_web_sm', n_jobs=4, batch_size=1000,
|
|||
partitions = partition_all(batch_size, texts)
|
||||
executor = Parallel(n_jobs=n_jobs)
|
||||
do = delayed(transform_texts)
|
||||
tasks = (do(nlp, i, batch, output_dir)
|
||||
for i, batch in enumerate(partitions))
|
||||
tasks = (do(nlp, i, batch, output_dir) for i, batch in enumerate(partitions))
|
||||
executor(tasks)
|
||||
|
||||
|
||||
def transform_texts(nlp, batch_id, texts, output_dir):
|
||||
print(nlp.pipe_names)
|
||||
out_path = Path(output_dir) / ('%d.txt' % batch_id)
|
||||
out_path = Path(output_dir) / ("%d.txt" % batch_id)
|
||||
if out_path.exists(): # return None in case same batch is called again
|
||||
return None
|
||||
print('Processing batch', batch_id)
|
||||
with out_path.open('w', encoding='utf8') as f:
|
||||
print("Processing batch", batch_id)
|
||||
with out_path.open("w", encoding="utf8") as f:
|
||||
for doc in nlp.pipe(texts):
|
||||
f.write(' '.join(represent_word(w) for w in doc if not w.is_space))
|
||||
f.write('\n')
|
||||
print('Saved {} texts to {}.txt'.format(len(texts), batch_id))
|
||||
f.write(" ".join(represent_word(w) for w in doc if not w.is_space))
|
||||
f.write("\n")
|
||||
print("Saved {} texts to {}.txt".format(len(texts), batch_id))
|
||||
|
||||
|
||||
def represent_word(word):
|
||||
text = word.text
|
||||
# True-case, i.e. try to normalize sentence-initial capitals.
|
||||
# Only do this if the lower-cased form is more probable.
|
||||
if text.istitle() and is_sent_begin(word) \
|
||||
and word.prob < word.doc.vocab[text.lower()].prob:
|
||||
if (
|
||||
text.istitle()
|
||||
and is_sent_begin(word)
|
||||
and word.prob < word.doc.vocab[text.lower()].prob
|
||||
):
|
||||
text = text.lower()
|
||||
return text + '|' + word.tag_
|
||||
return text + "|" + word.tag_
|
||||
|
||||
|
||||
def is_sent_begin(word):
|
||||
if word.i == 0:
|
||||
return True
|
||||
elif word.i >= 2 and word.nbor(-1).text in ('.', '!', '?', '...'):
|
||||
elif word.i >= 2 and word.nbor(-1).text in (".", "!", "?", "..."):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
plac.call(main)
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
'''Train for CONLL 2017 UD treebank evaluation. Takes .conllu files, writes
|
||||
"""Train for CONLL 2017 UD treebank evaluation. Takes .conllu files, writes
|
||||
.conllu format for development data, allowing the official scorer to be used.
|
||||
'''
|
||||
"""
|
||||
from __future__ import unicode_literals
|
||||
import plac
|
||||
import tqdm
|
||||
|
@ -35,6 +35,7 @@ spacy.lang.ja.Japanese.Defaults.use_janome = False
|
|||
random.seed(0)
|
||||
numpy.random.seed(0)
|
||||
|
||||
|
||||
def minibatch_by_words(items, size=5000):
|
||||
random.shuffle(items)
|
||||
if isinstance(size, int):
|
||||
|
@ -59,21 +60,31 @@ def minibatch_by_words(items, size=5000):
|
|||
else:
|
||||
break
|
||||
|
||||
|
||||
################
|
||||
# Data reading #
|
||||
################
|
||||
|
||||
space_re = re.compile('\s+')
|
||||
def split_text(text):
|
||||
return [space_re.sub(' ', par.strip()) for par in text.split('\n\n')]
|
||||
|
||||
space_re = re.compile("\s+")
|
||||
|
||||
def read_data(nlp, conllu_file, text_file, raw_text=True, oracle_segments=False,
|
||||
max_doc_length=None, limit=None):
|
||||
'''Read the CONLLU format into (Doc, GoldParse) tuples. If raw_text=True,
|
||||
|
||||
def split_text(text):
|
||||
return [space_re.sub(" ", par.strip()) for par in text.split("\n\n")]
|
||||
|
||||
|
||||
def read_data(
|
||||
nlp,
|
||||
conllu_file,
|
||||
text_file,
|
||||
raw_text=True,
|
||||
oracle_segments=False,
|
||||
max_doc_length=None,
|
||||
limit=None,
|
||||
):
|
||||
"""Read the CONLLU format into (Doc, GoldParse) tuples. If raw_text=True,
|
||||
include Doc objects created using nlp.make_doc and then aligned against
|
||||
the gold-standard sequences. If oracle_segments=True, include Doc objects
|
||||
created from the gold-standard segments. At least one must be True.'''
|
||||
created from the gold-standard segments. At least one must be True."""
|
||||
if not raw_text and not oracle_segments:
|
||||
raise ValueError("At least one of raw_text or oracle_segments must be True")
|
||||
paragraphs = split_text(text_file.read())
|
||||
|
@ -87,22 +98,21 @@ def read_data(nlp, conllu_file, text_file, raw_text=True, oracle_segments=False,
|
|||
for cs in cd:
|
||||
sent = defaultdict(list)
|
||||
for id_, word, lemma, pos, tag, morph, head, dep, _, space_after in cs:
|
||||
if '.' in id_:
|
||||
if "." in id_:
|
||||
continue
|
||||
if '-' in id_:
|
||||
if "-" in id_:
|
||||
continue
|
||||
id_ = int(id_)-1
|
||||
head = int(head)-1 if head != '0' else id_
|
||||
sent['words'].append(word)
|
||||
sent['tags'].append(tag)
|
||||
sent['heads'].append(head)
|
||||
sent['deps'].append('ROOT' if dep == 'root' else dep)
|
||||
sent['spaces'].append(space_after == '_')
|
||||
sent['entities'] = ['-'] * len(sent['words'])
|
||||
sent['heads'], sent['deps'] = projectivize(sent['heads'],
|
||||
sent['deps'])
|
||||
id_ = int(id_) - 1
|
||||
head = int(head) - 1 if head != "0" else id_
|
||||
sent["words"].append(word)
|
||||
sent["tags"].append(tag)
|
||||
sent["heads"].append(head)
|
||||
sent["deps"].append("ROOT" if dep == "root" else dep)
|
||||
sent["spaces"].append(space_after == "_")
|
||||
sent["entities"] = ["-"] * len(sent["words"])
|
||||
sent["heads"], sent["deps"] = projectivize(sent["heads"], sent["deps"])
|
||||
if oracle_segments:
|
||||
docs.append(Doc(nlp.vocab, words=sent['words'], spaces=sent['spaces']))
|
||||
docs.append(Doc(nlp.vocab, words=sent["words"], spaces=sent["spaces"]))
|
||||
golds.append(GoldParse(docs[-1], **sent))
|
||||
|
||||
sent_annots.append(sent)
|
||||
|
@ -128,18 +138,18 @@ def read_conllu(file_):
|
|||
sent = []
|
||||
doc = []
|
||||
for line in file_:
|
||||
if line.startswith('# newdoc'):
|
||||
if line.startswith("# newdoc"):
|
||||
if doc:
|
||||
docs.append(doc)
|
||||
doc = []
|
||||
elif line.startswith('#'):
|
||||
elif line.startswith("#"):
|
||||
continue
|
||||
elif not line.strip():
|
||||
if sent:
|
||||
doc.append(sent)
|
||||
sent = []
|
||||
else:
|
||||
sent.append(list(line.strip().split('\t')))
|
||||
sent.append(list(line.strip().split("\t")))
|
||||
if len(sent[-1]) != 10:
|
||||
print(repr(line))
|
||||
raise ValueError
|
||||
|
@ -154,25 +164,29 @@ def _make_gold(nlp, text, sent_annots):
|
|||
# Flatten the conll annotations, and adjust the head indices
|
||||
flat = defaultdict(list)
|
||||
for sent in sent_annots:
|
||||
flat['heads'].extend(len(flat['words'])+head for head in sent['heads'])
|
||||
for field in ['words', 'tags', 'deps', 'entities', 'spaces']:
|
||||
flat["heads"].extend(len(flat["words"]) + head for head in sent["heads"])
|
||||
for field in ["words", "tags", "deps", "entities", "spaces"]:
|
||||
flat[field].extend(sent[field])
|
||||
# Construct text if necessary
|
||||
assert len(flat['words']) == len(flat['spaces'])
|
||||
assert len(flat["words"]) == len(flat["spaces"])
|
||||
if text is None:
|
||||
text = ''.join(word+' '*space for word, space in zip(flat['words'], flat['spaces']))
|
||||
text = "".join(
|
||||
word + " " * space for word, space in zip(flat["words"], flat["spaces"])
|
||||
)
|
||||
doc = nlp.make_doc(text)
|
||||
flat.pop('spaces')
|
||||
flat.pop("spaces")
|
||||
gold = GoldParse(doc, **flat)
|
||||
return doc, gold
|
||||
|
||||
|
||||
#############################
|
||||
# Data transforms for spaCy #
|
||||
#############################
|
||||
|
||||
|
||||
def golds_to_gold_tuples(docs, golds):
|
||||
'''Get out the annoying 'tuples' format used by begin_training, given the
|
||||
GoldParse objects.'''
|
||||
"""Get out the annoying 'tuples' format used by begin_training, given the
|
||||
GoldParse objects."""
|
||||
tuples = []
|
||||
for doc, gold in zip(docs, golds):
|
||||
text = doc.text
|
||||
|
@ -186,15 +200,16 @@ def golds_to_gold_tuples(docs, golds):
|
|||
# Evaluation #
|
||||
##############
|
||||
|
||||
|
||||
def evaluate(nlp, text_loc, gold_loc, sys_loc, limit=None):
|
||||
with text_loc.open('r', encoding='utf8') as text_file:
|
||||
with text_loc.open("r", encoding="utf8") as text_file:
|
||||
texts = split_text(text_file.read())
|
||||
docs = list(nlp.pipe(texts))
|
||||
with sys_loc.open('w', encoding='utf8') as out_file:
|
||||
with sys_loc.open("w", encoding="utf8") as out_file:
|
||||
write_conllu(docs, out_file)
|
||||
with gold_loc.open('r', encoding='utf8') as gold_file:
|
||||
with gold_loc.open("r", encoding="utf8") as gold_file:
|
||||
gold_ud = conll17_ud_eval.load_conllu(gold_file)
|
||||
with sys_loc.open('r', encoding='utf8') as sys_file:
|
||||
with sys_loc.open("r", encoding="utf8") as sys_file:
|
||||
sys_ud = conll17_ud_eval.load_conllu(sys_file)
|
||||
scores = conll17_ud_eval.evaluate(gold_ud, sys_ud)
|
||||
return scores
|
||||
|
@ -202,10 +217,10 @@ def evaluate(nlp, text_loc, gold_loc, sys_loc, limit=None):
|
|||
|
||||
def write_conllu(docs, file_):
|
||||
merger = Matcher(docs[0].vocab)
|
||||
merger.add('SUBTOK', None, [{'DEP': 'subtok', 'op': '+'}])
|
||||
merger.add("SUBTOK", None, [{"DEP": "subtok", "op": "+"}])
|
||||
for i, doc in enumerate(docs):
|
||||
matches = merger(doc)
|
||||
spans = [doc[start:end+1] for _, start, end in matches]
|
||||
spans = [doc[start : end + 1] for _, start, end in matches]
|
||||
offsets = [(span.start_char, span.end_char) for span in spans]
|
||||
for start_char, end_char in offsets:
|
||||
doc.merge(start_char, end_char)
|
||||
|
@ -214,58 +229,73 @@ def write_conllu(docs, file_):
|
|||
file_.write("# sent_id = {i}.{j}\n".format(i=i, j=j))
|
||||
file_.write("# text = {text}\n".format(text=sent.text))
|
||||
for k, token in enumerate(sent):
|
||||
file_.write(token._.get_conllu_lines(k) + '\n')
|
||||
file_.write('\n')
|
||||
file_.write(token._.get_conllu_lines(k) + "\n")
|
||||
file_.write("\n")
|
||||
|
||||
|
||||
def print_progress(itn, losses, ud_scores):
|
||||
fields = {
|
||||
'dep_loss': losses.get('parser', 0.0),
|
||||
'tag_loss': losses.get('tagger', 0.0),
|
||||
'words': ud_scores['Words'].f1 * 100,
|
||||
'sents': ud_scores['Sentences'].f1 * 100,
|
||||
'tags': ud_scores['XPOS'].f1 * 100,
|
||||
'uas': ud_scores['UAS'].f1 * 100,
|
||||
'las': ud_scores['LAS'].f1 * 100,
|
||||
"dep_loss": losses.get("parser", 0.0),
|
||||
"tag_loss": losses.get("tagger", 0.0),
|
||||
"words": ud_scores["Words"].f1 * 100,
|
||||
"sents": ud_scores["Sentences"].f1 * 100,
|
||||
"tags": ud_scores["XPOS"].f1 * 100,
|
||||
"uas": ud_scores["UAS"].f1 * 100,
|
||||
"las": ud_scores["LAS"].f1 * 100,
|
||||
}
|
||||
header = ['Epoch', 'Loss', 'LAS', 'UAS', 'TAG', 'SENT', 'WORD']
|
||||
header = ["Epoch", "Loss", "LAS", "UAS", "TAG", "SENT", "WORD"]
|
||||
if itn == 0:
|
||||
print('\t'.join(header))
|
||||
tpl = '\t'.join((
|
||||
'{:d}',
|
||||
'{dep_loss:.1f}',
|
||||
'{las:.1f}',
|
||||
'{uas:.1f}',
|
||||
'{tags:.1f}',
|
||||
'{sents:.1f}',
|
||||
'{words:.1f}',
|
||||
))
|
||||
print("\t".join(header))
|
||||
tpl = "\t".join(
|
||||
(
|
||||
"{:d}",
|
||||
"{dep_loss:.1f}",
|
||||
"{las:.1f}",
|
||||
"{uas:.1f}",
|
||||
"{tags:.1f}",
|
||||
"{sents:.1f}",
|
||||
"{words:.1f}",
|
||||
)
|
||||
)
|
||||
print(tpl.format(itn, **fields))
|
||||
|
||||
#def get_sent_conllu(sent, sent_id):
|
||||
|
||||
# def get_sent_conllu(sent, sent_id):
|
||||
# lines = ["# sent_id = {sent_id}".format(sent_id=sent_id)]
|
||||
|
||||
|
||||
def get_token_conllu(token, i):
|
||||
if token._.begins_fused:
|
||||
n = 1
|
||||
while token.nbor(n)._.inside_fused:
|
||||
n += 1
|
||||
id_ = '%d-%d' % (i, i+n)
|
||||
lines = [id_, token.text, '_', '_', '_', '_', '_', '_', '_', '_']
|
||||
id_ = "%d-%d" % (i, i + n)
|
||||
lines = [id_, token.text, "_", "_", "_", "_", "_", "_", "_", "_"]
|
||||
else:
|
||||
lines = []
|
||||
if token.head.i == token.i:
|
||||
head = 0
|
||||
else:
|
||||
head = i + (token.head.i - token.i) + 1
|
||||
fields = [str(i+1), token.text, token.lemma_, token.pos_, token.tag_, '_',
|
||||
str(head), token.dep_.lower(), '_', '_']
|
||||
lines.append('\t'.join(fields))
|
||||
return '\n'.join(lines)
|
||||
fields = [
|
||||
str(i + 1),
|
||||
token.text,
|
||||
token.lemma_,
|
||||
token.pos_,
|
||||
token.tag_,
|
||||
"_",
|
||||
str(head),
|
||||
token.dep_.lower(),
|
||||
"_",
|
||||
"_",
|
||||
]
|
||||
lines.append("\t".join(fields))
|
||||
return "\n".join(lines)
|
||||
|
||||
Token.set_extension('get_conllu_lines', method=get_token_conllu)
|
||||
Token.set_extension('begins_fused', default=False)
|
||||
Token.set_extension('inside_fused', default=False)
|
||||
|
||||
Token.set_extension("get_conllu_lines", method=get_token_conllu)
|
||||
Token.set_extension("begins_fused", default=False)
|
||||
Token.set_extension("inside_fused", default=False)
|
||||
|
||||
|
||||
##################
|
||||
|
@ -274,31 +304,32 @@ Token.set_extension('inside_fused', default=False)
|
|||
|
||||
|
||||
def load_nlp(corpus, config):
|
||||
lang = corpus.split('_')[0]
|
||||
lang = corpus.split("_")[0]
|
||||
nlp = spacy.blank(lang)
|
||||
if config.vectors:
|
||||
nlp.vocab.from_disk(config.vectors / 'vocab')
|
||||
nlp.vocab.from_disk(config.vectors / "vocab")
|
||||
return nlp
|
||||
|
||||
|
||||
def initialize_pipeline(nlp, docs, golds, config):
|
||||
nlp.add_pipe(nlp.create_pipe('parser'))
|
||||
nlp.add_pipe(nlp.create_pipe("parser"))
|
||||
if config.multitask_tag:
|
||||
nlp.parser.add_multitask_objective('tag')
|
||||
nlp.parser.add_multitask_objective("tag")
|
||||
if config.multitask_sent:
|
||||
nlp.parser.add_multitask_objective('sent_start')
|
||||
nlp.parser.moves.add_action(2, 'subtok')
|
||||
nlp.add_pipe(nlp.create_pipe('tagger'))
|
||||
nlp.parser.add_multitask_objective("sent_start")
|
||||
nlp.parser.moves.add_action(2, "subtok")
|
||||
nlp.add_pipe(nlp.create_pipe("tagger"))
|
||||
for gold in golds:
|
||||
for tag in gold.tags:
|
||||
if tag is not None:
|
||||
nlp.tagger.add_label(tag)
|
||||
# Replace labels that didn't make the frequency cutoff
|
||||
actions = set(nlp.parser.labels)
|
||||
label_set = set([act.split('-')[1] for act in actions if '-' in act])
|
||||
label_set = set([act.split("-")[1] for act in actions if "-" in act])
|
||||
for gold in golds:
|
||||
for i, label in enumerate(gold.labels):
|
||||
if label is not None and label not in label_set:
|
||||
gold.labels[i] = label.split('||')[0]
|
||||
gold.labels[i] = label.split("||")[0]
|
||||
return nlp.begin_training(lambda: golds_to_gold_tuples(docs, golds))
|
||||
|
||||
|
||||
|
@ -306,6 +337,7 @@ def initialize_pipeline(nlp, docs, golds, config):
|
|||
# Command line helpers #
|
||||
########################
|
||||
|
||||
|
||||
@attr.s
|
||||
class Config(object):
|
||||
vectors = attr.ib(default=None)
|
||||
|
@ -318,7 +350,7 @@ class Config(object):
|
|||
|
||||
@classmethod
|
||||
def load(cls, loc):
|
||||
with Path(loc).open('r', encoding='utf8') as file_:
|
||||
with Path(loc).open("r", encoding="utf8") as file_:
|
||||
cfg = json.load(file_)
|
||||
return cls(**cfg)
|
||||
|
||||
|
@ -331,32 +363,36 @@ class Dataset(object):
|
|||
self.text = None
|
||||
for file_path in self.path.iterdir():
|
||||
name = file_path.parts[-1]
|
||||
if section in name and name.endswith('conllu'):
|
||||
if section in name and name.endswith("conllu"):
|
||||
self.conllu = file_path
|
||||
elif section in name and name.endswith('txt'):
|
||||
elif section in name and name.endswith("txt"):
|
||||
self.text = file_path
|
||||
if self.conllu is None:
|
||||
msg = "Could not find .txt file in {path} for {section}"
|
||||
raise IOError(msg.format(section=section, path=path))
|
||||
if self.text is None:
|
||||
msg = "Could not find .txt file in {path} for {section}"
|
||||
self.lang = self.conllu.parts[-1].split('-')[0].split('_')[0]
|
||||
self.lang = self.conllu.parts[-1].split("-")[0].split("_")[0]
|
||||
|
||||
|
||||
class TreebankPaths(object):
|
||||
def __init__(self, ud_path, treebank, **cfg):
|
||||
self.train = Dataset(ud_path / treebank, 'train')
|
||||
self.dev = Dataset(ud_path / treebank, 'dev')
|
||||
self.train = Dataset(ud_path / treebank, "train")
|
||||
self.dev = Dataset(ud_path / treebank, "dev")
|
||||
self.lang = self.train.lang
|
||||
|
||||
|
||||
@plac.annotations(
|
||||
ud_dir=("Path to Universal Dependencies corpus", "positional", None, Path),
|
||||
corpus=("UD corpus to train and evaluate on, e.g. en, es_ancora, etc",
|
||||
"positional", None, str),
|
||||
corpus=(
|
||||
"UD corpus to train and evaluate on, e.g. en, es_ancora, etc",
|
||||
"positional",
|
||||
None,
|
||||
str,
|
||||
),
|
||||
parses_dir=("Directory to write the development parses", "positional", None, Path),
|
||||
config=("Path to json formatted config file", "positional", None, Config.load),
|
||||
limit=("Size limit", "option", "n", int)
|
||||
limit=("Size limit", "option", "n", int),
|
||||
)
|
||||
def main(ud_dir, parses_dir, config, corpus, limit=0):
|
||||
paths = TreebankPaths(ud_dir, corpus)
|
||||
|
@ -365,8 +401,13 @@ def main(ud_dir, parses_dir, config, corpus, limit=0):
|
|||
print("Train and evaluate", corpus, "using lang", paths.lang)
|
||||
nlp = load_nlp(paths.lang, config)
|
||||
|
||||
docs, golds = read_data(nlp, paths.train.conllu.open(), paths.train.text.open(),
|
||||
max_doc_length=config.max_doc_length, limit=limit)
|
||||
docs, golds = read_data(
|
||||
nlp,
|
||||
paths.train.conllu.open(),
|
||||
paths.train.text.open(),
|
||||
max_doc_length=config.max_doc_length,
|
||||
limit=limit,
|
||||
)
|
||||
|
||||
optimizer = initialize_pipeline(nlp, docs, golds, config)
|
||||
|
||||
|
@ -379,14 +420,19 @@ def main(ud_dir, parses_dir, config, corpus, limit=0):
|
|||
for batch in batches:
|
||||
batch_docs, batch_gold = zip(*batch)
|
||||
pbar.update(sum(len(doc) for doc in batch_docs))
|
||||
nlp.update(batch_docs, batch_gold, sgd=optimizer,
|
||||
drop=config.dropout, losses=losses)
|
||||
|
||||
out_path = parses_dir / corpus / 'epoch-{i}.conllu'.format(i=i)
|
||||
nlp.update(
|
||||
batch_docs,
|
||||
batch_gold,
|
||||
sgd=optimizer,
|
||||
drop=config.dropout,
|
||||
losses=losses,
|
||||
)
|
||||
|
||||
out_path = parses_dir / corpus / "epoch-{i}.conllu".format(i=i)
|
||||
with nlp.use_params(optimizer.averages):
|
||||
scores = evaluate(nlp, paths.dev.text, paths.dev.conllu, out_path)
|
||||
print_progress(i, losses, scores)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
plac.call(main)
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
'''This example shows how to add a multi-task objective that is trained
|
||||
"""This example shows how to add a multi-task objective that is trained
|
||||
alongside the entity recognizer. This is an alternative to adding features
|
||||
to the model.
|
||||
|
||||
|
@ -19,7 +19,7 @@ The specific example here is not necessarily a good idea --- but it shows
|
|||
how an arbitrary objective function for some word can be used.
|
||||
|
||||
Developed and tested for spaCy 2.0.6
|
||||
'''
|
||||
"""
|
||||
import random
|
||||
import plac
|
||||
import spacy
|
||||
|
@ -30,30 +30,29 @@ random.seed(0)
|
|||
|
||||
PWD = os.path.dirname(__file__)
|
||||
|
||||
TRAIN_DATA = list(read_json_file(os.path.join(PWD, 'training-data.json')))
|
||||
|
||||
TRAIN_DATA = list(read_json_file(os.path.join(PWD, "training-data.json")))
|
||||
|
||||
|
||||
def get_position_label(i, words, tags, heads, labels, ents):
|
||||
'''Return labels indicating the position of the word in the document.
|
||||
'''
|
||||
"""Return labels indicating the position of the word in the document.
|
||||
"""
|
||||
if len(words) < 20:
|
||||
return 'short-doc'
|
||||
return "short-doc"
|
||||
elif i == 0:
|
||||
return 'first-word'
|
||||
return "first-word"
|
||||
elif i < 10:
|
||||
return 'early-word'
|
||||
return "early-word"
|
||||
elif i < 20:
|
||||
return 'mid-word'
|
||||
elif i == len(words)-1:
|
||||
return 'last-word'
|
||||
return "mid-word"
|
||||
elif i == len(words) - 1:
|
||||
return "last-word"
|
||||
else:
|
||||
return 'late-word'
|
||||
return "late-word"
|
||||
|
||||
|
||||
def main(n_iter=10):
|
||||
nlp = spacy.blank('en')
|
||||
ner = nlp.create_pipe('ner')
|
||||
nlp = spacy.blank("en")
|
||||
ner = nlp.create_pipe("ner")
|
||||
ner.add_multitask_objective(get_position_label)
|
||||
nlp.add_pipe(ner)
|
||||
|
||||
|
@ -71,15 +70,16 @@ def main(n_iter=10):
|
|||
[gold], # batch of annotations
|
||||
drop=0.2, # dropout - make it harder to memorise data
|
||||
sgd=optimizer, # callable to update weights
|
||||
losses=losses)
|
||||
print(losses.get('nn_labeller', 0.0), losses['ner'])
|
||||
losses=losses,
|
||||
)
|
||||
print(losses.get("nn_labeller", 0.0), losses["ner"])
|
||||
|
||||
# test the trained model
|
||||
for text, _ in TRAIN_DATA:
|
||||
doc = nlp(text)
|
||||
print('Entities', [(ent.text, ent.label_) for ent in doc.ents])
|
||||
print('Tokens', [(t.text, t.ent_type_, t.ent_iob) for t in doc])
|
||||
print("Entities", [(ent.text, ent.label_) for ent in doc.ents])
|
||||
print("Tokens", [(t.text, t.ent_type_, t.ent_iob) for t in doc])
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
plac.call(main)
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
'''This script is experimental.
|
||||
"""This script is experimental.
|
||||
|
||||
Try pre-training the CNN component of the text categorizer using a cheap
|
||||
language modelling-like objective. Specifically, we load pre-trained vectors
|
||||
|
@@ -12,7 +12,7 @@ To evaluate the technique, we're pre-training with the 50k texts from the IMDB
|
|||
corpus, and then training with only 100 labels. Note that it's a bit dirty to
|
||||
pre-train with the development data, but also not *so* terrible: we're not using
|
||||
the development labels, after all --- only the unlabelled text.
|
||||
'''
|
||||
"""
|
||||
import plac
|
||||
import random
|
||||
import spacy
|
||||
|
@@ -46,8 +46,8 @@ def load_textcat_data(limit=0):
|
|||
train_data = train_data[-limit:]
|
||||
texts, labels = zip(*train_data)
|
||||
eval_texts, eval_labels = zip(*eval_data)
|
||||
cats = [{'POSITIVE': bool(y), 'NEGATIVE': not bool(y)} for y in labels]
|
||||
eval_cats = [{'POSITIVE': bool(y), 'NEGATIVE': not bool(y)} for y in eval_labels]
|
||||
cats = [{"POSITIVE": bool(y), "NEGATIVE": not bool(y)} for y in labels]
|
||||
eval_cats = [{"POSITIVE": bool(y), "NEGATIVE": not bool(y)} for y in eval_labels]
|
||||
return (texts, cats), (eval_texts, eval_cats)
|
||||
|
||||
|
||||
|
@@ -57,6 +57,7 @@ def prefer_gpu():
|
|||
return False
|
||||
else:
|
||||
import cupy.random
|
||||
|
||||
cupy.random.seed(0)
|
||||
return True
|
||||
|
||||
|
@@ -68,7 +69,7 @@ def build_textcat_model(tok2vec, nr_class, width):
|
|||
from thinc.misc import Residual, LayerNorm
|
||||
from spacy._ml import logistic, zero_init
|
||||
|
||||
with Model.define_operators({'>>': chain}):
|
||||
with Model.define_operators({">>": chain}):
|
||||
model = (
|
||||
tok2vec
|
||||
>> flatten_add_lengths
|
||||
|
@@ -78,27 +79,35 @@ def build_textcat_model(tok2vec, nr_class, width):
|
|||
model.tok2vec = tok2vec
|
||||
return model
|
||||
|
||||
|
||||
def block_gradients(model):
|
||||
from thinc.api import wrap
|
||||
def forward(X, drop=0.):
|
||||
|
||||
def forward(X, drop=0.0):
|
||||
Y, _ = model.begin_update(X, drop=drop)
|
||||
return Y, None
|
||||
|
||||
return wrap(forward, model)
|
||||
|
||||
|
||||
def create_pipeline(width, embed_size, vectors_model):
|
||||
print("Load vectors")
|
||||
nlp = spacy.load(vectors_model)
|
||||
print("Start training")
|
||||
textcat = TextCategorizer(nlp.vocab,
|
||||
labels=['POSITIVE', 'NEGATIVE'],
|
||||
textcat = TextCategorizer(
|
||||
nlp.vocab,
|
||||
labels=["POSITIVE", "NEGATIVE"],
|
||||
model=build_textcat_model(
|
||||
Tok2Vec(width=width, embed_size=embed_size), 2, width))
|
||||
Tok2Vec(width=width, embed_size=embed_size), 2, width
|
||||
),
|
||||
)
|
||||
|
||||
nlp.add_pipe(textcat)
|
||||
return nlp
|
||||
|
||||
|
||||
def train_tensorizer(nlp, texts, dropout, n_iter):
|
||||
tensorizer = nlp.create_pipe('tensorizer')
|
||||
tensorizer = nlp.create_pipe("tensorizer")
|
||||
nlp.add_pipe(tensorizer)
|
||||
optimizer = nlp.begin_training()
|
||||
for i in range(n_iter):
|
||||
|
@@ -109,36 +118,43 @@ def train_tensorizer(nlp, texts, dropout, n_iter):
|
|||
print(losses)
|
||||
return optimizer
|
||||
|
||||
|
||||
def train_textcat(nlp, n_texts, n_iter=10):
|
||||
textcat = nlp.get_pipe('textcat')
|
||||
textcat = nlp.get_pipe("textcat")
|
||||
tok2vec_weights = textcat.model.tok2vec.to_bytes()
|
||||
(train_texts, train_cats), (dev_texts, dev_cats) = load_textcat_data(limit=n_texts)
|
||||
print("Using {} examples ({} training, {} evaluation)"
|
||||
.format(n_texts, len(train_texts), len(dev_texts)))
|
||||
train_data = list(zip(train_texts,
|
||||
[{'cats': cats} for cats in train_cats]))
|
||||
print(
|
||||
"Using {} examples ({} training, {} evaluation)".format(
|
||||
n_texts, len(train_texts), len(dev_texts)
|
||||
)
|
||||
)
|
||||
train_data = list(zip(train_texts, [{"cats": cats} for cats in train_cats]))
|
||||
|
||||
# get names of other pipes to disable them during training
|
||||
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'textcat']
|
||||
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "textcat"]
|
||||
with nlp.disable_pipes(*other_pipes): # only train textcat
|
||||
optimizer = nlp.begin_training()
|
||||
textcat.model.tok2vec.from_bytes(tok2vec_weights)
|
||||
print("Training the model...")
|
||||
print('{:^5}\t{:^5}\t{:^5}\t{:^5}'.format('LOSS', 'P', 'R', 'F'))
|
||||
print("{:^5}\t{:^5}\t{:^5}\t{:^5}".format("LOSS", "P", "R", "F"))
|
||||
for i in range(n_iter):
|
||||
losses = {'textcat': 0.0}
|
||||
losses = {"textcat": 0.0}
|
||||
# batch up the examples using spaCy's minibatch
|
||||
batches = minibatch(tqdm.tqdm(train_data), size=2)
|
||||
for batch in batches:
|
||||
texts, annotations = zip(*batch)
|
||||
nlp.update(texts, annotations, sgd=optimizer, drop=0.2,
|
||||
losses=losses)
|
||||
nlp.update(texts, annotations, sgd=optimizer, drop=0.2, losses=losses)
|
||||
with textcat.model.use_params(optimizer.averages):
|
||||
# evaluate on the dev data split off in load_data()
|
||||
scores = evaluate_textcat(nlp.tokenizer, textcat, dev_texts, dev_cats)
|
||||
print('{0:.3f}\t{1:.3f}\t{2:.3f}\t{3:.3f}' # print a simple table
|
||||
.format(losses['textcat'], scores['textcat_p'],
|
||||
scores['textcat_r'], scores['textcat_f']))
|
||||
print(
|
||||
"{0:.3f}\t{1:.3f}\t{2:.3f}\t{3:.3f}".format( # print a simple table
|
||||
losses["textcat"],
|
||||
scores["textcat_p"],
|
||||
scores["textcat_r"],
|
||||
scores["textcat_f"],
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def evaluate_textcat(tokenizer, textcat, texts, cats):
|
||||
|
@@ -153,9 +169,9 @@ def evaluate_textcat(tokenizer, textcat, texts, cats):
|
|||
if label not in gold:
|
||||
continue
|
||||
if score >= 0.5 and gold[label] >= 0.5:
|
||||
tp += 1.
|
||||
tp += 1.0
|
||||
elif score >= 0.5 and gold[label] < 0.5:
|
||||
fp += 1.
|
||||
fp += 1.0
|
||||
elif score < 0.5 and gold[label] < 0.5:
|
||||
tn += 1
|
||||
elif score < 0.5 and gold[label] >= 0.5:
|
||||
|
@@ -163,8 +179,7 @@ def evaluate_textcat(tokenizer, textcat, texts, cats):
|
|||
precision = tp / (tp + fp)
|
||||
recall = tp / (tp + fn)
|
||||
f_score = 2 * (precision * recall) / (precision + recall)
|
||||
return {'textcat_p': precision, 'textcat_r': recall, 'textcat_f': f_score}
|
||||
|
||||
return {"textcat_p": precision, "textcat_r": recall, "textcat_f": f_score}
|
||||
|
||||
|
||||
@plac.annotations(
|
||||
|
@@ -173,10 +188,16 @@ def evaluate_textcat(tokenizer, textcat, texts, cats):
|
|||
pretrain_iters=("Number of iterations to pretrain", "option", "pn", int),
|
||||
train_iters=("Number of iterations to pretrain", "option", "tn", int),
|
||||
train_examples=("Number of labelled examples", "option", "eg", int),
|
||||
vectors_model=("Name or path to vectors model to learn from")
|
||||
vectors_model=("Name or path to vectors model to learn from"),
|
||||
)
|
||||
def main(width, embed_size, vectors_model,
|
||||
pretrain_iters=30, train_iters=30, train_examples=1000):
|
||||
def main(
|
||||
width,
|
||||
embed_size,
|
||||
vectors_model,
|
||||
pretrain_iters=30,
|
||||
train_iters=30,
|
||||
train_examples=1000,
|
||||
):
|
||||
random.seed(0)
|
||||
numpy.random.seed(0)
|
||||
use_gpu = prefer_gpu()
|
||||
|
@@ -190,5 +211,6 @@ def main(width, embed_size, vectors_model,
|
|||
print("Train textcat")
|
||||
train_textcat(nlp, train_examples, n_iter=train_iters)
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
if __name__ == "__main__":
|
||||
plac.call(main)
|
||||
|
|
|
@@ -29,73 +29,113 @@ from spacy.util import minibatch, compounding
|
|||
# training data: texts, heads and dependency labels
|
||||
# for no relation, we simply chose an arbitrary dependency label, e.g. '-'
|
||||
TRAIN_DATA = [
|
||||
("find a cafe with great wifi", {
|
||||
'heads': [0, 2, 0, 5, 5, 2], # index of token head
|
||||
'deps': ['ROOT', '-', 'PLACE', '-', 'QUALITY', 'ATTRIBUTE']
|
||||
}),
|
||||
("find a hotel near the beach", {
|
||||
'heads': [0, 2, 0, 5, 5, 2],
|
||||
'deps': ['ROOT', '-', 'PLACE', 'QUALITY', '-', 'ATTRIBUTE']
|
||||
}),
|
||||
("find me the closest gym that's open late", {
|
||||
'heads': [0, 0, 4, 4, 0, 6, 4, 6, 6],
|
||||
'deps': ['ROOT', '-', '-', 'QUALITY', 'PLACE', '-', '-', 'ATTRIBUTE', 'TIME']
|
||||
}),
|
||||
("show me the cheapest store that sells flowers", {
|
||||
'heads': [0, 0, 4, 4, 0, 4, 4, 4], # attach "flowers" to store!
|
||||
'deps': ['ROOT', '-', '-', 'QUALITY', 'PLACE', '-', '-', 'PRODUCT']
|
||||
}),
|
||||
("find a nice restaurant in london", {
|
||||
'heads': [0, 3, 3, 0, 3, 3],
|
||||
'deps': ['ROOT', '-', 'QUALITY', 'PLACE', '-', 'LOCATION']
|
||||
}),
|
||||
("show me the coolest hostel in berlin", {
|
||||
'heads': [0, 0, 4, 4, 0, 4, 4],
|
||||
'deps': ['ROOT', '-', '-', 'QUALITY', 'PLACE', '-', 'LOCATION']
|
||||
}),
|
||||
("find a good italian restaurant near work", {
|
||||
'heads': [0, 4, 4, 4, 0, 4, 5],
|
||||
'deps': ['ROOT', '-', 'QUALITY', 'ATTRIBUTE', 'PLACE', 'ATTRIBUTE', 'LOCATION']
|
||||
})
|
||||
(
|
||||
"find a cafe with great wifi",
|
||||
{
|
||||
"heads": [0, 2, 0, 5, 5, 2], # index of token head
|
||||
"deps": ["ROOT", "-", "PLACE", "-", "QUALITY", "ATTRIBUTE"],
|
||||
},
|
||||
),
|
||||
(
|
||||
"find a hotel near the beach",
|
||||
{
|
||||
"heads": [0, 2, 0, 5, 5, 2],
|
||||
"deps": ["ROOT", "-", "PLACE", "QUALITY", "-", "ATTRIBUTE"],
|
||||
},
|
||||
),
|
||||
(
|
||||
"find me the closest gym that's open late",
|
||||
{
|
||||
"heads": [0, 0, 4, 4, 0, 6, 4, 6, 6],
|
||||
"deps": [
|
||||
"ROOT",
|
||||
"-",
|
||||
"-",
|
||||
"QUALITY",
|
||||
"PLACE",
|
||||
"-",
|
||||
"-",
|
||||
"ATTRIBUTE",
|
||||
"TIME",
|
||||
],
|
||||
},
|
||||
),
|
||||
(
|
||||
"show me the cheapest store that sells flowers",
|
||||
{
|
||||
"heads": [0, 0, 4, 4, 0, 4, 4, 4], # attach "flowers" to store!
|
||||
"deps": ["ROOT", "-", "-", "QUALITY", "PLACE", "-", "-", "PRODUCT"],
|
||||
},
|
||||
),
|
||||
(
|
||||
"find a nice restaurant in london",
|
||||
{
|
||||
"heads": [0, 3, 3, 0, 3, 3],
|
||||
"deps": ["ROOT", "-", "QUALITY", "PLACE", "-", "LOCATION"],
|
||||
},
|
||||
),
|
||||
(
|
||||
"show me the coolest hostel in berlin",
|
||||
{
|
||||
"heads": [0, 0, 4, 4, 0, 4, 4],
|
||||
"deps": ["ROOT", "-", "-", "QUALITY", "PLACE", "-", "LOCATION"],
|
||||
},
|
||||
),
|
||||
(
|
||||
"find a good italian restaurant near work",
|
||||
{
|
||||
"heads": [0, 4, 4, 4, 0, 4, 5],
|
||||
"deps": [
|
||||
"ROOT",
|
||||
"-",
|
||||
"QUALITY",
|
||||
"ATTRIBUTE",
|
||||
"PLACE",
|
||||
"ATTRIBUTE",
|
||||
"LOCATION",
|
||||
],
|
||||
},
|
||||
),
|
||||
]
|
||||
|
||||
|
||||
@plac.annotations(
|
||||
model=("Model name. Defaults to blank 'en' model.", "option", "m", str),
|
||||
output_dir=("Optional output directory", "option", "o", Path),
|
||||
n_iter=("Number of training iterations", "option", "n", int))
|
||||
n_iter=("Number of training iterations", "option", "n", int),
|
||||
)
|
||||
def main(model=None, output_dir=None, n_iter=15):
|
||||
"""Load the model, set up the pipeline and train the parser."""
|
||||
if model is not None:
|
||||
nlp = spacy.load(model) # load existing spaCy model
|
||||
print("Loaded model '%s'" % model)
|
||||
else:
|
||||
nlp = spacy.blank('en') # create blank Language class
|
||||
nlp = spacy.blank("en") # create blank Language class
|
||||
print("Created blank 'en' model")
|
||||
|
||||
# We'll use the built-in dependency parser class, but we want to create a
|
||||
# fresh instance – just in case.
|
||||
if 'parser' in nlp.pipe_names:
|
||||
nlp.remove_pipe('parser')
|
||||
parser = nlp.create_pipe('parser')
|
||||
if "parser" in nlp.pipe_names:
|
||||
nlp.remove_pipe("parser")
|
||||
parser = nlp.create_pipe("parser")
|
||||
nlp.add_pipe(parser, first=True)
|
||||
|
||||
for text, annotations in TRAIN_DATA:
|
||||
for dep in annotations.get('deps', []):
|
||||
for dep in annotations.get("deps", []):
|
||||
parser.add_label(dep)
|
||||
|
||||
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'parser']
|
||||
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "parser"]
|
||||
with nlp.disable_pipes(*other_pipes): # only train parser
|
||||
optimizer = nlp.begin_training()
|
||||
for itn in range(n_iter):
|
||||
random.shuffle(TRAIN_DATA)
|
||||
losses = {}
|
||||
# batch up the examples using spaCy's minibatch
|
||||
batches = minibatch(TRAIN_DATA, size=compounding(4., 32., 1.001))
|
||||
batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001))
|
||||
for batch in batches:
|
||||
texts, annotations = zip(*batch)
|
||||
nlp.update(texts, annotations, sgd=optimizer, losses=losses)
|
||||
print('Losses', losses)
|
||||
print("Losses", losses)
|
||||
|
||||
# test the trained model
|
||||
test_model(nlp)
|
||||
|
@@ -115,16 +155,18 @@ def main(model=None, output_dir=None, n_iter=15):
|
|||
|
||||
|
||||
def test_model(nlp):
|
||||
texts = ["find a hotel with good wifi",
|
||||
"find me the cheapest gym near work",
|
||||
"show me the best hotel in berlin"]
|
||||
texts = [
|
||||
"find a hotel with good wifi",
|
||||
"find me the cheapest gym near work",
|
||||
"show me the best hotel in berlin",
|
||||
]
|
||||
docs = nlp.pipe(texts)
|
||||
for doc in docs:
|
||||
print(doc.text)
|
||||
print([(t.text, t.dep_, t.head.text) for t in doc if t.dep_ != '-'])
|
||||
print([(t.text, t.dep_, t.head.text) for t in doc if t.dep_ != "-"])
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
plac.call(main)
|
||||
|
||||
# Expected output:
|
||||
|
|
|
@@ -20,51 +20,48 @@ from spacy.util import minibatch, compounding
|
|||
|
||||
# training data
|
||||
TRAIN_DATA = [
|
||||
('Who is Shaka Khan?', {
|
||||
'entities': [(7, 17, 'PERSON')]
|
||||
}),
|
||||
('I like London and Berlin.', {
|
||||
'entities': [(7, 13, 'LOC'), (18, 24, 'LOC')]
|
||||
})
|
||||
("Who is Shaka Khan?", {"entities": [(7, 17, "PERSON")]}),
|
||||
("I like London and Berlin.", {"entities": [(7, 13, "LOC"), (18, 24, "LOC")]}),
|
||||
]
|
||||
|
||||
|
||||
@plac.annotations(
|
||||
model=("Model name. Defaults to blank 'en' model.", "option", "m", str),
|
||||
output_dir=("Optional output directory", "option", "o", Path),
|
||||
n_iter=("Number of training iterations", "option", "n", int))
|
||||
n_iter=("Number of training iterations", "option", "n", int),
|
||||
)
|
||||
def main(model=None, output_dir=None, n_iter=100):
|
||||
"""Load the model, set up the pipeline and train the entity recognizer."""
|
||||
if model is not None:
|
||||
nlp = spacy.load(model) # load existing spaCy model
|
||||
print("Loaded model '%s'" % model)
|
||||
else:
|
||||
nlp = spacy.blank('en') # create blank Language class
|
||||
nlp = spacy.blank("en") # create blank Language class
|
||||
print("Created blank 'en' model")
|
||||
|
||||
# create the built-in pipeline components and add them to the pipeline
|
||||
# nlp.create_pipe works for built-ins that are registered with spaCy
|
||||
if 'ner' not in nlp.pipe_names:
|
||||
ner = nlp.create_pipe('ner')
|
||||
if "ner" not in nlp.pipe_names:
|
||||
ner = nlp.create_pipe("ner")
|
||||
nlp.add_pipe(ner, last=True)
|
||||
# otherwise, get it so we can add labels
|
||||
else:
|
||||
ner = nlp.get_pipe('ner')
|
||||
ner = nlp.get_pipe("ner")
|
||||
|
||||
# add labels
|
||||
for _, annotations in TRAIN_DATA:
|
||||
for ent in annotations.get('entities'):
|
||||
for ent in annotations.get("entities"):
|
||||
ner.add_label(ent[2])
|
||||
|
||||
# get names of other pipes to disable them during training
|
||||
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'ner']
|
||||
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "ner"]
|
||||
with nlp.disable_pipes(*other_pipes): # only train NER
|
||||
optimizer = nlp.begin_training()
|
||||
for itn in range(n_iter):
|
||||
random.shuffle(TRAIN_DATA)
|
||||
losses = {}
|
||||
# batch up the examples using spaCy's minibatch
|
||||
batches = minibatch(TRAIN_DATA, size=compounding(4., 32., 1.001))
|
||||
batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001))
|
||||
for batch in batches:
|
||||
texts, annotations = zip(*batch)
|
||||
nlp.update(
|
||||
|
@@ -72,14 +69,15 @@ def main(model=None, output_dir=None, n_iter=100):
|
|||
annotations, # batch of annotations
|
||||
drop=0.5, # dropout - make it harder to memorise data
|
||||
sgd=optimizer, # callable to update weights
|
||||
losses=losses)
|
||||
print('Losses', losses)
|
||||
losses=losses,
|
||||
)
|
||||
print("Losses", losses)
|
||||
|
||||
# test the trained model
|
||||
for text, _ in TRAIN_DATA:
|
||||
doc = nlp(text)
|
||||
print('Entities', [(ent.text, ent.label_) for ent in doc.ents])
|
||||
print('Tokens', [(t.text, t.ent_type_, t.ent_iob) for t in doc])
|
||||
print("Entities", [(ent.text, ent.label_) for ent in doc.ents])
|
||||
print("Tokens", [(t.text, t.ent_type_, t.ent_iob) for t in doc])
|
||||
|
||||
# save model to output directory
|
||||
if output_dir is not None:
|
||||
|
@@ -94,11 +92,11 @@ def main(model=None, output_dir=None, n_iter=100):
|
|||
nlp2 = spacy.load(output_dir)
|
||||
for text, _ in TRAIN_DATA:
|
||||
doc = nlp2(text)
|
||||
print('Entities', [(ent.text, ent.label_) for ent in doc.ents])
|
||||
print('Tokens', [(t.text, t.ent_type_, t.ent_iob) for t in doc])
|
||||
print("Entities", [(ent.text, ent.label_) for ent in doc.ents])
|
||||
print("Tokens", [(t.text, t.ent_type_, t.ent_iob) for t in doc])
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
plac.call(main)
|
||||
|
||||
# Expected output:
|
||||
|
|
|
@@ -35,7 +35,7 @@ from spacy.util import minibatch, compounding
|
|||
|
||||
|
||||
# new entity label
|
||||
LABEL = 'ANIMAL'
|
||||
LABEL = "ANIMAL"
|
||||
|
||||
# training data
|
||||
# Note: If you're using an existing model, make sure to mix in examples of
|
||||
|
@@ -43,29 +43,21 @@ LABEL = 'ANIMAL'
|
|||
# model might learn the new type, but "forget" what it previously knew.
|
||||
# https://explosion.ai/blog/pseudo-rehearsal-catastrophic-forgetting
|
||||
TRAIN_DATA = [
|
||||
("Horses are too tall and they pretend to care about your feelings", {
|
||||
'entities': [(0, 6, 'ANIMAL')]
|
||||
}),
|
||||
|
||||
("Do they bite?", {
|
||||
'entities': []
|
||||
}),
|
||||
|
||||
("horses are too tall and they pretend to care about your feelings", {
|
||||
'entities': [(0, 6, 'ANIMAL')]
|
||||
}),
|
||||
|
||||
("horses pretend to care about your feelings", {
|
||||
'entities': [(0, 6, 'ANIMAL')]
|
||||
}),
|
||||
|
||||
("they pretend to care about your feelings, those horses", {
|
||||
'entities': [(48, 54, 'ANIMAL')]
|
||||
}),
|
||||
|
||||
("horses?", {
|
||||
'entities': [(0, 6, 'ANIMAL')]
|
||||
})
|
||||
(
|
||||
"Horses are too tall and they pretend to care about your feelings",
|
||||
{"entities": [(0, 6, "ANIMAL")]},
|
||||
),
|
||||
("Do they bite?", {"entities": []}),
|
||||
(
|
||||
"horses are too tall and they pretend to care about your feelings",
|
||||
{"entities": [(0, 6, "ANIMAL")]},
|
||||
),
|
||||
("horses pretend to care about your feelings", {"entities": [(0, 6, "ANIMAL")]}),
|
||||
(
|
||||
"they pretend to care about your feelings, those horses",
|
||||
{"entities": [(48, 54, "ANIMAL")]},
|
||||
),
|
||||
("horses?", {"entities": [(0, 6, "ANIMAL")]}),
|
||||
]
|
||||
|
||||
|
||||
|
@@ -73,25 +65,26 @@ TRAIN_DATA = [
|
|||
model=("Model name. Defaults to blank 'en' model.", "option", "m", str),
|
||||
new_model_name=("New model name for model meta.", "option", "nm", str),
|
||||
output_dir=("Optional output directory", "option", "o", Path),
|
||||
n_iter=("Number of training iterations", "option", "n", int))
|
||||
def main(model=None, new_model_name='animal', output_dir=None, n_iter=10):
|
||||
n_iter=("Number of training iterations", "option", "n", int),
|
||||
)
|
||||
def main(model=None, new_model_name="animal", output_dir=None, n_iter=10):
|
||||
"""Set up the pipeline and entity recognizer, and train the new entity."""
|
||||
if model is not None:
|
||||
nlp = spacy.load(model) # load existing spaCy model
|
||||
print("Loaded model '%s'" % model)
|
||||
else:
|
||||
nlp = spacy.blank('en') # create blank Language class
|
||||
nlp = spacy.blank("en") # create blank Language class
|
||||
print("Created blank 'en' model")
|
||||
# Add entity recognizer to model if it's not in the pipeline
|
||||
# nlp.create_pipe works for built-ins that are registered with spaCy
|
||||
if 'ner' not in nlp.pipe_names:
|
||||
ner = nlp.create_pipe('ner')
|
||||
if "ner" not in nlp.pipe_names:
|
||||
ner = nlp.create_pipe("ner")
|
||||
nlp.add_pipe(ner)
|
||||
# otherwise, get it, so we can add labels to it
|
||||
else:
|
||||
ner = nlp.get_pipe('ner')
|
||||
ner = nlp.get_pipe("ner")
|
||||
|
||||
ner.add_label(LABEL) # add new entity label to entity recognizer
|
||||
ner.add_label(LABEL) # add new entity label to entity recognizer
|
||||
if model is None:
|
||||
optimizer = nlp.begin_training()
|
||||
else:
|
||||
|
@@ -100,21 +93,20 @@ def main(model=None, new_model_name='animal', output_dir=None, n_iter=10):
|
|||
optimizer = nlp.entity.create_optimizer()
|
||||
|
||||
# get names of other pipes to disable them during training
|
||||
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'ner']
|
||||
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "ner"]
|
||||
with nlp.disable_pipes(*other_pipes): # only train NER
|
||||
for itn in range(n_iter):
|
||||
random.shuffle(TRAIN_DATA)
|
||||
losses = {}
|
||||
# batch up the examples using spaCy's minibatch
|
||||
batches = minibatch(TRAIN_DATA, size=compounding(4., 32., 1.001))
|
||||
batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001))
|
||||
for batch in batches:
|
||||
texts, annotations = zip(*batch)
|
||||
nlp.update(texts, annotations, sgd=optimizer, drop=0.35,
|
||||
losses=losses)
|
||||
print('Losses', losses)
|
||||
nlp.update(texts, annotations, sgd=optimizer, drop=0.35, losses=losses)
|
||||
print("Losses", losses)
|
||||
|
||||
# test the trained model
|
||||
test_text = 'Do you like horses?'
|
||||
test_text = "Do you like horses?"
|
||||
doc = nlp(test_text)
|
||||
print("Entities in '%s'" % test_text)
|
||||
for ent in doc.ents:
|
||||
|
@@ -125,7 +117,7 @@ def main(model=None, new_model_name='animal', output_dir=None, n_iter=10):
|
|||
output_dir = Path(output_dir)
|
||||
if not output_dir.exists():
|
||||
output_dir.mkdir()
|
||||
nlp.meta['name'] = new_model_name # rename model
|
||||
nlp.meta["name"] = new_model_name # rename model
|
||||
nlp.to_disk(output_dir)
|
||||
print("Saved model to", output_dir)
|
||||
|
||||
|
@@ -137,5 +129,5 @@ def main(model=None, new_model_name='animal', output_dir=None, n_iter=10):
|
|||
print(ent.label_, ent.text)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
plac.call(main)
|
||||
|
|
|
@@ -18,62 +18,69 @@ from spacy.util import minibatch, compounding
|
|||
|
||||
# training data
|
||||
TRAIN_DATA = [
|
||||
("They trade mortgage-backed securities.", {
|
||||
'heads': [1, 1, 4, 4, 5, 1, 1],
|
||||
'deps': ['nsubj', 'ROOT', 'compound', 'punct', 'nmod', 'dobj', 'punct']
|
||||
}),
|
||||
("I like London and Berlin.", {
|
||||
'heads': [1, 1, 1, 2, 2, 1],
|
||||
'deps': ['nsubj', 'ROOT', 'dobj', 'cc', 'conj', 'punct']
|
||||
})
|
||||
(
|
||||
"They trade mortgage-backed securities.",
|
||||
{
|
||||
"heads": [1, 1, 4, 4, 5, 1, 1],
|
||||
"deps": ["nsubj", "ROOT", "compound", "punct", "nmod", "dobj", "punct"],
|
||||
},
|
||||
),
|
||||
(
|
||||
"I like London and Berlin.",
|
||||
{
|
||||
"heads": [1, 1, 1, 2, 2, 1],
|
||||
"deps": ["nsubj", "ROOT", "dobj", "cc", "conj", "punct"],
|
||||
},
|
||||
),
|
||||
]
|
||||
|
||||
|
||||
@plac.annotations(
|
||||
model=("Model name. Defaults to blank 'en' model.", "option", "m", str),
|
||||
output_dir=("Optional output directory", "option", "o", Path),
|
||||
n_iter=("Number of training iterations", "option", "n", int))
|
||||
n_iter=("Number of training iterations", "option", "n", int),
|
||||
)
|
||||
def main(model=None, output_dir=None, n_iter=10):
|
||||
"""Load the model, set up the pipeline and train the parser."""
|
||||
if model is not None:
|
||||
nlp = spacy.load(model) # load existing spaCy model
|
||||
print("Loaded model '%s'" % model)
|
||||
else:
|
||||
nlp = spacy.blank('en') # create blank Language class
|
||||
nlp = spacy.blank("en") # create blank Language class
|
||||
print("Created blank 'en' model")
|
||||
|
||||
# add the parser to the pipeline if it doesn't exist
|
||||
# nlp.create_pipe works for built-ins that are registered with spaCy
|
||||
if 'parser' not in nlp.pipe_names:
|
||||
parser = nlp.create_pipe('parser')
|
||||
if "parser" not in nlp.pipe_names:
|
||||
parser = nlp.create_pipe("parser")
|
||||
nlp.add_pipe(parser, first=True)
|
||||
# otherwise, get it, so we can add labels to it
|
||||
else:
|
||||
parser = nlp.get_pipe('parser')
|
||||
parser = nlp.get_pipe("parser")
|
||||
|
||||
# add labels to the parser
|
||||
for _, annotations in TRAIN_DATA:
|
||||
for dep in annotations.get('deps', []):
|
||||
for dep in annotations.get("deps", []):
|
||||
parser.add_label(dep)
|
||||
|
||||
# get names of other pipes to disable them during training
|
||||
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'parser']
|
||||
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "parser"]
|
||||
with nlp.disable_pipes(*other_pipes): # only train parser
|
||||
optimizer = nlp.begin_training()
|
||||
for itn in range(n_iter):
|
||||
random.shuffle(TRAIN_DATA)
|
||||
losses = {}
|
||||
# batch up the examples using spaCy's minibatch
|
||||
batches = minibatch(TRAIN_DATA, size=compounding(4., 32., 1.001))
|
||||
batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001))
|
||||
for batch in batches:
|
||||
texts, annotations = zip(*batch)
|
||||
nlp.update(texts, annotations, sgd=optimizer, losses=losses)
|
||||
print('Losses', losses)
|
||||
print("Losses", losses)
|
||||
|
||||
# test the trained model
|
||||
test_text = "I like securities."
|
||||
doc = nlp(test_text)
|
||||
print('Dependencies', [(t.text, t.dep_, t.head.text) for t in doc])
|
||||
print("Dependencies", [(t.text, t.dep_, t.head.text) for t in doc])
|
||||
|
||||
# save model to output directory
|
||||
if output_dir is not None:
|
||||
|
@@ -87,10 +94,10 @@ def main(model=None, output_dir=None, n_iter=10):
|
|||
print("Loading from", output_dir)
|
||||
nlp2 = spacy.load(output_dir)
|
||||
doc = nlp2(test_text)
|
||||
print('Dependencies', [(t.text, t.dep_, t.head.text) for t in doc])
|
||||
print("Dependencies", [(t.text, t.dep_, t.head.text) for t in doc])
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
plac.call(main)
|
||||
|
||||
# expected result:
|
||||
|
|
|
@@ -25,11 +25,7 @@ from spacy.util import minibatch, compounding
|
|||
# http://universaldependencies.github.io/docs/u/pos/index.html
|
||||
# You may also specify morphological features for your tags, from the universal
|
||||
# scheme.
|
||||
TAG_MAP = {
|
||||
'N': {'pos': 'NOUN'},
|
||||
'V': {'pos': 'VERB'},
|
||||
'J': {'pos': 'ADJ'}
|
||||
}
|
||||
TAG_MAP = {"N": {"pos": "NOUN"}, "V": {"pos": "VERB"}, "J": {"pos": "ADJ"}}
|
||||
|
||||
# Usually you'll read this in, of course. Data formats vary. Ensure your
|
||||
# strings are unicode and that the number of tags assigned matches spaCy's
|
||||
|
@@ -37,16 +33,17 @@ TAG_MAP = {
|
|||
# that specifies the gold-standard tokenization, e.g.:
|
||||
# ("Eatblueham", {'words': ['Eat', 'blue', 'ham'] 'tags': ['V', 'J', 'N']})
|
||||
TRAIN_DATA = [
|
||||
("I like green eggs", {'tags': ['N', 'V', 'J', 'N']}),
|
||||
("Eat blue ham", {'tags': ['V', 'J', 'N']})
|
||||
("I like green eggs", {"tags": ["N", "V", "J", "N"]}),
|
||||
("Eat blue ham", {"tags": ["V", "J", "N"]}),
|
||||
]
|
||||
|
||||
|
||||
@plac.annotations(
|
||||
lang=("ISO Code of language to use", "option", "l", str),
|
||||
output_dir=("Optional output directory", "option", "o", Path),
|
||||
n_iter=("Number of training iterations", "option", "n", int))
|
||||
def main(lang='en', output_dir=None, n_iter=25):
|
||||
n_iter=("Number of training iterations", "option", "n", int),
|
||||
)
|
||||
def main(lang="en", output_dir=None, n_iter=25):
|
||||
"""Create a new model, set up the pipeline and train the tagger. In order to
|
||||
train the tagger with a custom tag map, we're creating a new Language
|
||||
instance with a custom vocab.
|
||||
|
@@ -54,7 +51,7 @@ def main(lang='en', output_dir=None, n_iter=25):
|
|||
nlp = spacy.blank(lang)
|
||||
# add the tagger to the pipeline
|
||||
# nlp.create_pipe works for built-ins that are registered with spaCy
|
||||
tagger = nlp.create_pipe('tagger')
|
||||
tagger = nlp.create_pipe("tagger")
|
||||
# Add the tags. This needs to be done before you start training.
|
||||
for tag, values in TAG_MAP.items():
|
||||
tagger.add_label(tag, values)
|
||||
|
@@ -65,16 +62,16 @@ def main(lang='en', output_dir=None, n_iter=25):
|
|||
random.shuffle(TRAIN_DATA)
|
||||
losses = {}
|
||||
# batch up the examples using spaCy's minibatch
|
||||
batches = minibatch(TRAIN_DATA, size=compounding(4., 32., 1.001))
|
||||
batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001))
|
||||
for batch in batches:
|
||||
texts, annotations = zip(*batch)
|
||||
nlp.update(texts, annotations, sgd=optimizer, losses=losses)
|
||||
print('Losses', losses)
|
||||
print("Losses", losses)
|
||||
|
||||
# test the trained model
|
||||
test_text = "I like blue eggs"
|
||||
doc = nlp(test_text)
|
||||
print('Tags', [(t.text, t.tag_, t.pos_) for t in doc])
|
||||
print("Tags", [(t.text, t.tag_, t.pos_) for t in doc])
|
||||
|
||||
# save model to output directory
|
||||
if output_dir is not None:
|
||||
|
@@ -88,10 +85,10 @@ def main(lang='en', output_dir=None, n_iter=25):
|
|||
print("Loading from", output_dir)
|
||||
nlp2 = spacy.load(output_dir)
|
||||
doc = nlp2(test_text)
|
||||
print('Tags', [(t.text, t.tag_, t.pos_) for t in doc])
|
||||
print("Tags", [(t.text, t.tag_, t.pos_) for t in doc])
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
plac.call(main)
|
||||
|
||||
# Expected output:
|
||||
|
|
|
@@ -23,55 +23,62 @@ from spacy.util import minibatch, compounding
|
|||
model=("Model name. Defaults to blank 'en' model.", "option", "m", str),
|
||||
output_dir=("Optional output directory", "option", "o", Path),
|
||||
n_texts=("Number of texts to train from", "option", "t", int),
|
||||
n_iter=("Number of training iterations", "option", "n", int))
|
||||
n_iter=("Number of training iterations", "option", "n", int),
|
||||
)
|
||||
def main(model=None, output_dir=None, n_iter=20, n_texts=2000):
|
||||
if model is not None:
|
||||
nlp = spacy.load(model) # load existing spaCy model
|
||||
print("Loaded model '%s'" % model)
|
||||
else:
|
||||
nlp = spacy.blank('en') # create blank Language class
|
||||
nlp = spacy.blank("en") # create blank Language class
|
||||
print("Created blank 'en' model")
|
||||
|
||||
# add the text classifier to the pipeline if it doesn't exist
|
||||
# nlp.create_pipe works for built-ins that are registered with spaCy
|
||||
if 'textcat' not in nlp.pipe_names:
|
||||
textcat = nlp.create_pipe('textcat')
|
||||
if "textcat" not in nlp.pipe_names:
|
||||
textcat = nlp.create_pipe("textcat")
|
||||
nlp.add_pipe(textcat, last=True)
|
||||
# otherwise, get it, so we can add labels to it
|
||||
else:
|
||||
textcat = nlp.get_pipe('textcat')
|
||||
textcat = nlp.get_pipe("textcat")
|
||||
|
||||
# add label to text classifier
|
||||
textcat.add_label('POSITIVE')
|
||||
textcat.add_label("POSITIVE")
|
||||
|
||||
# load the IMDB dataset
|
||||
print("Loading IMDB data...")
|
||||
(train_texts, train_cats), (dev_texts, dev_cats) = load_data(limit=n_texts)
|
||||
print("Using {} examples ({} training, {} evaluation)"
|
||||
.format(n_texts, len(train_texts), len(dev_texts)))
|
||||
train_data = list(zip(train_texts,
|
||||
[{'cats': cats} for cats in train_cats]))
|
||||
print(
|
||||
"Using {} examples ({} training, {} evaluation)".format(
|
||||
n_texts, len(train_texts), len(dev_texts)
|
||||
)
|
||||
)
|
||||
train_data = list(zip(train_texts, [{"cats": cats} for cats in train_cats]))
|
||||
|
||||
# get names of other pipes to disable them during training
|
||||
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'textcat']
|
||||
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "textcat"]
|
||||
with nlp.disable_pipes(*other_pipes): # only train textcat
|
||||
optimizer = nlp.begin_training()
|
||||
print("Training the model...")
|
||||
print('{:^5}\t{:^5}\t{:^5}\t{:^5}'.format('LOSS', 'P', 'R', 'F'))
|
||||
print("{:^5}\t{:^5}\t{:^5}\t{:^5}".format("LOSS", "P", "R", "F"))
|
||||
for i in range(n_iter):
|
||||
losses = {}
|
||||
# batch up the examples using spaCy's minibatch
|
||||
batches = minibatch(train_data, size=compounding(4., 32., 1.001))
|
||||
batches = minibatch(train_data, size=compounding(4.0, 32.0, 1.001))
|
||||
for batch in batches:
|
||||
texts, annotations = zip(*batch)
|
||||
nlp.update(texts, annotations, sgd=optimizer, drop=0.2,
|
||||
losses=losses)
|
||||
nlp.update(texts, annotations, sgd=optimizer, drop=0.2, losses=losses)
|
||||
with textcat.model.use_params(optimizer.averages):
|
||||
# evaluate on the dev data split off in load_data()
|
||||
scores = evaluate(nlp.tokenizer, textcat, dev_texts, dev_cats)
|
||||
print('{0:.3f}\t{1:.3f}\t{2:.3f}\t{3:.3f}' # print a simple table
|
||||
.format(losses['textcat'], scores['textcat_p'],
|
||||
scores['textcat_r'], scores['textcat_f']))
|
||||
print(
|
||||
"{0:.3f}\t{1:.3f}\t{2:.3f}\t{3:.3f}".format( # print a simple table
|
||||
losses["textcat"],
|
||||
scores["textcat_p"],
|
||||
scores["textcat_r"],
|
||||
scores["textcat_f"],
|
||||
)
|
||||
)
|
||||
|
||||
# test the trained model
|
||||
test_text = "This movie sucked"
|
||||
|
@@ -99,7 +106,7 @@ def load_data(limit=0, split=0.8):
|
|||
random.shuffle(train_data)
|
||||
train_data = train_data[-limit:]
|
||||
texts, labels = zip(*train_data)
|
||||
cats = [{'POSITIVE': bool(y)} for y in labels]
|
||||
cats = [{"POSITIVE": bool(y)} for y in labels]
|
||||
split = int(len(train_data) * split)
|
||||
return (texts[:split], cats[:split]), (texts[split:], cats[split:])
|
||||
|
||||
|
@@ -116,9 +123,9 @@ def evaluate(tokenizer, textcat, texts, cats):
|
|||
if label not in gold:
|
||||
continue
|
||||
if score >= 0.5 and gold[label] >= 0.5:
|
||||
tp += 1.
|
||||
tp += 1.0
|
||||
elif score >= 0.5 and gold[label] < 0.5:
|
||||
fp += 1.
|
||||
fp += 1.0
|
||||
elif score < 0.5 and gold[label] < 0.5:
|
||||
tn += 1
|
||||
elif score < 0.5 and gold[label] >= 0.5:
|
||||
|
@@ -126,8 +133,8 @@ def evaluate(tokenizer, textcat, texts, cats):
|
|||
precision = tp / (tp + fp)
|
||||
recall = tp / (tp + fn)
|
||||
f_score = 2 * (precision * recall) / (precision + recall)
|
||||
return {'textcat_p': precision, 'textcat_r': recall, 'textcat_f': f_score}
|
||||
return {"textcat_p": precision, "textcat_r": recall, "textcat_f": f_score}
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
plac.call(main)
|
||||
|
|
|
@@ -14,8 +14,13 @@ from spacy.language import Language
|
|||
|
||||
@plac.annotations(
|
||||
vectors_loc=("Path to .vec file", "positional", None, str),
|
||||
lang=("Optional language ID. If not set, blank Language() will be used.",
|
||||
"positional", None, str))
|
||||
lang=(
|
||||
"Optional language ID. If not set, blank Language() will be used.",
|
||||
"positional",
|
||||
None,
|
||||
str,
|
||||
),
|
||||
)
|
||||
def main(vectors_loc, lang=None):
|
||||
if lang is None:
|
||||
nlp = Language()
|
||||
|
@@ -24,21 +29,21 @@ def main(vectors_loc, lang=None):
|
|||
# save the model to disk and load it back later (models always need a
|
||||
# "lang" setting). Use 'xx' for blank multi-language class.
|
||||
nlp = spacy.blank(lang)
|
||||
with open(vectors_loc, 'rb') as file_:
|
||||
with open(vectors_loc, "rb") as file_:
|
||||
header = file_.readline()
|
||||
nr_row, nr_dim = header.split()
|
||||
nlp.vocab.reset_vectors(width=int(nr_dim))
|
||||
for line in file_:
|
||||
line = line.rstrip().decode('utf8')
|
||||
pieces = line.rsplit(' ', int(nr_dim))
|
||||
line = line.rstrip().decode("utf8")
|
||||
pieces = line.rsplit(" ", int(nr_dim))
|
||||
word = pieces[0]
|
||||
vector = numpy.asarray([float(v) for v in pieces[1:]], dtype='f')
|
||||
vector = numpy.asarray([float(v) for v in pieces[1:]], dtype="f")
|
||||
nlp.vocab.set_vector(word, vector) # add the vectors to the vocab
|
||||
# test the vectors and similarity
|
||||
text = 'class colspan'
|
||||
text = "class colspan"
|
||||
doc = nlp(text)
|
||||
print(text, doc[0].similarity(doc[1]))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
plac.call(main)
|
||||
|
|
|
@@ -14,26 +14,45 @@ import plac
|
|||
import spacy
|
||||
import tensorflow as tf
|
||||
import tqdm
|
||||
from tensorflow.contrib.tensorboard.plugins.projector import visualize_embeddings, ProjectorConfig
|
||||
from tensorflow.contrib.tensorboard.plugins.projector import (
|
||||
visualize_embeddings,
|
||||
ProjectorConfig,
|
||||
)
|
||||
|
||||
|
||||
@plac.annotations(
|
||||
vectors_loc=("Path to spaCy model that contains vectors", "positional", None, str),
|
||||
out_loc=("Path to output folder for tensorboard session data", "positional", None, str),
|
||||
name=("Human readable name for tsv file and vectors tensor", "positional", None, str),
|
||||
out_loc=(
|
||||
"Path to output folder for tensorboard session data",
|
||||
"positional",
|
||||
None,
|
||||
str,
|
||||
),
|
||||
name=(
|
||||
"Human readable name for tsv file and vectors tensor",
|
||||
"positional",
|
||||
None,
|
||||
str,
|
||||
),
|
||||
)
|
||||
def main(vectors_loc, out_loc, name="spaCy_vectors"):
|
||||
meta_file = "{}.tsv".format(name)
|
||||
out_meta_file = path.join(out_loc, meta_file)
|
||||
|
||||
print('Loading spaCy vectors model: {}'.format(vectors_loc))
|
||||
print("Loading spaCy vectors model: {}".format(vectors_loc))
|
||||
model = spacy.load(vectors_loc)
|
||||
print('Finding lexemes with vectors attached: {}'.format(vectors_loc))
|
||||
strings_stream = tqdm.tqdm(model.vocab.strings, total=len(model.vocab.strings), leave=False)
|
||||
print("Finding lexemes with vectors attached: {}".format(vectors_loc))
|
||||
strings_stream = tqdm.tqdm(
|
||||
model.vocab.strings, total=len(model.vocab.strings), leave=False
|
||||
)
|
||||
queries = [w for w in strings_stream if model.vocab.has_vector(w)]
|
||||
vector_count = len(queries)
|
||||
|
||||
print('Building Tensorboard Projector metadata for ({}) vectors: {}'.format(vector_count, out_meta_file))
|
||||
print(
|
||||
"Building Tensorboard Projector metadata for ({}) vectors: {}".format(
|
||||
vector_count, out_meta_file
|
||||
)
|
||||
)
|
||||
|
||||
# Store vector data in a tensorflow variable
|
||||
tf_vectors_variable = numpy.zeros((vector_count, model.vocab.vectors.shape[1]))
|
||||
|
@@ -41,22 +60,26 @@ def main(vectors_loc, out_loc, name="spaCy_vectors"):
|
|||
# Write a tab-separated file that contains information about the vectors for visualization
|
||||
#
|
||||
# Reference: https://www.tensorflow.org/programmers_guide/embedding#metadata
|
||||
with open(out_meta_file, 'wb') as file_metadata:
|
||||
with open(out_meta_file, "wb") as file_metadata:
|
||||
# Define columns in the first row
|
||||
file_metadata.write("Text\tFrequency\n".encode('utf-8'))
|
||||
file_metadata.write("Text\tFrequency\n".encode("utf-8"))
|
||||
# Write out a row for each vector that we add to the tensorflow variable we created
|
||||
vec_index = 0
|
||||
for text in tqdm.tqdm(queries, total=len(queries), leave=False):
|
||||
# https://github.com/tensorflow/tensorflow/issues/9094
|
||||
text = '<Space>' if text.lstrip() == '' else text
|
||||
text = "<Space>" if text.lstrip() == "" else text
|
||||
lex = model.vocab[text]
|
||||
|
||||
# Store vector data and metadata
|
||||
tf_vectors_variable[vec_index] = model.vocab.get_vector(text)
|
||||
file_metadata.write("{}\t{}\n".format(text, math.exp(lex.prob) * vector_count).encode('utf-8'))
|
||||
file_metadata.write(
|
||||
"{}\t{}\n".format(text, math.exp(lex.prob) * vector_count).encode(
|
||||
"utf-8"
|
||||
)
|
||||
)
|
||||
vec_index += 1
|
||||
|
||||
print('Running Tensorflow Session...')
|
||||
print("Running Tensorflow Session...")
|
||||
sess = tf.InteractiveSession()
|
||||
tf.Variable(tf_vectors_variable, trainable=False, name=name)
|
||||
tf.global_variables_initializer().run()
|
||||
|
@@ -73,10 +96,10 @@ def main(vectors_loc, out_loc, name="spaCy_vectors"):
|
|||
visualize_embeddings(writer, config)
|
||||
|
||||
# Save session and print run command to the output
|
||||
print('Saving Tensorboard Session...')
|
||||
saver.save(sess, path.join(out_loc, '{}.ckpt'.format(name)))
|
||||
print('Done. Run `tensorboard --logdir={0}` to view in Tensorboard'.format(out_loc))
|
||||
print("Saving Tensorboard Session...")
|
||||
saver.save(sess, path.join(out_loc, "{}.ckpt".format(name)))
|
||||
print("Done. Run `tensorboard --logdir={0}` to view in Tensorboard".format(out_loc))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
plac.call(main)
|
||||
|
|