mirror of https://github.com/explosion/spaCy.git
Add hidden layers for tagger
commit 467bbeadb8
parent 620df0414f
@@ -119,7 +119,7 @@ class TokenVectorEncoder(object):
             assert tokvecs.shape[0] == len(doc)
             doc.tensor = tokvecs
 
-    def update(self, docs, golds, state=None, drop=0., sgd=None):
+    def update(self, docs, golds, state=None, drop=0., sgd=None, losses=None):
         """Update the model.
 
         docs (iterable): A batch of `Doc` objects.
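Both update signatures touched by this commit (here and on NeuralTagger below) gain the same optional losses dict. The intended calling convention is the usual shared-dict pattern: the training loop owns one dict and passes it into every component's update call. A runnable toy sketch with stub components; nothing in it is spaCy API:

# Caller-side sketch: one dict is threaded through every component's
# update() so the training loop can report per-component losses.
# The components and the fake loss value are stubs; only the
# keyword-passing pattern mirrors the diff.
losses = {}

class StubComponent(object):
    def __init__(self, name):
        self.name = name

    def update(self, inputs, golds, drop=0., sgd=None, losses=None):
        if losses is not None:
            losses[self.name] = losses.get(self.name, 0.) + 1.0  # fake loss

tok2vec, tagger = StubComponent('tok2vec'), StubComponent('tagger')
for batch in range(3):                    # stand-in for minibatch iteration
    tok2vec.update(None, None, losses=losses)
    tagger.update(None, None, losses=losses)
print(losses)                             # {'tok2vec': 3.0, 'tagger': 3.0}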
@@ -199,7 +199,7 @@ class NeuralTagger(object):
                 vocab.morphology.assign_tag_id(&doc.c[j], tag_id)
                 idx += 1
 
-    def update(self, docs_tokvecs, golds, drop=0., sgd=None):
+    def update(self, docs_tokvecs, golds, drop=0., sgd=None, losses=None):
         docs, tokvecs = docs_tokvecs
 
         if self.model.nI is None:
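On the component side, the conventional behaviour is to accumulate the batch loss under the component's key whenever a dict is supplied. A sketch of how the tagger's update might use the new argument, assuming the begin_update/get_loss flow visible in the surrounding code; the 'tagger' key is an assumption, and the method would live on NeuralTagger:

def update(self, docs_tokvecs, golds, drop=0., sgd=None, losses=None):
    docs, tokvecs = docs_tokvecs
    # Forward pass with backprop callback, standard thinc idiom.
    tag_scores, bp_tag_scores = self.model.begin_update(tokvecs, drop=drop)
    loss, d_tag_scores = self.get_loss(docs, golds, tag_scores)
    d_tokvecs = bp_tag_scores(d_tag_scores, sgd=sgd)
    if losses is not None:
        losses.setdefault('tagger', 0.)   # key name is an assumption
        losses['tagger'] += loss
    return d_tokvecs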
@@ -248,7 +248,8 @@ class NeuralTagger(object):
                                       vocab.morphology.lemmatizer)
         token_vector_width = pipeline[0].model.nO
         self.model = with_flatten(
-            Softmax(self.vocab.morphology.n_tags, token_vector_width))
+            chain(Maxout(token_vector_width, token_vector_width),
+                  Softmax(self.vocab.morphology.n_tags, token_vector_width)))
 
     def use_params(self, params):
         with self.model.use_params(params):
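This is the hidden layer the commit message refers to: the per-token model grows from a bare Softmax into a Maxout to Softmax chain, still wrapped in with_flatten so it maps a list of per-document arrays to per-token score arrays. A standalone sketch against thinc 6.x, the version spaCy used at the time; the import paths, layer sizes, and dummy data are assumptions:

import numpy
from thinc.api import chain, with_flatten   # thinc 6.x combinators (assumed)
from thinc.neural import Maxout, Softmax    # import paths as spaCy used them

width, n_tags = 128, 50                     # illustrative sizes
model = with_flatten(
    chain(Maxout(width, width),             # the new hidden layer
          Softmax(n_tags, width)))          # output layer, as before

# Forward pass on two "documents" worth of token vectors:
docs = [numpy.zeros((7, width), dtype='f'),
        numpy.zeros((4, width), dtype='f')]
scores = model(docs)
assert [s.shape for s in scores] == [(7, n_tags), (4, n_tags)]

Keeping the Maxout width equal to token_vector_width means the extra layer adds capacity without changing the model's input or output contract.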
@@ -274,7 +275,8 @@ class NeuralLabeller(NeuralTagger):
                         self.labels[dep] = len(self.labels)
         token_vector_width = pipeline[0].model.nO
         self.model = with_flatten(
-            Softmax(len(self.labels), token_vector_width))
+            chain(Maxout(token_vector_width, token_vector_width),
+                  Softmax(len(self.labels), token_vector_width)))
 
     def get_loss(self, docs, golds, scores):
         scores = self.model.ops.flatten(scores)
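Since the with_flatten-wrapped model returns one score array per Doc, get_loss first flattens them back into a single (n_tokens, n_classes) array; ops.flatten concatenates along the token axis. A quick illustration, assuming thinc 6.x's NumpyOps:

import numpy
from thinc.neural.ops import NumpyOps   # assumed import path for thinc 6.x

ops = NumpyOps()
scores = [numpy.ones((7, 50), dtype='f'),   # per-Doc score arrays
          numpy.ones((4, 50), dtype='f')]
flat = ops.flatten(scores)
assert flat.shape == (11, 50)               # one row per token in the batch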