Update training defaults

Matthew Honnibal 2017-09-27 11:48:07 -05:00
parent 13d7a97f3a
commit 1a37a2c0a0
2 changed files with 3 additions and 3 deletions


@@ -505,7 +505,7 @@ def getitem(i):
     return layerize(getitem_fwd)
 def build_tagger_model(nr_class, **cfg):
-    embed_size = util.env_opt('embed_size', 4000)
+    embed_size = util.env_opt('embed_size', 1000)
     if 'token_vector_width' in cfg:
         token_vector_width = cfg['token_vector_width']
     else:


@@ -240,12 +240,12 @@ cdef class Parser:
     Base class of the DependencyParser and EntityRecognizer.
     """
     @classmethod
-    def Model(cls, nr_class, token_vector_width=128, hidden_width=300, depth=1, **cfg):
+    def Model(cls, nr_class, token_vector_width=128, hidden_width=200, depth=1, **cfg):
         depth = util.env_opt('parser_hidden_depth', depth)
         token_vector_width = util.env_opt('token_vector_width', token_vector_width)
         hidden_width = util.env_opt('hidden_width', hidden_width)
         parser_maxout_pieces = util.env_opt('parser_maxout_pieces', 2)
-        embed_size = util.env_opt('embed_size', 4000)
+        embed_size = util.env_opt('embed_size', 1000)
         tok2vec = Tok2Vec(token_vector_width, embed_size,
                           pretrained_dims=cfg.get('pretrained_dims', 0))
         tok2vec = chain(tok2vec, flatten)
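
Both hunks change defaults that are read through util.env_opt, so the previous values can still be selected at runtime without editing the code. Below is a minimal sketch of an env_opt-style lookup, given as an illustrative assumption: it only assumes that options can be overridden from the process environment and otherwise fall back to the compiled-in default; it is not necessarily spaCy's exact implementation.

import os

def env_opt(name, default=None):
    # Return the environment override for `name` if one is set, otherwise the
    # built-in default; cast to the default's type so numeric options stay numeric.
    raw = os.environ.get(name)
    if raw is None:
        return default
    return type(default)(raw) if default is not None else raw

# With the new defaults, embed_size resolves to 1000 unless overridden:
embed_size = env_opt('embed_size', 1000)

# Hypothetical override restoring the previous default, e.g. by exporting
# embed_size=4000 in the environment before training:
#   os.environ['embed_size'] = '4000'
#   env_opt('embed_size', 1000)  # -> 4000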