Mirror of https://github.com/explosion/spaCy.git
Synced 2024-11-10 19:57:17 +03:00
Clean up commented out code from beam parser.
parent: 476977ef62
commit: a1281835a8
```diff
@@ -235,143 +235,3 @@ def _cleanup(Beam beam):
 cdef hash_t _hash_state(void* _state, void* _) except 0:
     state = <StateClass>_state
     return state.c.hash()
-#
-# def _maxent_update(self, Doc doc, pred_scores, pred_hist, gold_scores, gold_hist):
-#     Z = 0
-#     for i, (score, history) in enumerate(zip(pred_scores, pred_hist)):
-#         prob = exp(score)
-#         if prob < 1e-6:
-#             continue
-#         stcls = StateClass.init(doc.c, doc.length)
-#         self.moves.initialize_state(stcls.c)
-#         for clas in history:
-#             delta_loss[clas] = prob * 1/Z
-#             gradient = [(input_ * prob) / Z for input_ in hidden]
-#             fill_context(context, stcls.c)
-#             nr_feat = model.extracter.set_features(features, context)
-#             for feat in features[:nr_feat]:
-#                 key = (clas, feat.key)
-#                 counts[key] = counts.get(key, 0.0) + feat.value
-#             self.moves.c[clas].do(stcls.c, self.moves.c[clas].label)
-#         for key in counts:
-#             counts[key] *= prob
-#         Z += prob
-#     gZ, g_counts = self._maxent_counts(doc, gold_scores, gold_hist)
-#     for (clas, feat), value in g_counts.items():
-#         self.model.update_weight(feat, clas, value / gZ)
-#
-#     Z, counts = self._maxent_counts(doc, pred_scores, pred_hist)
-#     for (clas, feat), value in counts.items():
-#         self.model.update_weight(feat, clas, -value / (Z + gZ))
-#
-#
-
-
-# def _maxent_update(self, doc, pred_scores, pred_hist, gold_scores, gold_hist,
-#                    step_size=0.001):
-#     cdef weight_t Z, gZ, value
-#     cdef feat_t feat
-#     cdef class_t clas
-#     gZ, g_counts = self._maxent_counts(doc, gold_scores, gold_hist)
-#     Z, counts = self._maxent_counts(doc, pred_scores, pred_hist)
-#     update = {}
-#     if gZ > 0:
-#         for (clas, feat), value in g_counts.items():
-#             update[(clas, feat)] = value / gZ
-#     Z += gZ
-#     for (clas, feat), value in counts.items():
-#         update.setdefault((clas, feat), 0.0)
-#         update[(clas, feat)] -= value / Z
-#     for (clas, feat), value in update.items():
-#         if value < 1000:
-#             self.model.update_weight(feat, clas, step_size * value)
-#
-# def _maxent_counts(self, Doc doc, scores, history):
-#     cdef Pool mem = Pool()
-#     cdef atom_t[CONTEXT_SIZE] context
-#     features = <FeatureC*>mem.alloc(self.model.nr_feat, sizeof(FeatureC))
-#
-#     cdef StateClass stcls
-#
-#     cdef class_t clas
-#     cdef ParserPerceptron model = self.model
-#
-#     cdef weight_t Z = 0.0
-#     cdef weight_t score
-#     counts = {}
-#     for i, (score, history) in enumerate(zip(scores, history)):
-#         prob = exp(score)
-#         if prob < 1e-6:
-#             continue
-#         stcls = StateClass.init(doc.c, doc.length)
-#         self.moves.initialize_state(stcls.c)
-#         for clas in history:
-#             fill_context(context, stcls.c)
-#             nr_feat = model.extracter.set_features(features, context)
-#             for feat in features[:nr_feat]:
-#                 key = (clas, feat.key)
-#                 counts[key] = counts.get(key, 0.0) + feat.value
-#             self.moves.c[clas].do(stcls.c, self.moves.c[clas].label)
-#         for key in counts:
-#             counts[key] *= prob
-#         Z += prob
-#     return Z, counts
-#
-#
```
```diff
-# def _advance_beam(self, Beam beam, GoldParse gold, bint follow_gold, words):
-#     cdef atom_t[CONTEXT_SIZE] context
-#     cdef int i, j, cost
-#     cdef bint is_valid
-#     cdef const Transition* move
-#
-#     for i in range(beam.size):
-#         state = <StateClass>beam.at(i)
-#         if not state.is_final():
-#             # What the model is predicting here:
-#             # We know, separately, the probability of the current state
-#             # We can think of a state as a sequence of (action, score) pairs
-#             # We obtain a state by doing reduce(state, [act for act, score in scores])
-#             # We obtain its probability by doing sum(score for act, score in scores)
-#             #
-#             # So after running the forward pass, we have this output layer...
-#             # The output layer has N nodes in its output, for our N moves
-#             # The model asserts that:
-#             #
-#             # P(actions[i](state)) == score + output[i]
-#             #
-#             # i.e. each node holds a score that means "This is the difference
-#             # in goodness that will occur if you apply this action to this state.
-#             # If you apply this action, this is how I would judge the state."
-#             self.model.set_scoresC(beam.scores[i], eg)
-#             self.moves.set_validC(beam.is_valid[i], state)
-#     if gold is not None:
-#         for i in range(beam.size):
-#             state = <StateClass>beam.at(i)
-#             if not stcls.is_final():
-#                 self.moves.set_costsC(beam.costs[i], beam.is_valid[i],
-#                                       state, gold)
-#                 if follow_gold:
-#                     for j in range(self.moves.n_moves):
-#                         beam.is_valid[i][j] *= beam.costs[i][j] == 0
-#     beam.advance(_transition_state, _hash_state, <void*>self.moves.c)
-#     beam.check_done(_check_final_state, NULL)
-#
```
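The long comment inside `_advance_beam` pins down the scoring model: each of the N output nodes holds the change in score from applying that move, so a candidate's score is its parent state's cumulative score plus the per-move delta (`P(actions[i](state)) == score + output[i]`). A toy sketch of one beam step under that invariant, assuming hypothetical `score_moves`, `valid_moves`, and `apply_move` helpers:

```python
import heapq

def advance_beam(beam, score_moves, valid_moves, apply_move, width):
    """One beam step: extend each state by each valid move, keep the best.

    `beam` is a list of (cum_score, state) pairs. `score_moves(state)`
    (hypothetical) returns one delta per move; a candidate's score is the
    parent's cumulative score plus that delta, per the removed comment's
    invariant P(actions[i](state)) == score + output[i].
    """
    candidates = []
    for cum_score, state in beam:
        deltas = score_moves(state)
        for move in valid_moves(state):
            candidates.append((cum_score + deltas[move], apply_move(state, move)))
    # Keep the `width` highest-scoring candidates as the next beam.
    return heapq.nlargest(width, candidates, key=lambda c: c[0])
```

When `follow_gold` was set, the removed code additionally masked validity with `beam.is_valid[i][j] *= beam.costs[i][j] == 0`, so the gold beam could only expand zero-cost moves. The last removed block, `_update`, was an earlier max-margin update over paired action histories: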
```diff
-# def _update(self, Doc doc, g_hist, p_hist, loss):
-#     pred = StateClass(doc)
-#     gold = StateClass(doc)
-#     for g_move, p_move in zip(g_hist, p_hist):
-#         self.model(pred_eg)
-#         self.model(gold_eg)
-#
-#         margin = pred_eg.scores[p_move] - gold_eg.scores[g_move] + 1
-#         if margin > 0:
-#             gold_eg.losses[g_move] = margin
-#             self.model.update(gold_eg)
-#             pred_eg.losses[p_move] = -margin
-#             self.model.update(pred_eg.guess)
-#         self.c.moves[g_move].do(gold)
-#         self.c.moves[p_move].do(pred)
-#
-#
-#
```
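`_update` walked the gold and predicted histories in lockstep and applied a hinge update whenever the gold move failed to outscore the predicted move by a margin of 1. The draft references `pred_eg`/`gold_eg` examples that are never constructed, which is presumably part of why it was cut. A self-contained sketch of the intent, with hypothetical `score`, `apply_move`, and `update` callbacks in place of the model and transition system:

```python
def margin_update(g_hist, p_hist, gold_state, pred_state,
                  score, apply_move, update, margin=1.0):
    """Hinge update along paired gold/predicted action histories.

    `score(state, move)`, `apply_move(state, move)` and `update(move, loss)`
    are hypothetical stand-ins for the parser's model and transition system.
    """
    for g_move, p_move in zip(g_hist, p_hist):
        # Violation if the gold move does not beat the prediction by `margin`.
        loss = score(pred_state, p_move) - score(gold_state, g_move) + margin
        if loss > 0:
            update(g_move, +loss)   # push the gold move's score up
            update(p_move, -loss)   # push the predicted move's score down
        # Advance both states along their respective histories.
        gold_state = apply_move(gold_state, g_move)
        pred_state = apply_move(pred_state, p_move)
```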