	Fix beam search after refactor
commit 8a82367a9d
parent 5a0f26be0c
@@ -37,6 +37,7 @@ from ..errors import Errors, TempErrors
 from .. import util
 from .stateclass cimport StateClass
 from .transition_system cimport Transition
+from . import _beam_utils
 from . import nonproj
 
 
@@ -196,26 +197,6 @@ class ParserModel(Model):
         Model.__init__(self)
         self._layers = [tok2vec, lower_model, upper_model]
 
-    @property
-    def nO(self):
-        return self._layers[-1].nO
-
-    @property
-    def nI(self):
-        return self._layers[1].nI
-
-    @property
-    def nH(self):
-        return self._layers[1].nO
-
-    @property
-    def nF(self):
-        return self._layers[1].nF
-
-    @property
-    def nP(self):
-        return self._layers[1].nP
-
     def begin_update(self, docs, drop=0.):
         step_model = ParserStepModel(docs, self._layers, drop=drop)
         def finish_parser_update(golds, sgd=None):
@@ -223,6 +204,15 @@ class ParserModel(Model):
             return None
         return step_model, finish_parser_update
 
+    def resize_output(self, new_output):
+        # Weights are stored in (nr_out, nr_in) format, so we're basically
+        # just adding rows here.
+        smaller = self._layers[-1]._layers[-1]
+        larger = Affine(self.moves.n_moves, smaller.nI)
+        copy_array(larger.W[:smaller.nO], smaller.W)
+        copy_array(larger.b[:smaller.nO], smaller.b)
+        self._layers[-1]._layers[-1] = larger
+
     @property
     def tok2vec(self):
         return self._layers[0]
@@ -274,15 +264,15 @@ class ParserStepModel(Model):
             return None
         return scores, backprop_parser_step
 
-    def get_token_ids(self, states):
-        cdef StateClass state
-        cdef int n_tokens = self.state2vec.nF
-        cdef np.ndarray ids = numpy.zeros((len(states), n_tokens),
+    def get_token_ids(self, batch):
+        states = _beam_utils.collect_states(batch)
+        cdef np.ndarray ids = numpy.zeros((len(states), self.state2vec.nF),
                                           dtype='i', order='C')
         c_ids = <int*>ids.data
-        for i, state in enumerate(states):
-            if not state.is_final():
-                state.c.set_context_tokens(c_ids, n_tokens)
+        cdef StateClass state
+        for state in states:
+            if not state.c.is_final():
+                state.c.set_context_tokens(c_ids, ids.shape[1])
             c_ids += ids.shape[1]
         return ids
 
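The new resize_output relies on the output weights being stored one row per class: growing the layer just means appending rows, so scores for the classes that already exist are unchanged. Below is a minimal NumPy sketch of that idea; the function name grow_output_weights and the array shapes are illustrative assumptions, not the spaCy/thinc API.

import numpy

def grow_output_weights(W, b, new_nr_out):
    # W has shape (nr_out, nr_in); b has shape (nr_out,).
    # Copy the existing rows unchanged (same effect as
    # copy_array(larger.W[:smaller.nO], smaller.W) in the diff) and
    # leave the new rows at zero.
    nr_out, nr_in = W.shape
    assert new_nr_out >= nr_out
    W_new = numpy.zeros((new_nr_out, nr_in), dtype=W.dtype)
    b_new = numpy.zeros((new_nr_out,), dtype=b.dtype)
    W_new[:nr_out] = W
    b_new[:nr_out] = b
    return W_new, b_new

# Example: a 3-class output grown to 5 classes still scores the
# original 3 classes identically.
W = numpy.random.rand(3, 8).astype('f')
b = numpy.random.rand(3).astype('f')
x = numpy.random.rand(8).astype('f')
W5, b5 = grow_output_weights(W, b, 5)
assert numpy.allclose((W5 @ x + b5)[:3], W @ x + b)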
@@ -43,6 +43,8 @@ from .. import util
 from .stateclass cimport StateClass
 from ._state cimport StateC
 from .transition_system cimport Transition
+from . cimport _beam_utils
+from . import _beam_utils
 from . import nonproj
 
 
@@ -172,7 +174,7 @@ cdef class Parser:
         with self.model.use_params(params):
             yield
 
-    def __call__(self, Doc doc, beam_width=None, beam_density=None):
+    def __call__(self, Doc doc, beam_width=None):
         """Apply the parser or entity recognizer, setting the annotations onto
         the `Doc` object.
 
@@ -180,14 +182,11 @@ cdef class Parser:
         """
         if beam_width is None:
             beam_width = self.cfg.get('beam_width', 1)
-        if beam_density is None:
-            beam_density = self.cfg.get('beam_density', 0.0)
-        states = self.predict([doc])
+        states = self.predict([doc], beam_width=beam_width)
         self.set_annotations([doc], states, tensors=None)
         return doc
 
-    def pipe(self, docs, int batch_size=256, int n_threads=2,
-             beam_width=None, beam_density=None):
+    def pipe(self, docs, int batch_size=256, int n_threads=2, beam_width=None):
         """Process a stream of documents.
 
         stream: The sequence of documents to process.
@@ -198,38 +197,40 @@ cdef class Parser:
         """
         if beam_width is None:
             beam_width = self.cfg.get('beam_width', 1)
-        if beam_density is None:
-            beam_density = self.cfg.get('beam_density', 0.0)
         cdef Doc doc
         for batch in cytoolz.partition_all(batch_size, docs):
             batch_in_order = list(batch)
             by_length = sorted(batch_in_order, key=lambda doc: len(doc))
             for subbatch in cytoolz.partition_all(8, by_length):
                 subbatch = list(subbatch)
-                parse_states = self.predict(subbatch,
-                                            beam_width=beam_width,
-                                            beam_density=beam_density)
+                parse_states = self.predict(subbatch, beam_width=beam_width)
                 self.set_annotations(subbatch, parse_states, tensors=None)
             for doc in batch_in_order:
                 yield doc
 
-    def predict(self, docs, beam_width=1, beam_density=0.):
+    def predict(self, docs, beam_width=1):
         if isinstance(docs, Doc):
             docs = [docs]
 
         cdef vector[StateC*] states
         cdef StateClass state
-        state_objs = self.moves.init_batch(docs)
-        for state in state_objs:
-            states.push_back(state.c)
-        # Prepare the stepwise model, and get the callback for finishing the batch
         model = self.model(docs)
-        weights = get_c_weights(model)
-        sizes = get_c_sizes(model, states.size())
-        with nogil:
-            self._parseC(&states[0],
-                weights, sizes)
-        return state_objs
+        if beam_width == 1:
+            batch = self.moves.init_batch(docs)
+            weights = get_c_weights(model)
+            sizes = get_c_sizes(model, states.size())
+            for state in batch:
+                states.push_back(state.c)
+            with nogil:
+                self._parseC(&states[0],
+                    weights, sizes)
+        else:
+            batch = self.moves.init_beams(docs, beam_width)
+            unfinished = list(batch)
+            while unfinished:
+                scores = model.predict(unfinished)
+                unfinished = self.transition_beams(batch, scores)
+        return batch
 
     cdef void _parseC(self, StateC** states,
             WeightsC weights, SizesC sizes) nogil:
@@ -250,10 +251,21 @@ cdef class Parser:
                 states[i] = unfinished[i]
             sizes.states = unfinished.size()
             unfinished.clear()
 
-    def set_annotations(self, docs, states, tensors=None):
+    def set_annotations(self, docs, states_or_beams, tensors=None):
         cdef StateClass state
+        cdef Beam beam
         cdef Doc doc
+        states = []
+        beams = []
+        for state_or_beam in states_or_beams:
+            if isinstance(state_or_beam, StateClass):
+                states.append(state_or_beam)
+            else:
+                beam = state_or_beam
+                state = StateClass.borrow(<StateC*>beam.at(0))
+                states.append(state)
+                beams.append(beam)
         for i, (state, doc) in enumerate(zip(states, docs)):
             self.moves.finalize_state(state.c)
             for j in range(doc.length):
@@ -262,14 +274,17 @@ cdef class Parser:
             for hook in self.postprocesses:
                 for doc in docs:
                     hook(doc)
+        for beam in beams:
+            _beam_utils.cleanup_beam(beam)
 
-    def transition_batch(self, states, float[:, ::1] scores):
+    def transition_states(self, states, float[:, ::1] scores):
         cdef StateClass state
         cdef float* c_scores = &scores[0, 0]
         cdef vector[StateC*] c_states
         for state in states:
             c_states.push_back(state.c)
         self.c_transition_batch(&c_states[0], c_scores, scores.shape[1], scores.shape[0])
+        return [state for state in states if not state.c.is_final()]
 
     cdef void c_transition_batch(self, StateC** states, const float* scores,
             int nr_class, int batch_size) nogil:
@@ -282,6 +297,20 @@ cdef class Parser:
             action = self.moves.c[guess]
             action.do(states[i], action.label)
             states[i].push_hist(guess)
+
+    def transition_beams(self, beams, float[:, ::1] scores):
+        cdef Beam beam
+        cdef float* c_scores = &scores[0, 0]
+        for beam in beams:
+            for i in range(beam.size):
+                state = <StateC*>beam.at(i)
+                if not state.is_final():
+                    self.moves.set_valid(beam.is_valid[i], state)
+                    memcpy(beam.scores[i], c_scores, scores.shape[1] * sizeof(float))
+                    c_scores += scores.shape[1]
+            beam.advance(_beam_utils.transition_state, NULL, <void*>self.moves.c)
+            beam.check_done(_beam_utils.check_final_state, NULL)
+        return [b for b in beams if not b.is_done]
 
     def update(self, docs, golds, drop=0., sgd=None, losses=None):
         if isinstance(docs, Doc) and isinstance(golds, GoldParse):
@@ -290,6 +319,13 @@ cdef class Parser:
         if len(docs) != len(golds):
             raise ValueError(Errors.E077.format(value='update', n_docs=len(docs),
                                                 n_golds=len(golds)))
+        # The probability we use beam update, instead of falling back to
+        # a greedy update
+        beam_update_prob = 1-self.cfg.get('beam_update_prob', 0.5)
+        if self.cfg.get('beam_width', 1) >= 2 and numpy.random.random() >= beam_update_prob:
+            return self.update_beam(docs, golds,
+                    self.cfg['beam_width'], self.cfg['beam_density'],
+                    drop=drop, sgd=sgd, losses=losses)
         # Chop sequences into lengths of this many transitions, to make the
         # batch uniform length.
         cut_gold = numpy.random.choice(range(20, 100))
@@ -307,11 +343,36 @@ cdef class Parser:
             d_scores = self.get_batch_loss(states, golds, scores, losses)
             backprop(d_scores, sgd=sgd)
             # Follow the predicted action
-            self.transition_batch(states, scores)
+            self.transition_states(states, scores)
             states_golds = [eg for eg in states_golds if not eg[0].is_final()]
         # Do the backprop
         finish_update(golds, sgd=sgd)
         return losses
+
+    def update_beam(self, docs, golds, width, drop=0., sgd=None, losses=None):
+        lengths = [len(d) for d in docs]
+        states = self.moves.init_batch(docs)
+        for gold in golds:
+            self.moves.preprocess_gold(gold)
+        model, finish_update = self.model.begin_update(docs, drop=drop)
+        states_d_scores, backprops, beams = _beam_utils.update_beam(
+            self.moves, self.nr_feature, 500, states, golds, model.state2vec,
+            model.vec2scores, width, drop=drop, losses=losses)
+        for i, d_scores in enumerate(states_d_scores):
+            ids, bp_vectors, bp_scores = backprops[i]
+            d_vector = bp_scores(d_scores, sgd=sgd)
+            if isinstance(model.ops, CupyOps) \
+            and not isinstance(ids, model.state2vec.ops.xp.ndarray):
+                model.backprops.append((
+                    util.get_async(model.cuda_stream, ids),
+                    util.get_async(model.cuda_stream, d_vector),
+                    bp_vectors))
+            else:
+                model.backprops.append((ids, d_vector, bp_vectors))
+        model.make_updates(sgd)
+        cdef Beam beam
+        for beam in beams:
+            _beam_utils.cleanup_beam(beam)
 
     def _init_gold_batch(self, whole_docs, whole_golds, min_length=5, max_length=500):
         """Make a square batch, of length equal to the shortest doc. A long
@@ -5,9 +5,12 @@ from __future__ import unicode_literals
 from cpython.ref cimport Py_INCREF
 from cymem.cymem cimport Pool
 from thinc.typedefs cimport weight_t
+from thinc.extra.search cimport Beam
 from collections import OrderedDict, Counter
 import ujson
 
+from . cimport _beam_utils
+from ..tokens.doc cimport Doc
 from ..structs cimport TokenC
 from .stateclass cimport StateClass
 from ..typedefs cimport attr_t
@@ -57,6 +60,21 @@ cdef class TransitionSystem:
             offset += len(doc)
         return states
 
+    def init_beams(self, docs, beam_width):
+        cdef Doc doc
+        beams = []
+        cdef int offset = 0
+        for doc in docs:
+            beam = Beam(self.n_moves, beam_width)
+            beam.initialize(self.init_beam_state, doc.length, doc.c)
+            for i in range(beam.width):
+                state = <StateC*>beam.at(i)
+                state.offset = offset
+            offset += len(doc)
+            beam.check_done(_beam_utils.check_final_state, NULL)
+            beams.append(beam)
+        return beams
+
     def get_oracle_sequence(self, doc, GoldParse gold):
         cdef Pool mem = Pool()
         costs = <float*>mem.alloc(self.n_moves, sizeof(float))
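Taken together, beam decoding is driven entirely by beam_width: __call__ and pipe read it from cfg, forward it to predict, and predict picks either the greedy C loop or the new beam loop. A hedged usage sketch follows; it assumes a trained pipeline such as en_core_web_sm is installed and that beam decoding is functional at this intermediate commit, neither of which the diff itself guarantees.

import spacy

nlp = spacy.load('en_core_web_sm')      # any pipeline that includes a parser
parser = nlp.get_pipe('parser')

# Width 1 keeps the fast greedy path; width >= 2 takes the beam path
# added in this commit.
parser.cfg['beam_width'] = 8

doc = nlp("Beam search keeps several candidate parses before committing.")
print([(t.text, t.dep_, t.head.text) for t in doc])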