Mirror of https://github.com/explosion/spaCy.git (synced 2025-11-01 00:17:44 +03:00)
	Merge pull request #1294 from Vimos/master
Fix issue #1292 and add a test case for the AssertionError
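In short: with a custom infix pattern that can match at the very end of a token string (for example the trailing '-' in "8-"), the remaining span `string[start:]` is empty, and pushing that empty span into the token vector tripped an internal assertion. The commit adds a regression test for this case and guards the final push_back with an `if span:` check; both diffs follow below.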
This commit is contained in:
commit 34c585396a
spacy/tests/tokenizer/test_customized_tokenizer.py (new file, 40 lines)
@@ -0,0 +1,40 @@
# coding: utf-8
from __future__ import unicode_literals

from ...en import English
from ...tokenizer import Tokenizer
from ... import util

import pytest


@pytest.fixture
def tokenizer(en_vocab):
    # Reuse the default English prefix/suffix rules, but install custom infixes.
    prefix_re = util.compile_prefix_regex(English.Defaults.prefixes)
    suffix_re = util.compile_suffix_regex(English.Defaults.suffixes)
    custom_infixes = [r'\.\.\.+',
                      r'(?<=[0-9])-(?=[0-9])',
                      # r'(?<=[0-9]+),(?=[0-9]+)',
                      r'[0-9]+(,[0-9]+)+',
                      r'[\[\]!&:,()\*—–\/-]']

    infix_re = util.compile_infix_regex(custom_infixes)
    return Tokenizer(en_vocab,
                     English.Defaults.tokenizer_exceptions,
                     prefix_re.search,
                     suffix_re.search,
                     infix_re.finditer,
                     token_match=None)


def test_customized_tokenizer_handles_infixes(tokenizer):
    sentence = "The 8 and 10-county definitions are not used for the greater Southern California Megaregion."
    context = [word.text for word in tokenizer(sentence)]
    assert context == ['The', '8', 'and', '10', '-', 'county', 'definitions',
                       'are', 'not', 'used', 'for', 'the', 'greater',
                       'Southern', 'California', 'Megaregion', '.']

    # The trailing '-' in "8-" used to leave an empty span and trigger the AssertionError.
    sentence = "The 8- and 10-county definitions are not used for the greater Southern California Megaregion."
    context = [word.text for word in tokenizer(sentence)]
    assert context == ['The', '8', '-', 'and', '10', '-', 'county', 'definitions',
                       'are', 'not', 'used', 'for', 'the', 'greater',
                       'Southern', 'California', 'Megaregion', '.']
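To run just this test (illustrative command; path relative to the repo root):

    pytest spacy/tests/tokenizer/test_customized_tokenizer.py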
spacy/tokenizer.pyx

@@ -312,7 +312,8 @@ cdef class Tokenizer:
 
                         start = infix_end
                     span = string[start:]
-                    tokens.push_back(self.vocab.get(tokens.mem, span), False)
+                    if span:
+                        tokens.push_back(self.vocab.get(tokens.mem, span), False)
         cdef vector[const LexemeC*].reverse_iterator it = suffixes.rbegin()
         while it != suffixes.rend():
             lexeme = deref(it)
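Why the guard matters, as a plain-Python sketch (this mirrors the splitting logic only, not the actual Cython code path; `split_infixes` and the '-' pattern are illustrative stand-ins): when the last infix match ends exactly at the end of the string, `string[start:]` is the empty string, and pushing that empty span into the token vector is what tripped the assertion in #1292.

    import re

    # Stand-in infix pattern: split on '-' (hypothetical, for illustration only).
    infix_re = re.compile(r'-')

    def split_infixes(string):
        tokens = []
        start = 0
        for match in infix_re.finditer(string):
            if start < match.start():
                tokens.append(string[start:match.start()])    # text before the infix
            tokens.append(string[match.start():match.end()])  # the infix itself
            start = match.end()
        span = string[start:]
        if span:  # the guard added by this commit: never emit an empty trailing span
            tokens.append(span)
        return tokens

    assert split_infixes("8-") == ['8', '-']   # trailing '-' leaves span == ''
    assert split_infixes("10-county") == ['10', '-', 'county']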