//- 💫 DOCS > USAGE > SPACY 101 > TOKENIZATION

p
    |  During processing, spaCy first #[strong tokenizes] the text, i.e.
    |  segments it into words, punctuation and so on. This is done by applying
    |  rules specific to each language. For example, punctuation at the end of a
    |  sentence should be split off – whereas "U.K." should remain one token.
    |  Each #[code Doc] consists of individual tokens, and we can simply iterate
    |  over them:

+code.
    for token in doc:
        print(token.text)

+table([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).u-text-center
    +row
        for cell in ["Apple", "is", "looking", "at", "buying", "U.K.", "startup", "for", "$", "1", "billion"]
            +cell=cell
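
p
    |  The snippet above assumes a #[code Doc] object has already been created.
    |  As a rough, self-contained sketch – assuming an English model is
    |  installed and can be loaded via the #[code 'en'] shortcut – the same
    |  loop could be run like this:

+code.
    import spacy

    nlp = spacy.load('en')
    doc = nlp(u'Apple is looking at buying U.K. startup for $1 billion')
    for token in doc:
        print(token.text)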

p
    |  First, the raw text is split on whitespace characters, similar to
    |  #[code text.split(' ')]. Then, the tokenizer processes the text from
    |  left to right. On each substring, it performs two checks:

+list("numbers")
    +item
        |  #[strong Does the substring match a tokenizer exception rule?] For
        |  example, "don't" does not contain whitespace, but should be split
        |  into two tokens, "do" and "n't", while "U.K." should always
        |  remain one token.
    +item
        |  #[strong Can a prefix, suffix or infix be split off?] For example
        |  punctuation like commas, periods, hyphens or quotes, as in the
        |  example below.
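
p
    |  As an illustrative sketch of both checks – reusing the #[code nlp]
    |  object from above, with a made-up example sentence – a contraction is
    |  split by an exception rule, an abbreviation is kept whole, and trailing
    |  punctuation is split off:

+code.
    # "don't" is covered by an exception rule and split into "do" and "n't",
    # the exception for "U.K." keeps it as one token, and the comma is
    # split off as a suffix.
    doc = nlp(u"We don't fly to the U.K. tomorrow, sadly.")
    print([token.text for token in doc])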

p
    |  If there's a match, the rule is applied and the tokenizer continues its
    |  loop, starting with the newly split substrings. This way, spaCy can split
    |  #[strong complex, nested tokens] like combinations of abbreviations and
    |  multiple punctuation marks.
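
p
    |  For example – again just a sketch with a made-up string – a contraction
    |  wrapped in punctuation is taken apart piece by piece:

+code.
    # The prefix "(" and the suffixes "!" and ")" are split off first; the
    # remaining substring "don't" then matches an exception rule and is
    # split into "do" and "n't".
    doc = nlp(u"(don't!)")
    print([token.text for token in doc])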

+aside
    |  #[strong Tokenizer exception:] Special-case rule to split a string into
    |  several tokens or prevent a token from being split when punctuation rules
    |  are applied.#[br]
    |  #[strong Prefix:] Character(s) at the beginning, e.g.
    |  #[code $], #[code (], #[code “], #[code ¿].#[br]
    |  #[strong Suffix:] Character(s) at the end, e.g.
    |  #[code km], #[code )], #[code ”], #[code !].#[br]
    |  #[strong Infix:] Character(s) in between, e.g.
    |  #[code -], #[code --], #[code /], #[code …].

+image
    include ../../../assets/img/docs/tokenization.svg
    .u-text-right
        +button("/assets/img/docs/tokenization.svg", false, "secondary").u-text-tag View large graphic

p
    |  While punctuation rules are usually pretty general, tokenizer exceptions
    |  strongly depend on the specifics of the individual language. This is
    |  why each #[+a("/docs/api/language-models") available language] has its
    |  own subclass like #[code English] or #[code German], which loads in lists
    |  of hard-coded data and exception rules.
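
p
    |  These exception rules are simply data. As a rough sketch – the
    |  #[code "gimme"] example below is made up for illustration – a custom
    |  special case can be registered on the tokenizer, as long as its pieces
    |  spell out the original string:

+code.
    from spacy.attrs import ORTH

    # Always split "gimme" into "gim" and "me". The ORTH values of the
    # pieces have to add up to the original string.
    nlp.tokenizer.add_special_case(u'gimme', [{ORTH: u'gim'}, {ORTH: u'me'}])
    print([token.text for token in nlp(u'gimme that')])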