Mirror of https://github.com/explosion/spaCy.git
Latest commit:

* remove _convert_examples
* fix test_gold, raise TypeError if tuples are used instead of Examples
* throw proper errors when the wrong type of object is passed
* fix deprecated format in tests
* fix deprecated format in parser tests
* fix tests for NEL, morph, senter, tagger, textcat
* update regression tests with the new Example format
* use make_doc
* more fixes to nlp.update calls
* a few more small fixes for rehearse and evaluate
* only import ml_datasets if really necessary
196 lines · 5.8 KiB · Python
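The commit notes above describe spaCy's switch from (text, annotations) tuples to Example objects as training inputs, which is what the script below was updated for. A minimal sketch of that change, reusing the first training sentence from the script; the v2-style call is shown only as a comment:

import spacy
from spacy.gold import Example  # renamed to spacy.training.Example in spaCy v3.0 final

nlp = spacy.blank("en")
text = "find a cafe with great wifi"
annotations = {
    "heads": [0, 2, 0, 5, 5, 2],
    "deps": ["ROOT", "-", "PLACE", "-", "QUALITY", "ATTRIBUTE"],
}

# Old format (spaCy v2.x): texts and gold dicts were passed straight to
# nlp.update, e.g. nlp.update([text], [annotations], sgd=optimizer).
# Per the commit notes, passing tuples now raises a TypeError.

# New format: build an Example from a Doc (via nlp.make_doc) and the gold
# dict; nlp.update now takes a batch of Example objects.
example = Example.from_dict(nlp.make_doc(text), annotations)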
		
	
	
	
	
	
#!/usr/bin/env python
# coding: utf-8
"""Using the parser to recognise your own semantics

spaCy's parser component can be trained to predict any type of tree
structure over your input text. You can also predict trees over whole documents
or chat logs, with connections between the sentence-roots used to annotate
discourse structure. In this example, we'll build a message parser for a common
"chat intent": finding local businesses. Our message semantics will have the
following types of relations: ROOT, PLACE, QUALITY, ATTRIBUTE, TIME, LOCATION.

"show me the best hotel in berlin"
('show', 'ROOT', 'show')
('best', 'QUALITY', 'hotel') --> hotel with QUALITY best
('hotel', 'PLACE', 'show') --> show PLACE hotel
('berlin', 'LOCATION', 'hotel') --> hotel with LOCATION berlin

Compatible with: spaCy v3.0 development versions (uses the new Example API; not spaCy v2.x)
"""
from __future__ import unicode_literals, print_function

import plac
import random
from pathlib import Path
import spacy
from spacy.gold import Example
from spacy.util import minibatch, compounding


# training data: texts, heads and dependency labels
# for no relation, we simply chose an arbitrary dependency label, e.g. '-'
TRAIN_DATA = [
    (
        "find a cafe with great wifi",
        {
            "heads": [0, 2, 0, 5, 5, 2],  # index of token head
            "deps": ["ROOT", "-", "PLACE", "-", "QUALITY", "ATTRIBUTE"],
        },
    ),
    (
        "find a hotel near the beach",
        {
            "heads": [0, 2, 0, 5, 5, 2],
            "deps": ["ROOT", "-", "PLACE", "QUALITY", "-", "ATTRIBUTE"],
        },
    ),
    (
        "find me the closest gym that's open late",
        {
            "heads": [0, 0, 4, 4, 0, 6, 4, 6, 6],
            "deps": [
                "ROOT",
                "-",
                "-",
                "QUALITY",
                "PLACE",
                "-",
                "-",
                "ATTRIBUTE",
                "TIME",
            ],
        },
    ),
    (
        "show me the cheapest store that sells flowers",
        {
            "heads": [0, 0, 4, 4, 0, 4, 4, 4],  # attach "flowers" to store!
            "deps": ["ROOT", "-", "-", "QUALITY", "PLACE", "-", "-", "PRODUCT"],
        },
    ),
    (
        "find a nice restaurant in london",
        {
            "heads": [0, 3, 3, 0, 3, 3],
            "deps": ["ROOT", "-", "QUALITY", "PLACE", "-", "LOCATION"],
        },
    ),
    (
        "show me the coolest hostel in berlin",
        {
            "heads": [0, 0, 4, 4, 0, 4, 4],
            "deps": ["ROOT", "-", "-", "QUALITY", "PLACE", "-", "LOCATION"],
        },
    ),
    (
        "find a good italian restaurant near work",
        {
            "heads": [0, 4, 4, 4, 0, 4, 5],
            "deps": [
                "ROOT",
                "-",
                "QUALITY",
                "ATTRIBUTE",
                "PLACE",
                "ATTRIBUTE",
                "LOCATION",
            ],
        },
    ),
]


@plac.annotations(
    model=("Model name. Defaults to blank 'en' model.", "option", "m", str),
    output_dir=("Optional output directory", "option", "o", Path),
    n_iter=("Number of training iterations", "option", "n", int),
)
def main(model=None, output_dir=None, n_iter=15):
    """Load the model, set up the pipeline and train the parser."""
    if model is not None:
        nlp = spacy.load(model)  # load existing spaCy model
        print("Loaded model '%s'" % model)
    else:
        nlp = spacy.blank("en")  # create blank Language class
        print("Created blank 'en' model")

    # We'll use the built-in dependency parser class, but we want to create a
    # fresh instance – just in case.
    if "parser" in nlp.pipe_names:
        nlp.remove_pipe("parser")
    parser = nlp.create_pipe("parser")
    nlp.add_pipe(parser, first=True)

    train_examples = []
    for text, annotations in TRAIN_DATA:
        train_examples.append(Example.from_dict(nlp.make_doc(text), annotations))
        for dep in annotations.get("deps", []):
            parser.add_label(dep)

    with nlp.select_pipes(enable="parser"):  # only train parser
        optimizer = nlp.begin_training()
        for itn in range(n_iter):
            random.shuffle(train_examples)
            losses = {}
            # batch up the examples using spaCy's minibatch
            batches = minibatch(train_examples, size=compounding(4.0, 32.0, 1.001))
            for batch in batches:
                nlp.update(batch, sgd=optimizer, losses=losses)
            print("Losses", losses)

    # test the trained model
    test_model(nlp)

    # save model to output directory
    if output_dir is not None:
        output_dir = Path(output_dir)
        if not output_dir.exists():
            output_dir.mkdir()
        nlp.to_disk(output_dir)
        print("Saved model to", output_dir)

        # test the saved model
        print("Loading from", output_dir)
        nlp2 = spacy.load(output_dir)
        test_model(nlp2)


def test_model(nlp):
    texts = [
        "find a hotel with good wifi",
        "find me the cheapest gym near work",
        "show me the best hotel in berlin",
    ]
    docs = nlp.pipe(texts)
    for doc in docs:
        print(doc.text)
        print([(t.text, t.dep_, t.head.text) for t in doc if t.dep_ != "-"])


if __name__ == "__main__":
    plac.call(main)

    # Expected output:
    # find a hotel with good wifi
    # [
    #   ('find', 'ROOT', 'find'),
    #   ('hotel', 'PLACE', 'find'),
    #   ('good', 'QUALITY', 'wifi'),
    #   ('wifi', 'ATTRIBUTE', 'hotel')
    # ]
    # find me the cheapest gym near work
    # [
    #   ('find', 'ROOT', 'find'),
    #   ('cheapest', 'QUALITY', 'gym'),
    #   ('gym', 'PLACE', 'find'),
    #   ('near', 'ATTRIBUTE', 'gym'),
    #   ('work', 'LOCATION', 'near')
    # ]
    # show me the best hotel in berlin
    # [
    #   ('show', 'ROOT', 'show'),
    #   ('best', 'QUALITY', 'hotel'),
    #   ('hotel', 'PLACE', 'show'),
    #   ('berlin', 'LOCATION', 'hotel')
    # ]
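For reference, plac maps the annotated parameters of main() onto the -m, -o and -n command-line options, so the script can also be driven directly from Python. A minimal sketch, assuming the file is saved as train_intent_parser.py and using a made-up output path:

from pathlib import Path

from train_intent_parser import main  # assumed module name for the file above

# Roughly equivalent to: python train_intent_parser.py -o /tmp/intent_parser -n 20
main(model=None, output_dir=Path("/tmp/intent_parser"), n_iter=20)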