//- 💫 DOCS > USAGE > DEEP LEARNING

include ../../_includes/_mixins

p
    |  In this example, we'll be using #[+a("https://keras.io/") Keras], as
    |  it's the most popular deep learning library for Python. Using Keras,
    |  we will write a custom sentiment analysis model that predicts whether a
    |  document is positive or negative. Then, we will use it to find which
    |  entities are commonly associated with positive or negative documents.
    |  Here's a quick example of how that can look at runtime.

+aside("What's Keras?")
    |  #[+a("https://keras.io/") Keras] gives you a high-level, declarative
    |  interface to define neural networks. Models are trained using Google's
    |  #[+a("https://www.tensorflow.org") TensorFlow] by default.
    |  #[+a("http://deeplearning.net/software/theano/") Theano] is also
    |  supported.

+under-construction
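
p
    |  The snippet below is only a rough sketch of that runtime usage: it
    |  assumes the custom model writes its prediction to #[code doc.sentiment],
    |  and #[code count_entity_sentiment] is an illustrative helper rather than
    |  part of spaCy's API.

+code("Runtime usage (sketch)").
    from collections import defaultdict

    import spacy

    def count_entity_sentiment(nlp, texts):
        '''Compute the net document sentiment for each entity in the texts.'''
        entity_sentiments = defaultdict(float)
        for doc in nlp.pipe(texts, batch_size=1000):
            for ent in doc.ents:
                entity_sentiments[ent.text] += doc.sentiment
        return entity_sentiments

    texts = [u'The new iPhone is fantastic.',
             u'My old Nokia finally gave up.']
    nlp = spacy.load('en')  # assumes the sentiment model is plugged into the pipeline
    print(count_entity_sentiment(nlp, texts))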

p
    |  For most applications, it's recommended to use pre-trained word
    |  embeddings without "fine-tuning". This means that you'll use the same
    |  embeddings across different models, and avoid learning adjustments to
    |  them on your training data. The embeddings table is large, and the
    |  values provided by the pre-trained vectors are already pretty good.
    |  Fine-tuning the embeddings table is therefore a waste of your
    |  "parameter budget". It's usually better to make your network larger
    |  some other way, e.g. by adding another LSTM layer, using an attention
    |  mechanism, using character features, etc.
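
p
    |  As a rough illustration (not an official spaCy recipe), "no fine-tuning"
    |  in Keras just means initialising the embedding layer with the
    |  pre-trained vectors and marking it as not trainable, so none of your
    |  parameter budget is spent adjusting it:

+code("Frozen pre-trained embeddings (sketch)").
    import numpy
    from keras.layers import Embedding

    # Stand-in for a real pre-trained table, e.g. copied out of nlp.vocab.
    vectors = numpy.zeros((10000, 300), dtype='float32')

    embed = Embedding(vectors.shape[0], vectors.shape[1],
                      weights=[vectors],
                      trainable=False)  # don't fine-tune the embeddings table
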
+h(2, "attribute-hooks") Attribute hooks

+under-construction

p
    |  Earlier, we saw how to store data in the new generic #[code user_data]
    |  dict. This generalises well, but it's not terribly satisfying. Ideally,
    |  we want to let the custom data drive more "native" behaviours. For
    |  instance, consider the #[code .similarity()] methods provided by spaCy's
    |  #[+api("doc") #[code Doc]], #[+api("token") #[code Token]] and
    |  #[+api("span") #[code Span]] objects:
+code("Polymorphic similarity example").
 | 
						|
    span.similarity(doc)
 | 
						|
    token.similarity(span)
 | 
						|
    doc1.similarity(doc2)
 | 
						|
 | 
						|

p
    |  By default, this just averages the vectors for each document, and
    |  computes their cosine. Obviously, spaCy should make it easy for you to
    |  install your own similarity model. This introduces a tricky design
    |  challenge. The current solution is to add three more dicts to the
    |  #[code Doc] object:

+aside("Implementation note")
    |  The hooks live on the #[code Doc] object because the #[code Span] and
    |  #[code Token] objects are created lazily, and don't own any data. They
    |  just proxy to their parent #[code Doc]. This turns out to be convenient
    |  here — we only have to worry about installing hooks in one place.

+table(["Name", "Description"])
    +row
        +cell #[code user_hooks]
        +cell Customise behaviour of #[code doc.similarity], #[code doc.vector], #[code doc.has_vector], #[code doc.vector_norm] or #[code doc.sents]

    +row
        +cell #[code user_token_hooks]
        +cell Customise behaviour of #[code token.similarity], #[code token.vector], #[code token.has_vector], #[code token.vector_norm] or #[code token.conjuncts]

    +row
        +cell #[code user_span_hooks]
        +cell Customise behaviour of #[code span.similarity], #[code span.vector], #[code span.has_vector], #[code span.vector_norm] or #[code span.root]
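
p
    |  For instance, here's a minimal sketch (not spaCy's built-in behaviour)
    |  of overriding #[code doc.vector] via #[code user_hooks], swapping the
    |  default averaged vector for an elementwise max over the token vectors:

+code("Custom doc.vector hook (sketch)").
    import numpy
    import spacy

    def max_pooled_vector(doc):
        # Illustrative pooling strategy: elementwise max over token vectors.
        return numpy.max([token.vector for token in doc], axis=0)

    nlp = spacy.load('en')
    doc = nlp(u'A sentence to re-vectorise.')
    doc.user_hooks['vector'] = max_pooled_vector
    print(doc.vector)  # now computed by max_pooled_vector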

p
    |  To sum up, here's an example of hooking in custom #[code .similarity()]
    |  methods:

+code("Add custom similarity hooks").
    class SimilarityModel(object):
        def __init__(self, model):
            self._model = model

        def __call__(self, doc):
            doc.user_hooks['similarity'] = self.similarity
            doc.user_span_hooks['similarity'] = self.similarity
            doc.user_token_hooks['similarity'] = self.similarity

        def similarity(self, obj1, obj2):
            y = self._model([obj1.vector, obj2.vector])
            return float(y[0])
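
p
    |  As a rough usage sketch: calling the component on a #[code Doc] installs
    |  the hooks, and subsequent #[code .similarity()] calls are dispatched to
    |  it. The #[code keras_model] stand-in below is a placeholder for your
    |  real trained network.

+code("Using the custom similarity hooks (sketch)").
    import spacy

    def keras_model(vectors):
        # Placeholder for the trained network: it just needs to map a pair
        # of vectors to a score. Swap in your real model here.
        return [0.5]

    nlp = spacy.load('en')
    similarity = SimilarityModel(keras_model)

    doc1 = nlp(u'The food was delicious.')
    doc2 = nlp(u'The service was excellent.')
    similarity(doc1)
    similarity(doc2)
    print(doc1.similarity(doc2))  # routed through SimilarityModel.similarity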