Fix code, links and formatting
This commit is contained in:
parent 11f2e80c6a
commit 8a148b6563
@@ -82,7 +82,8 @@ p
     | compute. As of spaCy v2.0, #[code Language] classes are not imported on
     | initialisation and are only loaded when you import them directly, or load
     | a model that requires a language to be loaded. To lazy-load languages in
-    | your application, you can use the #[code util.get_lang_class()] helper
+    | your application, you can use the
+    | #[+api("util#get_lang_class") #[code util.get_lang_class()]] helper
     | function with the two-letter language code as its argument.

 +h(2, "language-data") Adding language data
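For reference (not part of the diff): a minimal sketch of the lazy-loading pattern the new link describes, assuming spaCy v2.x with its English language data installed.

    import spacy.util

    # Look up the Language subclass by its two-letter code; no statistical
    # model is loaded at this point.
    EnglishClass = spacy.util.get_lang_class('en')
    nlp = EnglishClass()
    doc = nlp.make_doc(u'This is a sentence.')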
@@ -284,14 +285,14 @@ p

 p
     | When adding the tokenizer exceptions to the #[code Defaults], you can use
-    | the #[code update_exc()] helper function to merge them with the global
-    | base exceptions (including one-letter abbreviations and emoticons).
-    | The function performs a basic check to make sure exceptions are
-    | provided in the correct format. It can take any number of exceptions
-    | dicts as its arguments, and will update and overwrite the exception in
-    | this order. For example, if your language's tokenizer exceptions include
-    | a custom tokenization pattern for "a.", it will overwrite the base
-    | exceptions with the language's custom one.
+    | the #[+api("util#update_exc") #[code update_exc()]] helper function to merge
+    | them with the global base exceptions (including one-letter abbreviations
+    | and emoticons). The function performs a basic check to make sure
+    | exceptions are provided in the correct format. It can take any number of
+    | exceptions dicts as its arguments, and will update and overwrite the
+    | exception in this order. For example, if your language's tokenizer
+    | exceptions include a custom tokenization pattern for "a.", it will
+    | overwrite the base exceptions with the language's custom one.

 +code("Example").
     from ...util import update_exc
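For reference (not part of the diff): a rough sketch of the merge order described above. The exception dicts are made up for illustration; only the "a." example and the import line come from the docs.

    from ...symbols import ORTH, LEMMA
    from ...util import update_exc

    # Hypothetical exception dicts: the language-specific entry for "a."
    # overwrites the entry from the shared base exceptions.
    BASE_EXCEPTIONS = {"a.": [{ORTH: "a."}]}
    TOKENIZER_EXCEPTIONS = {"a.": [{ORTH: "a.", LEMMA: "all"}]}

    # Later arguments update and overwrite earlier ones.
    tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)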
@@ -19,133 +19,8 @@ p

 +under-construction

-+code("Runtime usage").
-    def count_entity_sentiment(nlp, texts):
-        '''Compute the net document sentiment for each entity in the texts.'''
-        entity_sentiments = collections.Counter(float)
-        for doc in nlp.pipe(texts, batch_size=1000, n_threads=4):
-            for ent in doc.ents:
-                entity_sentiments[ent.text] += doc.sentiment
-        return entity_sentiments
-
-    def load_nlp(lstm_path, lang_id='en'):
-        def create_pipeline(nlp):
-            return [nlp.tagger, nlp.entity, SentimentAnalyser.load(lstm_path, nlp)]
-        return spacy.load(lang_id, create_pipeline=create_pipeline)
-
 p
-    | All you have to do is pass a #[code create_pipeline] callback function
-    | to #[code spacy.load()]. The function should take a
-    | #[code spacy.language.Language] object as its only argument, and return
-    | a sequence of callables. Each callable should accept a
-    | #[+api("docs") #[code Doc]] object, modify it in place, and return
-    | #[code None].
-
-p
-    | Of course, operating on single documents is inefficient, especially for
-    | deep learning models. Usually we want to annotate many texts, and we
-    | want to process them in parallel. You should therefore ensure that your
-    | model component also supports a #[code .pipe()] method. The
-    | #[code .pipe()] method should be a well-behaved generator function that
-    | operates on arbitrarily large sequences. It should consume a small
-    | buffer of documents, work on them in parallel, and yield them one-by-one.
-
-+code("Custom Annotator Class").
-    class SentimentAnalyser(object):
-        @classmethod
-        def load(cls, path, nlp):
-            with (path / 'config.json').open() as file_:
-                model = model_from_json(file_.read())
-            with (path / 'model').open('rb') as file_:
-                lstm_weights = pickle.load(file_)
-            embeddings = get_embeddings(nlp.vocab)
-            model.set_weights([embeddings] + lstm_weights)
-            return cls(model)
-
-        def __init__(self, model):
-            self._model = model
-
-        def __call__(self, doc):
-            X = get_features([doc], self.max_length)
-            y = self._model.predict(X)
-            self.set_sentiment(doc, y)
-
-        def pipe(self, docs, batch_size=1000, n_threads=2):
-            for minibatch in cytoolz.partition_all(batch_size, docs):
-                Xs = get_features(minibatch)
-                ys = self._model.predict(Xs)
-                for i, doc in enumerate(minibatch):
-                    doc.sentiment = ys[i]
-
-        def set_sentiment(self, doc, y):
-            doc.sentiment = float(y[0])
-            # Sentiment has a native slot for a single float.
-            # For arbitrary data storage, there's:
-            # doc.user_data['my_data'] = y
-
-    def get_features(docs, max_length):
-        Xs = numpy.zeros((len(docs), max_length), dtype='int32')
-        for i, doc in enumerate(minibatch):
-            for j, token in enumerate(doc[:max_length]):
-                Xs[i, j] = token.rank if token.has_vector else 0
-        return Xs
-
-p
-    | By default, spaCy 1.0 downloads and uses the 300-dimensional
-    | #[+a("http://nlp.stanford.edu/projects/glove/") GloVe] common crawl
-    | vectors. It's also easy to replace these vectors with ones you've
-    | trained yourself, or to disable the word vectors entirely. If you've
-    | installed your word vectors into spaCy's #[+api("vocab") #[code Vocab]]
-    | object, here's how to use them in a Keras model:
-
-+code("Training with Keras").
-    def train(train_texts, train_labels, dev_texts, dev_labels,
-              lstm_shape, lstm_settings, lstm_optimizer, batch_size=100, nb_epoch=5):
-        nlp = spacy.load('en', parser=False, tagger=False, entity=False)
-        embeddings = get_embeddings(nlp.vocab)
-        model = compile_lstm(embeddings, lstm_shape, lstm_settings)
-        train_X = get_features(nlp.pipe(train_texts))
-        dev_X = get_features(nlp.pipe(dev_texts))
-        model.fit(train_X, train_labels, validation_data=(dev_X, dev_labels),
-                  nb_epoch=nb_epoch, batch_size=batch_size)
-        return model
-
-    def compile_lstm(embeddings, shape, settings):
-        model = Sequential()
-        model.add(
-            Embedding(
-                embeddings.shape[1],
-                embeddings.shape[0],
-                input_length=shape['max_length'],
-                trainable=False,
-                weights=[embeddings]
-            )
-        )
-        model.add(Bidirectional(LSTM(shape['nr_hidden'])))
-        model.add(Dropout(settings['dropout']))
-        model.add(Dense(shape['nr_class'], activation='sigmoid'))
-        model.compile(optimizer=Adam(lr=settings['lr']), loss='binary_crossentropy',
-                      metrics=['accuracy'])
-
-        return model
-
-    def get_embeddings(vocab):
-        max_rank = max(lex.rank for lex in vocab if lex.has_vector)
-        vectors = numpy.ndarray((max_rank+1, vocab.vectors_length), dtype='float32')
-        for lex in vocab:
-            if lex.has_vector:
-                vectors[lex.rank] = lex.vector
-        return vectors
-
-    def get_features(docs, max_length):
-        Xs = numpy.zeros(len(list(docs)), max_length, dtype='int32')
-        for i, doc in enumerate(docs):
-            for j, token in enumerate(doc[:max_length]):
-                Xs[i, j] = token.rank if token.has_vector else 0
-        return Xs
-
-p
-    | For most applications, I recommend using pre-trained word embeddings
+    | For most applications, it's recommended to use pre-trained word embeddings
     | without "fine-tuning". This means that you'll use the same embeddings
     | across different models, and avoid learning adjustments to them on your
     | training data. The embeddings table is large, and the values provided by
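A side note on the removed snippets: the get_features helper in the "Custom Annotator Class" block loops over minibatch instead of its docs argument, and the Keras variant passes the array shape to numpy.zeros without wrapping it in a tuple. A corrected sketch, assuming only numpy, for anyone reusing the removed code:

    import numpy

    def get_features(docs, max_length):
        # Corrected sketch: iterate over `docs` (not `minibatch`) and pass the
        # shape to numpy.zeros as a tuple.
        docs = list(docs)
        Xs = numpy.zeros((len(docs), max_length), dtype='int32')
        for i, doc in enumerate(docs):
            for j, token in enumerate(doc[:max_length]):
                Xs[i, j] = token.rank if token.has_vector else 0
        return Xs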
@@ -156,7 +156,7 @@ include _spacy-101/_pipelines
     | #[strong create your own], see the usage guide on
     | #[+a("/docs/usage/language-processing-pipeline") language processing pipelines].

-+h(2, "vocab") Vocab and lexemes
++h(2, "vocab") Vocab, hashes and lexemes

 include _spacy-101/_vocab

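For reference (not part of the diff): the renamed section covers spaCy v2's hash-based string store. A minimal sketch of the lookup it refers to, assuming a loaded English model as nlp:

    doc = nlp(u'I love coffee')
    coffee_hash = nlp.vocab.strings[u'coffee']    # string to hash
    coffee_text = nlp.vocab.strings[coffee_hash]  # hash back to string
    assert doc[2].orth == coffee_hash             # token.orth is the same hash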
@@ -120,7 +120,6 @@ p
         doc = nlp.make_doc(raw_text)
         nlp.tagger(doc)
         loss = nlp.entity.update(doc, gold)
-    nlp.end_training()
     nlp.save_to_directory(output_dir)

 p
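For readability, here is the v1.x-style update loop the changed lines sit in, sketched around the diff. The loop wrapper, the train_data iterable of (raw_text, gold) pairs, the iteration count and output_dir are assumptions, not part of the diff:

    import random

    for itn in range(20):                 # assumed number of passes
        random.shuffle(train_data)
        for raw_text, gold in train_data:
            doc = nlp.make_doc(raw_text)
            nlp.tagger(doc)
            loss = nlp.entity.update(doc, gold)
    nlp.save_to_directory(output_dir)     # nlp.end_training() is no longer called before saving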
@@ -26,8 +26,6 @@ include _spacy-101/_training
     gold = GoldParse(doc, tags=['N', 'V', 'N'])
     tagger.update(doc, gold)

-    tagger.model.end_training()
-
 p
 +button(gh("spaCy", "examples/training/train_tagger.py"), false, "secondary") Full example

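To make the GoldParse arguments concrete: the tags line up one per token with a three-word doc. The words below are hypothetical; only the tags and the calls come from the snippet:

    doc  = Doc(vocab, words=['dogs', 'chase', 'cats'])   # hypothetical tokens
    gold = GoldParse(doc, tags=['N', 'V', 'N'])          # one coarse tag per token
    tagger.update(doc, gold)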
@@ -44,8 +42,6 @@ p
     doc = Doc(vocab, words=['Who', 'is', 'Shaka', 'Khan', '?'])
     entity.update(doc, ['O', 'O', 'B-PERSON', 'L-PERSON', 'O'])

-    entity.model.end_training()
-
 p
 +button(gh("spaCy", "examples/training/train_ner.py"), false, "secondary") Full example

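For reference (not part of the diff): the annotation in the snippet above uses the BILUO scheme, aligned token by token:

    # B = beginning of an entity, I = inside, L = last token of an entity,
    # U = single-token entity, O = outside any entity
    words = ['Who', 'is', 'Shaka',    'Khan',     '?']
    biluo = ['O',   'O',  'B-PERSON', 'L-PERSON', 'O']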
@@ -77,7 +73,5 @@ p.o-inline-list
     parser.update(doc, [(1, 'nsubj'), (1, 'ROOT'), (3, 'compound'), (1, 'dobj'),
                         (1, 'punct')])

-    parser.model.end_training()
-
 p
 +button(gh("spaCy", "examples/training/train_parser.py"), false, "secondary") Full example
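For reference (not part of the diff): each parser annotation is a (head index, dependency label) pair aligned to the tokens. Pairing them with the 'Who is Shaka Khan ?' words from the NER example above is an assumption, but the five-element lists line up:

    words       = ['Who',        'is',        'Shaka',         'Khan',      '?']
    annotations = [(1, 'nsubj'), (1, 'ROOT'), (3, 'compound'), (1, 'dobj'), (1, 'punct')]
    # e.g. 'Who' attaches to token 1 ('is') as its nominal subject, and
    # 'Shaka' attaches to token 3 ('Khan') as a compound modifier.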
@@ -372,7 +372,7 @@ p
 p
     | If you're using the matcher, you can now add patterns in one step. This
     | should be easy to update – simply merge the ID, callback and patterns
-    | into one call to #[+api("matcher#add") #[code matcher.add]].
+    | into one call to #[+api("matcher#add") #[code matcher.add()]].

 +code-new.
     matcher.add('GoogleNow', merge_phrases, [{ORTH: 'Google'}, {ORTH: 'Now'}])
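For reference (not part of the diff): a sketch of the new-style call in context, assuming spaCy v2 and a loaded nlp object. The merge_phrases callback body is hypothetical; in v2 the on-match callback receives (matcher, doc, i, matches):

    from spacy.matcher import Matcher
    from spacy.attrs import ORTH

    matcher = Matcher(nlp.vocab)

    def merge_phrases(matcher, doc, i, matches):
        # hypothetical callback: merge the matched tokens into one
        match_id, start, end = matches[i]
        doc[start:end].merge()

    # ID, callback and patterns merged into a single call
    matcher.add('GoogleNow', merge_phrases, [{ORTH: 'Google'}, {ORTH: 'Now'}])
    matches = matcher(nlp(u'I like Google Now best'))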