Update v2 guide and split into partials
parent 1c7313051f · commit 5ab4e96144
				|  | @ -79,6 +79,7 @@ | |||
|         "title": "What's New in v2.0", | ||||
|         "teaser": "New features, backwards incompatibilities and migration guide.", | ||||
|         "menu": { | ||||
|             "Summary": "summary", | ||||
|             "New features": "features", | ||||
|             "Backwards Incompatibilities": "incompat", | ||||
|             "Migrating from v1.x": "migrating", | ||||
|  |  | |||
							
								
								
									
237  website/usage/_v2/_features.jade  Normal file
							|  | @ -0,0 +1,237 @@ | |||
| //- 💫 DOCS > USAGE > WHAT'S NEW IN V2.0 > NEW FEATURES | ||||
| 
 | ||||
| p | ||||
|     |  This section contains an overview of the most important | ||||
|     |  #[strong new features and improvements]. The #[+a("/api") API docs] | ||||
|     |  include additional deprecation notes. New methods and functions that | ||||
|     |  were introduced in this version are marked with a | ||||
|     |  #[span.u-text-tag.u-text-tag--spaced v2.0] tag. | ||||
| 
 | ||||
| +h(3, "features-models") Convolutional neural network models | ||||
| 
 | ||||
| +aside-code("Example", "bash") | ||||
|     for model in ["en", "de", "fr", "es", "pt", "it"] | ||||
|         | spacy download #{model}  # default #{LANGUAGES[model]} model!{'\n'} | ||||
|     | spacy download xx_ent_wiki_sm  # multi-language NER | ||||
| 
 | ||||
| p | ||||
|     |  spaCy v2.0 features new neural models for tagging, | ||||
|     |  parsing and entity recognition. The models have | ||||
|     |  been designed and implemented from scratch specifically for spaCy, to | ||||
|     |  give you an unmatched balance of speed, size and accuracy. The new | ||||
|     |  models are #[strong 10× smaller], #[strong 20% more accurate], | ||||
|     |  and #[strong just as fast] as the previous generation. | ||||
|     |  #[strong GPU usage] is now supported via | ||||
|     |  #[+a("http://chainer.org") Chainer]'s CuPy module. | ||||
| 
 | ||||
| +infobox | ||||
|     |  #[+label-inline Usage:] #[+a("/models") Models directory], | ||||
|     |  #[+a("/models/comparison") Models comparison], | ||||
|     |  #[+a("/usage/#gpu") Using spaCy with GPU] | ||||
| 
 | ||||
| +h(3, "features-pipelines") Improved processing pipelines | ||||
| 
 | ||||
| +aside-code("Example"). | ||||
|     # Set custom attributes | ||||
|     Doc.set_extension('my_attr', default=False) | ||||
|     Token.set_extension('my_attr', getter=my_token_getter) | ||||
|     assert doc._.my_attr | ||||
|     assert token._.my_attr | ||||
| 
 | ||||
|     # Add components to the pipeline | ||||
|     my_component = lambda doc: doc | ||||
|     nlp.add_pipe(my_component) | ||||
| 
 | ||||
| p | ||||
|     |  It's now much easier to #[strong customise the pipeline] with your own | ||||
|     |  components: functions that receive a #[code Doc] object, modify and | ||||
|     |  return it. Extensions let you write any | ||||
|     |  #[strong attributes, properties and methods] to the #[code Doc], | ||||
|     |  #[code Token] and #[code Span]. You can add data, implement new | ||||
|     |  features, integrate other libraries with spaCy or plug in your own | ||||
|     |  machine learning models. | ||||
| 
 | ||||
| +image | ||||
|     include ../../assets/img/pipeline.svg | ||||
| 
 | ||||
| +infobox | ||||
|     |  #[+label-inline API:] #[+api("language") #[code Language]], | ||||
|     |  #[+api("doc#set_extension") #[code Doc.set_extension]], | ||||
|     |  #[+api("span#set_extension") #[code Span.set_extension]], | ||||
|     |  #[+api("token#set_extension") #[code Token.set_extension]] | ||||
|     |  #[+label-inline Usage:] | ||||
|     |  #[+a("/usage/processing-pipelines") Processing pipelines] | ||||
|     |  #[+label-inline Code:] | ||||
|     |  #[+src("/usage/examples#section-pipeline") Pipeline examples] | ||||
| 
 | ||||
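| p | ||||
|     |  As a rough sketch, a component and an extension might be combined | ||||
|     |  like this – the #[code line_count] attribute and the component | ||||
|     |  function are hypothetical examples, not part of spaCy's API: | ||||
| 
 | ||||
| +code. | ||||
|     from spacy.tokens import Doc | ||||
| 
 | ||||
|     # hypothetical extension: number of lines in the text | ||||
|     Doc.set_extension('line_count', default=1) | ||||
| 
 | ||||
|     def line_count_component(doc): | ||||
|         # a component receives the Doc, modifies it and returns it | ||||
|         doc._.line_count = doc.text.count('\n') + 1 | ||||
|         return doc | ||||
| 
 | ||||
|     nlp.add_pipe(line_count_component, last=True) | ||||
|     doc = nlp(u'Hello world.') | ||||
|     assert doc._.line_count == 1 | ||||
| 
 | ||||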
| +h(3, "features-text-classification") Text classification | ||||
| 
 | ||||
| +aside-code("Example"). | ||||
|     textcat = nlp.create_pipe('textcat') | ||||
|     textcat.add_label('POSITIVE')  # add labels before training | ||||
|     nlp.add_pipe(textcat, last=True) | ||||
|     optimizer = nlp.begin_training() | ||||
|     for itn in range(100): | ||||
|         for doc, gold in train_data: | ||||
|             nlp.update([doc], [gold], sgd=optimizer) | ||||
|     doc = nlp(u'This is a text.') | ||||
|     print(doc.cats) | ||||
| 
 | ||||
| p | ||||
|     |  spaCy v2.0 lets you add text categorization models to spaCy pipelines. | ||||
|     |  The model supports classification with multiple, non-mutually | ||||
|     |  exclusive labels – so multiple labels can apply at once. You can | ||||
|     |  change the model architecture rather easily, but by default, the | ||||
|     |  #[code TextCategorizer] class uses a convolutional neural network to | ||||
|     |  assign position-sensitive vectors to each word in the document. | ||||
| 
 | ||||
| +infobox | ||||
|     |  #[+label-inline API:] #[+api("textcategorizer") #[code TextCategorizer]], | ||||
|     |  #[+api("doc#attributes") #[code Doc.cats]], | ||||
|     |  #[+api("goldparse#attributes") #[code GoldParse.cats]]#[br] | ||||
|     |  #[+label-inline Usage:] #[+a("/usage/text-classification") Text classification] | ||||
| 
 | ||||
| +h(3, "features-hash-ids") Hash values instead of integer IDs | ||||
| 
 | ||||
| +aside-code("Example"). | ||||
|     doc = nlp(u'I love coffee') | ||||
|     assert doc.vocab.strings[u'coffee'] == 3197928453018144401 | ||||
|     assert doc.vocab.strings[3197928453018144401] == u'coffee' | ||||
| 
 | ||||
|     beer_hash = doc.vocab.strings.add(u'beer') | ||||
|     assert doc.vocab.strings[u'beer'] == beer_hash | ||||
|     assert doc.vocab.strings[beer_hash] == u'beer' | ||||
| 
 | ||||
| p | ||||
|     |  The #[+api("stringstore") #[code StringStore]] now resolves all strings | ||||
|     |  to hash values instead of integer IDs. This means that the string-to-int | ||||
|     |  mapping #[strong no longer depends on the vocabulary state], making a lot | ||||
|     |  of workflows much simpler, especially during training. Unlike integer IDs | ||||
|     |  in spaCy v1.x, hash values will #[strong always match] – even across | ||||
|     |  models. Strings can now be added explicitly using the new | ||||
|     |  #[+api("stringstore#add") #[code Stringstore.add]] method. A token's hash | ||||
|     |  is available via #[code token.orth]. | ||||
| 
 | ||||
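| p | ||||
|     |  For example, a token's hash and its string counterpart always line up | ||||
|     |  (a minimal sketch, reusing the #[code nlp] object from above): | ||||
| 
 | ||||
| +code. | ||||
|     doc = nlp(u'I love coffee') | ||||
|     token = doc[2]  # 'coffee' | ||||
|     # token.orth is the hash, token.orth_ the original string | ||||
|     assert token.orth == nlp.vocab.strings[u'coffee'] | ||||
|     assert token.orth_ == u'coffee' | ||||
| 
 | ||||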
| +infobox | ||||
|     |  #[+label-inline API:] #[+api("stringstore") #[code StringStore]] | ||||
|     |  #[+label-inline Usage:] #[+a("/usage/spacy-101#vocab") Vocab, hashes and lexemes 101] | ||||
| 
 | ||||
| +h(3, "features-vectors") Improved word vectors support | ||||
| 
 | ||||
| +aside-code("Example"). | ||||
|     for word, vector in vector_data: | ||||
|         nlp.vocab.set_vector(word, vector) | ||||
|     nlp.vocab.vectors.from_glove('/path/to/vectors') | ||||
|     # keep 10000 unique vectors and remap the rest | ||||
|     nlp.vocab.prune_vectors(10000) | ||||
|     nlp.to_disk('/model') | ||||
| 
 | ||||
| p | ||||
|     |  The new #[+api("vectors") #[code Vectors]] class helps the | ||||
|     |  #[code Vocab] manage the vectors assigned to strings, and lets you | ||||
|     |  assign vectors individually, or | ||||
|     |  #[+a("/usage/vectors-similarity#custom-loading-glove") load in GloVe vectors] | ||||
|     |  from a directory. To help you strike a good balance between coverage | ||||
|     |  and memory usage, the #[code Vectors] class lets you map | ||||
|     |  #[strong multiple keys] to the #[strong same row] of the table. If | ||||
|     |  you're using the #[+api("cli#vocab") #[code spacy vocab]] command to | ||||
|     |  create a vocabulary, pruning the vectors will be taken care of | ||||
|     |  automatically. Otherwise, you can use the new | ||||
|     |  #[+api("vocab#prune_vectors") #[code Vocab.prune_vectors]]. | ||||
| 
 | ||||
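| p | ||||
|     |  #[code Vocab.prune_vectors] returns a dictionary mapping each removed | ||||
|     |  word to the entry it was remapped to, plus the similarity score. A | ||||
|     |  minimal sketch – the words and score shown are hypothetical: | ||||
| 
 | ||||
| +code. | ||||
|     remap = nlp.vocab.prune_vectors(10000) | ||||
|     # removed words now point to the closest remaining vector, e.g. | ||||
|     # {u'leggings': (u'legging', 0.9821), ...} | ||||
|     word, score = remap[u'leggings'] | ||||
| 
 | ||||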
| +infobox | ||||
|     |  #[+label-inline API:] #[+api("vectors") #[code Vectors]], | ||||
|     |  #[+api("vocab") #[code Vocab]] | ||||
|     |  #[+label-inline Usage:] #[+a("/usage/vectors-similarity") Word vectors and semantic similarity] | ||||
| 
 | ||||
| +h(3, "features-serializer") Saving, loading and serialization | ||||
| 
 | ||||
| +aside-code("Example"). | ||||
|     nlp = spacy.load('en') # shortcut link | ||||
|     nlp = spacy.load('en_core_web_sm') # package | ||||
|     nlp = spacy.load('/path/to/en') # unicode path | ||||
|     nlp = spacy.load(Path('/path/to/en')) # pathlib Path | ||||
| 
 | ||||
|     nlp.to_disk('/path/to/nlp') | ||||
|     nlp = English().from_disk('/path/to/nlp') | ||||
| 
 | ||||
| p | ||||
|     |  spaCy's serialization API has been made consistent across classes and | ||||
|     |  objects. All container classes, i.e. #[code Language], #[code Doc], | ||||
|     |  #[code Vocab] and #[code StringStore], now have #[code to_bytes()], | ||||
|     |  #[code from_bytes()], #[code to_disk()] and #[code from_disk()] methods | ||||
|     |  that support the Pickle protocol. | ||||
| 
 | ||||
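| p | ||||
|     |  A round-trip via bytes might look like this – a minimal sketch, | ||||
|     |  assuming an existing #[code nlp] object: | ||||
| 
 | ||||
| +code. | ||||
|     from spacy.tokens import Doc | ||||
| 
 | ||||
|     doc = nlp(u'Give it back! He pleaded.') | ||||
|     doc_bytes = doc.to_bytes() | ||||
|     # restoring a Doc requires a vocab with the same string mapping | ||||
|     new_doc = Doc(nlp.vocab).from_bytes(doc_bytes) | ||||
|     assert new_doc.text == doc.text | ||||
| 
 | ||||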
| p | ||||
|     |  The improved #[code spacy.load] makes loading models easier and more | ||||
|     |  transparent. You can load a model by supplying its | ||||
|     |  #[+a("/usage/models#usage") shortcut link], the name of an installed | ||||
|     |  #[+a("/usage/saving-loading#generating") model package] or a path. | ||||
|     |  The #[code Language] class to initialise will be determined based on the | ||||
|     |  model's settings. For a blank language, you can import the class directly, | ||||
|     |  e.g. #[code from spacy.lang.en import English]. | ||||
| 
 | ||||
| +infobox | ||||
|     |  #[+label-inline API:] #[+api("spacy#load") #[code spacy.load]] | ||||
|     |  #[+label-inline Usage:] #[+a("/usage/saving-loading") Saving and loading] | ||||
| 
 | ||||
| +h(3, "features-displacy") displaCy visualizer with Jupyter support | ||||
| 
 | ||||
| +aside-code("Example"). | ||||
|     from spacy import displacy | ||||
|     doc = nlp(u'This is a sentence about Facebook.') | ||||
|     displacy.serve(doc, style='dep') # run the web server | ||||
|     html = displacy.render(doc, style='ent') # generate HTML | ||||
| 
 | ||||
| p | ||||
|     |  Our popular dependency and named entity visualizers are now an official | ||||
|     |  part of the spaCy library. displaCy can run a simple web server, or | ||||
|     |  generate raw HTML markup or SVG files to be exported. You can pass in one | ||||
|     |  or more docs, and customise the style. displaCy also auto-detects whether | ||||
|     |  you're running #[+a("https://jupyter.org") Jupyter] and will render the | ||||
|     |  visualizations in your notebook. | ||||
| 
 | ||||
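| p | ||||
|     |  In a notebook, you can also force inline rendering and pass display | ||||
|     |  options – a small sketch, the #[code colors] value is just an example: | ||||
| 
 | ||||
| +code. | ||||
|     # render inline in a Jupyter notebook with a custom entity colour | ||||
|     colors = {'ORG': '#aa9cfc'} | ||||
|     displacy.render(doc, style='ent', jupyter=True, | ||||
|                     options={'colors': colors}) | ||||
| 
 | ||||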
| +infobox | ||||
|     |  #[+label-inline API:] #[+api("displacy") #[code displacy]] | ||||
|     |  #[+label-inline Usage:] #[+a("/usage/visualizers") Visualizing spaCy] | ||||
| 
 | ||||
| +h(3, "features-language") Improved language data and lazy loading | ||||
| 
 | ||||
| p | ||||
|     |  Language-specific data now lives in its own submodule, #[code spacy.lang]. | ||||
|     |  Languages are lazy-loaded, i.e. only loaded when you import a | ||||
|     |  #[code Language] class, or load a model that initialises one. This allows | ||||
|     |  languages to contain more custom data, e.g. lemmatizer lookup tables, or | ||||
|     |  complex regular expressions. The language data has also been tidied up | ||||
|     |  and simplified. spaCy now also supports simple lookup-based | ||||
|     |  lemmatization – and #[strong #{LANG_COUNT} languages] in total! | ||||
| 
 | ||||
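| p | ||||
|     |  For example, you can import a #[code Language] subclass directly, or | ||||
|     |  look the class up lazily by its ISO code (a minimal sketch): | ||||
| 
 | ||||
| +code. | ||||
|     from spacy.lang.de import German | ||||
|     nlp = German()  # only loads the German language data | ||||
| 
 | ||||
|     from spacy.util import get_lang_class | ||||
|     lang_cls = get_lang_class('de')  # lazy-load the class by code | ||||
|     nlp = lang_cls() | ||||
| 
 | ||||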
| +infobox | ||||
|     |  #[+label-inline API:] #[+api("language") #[code Language]] | ||||
|     |  #[+label-inline Code:] #[+src(gh("spaCy", "spacy/lang")) #[code spacy/lang]] | ||||
|     |  #[+label-inline Usage:] #[+a("/usage/adding-languages") Adding languages] | ||||
| 
 | ||||
| +h(3, "features-matcher") Revised matcher API and phrase matcher | ||||
| 
 | ||||
| +aside-code("Example"). | ||||
|     from spacy.matcher import Matcher, PhraseMatcher | ||||
| 
 | ||||
|     matcher = Matcher(nlp.vocab) | ||||
|     matcher.add('HEARTS', None, [{'ORTH': '❤️', 'OP': '+'}]) | ||||
| 
 | ||||
|     phrasematcher = PhraseMatcher(nlp.vocab) | ||||
|     phrasematcher.add('OBAMA', None, nlp(u"Barack Obama")) | ||||
| 
 | ||||
| p | ||||
|     |  Patterns can now be added to the matcher by calling | ||||
|     |  #[+api("matcher-add") #[code matcher.add()]] with a match ID, an optional | ||||
|     |  callback function to be invoked on each match, and one or more patterns. | ||||
|     |  This allows you to write powerful, pattern-specific logic using only one | ||||
|     |  matcher. For example, you might only want to merge some entity types, | ||||
|     |  and set custom flags for other matched patterns. The new | ||||
|     |  #[+api("phrasematcher") #[code PhraseMatcher]] lets you efficiently | ||||
|     |  match very large terminology lists using #[code Doc] objects as match | ||||
|     |  patterns. | ||||
| 
 | ||||
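| p | ||||
|     |  For instance, a pattern-specific callback could merge the matched | ||||
|     |  tokens – a rough sketch, the callback name and logic are just examples: | ||||
| 
 | ||||
| +code. | ||||
|     def merge_hearts(matcher, doc, i, matches): | ||||
|         # called once per match, with the index of the current match | ||||
|         match_id, start, end = matches[i] | ||||
|         span = doc[start:end] | ||||
|         span.merge()  # merge the matched tokens into one | ||||
| 
 | ||||
|     matcher.add('HEARTS', merge_hearts, [{'ORTH': '❤️', 'OP': '+'}]) | ||||
| 
 | ||||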
| +infobox | ||||
|     |  #[+label-inline API:] #[+api("matcher") #[code Matcher]], | ||||
|     |  #[+api("phrasematcher") #[code PhraseMatcher]] | ||||
|     |  #[+label-inline Usage:] #[+a("/usage/rule-based-matching") Rule-based matching] | ||||
							
								
								
									
141  website/usage/_v2/_incompat.jade  Normal file
							|  | @ -0,0 +1,141 @@ | |||
| //- 💫 DOCS > USAGE > WHAT'S NEW IN V2.0 > BACKWARDS INCOMPATIBILITIES | ||||
| 
 | ||||
| +table(["Old", "New"]) | ||||
|     +row | ||||
|         +cell | ||||
|             |  #[code spacy.en] etc. | ||||
|         +cell | ||||
|             |  #[code spacy.lang.en] etc. | ||||
| 
 | ||||
|     +row | ||||
|         +cell #[code spacy.orth] | ||||
|         +cell #[code spacy.lang.xx.lex_attrs] | ||||
| 
 | ||||
|     +row | ||||
|         +cell #[code spacy.syntax.iterators] | ||||
|         +cell #[code spacy.lang.xx.syntax_iterators] | ||||
| 
 | ||||
|     +row | ||||
|         +cell #[code spacy.tagger.Tagger] | ||||
|         +cell #[code spacy.pipeline.Tagger] | ||||
| 
 | ||||
|     +row | ||||
|         +cell #[code spacy.cli.model] | ||||
|         +cell #[+api("cli#vocab") #[code spacy.cli.vocab]] | ||||
| 
 | ||||
|     +row | ||||
|         +cell #[code Language.save_to_directory] | ||||
|         +cell #[+api("language#to_disk") #[code Language.to_disk]] | ||||
| 
 | ||||
|     +row | ||||
|         +cell #[code Language.end_training] | ||||
|         +cell #[+api("language#begin_training") #[code Language.begin_training]] | ||||
| 
 | ||||
|     +row | ||||
|         +cell #[code Language.create_make_doc] | ||||
|         +cell #[+api("language#attributes") #[code Language.tokenizer]] | ||||
| 
 | ||||
|     +row | ||||
|         +cell | ||||
|             |  #[code Vocab.load] | ||||
|             |  #[code Vocab.load_lexemes] | ||||
|         +cell | ||||
|             |  #[+api("vocab#from_disk") #[code Vocab.from_disk]] | ||||
|             |  #[+api("vocab#from_bytes") #[code Vocab.from_bytes]] | ||||
| 
 | ||||
|     +row | ||||
|         +cell | ||||
|             |  #[code Vocab.dump] | ||||
|         +cell | ||||
|             |  #[+api("vocab#to_disk") #[code Vocab.to_disk]]#[br] | ||||
|             |  #[+api("vocab#to_bytes") #[code Vocab.to_bytes]] | ||||
| 
 | ||||
|     +row | ||||
|         +cell | ||||
|             |  #[code Vocab.load_vectors] | ||||
|             |  #[code Vocab.load_vectors_from_bin_loc] | ||||
|         +cell | ||||
|             |  #[+api("vectors#from_disk") #[code Vectors.from_disk]] | ||||
|             |  #[+api("vectors#from_bytes") #[code Vectors.from_bytes]] | ||||
|             |  #[+api("vectors#from_glove") #[code Vectors.from_glove]] | ||||
| 
 | ||||
|     +row | ||||
|         +cell | ||||
|             |  #[code Vocab.dump_vectors] | ||||
|         +cell | ||||
|             |  #[+api("vectors#to_disk") #[code Vectors.to_disk]] | ||||
|             |  #[+api("vectors#to_bytes") #[code Vectors.to_bytes]] | ||||
| 
 | ||||
|     +row | ||||
|         +cell | ||||
|             |  #[code StringStore.load] | ||||
|         +cell | ||||
|             |  #[+api("stringstore#from_disk") #[code StringStore.from_disk]] | ||||
|             |  #[+api("stringstore#from_bytes") #[code StringStore.from_bytes]] | ||||
| 
 | ||||
|     +row | ||||
|         +cell | ||||
|             |  #[code StringStore.dump] | ||||
|         +cell | ||||
|             |  #[+api("stringstore#to_disk") #[code StringStore.to_disk]] | ||||
|             |  #[+api("stringstore#to_bytes") #[code StringStore.to_bytes]] | ||||
| 
 | ||||
|     +row | ||||
|         +cell #[code Tokenizer.load] | ||||
|         +cell | ||||
|             |  #[+api("tokenizer#from_disk") #[code Tokenizer.from_disk]] | ||||
|             |  #[+api("tokenizer#from_bytes") #[code Tokenizer.from_bytes]] | ||||
| 
 | ||||
|     +row | ||||
|         +cell #[code Tagger.load] | ||||
|         +cell | ||||
|             |  #[+api("tagger#from_disk") #[code Tagger.from_disk]] | ||||
|             |  #[+api("tagger#from_bytes") #[code Tagger.from_bytes]] | ||||
| 
 | ||||
|     +row | ||||
|         +cell #[code DependencyParser.load] | ||||
|         +cell | ||||
|             |  #[+api("dependencyparser#from_disk") #[code DependencyParser.from_disk]] | ||||
|             |  #[+api("dependencyparser#from_bytes") #[code DependencyParser.from_bytes]] | ||||
| 
 | ||||
|     +row | ||||
|         +cell #[code EntityRecognizer.load] | ||||
|         +cell | ||||
|             |  #[+api("entityrecognizer#from_disk") #[code EntityRecognizer.from_disk]] | ||||
|             |  #[+api("entityrecognizer#from_bytes") #[code EntityRecognizer.from_bytes]] | ||||
| 
 | ||||
|     +row | ||||
|         +cell #[code Matcher.load] | ||||
|         +cell - | ||||
| 
 | ||||
|     +row | ||||
|         +cell | ||||
|             |  #[code Matcher.add_pattern] | ||||
|             |  #[code Matcher.add_entity] | ||||
|         +cell | ||||
|             |  #[+api("matcher#add") #[code Matcher.add]] | ||||
|             |  #[+api("phrasematcher#add") #[code PhraseMatcher.add]] | ||||
| 
 | ||||
|     +row | ||||
|         +cell #[code Matcher.get_entity] | ||||
|         +cell #[+api("matcher#get") #[code Matcher.get]] | ||||
| 
 | ||||
|     +row | ||||
|         +cell #[code Matcher.has_entity] | ||||
|         +cell #[+api("matcher#has_key") #[code Matcher.has_key]] | ||||
| 
 | ||||
|     +row | ||||
|         +cell #[code Doc.read_bytes] | ||||
|         +cell | ||||
|             |  #[+api("doc#to_bytes") #[code Doc.to_bytes]] | ||||
|             |  #[+api("doc#from_bytes") #[code Doc.from_bytes]] | ||||
|             |  #[+api("doc#to_disk") #[code Doc.to_disk]] | ||||
|             |  #[+api("doc#from_disk") #[code Doc.from_disk]] | ||||
| 
 | ||||
|     +row | ||||
|         +cell #[code Token.is_ancestor_of] | ||||
|         +cell #[+api("token#is_ancestor") #[code Token.is_ancestor]] | ||||
| 
 | ||||
|     +row | ||||
|         +cell #[code Span.sent_start] | ||||
|         +cell #[+api("span#is_sent_start") #[code Span.is_sent_start]] | ||||
							
								
								
									
224  website/usage/_v2/_migrating.jade  Normal file
							|  | @ -0,0 +1,224 @@ | |||
| //- 💫 DOCS > USAGE > WHAT'S NEW IN V2.0 > MIGRATING FROM SPACY 1.X | ||||
| 
 | ||||
| p | ||||
|     |  Because we've made so many architectural changes to the library, we've | ||||
|     |  tried to #[strong keep breaking changes to a minimum]. A lot of projects | ||||
|     |  follow the philosophy that if you're going to break anything, you may as | ||||
|     |  well break everything. We think migration is easier if there's a logic to | ||||
|     |  what has changed. We've therefore followed a policy of avoiding | ||||
|     |  breaking changes to the #[code Doc], #[code Span] and #[code Token] | ||||
|     |  objects. This way, you can focus on only migrating the code that | ||||
|     |  does training, loading and serialization — in other words, code that | ||||
|     |  works with the #[code nlp] object directly. Code that uses the | ||||
|     |  annotations should continue to work. | ||||
| 
 | ||||
| +infobox("Important note", "⚠️") | ||||
|     |  If you've trained your own models, keep in mind that your train and | ||||
|     |  runtime inputs must match. This means you'll have to | ||||
|     |  #[strong retrain your models] with spaCy v2.0. | ||||
| 
 | ||||
| +h(3, "migrating-saving-loading") Saving, loading and serialization | ||||
| 
 | ||||
| p | ||||
|     |  Double-check all calls to #[code spacy.load()] and make sure they don't | ||||
|     |  use the #[code path] keyword argument. If you're only loading in binary | ||||
|     |  data and not a model package that can construct its own #[code Language] | ||||
|     |  class and pipeline, you should now use the | ||||
|     |  #[+api("language#from_disk") #[code Language.from_disk()]] method. | ||||
| 
 | ||||
| +code-new. | ||||
|     nlp = spacy.load('/model') | ||||
|     nlp = English().from_disk('/model/data') | ||||
| +code-old nlp = spacy.load('en', path='/model') | ||||
| 
 | ||||
| p | ||||
|     |  Review all other code that writes state to disk or bytes. | ||||
|     |  All containers now share the same, consistent API for saving and | ||||
|     |  loading. Replace saving with #[code to_disk()] or #[code to_bytes()], and | ||||
|     |  loading with #[code from_disk()] and #[code from_bytes()]. | ||||
| 
 | ||||
| +code-new. | ||||
|     nlp.to_disk('/model') | ||||
|     nlp.vocab.to_disk('/vocab') | ||||
| 
 | ||||
| +code-old. | ||||
|     nlp.save_to_directory('/model') | ||||
|     nlp.vocab.dump('/vocab') | ||||
| 
 | ||||
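| p | ||||
|     |  The bytes methods work the same way (a minimal sketch): | ||||
| 
 | ||||
| +code. | ||||
|     vocab_bytes = nlp.vocab.to_bytes() | ||||
|     nlp.vocab.from_bytes(vocab_bytes) | ||||
| 
 | ||||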
| p | ||||
|     |  If you've trained models with input from v1.x, you'll need to | ||||
|     |  #[strong retrain them] with spaCy v2.0. Models trained with previous | ||||
|     |  versions are not compatible with the new version. | ||||
| 
 | ||||
| +h(3, "migrating-languages") Processing pipelines and language data | ||||
| 
 | ||||
| p | ||||
|     |  If you're importing language data or #[code Language] classes, make sure | ||||
|     |  to change your import statements to import from #[code spacy.lang]. If | ||||
|     |  you've added your own custom language, it needs to be moved to | ||||
|     |  #[code spacy/lang/xx] and adjusted accordingly. | ||||
| 
 | ||||
| .o-block | ||||
|     +code-new from spacy.lang.en import English | ||||
|     +code-old from spacy.en import English | ||||
| 
 | ||||
| p | ||||
|     |  If you've been using custom pipeline components, check out the new | ||||
|     |  guide on #[+a("/usage/language-processing-pipelines") processing pipelines]. | ||||
|     |  Pipeline components are now #[code (name, func)] tuples. Appending | ||||
|     |  them to the pipeline still works – but the | ||||
|     |  #[+api("language#add_pipe") #[code add_pipe]] method now makes this | ||||
|     |  much more convenient. Methods for removing, renaming, replacing and | ||||
|     |  retrieving components have been added as well. Components can now | ||||
|     |  be disabled by passing a list of their names to the #[code disable] | ||||
|     |  keyword argument on load, or by using | ||||
|     |  #[+api("language#disable_pipes") #[code disable_pipes]] as a method | ||||
|     |  or contextmanager: | ||||
| 
 | ||||
| .o-block | ||||
|     +code-new. | ||||
|         nlp = spacy.load('en', disable=['tagger', 'ner']) | ||||
|         with nlp.disable_pipes('parser'): | ||||
|             doc = nlp(u"I don't want parsed") | ||||
|     +code-old. | ||||
|         nlp = spacy.load('en', tagger=False, entity=False) | ||||
|         doc = nlp(u"I don't want parsed", parse=False) | ||||
| 
 | ||||
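| p | ||||
|     |  The housekeeping methods follow the same naming scheme. A quick | ||||
|     |  sketch – #[code my_tagger] stands in for any custom component: | ||||
| 
 | ||||
| +code. | ||||
|     tagger = nlp.get_pipe('tagger')         # retrieve a component | ||||
|     nlp.replace_pipe('tagger', my_tagger)   # swap in another function | ||||
|     nlp.rename_pipe('tagger', 'my_tagger')  # rename a component | ||||
|     nlp.remove_pipe('my_tagger')            # remove and return it | ||||
| 
 | ||||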
| p | ||||
|     |  To add spaCy's built-in pipeline components to your pipeline, | ||||
|     |  you can still import and instantiate them directly – but it's more | ||||
|     |  convenient to use the new | ||||
|     |  #[+api("language#create_pipe") #[code create_pipe]] method with the | ||||
|     |  component name, i.e. #[code 'tagger'], #[code 'parser'], #[code 'ner'] | ||||
|     |  or #[code 'textcat']. | ||||
| 
 | ||||
| +code-new. | ||||
|     tagger = nlp.create_pipe('tagger') | ||||
|     nlp.add_pipe(tagger, first=True) | ||||
| 
 | ||||
| +code-old. | ||||
|     from spacy.pipeline import Tagger | ||||
|     tagger = Tagger(nlp.vocab) | ||||
|     nlp.pipeline.insert(0, tagger) | ||||
| 
 | ||||
| +h(3, "migrating-training") Training | ||||
| 
 | ||||
| p | ||||
|     |  All built-in pipeline components are now subclasses of | ||||
|     |  #[+api("pipe") #[code Pipe]], fully trainable and serializable, | ||||
|     |  and follow the same API. Instead of updating the model and telling | ||||
|     |  spaCy when to #[em stop], you can now explicitly call | ||||
|     |  #[+api("language#begin_training") #[code begin_training]], which | ||||
|     |  returns an optimizer you can pass into the | ||||
|     |  #[+api("language#update") #[code update]] function. | ||||
| 
 | ||||
| +code-new. | ||||
|     optimizer = nlp.begin_training() | ||||
|     for itn in range(1000): | ||||
|         for doc, gold in train_data: | ||||
|             nlp.update([doc], [gold], sgd=optimizer) | ||||
|     nlp.to_disk('/model') | ||||
| +code-old. | ||||
|     for itn in range(1000): | ||||
|         for doc, gold in train_data: | ||||
|             nlp.update(doc, gold) | ||||
|     nlp.end_training() | ||||
|     nlp.save_to_directory('/model') | ||||
| 
 | ||||
| +h(3, "migrating-doc") Attaching custom data to the Doc | ||||
| 
 | ||||
| p | ||||
|     |  Previously, you had to create a new container in order to attach custom | ||||
|     |  data to a #[code Doc] object. This often required converting the | ||||
|     | #[code Doc] objects to and from arrays. In spaCy v2.0, you can set your | ||||
|     |  own attributes, properties and methods on the #[code Doc], #[code Token] | ||||
|     |  and #[code Span] via | ||||
|     |  #[+a("/usage/processing-pipelines#custom-components-attributes") custom extensions]. | ||||
|     |  This means that your application can – and should – only pass around | ||||
|     |  #[code Doc] objects and refer to them as the single source of truth. | ||||
| 
 | ||||
| +code-new. | ||||
|     Doc.set_extension('meta', getter=get_doc_meta) | ||||
|     doc_with_meta = nlp(u'This is a doc with meta data') | ||||
|     meta = doc._.meta | ||||
| 
 | ||||
| +code-old. | ||||
|     doc = nlp(u'This is a regular doc') | ||||
|     doc_array = doc.to_array(['ORTH', 'POS']) | ||||
|     doc_with_meta = {'doc_array': doc_array, 'meta': get_doc_meta(doc_array)} | ||||
| 
 | ||||
| p | ||||
|     |  If you wrap your extension attributes in a | ||||
|     |  #[+a("/usage/processing-pipelines#custom-components") custom pipeline component], | ||||
|     |  they will be assigned automatically when you call #[code nlp] on a text. | ||||
|     |  If your application assigns custom data to spaCy's container objects, | ||||
|     |  or includes other utilities that interact with the pipeline, consider | ||||
|     |  moving this logic into its own extension module. | ||||
| 
 | ||||
| +code-new. | ||||
|     nlp.add_pipe(meta_component) | ||||
|     doc = nlp(u'Doc with a custom pipeline that assigns meta') | ||||
|     meta = doc._.meta | ||||
| 
 | ||||
| +code-old. | ||||
|     doc = nlp(u'Doc with a standard pipeline') | ||||
|     meta = get_meta(doc) | ||||
| 
 | ||||
| +h(3, "migrating-strings") Strings and hash values | ||||
| 
 | ||||
| p | ||||
|     |  The change from integer IDs to hash values may not actually affect your | ||||
|     |  code very much. However, if you're adding strings to the vocab manually, | ||||
|     |  you now need to call #[+api("stringstore#add") #[code StringStore.add()]] | ||||
|     |  explicitly. You can also now be sure that the string-to-hash mapping will | ||||
|     |  always match across vocabularies. | ||||
| 
 | ||||
| +code-new. | ||||
|     nlp.vocab.strings.add(u'coffee') | ||||
|     nlp.vocab.strings[u'coffee']       # 3197928453018144401 | ||||
|     other_nlp.vocab.strings[u'coffee'] # 3197928453018144401 | ||||
| 
 | ||||
| +code-old. | ||||
|     nlp.vocab.strings[u'coffee']       # 3672 | ||||
|     other_nlp.vocab.strings[u'coffee'] # 40259 | ||||
| 
 | ||||
| +h(3, "migrating-matcher") Adding patterns and callbacks to the matcher | ||||
| 
 | ||||
| p | ||||
|     |  If you're using the matcher, you can now add patterns in one step. This | ||||
|     |  should be easy to update – simply merge the ID, callback and patterns | ||||
|     |  into one call to #[+api("matcher#add") #[code Matcher.add()]]. The | ||||
|     |  matcher now also supports string keys, which saves you an extra import. | ||||
|     |  If you've been using #[strong acceptor functions], you'll need to move | ||||
|     |  this logic into the | ||||
|     |  #[+a("/usage/rule-based-matching#on_match") #[code on_match] callbacks]. | ||||
|     |  The callback function is invoked on every match and gives you access | ||||
|     |  to the doc, the index of the current match and the list of all | ||||
|     |  matches. This lets you accept or reject each match, and define the | ||||
|     |  actions to be triggered. | ||||
| 
 | ||||
| .o-block | ||||
|     +code-new. | ||||
|         matcher.add('GoogleNow', merge_phrases, [{'ORTH': 'Google'}, {'ORTH': 'Now'}]) | ||||
| 
 | ||||
|     +code-old. | ||||
|         matcher.add_entity('GoogleNow', on_match=merge_phrases) | ||||
|         matcher.add_pattern('GoogleNow', [{ORTH: 'Google'}, {ORTH: 'Now'}]) | ||||
| 
 | ||||
| p | ||||
|     |  If you need to match large terminology lists, you can now also | ||||
|     |  use the #[+api("phrasematcher") #[code PhraseMatcher]], which accepts | ||||
|     |  #[code Doc] objects as match patterns and is more efficient than the | ||||
|     |  regular, rule-based matcher. | ||||
| 
 | ||||
| +code-new. | ||||
|     from spacy.matcher import PhraseMatcher | ||||
|     matcher = PhraseMatcher(nlp.vocab) | ||||
|     patterns = [nlp(text) for text in large_terminology_list] | ||||
|     matcher.add('PRODUCT', None, *patterns) | ||||
| 
 | ||||
| +code-old. | ||||
|     matcher = Matcher(nlp.vocab) | ||||
|     matcher.add_entity('PRODUCT') | ||||
|     for text in large_terminology_list: | ||||
|         matcher.add_pattern('PRODUCT', [{ORTH: text}]) | ||||
							
								
								
									
74  website/usage/_v2/_summary.jade  Normal file
							|  | @ -0,0 +1,74 @@ | |||
| //- 💫 DOCS > USAGE > WHAT'S NEW IN V2.0 > SUMMARY | ||||
| 
 | ||||
| p | ||||
|     |  We're very excited to finally introduce spaCy v2.0! On this page, you'll | ||||
|     |  find a summary of the new features, information on the backwards | ||||
|     |  incompatibilities, including a handy overview of what's been renamed or | ||||
|     |  deprecated. To help you make the most of v2.0, we also | ||||
|     |  #[strong re-wrote almost all of the usage guides and API docs], and added | ||||
|     |  more #[+a("/usage/examples") real-world examples]. If you're new to | ||||
|     |  spaCy, or just want to brush up on some NLP basics and the details of | ||||
|     |  the library, check out the | ||||
|     |  #[+a("/usage/spacy-101") spaCy 101 guide] that explains the most | ||||
|     |  important concepts with examples and illustrations. | ||||
| 
 | ||||
| +h(2, "summary") Summary | ||||
| 
 | ||||
| +grid.o-no-block | ||||
|     +grid-col("half") | ||||
| 
 | ||||
|         p | ||||
|             |  This release features entirely new | ||||
|             |  #[strong deep learning-powered models] for spaCy's tagger, | ||||
|             |  parser and entity recognizer. The new models are | ||||
|             |  #[strong 10× smaller], #[strong 20% more accurate] and | ||||
|             |  just as fast as the previous generation. | ||||
| 
 | ||||
|         p | ||||
|             |  We've also made several usability improvements that are | ||||
|             |  particularly helpful for #[strong production deployments]. | ||||
|             |  spaCy v2 now fully supports the Pickle protocol, making it | ||||
|             |  easy to use spaCy with | ||||
|             |  #[+a("https://spark.apache.org/") Apache Spark]. The | ||||
|             |  string-to-integer mapping is #[strong no longer stateful], | ||||
|             |  making it easy to reconcile annotations made in different | ||||
|             |  processes. Models are smaller and use less memory, and the | ||||
|             |  APIs for serialization are now much more consistent. Custom | ||||
|             |  pipeline components let you modify the #[code Doc] at any | ||||
|             |  stage in the pipeline. You can now also add your own | ||||
|             |  custom attributes, properties and methods to the #[code Doc], | ||||
|             |  #[code Token] and #[code Span]. | ||||
| 
 | ||||
|     +table-of-contents | ||||
|         +item #[+a("#summary") Summary] | ||||
|         +item #[+a("#features") New features] | ||||
|         +item #[+a("#features-models") Neural network models] | ||||
|         +item #[+a("#features-pipelines") Improved processing pipelines] | ||||
|         +item #[+a("#features-text-classification") Text classification] | ||||
|         +item #[+a("#features-hash-ids") Hash values as IDs] | ||||
|         +item #[+a("#features-vectors") Improved word vectors support] | ||||
|         +item #[+a("#features-serializer") Saving, loading and serialization] | ||||
|         +item #[+a("#features-displacy") displaCy visualizer] | ||||
|         +item #[+a("#features-language") Language data and lazy loading] | ||||
|         +item #[+a("#features-matcher") Revised matcher API and phrase matcher] | ||||
|         +item #[+a("#incompat") Backwards incompatibilities] | ||||
|         +item #[+a("#migrating") Migrating from spaCy v1.x] | ||||
|         +item #[+a("#benchmarks") Benchmarks] | ||||
| 
 | ||||
| p | ||||
|     |  The main usability improvements you'll notice in spaCy v2.0 are around | ||||
|     |  #[strong defining, training and loading your own models] and components. | ||||
|     |  The new neural network models make it much easier to train a model from | ||||
|     |  scratch, or update an existing model with a few examples. In v1.x, the | ||||
|     |  statistical models depended on the state of the #[code Vocab]. If you | ||||
|     |  taught the model a new word, you would have to save and load a lot of | ||||
|     |  data — otherwise the model wouldn't correctly recall the features of your | ||||
|     |  new example. That's no longer the case. | ||||
| 
 | ||||
| p | ||||
|     |  Due to some clever use of hashing, the statistical models | ||||
|     |  #[strong never change size], even as they learn new vocabulary items. | ||||
|     |  The whole pipeline is also now fully differentiable. Even if you don't | ||||
|     |  have explicitly annotated data, you can update spaCy using all the | ||||
|     |  #[strong latest deep learning tricks] like adversarial training, noise | ||||
|     |  contrastive estimation or reinforcement learning. | ||||
|  | @ -2,531 +2,22 @@ | |||
| 
 | ||||
| include ../_includes/_mixins | ||||
| 
 | ||||
| p | ||||
|     |  We're very excited to finally introduce spaCy v2.0! On this page, you'll | ||||
|     |  find a summary of the new features, information on the backwards | ||||
|     |  incompatibilities, including a handy overview of what's been renamed or | ||||
|     |  deprecated. To help you make the most of v2.0, we also | ||||
|     |  #[strong re-wrote almost all of the usage guides and API docs], and added | ||||
|     |  more real-world examples. If you're new to spaCy, or just want to brush | ||||
|     |  up on some NLP basics and the details of the library, check out | ||||
|     |  the #[+a("/usage/spacy-101") spaCy 101 guide] that explains the most | ||||
|     |  important concepts with examples and illustrations. | ||||
| +section("summary") | ||||
|     include _v2/_summary | ||||
| 
 | ||||
| +h(2, "summary") Summary | ||||
| 
 | ||||
| +grid.o-no-block | ||||
|     +grid-col("half") | ||||
| 
 | ||||
|         p This release features | ||||
|             |  entirely new #[strong deep learning-powered models] for spaCy's tagger, | ||||
|             |  parser and entity recognizer. The new models are #[strong 20x smaller] | ||||
|             |  than the linear models that have powered spaCy until now: from 300 MB to | ||||
|             |  only 15 MB. | ||||
| 
 | ||||
|         p | ||||
|             |  We've also made several usability improvements that are | ||||
|             |  particularly helpful for #[strong production deployments]. spaCy | ||||
|             |  v2 now fully supports the Pickle protocol, making it easy to use | ||||
|             |  spaCy with #[+a("https://spark.apache.org/") Apache Spark]. The | ||||
|             |  string-to-integer mapping is #[strong no longer stateful], making | ||||
|             |  it easy to reconcile annotations made in different processes. | ||||
|             |  Models are smaller and use less memory, and the APIs for serialization | ||||
|             |  are now much more consistent. | ||||
| 
 | ||||
|     +table-of-contents | ||||
|         +item #[+a("#summary") Summary] | ||||
|         +item #[+a("#features") New features] | ||||
|         +item #[+a("#features-models") Neural network models] | ||||
|         +item #[+a("#features-pipelines") Improved processing pipelines] | ||||
|         +item #[+a("#features-text-classification") Text classification] | ||||
|         +item #[+a("#features-hash-ids") Hash values instead of integer IDs] | ||||
|         +item #[+a("#features-serializer") Saving, loading and serialization] | ||||
|         +item #[+a("#features-displacy") displaCy visualizer] | ||||
|         +item #[+a("#features-language") Language data and lazy loading] | ||||
|         +item #[+a("#features-matcher") Revised matcher API and phrase matcher] | ||||
|         +item #[+a("#incompat") Backwards incompatibilities] | ||||
|         +item #[+a("#migrating") Migrating from spaCy v1.x] | ||||
|         +item #[+a("#benchmarks") Benchmarks] | ||||
| 
 | ||||
| p | ||||
|     |  The main usability improvements you'll notice in spaCy v2.0 are around | ||||
|     |  #[strong defining, training and loading your own models] and components. | ||||
|     |  The new neural network models make it much easier to train a model from | ||||
|     |  scratch, or update an existing model with a few examples. In v1.x, the | ||||
|     |  statistical models depended on the state of the #[code Vocab]. If you | ||||
|     |  taught the model a new word, you would have to save and load a lot of | ||||
|     |  data — otherwise the model wouldn't correctly recall the features of your | ||||
|     |  new example. That's no longer the case. | ||||
| 
 | ||||
| p | ||||
|     |  Due to some clever use of hashing, the statistical models | ||||
|     |  #[strong never change size], even as they learn new vocabulary items. | ||||
|     |  The whole pipeline is also now fully differentiable. Even if you don't | ||||
|     |  have explicitly annotated data, you can update spaCy using all the | ||||
|     |  #[strong latest deep learning tricks] like adversarial training, noise | ||||
|     |  contrastive estimation or reinforcement learning. | ||||
| 
 | ||||
| +section("features") | ||||
|     +h(2, "features") New features | ||||
| 
 | ||||
|     p | ||||
|         |  This section contains an overview of the most important | ||||
|         |  #[strong new features and improvements]. The #[+a("/api") API docs] | ||||
|         |  include additional deprecation notes. New methods and functions that | ||||
|         |  were introduced in this version are marked with a #[+tag-new(2)] tag. | ||||
| 
 | ||||
|     +h(3, "features-models") Convolutional neural network models | ||||
| 
 | ||||
|     +aside-code("Example", "bash"). | ||||
|         spacy download en # default English model | ||||
|         spacy download de # default German model | ||||
|         spacy download fr # default French model | ||||
|         spacy download es # default Spanish model | ||||
|         spacy download xx_ent_wiki_sm # multi-language NER | ||||
| 
 | ||||
|     p | ||||
|         |  spaCy v2.0 features new neural models for tagging, | ||||
|         |  parsing and entity recognition. The models have | ||||
|         |  been designed and implemented from scratch specifically for spaCy, to | ||||
|         |  give you an unmatched balance of speed, size and accuracy. The new | ||||
|         |  models are #[strong 10× smaller], #[strong 20% more accurate], | ||||
|         |  and #[strong just as fast] as the previous generation. | ||||
|         |  #[strong GPU usage] is now supported via | ||||
|         |  #[+a("http://chainer.org") Chainer]'s CuPy module. | ||||
| 
 | ||||
|     +infobox | ||||
|         |  #[+label-inline Usage:] #[+a("/models") Models directory], | ||||
|         |  #[+a("/usage/#gpu") Using spaCy with GPU] | ||||
| 
 | ||||
|     +h(3, "features-pipelines") Improved processing pipelines | ||||
| 
 | ||||
|     +aside-code("Example"). | ||||
|         # Set custom attributes | ||||
|         Doc.set_extension('my_attr', default=False) | ||||
|         Token.set_extension('my_attr', getter=my_token_getter) | ||||
|         assert doc._.my_attr | ||||
|         assert token._.my_attr | ||||
| 
 | ||||
|         # Add components to the pipeline | ||||
|         my_component = lambda doc: doc | ||||
|         nlp.add_pipe(my_component) | ||||
| 
 | ||||
|     p | ||||
|         |  It's now much easier to #[strong customise the pipeline] with your own | ||||
|         |  components: functions that receive a #[code Doc] object, modify and | ||||
|         |  return it. Extensions let you write any | ||||
|         |  #[strong attributes, properties and methods] to the #[code Doc], | ||||
|         |  #[code Token] and #[code Span]. You can add data, implement new | ||||
|         |  features, integrate other libraries with spaCy or plug in your own | ||||
|         |  machine learning models. | ||||
| 
 | ||||
|     +image | ||||
|         include ../assets/img/pipeline.svg | ||||
| 
 | ||||
|     +infobox | ||||
|         |  #[+label-inline API:] #[+api("language") #[code Language]], | ||||
|         |  #[+api("doc#set_extension") #[code Doc.set_extension]], | ||||
|         |  #[+api("span#set_extension") #[code Span.set_extension]], | ||||
|         |  #[+api("token#set_extension") #[code Token.set_extension]] | ||||
|         |  #[+label-inline Usage:] | ||||
|         |  #[+a("/usage/processing-pipelines") Processing pipelines] | ||||
|         |  #[+label-inline Code:] | ||||
|         |  #[+src("/usage/examples#section-pipeline") Pipeline examples] | ||||
| 
 | ||||
|     +h(3, "features-text-classification") Text classification | ||||
| 
 | ||||
|     +aside-code("Example"). | ||||
|         from spacy.lang.en import English | ||||
|         nlp = English(pipeline=['tensorizer', 'tagger', 'textcat']) | ||||
| 
 | ||||
|     p | ||||
|         |  spaCy v2.0 lets you add text categorization models to spaCy pipelines. | ||||
|         |  The model supports classification with multiple, non-mutually exclusive | ||||
|         |  labels – so multiple labels can apply at once. You can change the model | ||||
|         |  architecture rather easily, but by default, the #[code TextCategorizer] | ||||
|         |  class uses a convolutional neural network to assign position-sensitive | ||||
|         |  vectors to each word in the document. | ||||
| 
 | ||||
|     +infobox | ||||
|         |  #[+label-inline API:] #[+api("textcategorizer") #[code TextCategorizer]], | ||||
|         |  #[+api("doc#attributes") #[code Doc.cats]], | ||||
|         |  #[+api("goldparse#attributes") #[code GoldParse.cats]]#[br] | ||||
|         |  #[+label-inline Usage:] #[+a("/usage/text-classification") Text classification] | ||||
| 
 | ||||
|     +h(3, "features-hash-ids") Hash values instead of integer IDs | ||||
| 
 | ||||
|     +aside-code("Example"). | ||||
|         doc = nlp(u'I love coffee') | ||||
|         assert doc.vocab.strings[u'coffee'] == 3197928453018144401 | ||||
|         assert doc.vocab.strings[3197928453018144401] == u'coffee' | ||||
| 
 | ||||
|         beer_hash = doc.vocab.strings.add(u'beer') | ||||
|         assert doc.vocab.strings[u'beer'] == beer_hash | ||||
|         assert doc.vocab.strings[beer_hash] == u'beer' | ||||
| 
 | ||||
|     p | ||||
|         |  The #[+api("stringstore") #[code StringStore]] now resolves all strings | ||||
|         |  to hash values instead of integer IDs. This means that the string-to-int | ||||
|         |  mapping #[strong no longer depends on the vocabulary state], making a lot | ||||
|         |  of workflows much simpler, especially during training. Unlike integer IDs | ||||
|         |  in spaCy v1.x, hash values will #[strong always match] – even across | ||||
|         |  models. Strings can now be added explicitly using the new | ||||
|         |  #[+api("stringstore#add") #[code Stringstore.add]] method. A token's hash | ||||
|         |  is available via #[code token.orth]. | ||||
| 
 | ||||
|     +infobox | ||||
|         |  #[+label-inline API:] #[+api("stringstore") #[code StringStore]] | ||||
|         |  #[+label-inline Usage:] #[+a("/usage/spacy-101#vocab") Vocab, hashes and lexemes 101] | ||||
| 
 | ||||
|     +h(3, "features-serializer") Saving, loading and serialization | ||||
| 
 | ||||
|     +aside-code("Example"). | ||||
|         nlp = spacy.load('en') # shortcut link | ||||
|         nlp = spacy.load('en_core_web_sm') # package | ||||
|         nlp = spacy.load('/path/to/en') # unicode path | ||||
|         nlp = spacy.load(Path('/path/to/en')) # pathlib Path | ||||
| 
 | ||||
|         nlp.to_disk('/path/to/nlp') | ||||
|         nlp = English().from_disk('/path/to/nlp') | ||||
| 
 | ||||
|     p | ||||
|         |  spaCy's serialization API has been made consistent across classes and | ||||
|         |  objects. All container classes, i.e. #[code Language], #[code Doc], | ||||
|         |  #[code Vocab] and #[code StringStore], now have #[code to_bytes()], | ||||
|         |  #[code from_bytes()], #[code to_disk()] and #[code from_disk()] methods | ||||
|         |  that support the Pickle protocol. | ||||
| 
 | ||||
|     p | ||||
|         |  The improved #[code spacy.load] makes loading models easier and more | ||||
|         |  transparent. You can load a model by supplying its | ||||
|         |  #[+a("/usage/models#usage") shortcut link], the name of an installed | ||||
|         |  #[+a("/usage/saving-loading#generating") model package] or a path. | ||||
|         |  The #[code Language] class to initialise will be determined based on the | ||||
|         |  model's settings. For a blank language, you can import the class directly, | ||||
|         |  e.g. #[code from spacy.lang.en import English]. | ||||
| 
 | ||||
|     +infobox | ||||
|         |  #[+label-inline API:] #[+api("spacy#load") #[code spacy.load]] | ||||
|         |  #[+label-inline Usage:] #[+a("/usage/saving-loading") Saving and loading] | ||||
| 
 | ||||
|     +h(3, "features-displacy") displaCy visualizer with Jupyter support | ||||
| 
 | ||||
|     +aside-code("Example"). | ||||
|         from spacy import displacy | ||||
|         doc = nlp(u'This is a sentence about Facebook.') | ||||
|         displacy.serve(doc, style='dep') # run the web server | ||||
|         html = displacy.render(doc, style='ent') # generate HTML | ||||
| 
 | ||||
|     p | ||||
|         |  Our popular dependency and named entity visualizers are now an official | ||||
|         |  part of the spaCy library. displaCy can run a simple web server, or | ||||
|         |  generate raw HTML markup or SVG files to be exported. You can pass in one | ||||
|         |  or more docs, and customise the style. displaCy also auto-detects whether | ||||
|         |  you're running #[+a("https://jupyter.org") Jupyter] and will render the | ||||
|         |  visualizations in your notebook. | ||||
| 
 | ||||
|     +infobox | ||||
|         |  #[+label-inline API:] #[+api("displacy") #[code displacy]] | ||||
|         |  #[+label-inline Usage:] #[+a("/usage/visualizers") Visualizing spaCy] | ||||
| 
 | ||||
|     +h(3, "features-language") Improved language data and lazy loading | ||||
| 
 | ||||
|     p | ||||
|         |  Language-specific data now lives in its own submodule, #[code spacy.lang]. | ||||
|         |  Languages are lazy-loaded, i.e. only loaded when you import a | ||||
|         |  #[code Language] class, or load a model that initialises one. This allows | ||||
|         |  languages to contain more custom data, e.g. lemmatizer lookup tables, or | ||||
|         |  complex regular expressions. The language data has also been tidied up | ||||
|         |  and simplified. spaCy now also supports simple lookup-based lemmatization. | ||||
| 
 | ||||
|     +infobox | ||||
|         |  #[+label-inline API:] #[+api("language") #[code Language]] | ||||
|         |  #[+label-inline Code:] #[+src(gh("spaCy", "spacy/lang")) #[code spacy/lang]] | ||||
|         |  #[+label-inline Usage:] #[+a("/usage/adding-languages") Adding languages] | ||||
| 
 | ||||
|     +h(3, "features-matcher") Revised matcher API and phrase matcher | ||||
| 
 | ||||
|     +aside-code("Example"). | ||||
|         from spacy.matcher import Matcher, PhraseMatcher | ||||
| 
 | ||||
|         matcher = Matcher(nlp.vocab) | ||||
|         matcher.add('HEARTS', None, [{'ORTH': '❤️', 'OP': '+'}]) | ||||
| 
 | ||||
|         phrasematcher = PhraseMatcher(nlp.vocab) | ||||
|         phrasematcher.add('OBAMA', None, nlp(u"Barack Obama")) | ||||
| 
 | ||||
|     p | ||||
|         |  Patterns can now be added to the matcher by calling | ||||
|         |  #[+api("matcher-add") #[code matcher.add()]] with a match ID, an optional | ||||
|         |  callback function to be invoked on each match, and one or more patterns. | ||||
|         |  This allows you to write powerful, pattern-specific logic using only one | ||||
|         |  matcher. For example, you might only want to merge some entity types, | ||||
|         |  and set custom flags for other matched patterns. The new | ||||
|         |  #[+api("phrasematcher") #[code PhraseMatcher]] lets you efficiently | ||||
|         |  match very large terminology lists using #[code Doc] objects as match | ||||
|         |  patterns. | ||||
| 
 | ||||
|     +infobox | ||||
|         |  #[+label-inline API:] #[+api("matcher") #[code Matcher]], | ||||
|         |  #[+api("phrasematcher") #[code PhraseMatcher]] | ||||
|         |  #[+label-inline Usage:] #[+a("/usage/rule-based-matching") Rule-based matching] | ||||
|     include _v2/_features | ||||
| 
 | ||||
| +section("incompat") | ||||
|     +h(2, "incompat") Backwards incompatibilities | ||||
| 
 | ||||
|     +table(["Old", "New"]) | ||||
|         +row | ||||
|             +cell | ||||
|                 |  #[code spacy.en] | ||||
|                 |  #[code spacy.xx] | ||||
|             +cell | ||||
|                 |  #[code spacy.lang.en] | ||||
|                 |  #[code spacy.lang.xx] | ||||
| 
 | ||||
|         +row | ||||
|             +cell #[code orth] | ||||
|             +cell #[code lang.xx.lex_attrs] | ||||
| 
 | ||||
|         +row | ||||
|             +cell #[code syntax.iterators] | ||||
|             +cell #[code lang.xx.syntax_iterators] | ||||
| 
 | ||||
|         +row | ||||
|             +cell #[code Language.save_to_directory] | ||||
|             +cell #[+api("language#to_disk") #[code Language.to_disk]] | ||||
| 
 | ||||
|         +row | ||||
|             +cell #[code Language.create_make_doc] | ||||
|             +cell #[+api("language#attributes") #[code Language.tokenizer]] | ||||
| 
 | ||||
|         +row | ||||
|             +cell | ||||
|                 |  #[code Vocab.load] | ||||
|                 |  #[code Vocab.load_lexemes] | ||||
|             +cell | ||||
|                 |  #[+api("vocab#from_disk") #[code Vocab.from_disk]] | ||||
|                 |  #[+api("vocab#from_bytes") #[code Vocab.from_bytes]] | ||||
| 
 | ||||
|         +row | ||||
|             +cell | ||||
|                 |  #[code Vocab.dump] | ||||
|             +cell | ||||
|                 |  #[+api("vocab#to_disk") #[code Vocab.to_disk]]#[br] | ||||
|                 |  #[+api("vocab#to_bytes") #[code Vocab.to_bytes]] | ||||
| 
 | ||||
|         +row | ||||
|             +cell | ||||
|                 |  #[code Vocab.load_vectors] | ||||
|                 |  #[code Vocab.load_vectors_from_bin_loc] | ||||
|             +cell | ||||
|                 |  #[+api("vectors#from_disk") #[code Vectors.from_disk]] | ||||
|                 |  #[+api("vectors#from_bytes") #[code Vectors.from_bytes]] | ||||
| 
 | ||||
|         +row | ||||
|             +cell | ||||
|                 |  #[code Vocab.dump_vectors] | ||||
|             +cell | ||||
|                 |  #[+api("vectors#to_disk") #[code Vectors.to_disk]] | ||||
|                 |  #[+api("vectors#to_bytes") #[code Vectors.to_bytes]] | ||||
| 
 | ||||
|         +row | ||||
|             +cell | ||||
|                 |  #[code StringStore.load] | ||||
|             +cell | ||||
|                 |  #[+api("stringstore#from_disk") #[code StringStore.from_disk]] | ||||
|                 |  #[+api("stringstore#from_bytes") #[code StringStore.from_bytes]] | ||||
| 
 | ||||
|         +row | ||||
|             +cell | ||||
|                 |  #[code StringStore.dump] | ||||
|             +cell | ||||
|                 |  #[+api("stringstore#to_disk") #[code StringStore.to_disk]] | ||||
|                 |  #[+api("stringstore#to_bytes") #[code StringStore.to_bytes]] | ||||
| 
 | ||||
|         +row | ||||
|             +cell #[code Tokenizer.load] | ||||
|             +cell | ||||
|                 |  #[+api("tokenizer#from_disk") #[code Tokenizer.from_disk]] | ||||
|                 |  #[+api("tokenizer#from_bytes") #[code Tokenizer.from_bytes]] | ||||
| 
 | ||||
|         +row | ||||
|             +cell #[code Tagger.load] | ||||
|             +cell | ||||
|                 |  #[+api("tagger#from_disk") #[code Tagger.from_disk]] | ||||
|                 |  #[+api("tagger#from_bytes") #[code Tagger.from_bytes]] | ||||
| 
 | ||||
|         +row | ||||
|             +cell #[code DependencyParser.load] | ||||
|             +cell | ||||
|                 |  #[+api("dependencyparser#from_disk") #[code DependencyParser.from_disk]] | ||||
|                 |  #[+api("dependencyparser#from_bytes") #[code DependencyParser.from_bytes]] | ||||
| 
 | ||||
|         +row | ||||
|             +cell #[code EntityRecognizer.load] | ||||
|             +cell | ||||
|                 |  #[+api("entityrecognizer#from_disk") #[code EntityRecognizer.from_disk]] | ||||
|                 |  #[+api("entityrecognizer#from_bytes") #[code EntityRecognizer.from_bytes]] | ||||
| 
 | ||||
|         +row | ||||
|             +cell #[code Matcher.load] | ||||
|             +cell - | ||||
| 
 | ||||
|         +row | ||||
|             +cell | ||||
|                 |  #[code Matcher.add_pattern] | ||||
|                 |  #[code Matcher.add_entity] | ||||
|             +cell #[+api("matcher#add") #[code Matcher.add]] | ||||
| 
 | ||||
|         +row | ||||
|             +cell #[code Matcher.get_entity] | ||||
|             +cell #[+api("matcher#get") #[code Matcher.get]] | ||||
| 
 | ||||
|         +row | ||||
|             +cell #[code Matcher.has_entity] | ||||
|             +cell #[+api("matcher#contains") #[code Matcher.__contains__]] | ||||
| 
 | ||||
|         +row | ||||
|             +cell #[code Doc.read_bytes] | ||||
|             +cell | ||||
|                 |  #[+api("doc#to_bytes") #[code Doc.to_bytes]] | ||||
|                 |  #[+api("doc#from_bytes") #[code Doc.from_bytes]] | ||||
|                 |  #[+api("doc#to_disk") #[code Doc.to_disk]] | ||||
|                 |  #[+api("doc#from_disk") #[code Doc.from_disk]] | ||||
| 
 | ||||
|         +row | ||||
|             +cell #[code Token.is_ancestor_of] | ||||
|             +cell #[+api("token#is_ancestor") #[code Token.is_ancestor]] | ||||
| 
 | ||||
|         +row | ||||
|             +cell #[code cli.model] | ||||
|             +cell - | ||||
|     include _v2/_incompat | ||||
| 
 | ||||
| +section("migrating") | ||||
|     +h(2, "migrating") Migrating from spaCy 1.x | ||||
| 
 | ||||
|     p | ||||
|         |  Because we've made so many architectural changes to the library, we've | ||||
|         |  tried to #[strong keep breaking changes to a minimum]. A lot of projects | ||||
|         |  follow the philosophy that if you're going to break anything, you may as | ||||
|         |  well break everything. We think migration is easier if there's a logic to | ||||
|         |  what has changed. | ||||
| 
 | ||||
|     p | ||||
|         |  We've therefore followed a policy of avoiding breaking changes to the | ||||
|         |  #[code Doc], #[code Span] and #[code Token] objects. This way, you can | ||||
|         |  focus on only migrating the code that does training, loading and | ||||
|         |  serialization — in other words, code that works with the #[code nlp] | ||||
|         |  object directly. Code that uses the annotations should continue to work. | ||||
| 
 | ||||
|     +infobox("Important note") | ||||
|         |  If you've trained your own models, keep in mind that your training | ||||
|         |  and runtime inputs must match. This means you'll have to | ||||
|         |  #[strong retrain your models] with spaCy v2.0. | ||||
| 
 | ||||
|     +h(3, "migrating-saving-loading") Saving, loading and serialization | ||||
| 
 | ||||
|     p | ||||
|         |  Double-check all calls to #[code spacy.load()] and make sure they don't | ||||
|         |  use the #[code path] keyword argument. If you're only loading in binary | ||||
|         |  data and not a model package that can construct its own #[code Language] | ||||
|         |  class and pipeline, you should now use the | ||||
|         |  #[+api("language#from_disk") #[code Language.from_disk()]] method. | ||||
| 
 | ||||
|     +code-new. | ||||
|         nlp = spacy.load('/model') | ||||
|         nlp = English().from_disk('/model/data') | ||||
|     +code-old nlp = spacy.load('en', path='/model') | ||||
| 
 | ||||
|     p | ||||
|         |  Review all other code that writes state to disk or bytes. | ||||
|         |  All containers now share the same, consistent API for saving and | ||||
|         |  loading. Replace saving with #[code to_disk()] or #[code to_bytes()], and | ||||
|         |  loading with #[code from_disk()] and #[code from_bytes()]. | ||||
| 
 | ||||
|     +code-new. | ||||
|         nlp.to_disk('/model') | ||||
|         nlp.vocab.to_disk('/vocab') | ||||
| 
 | ||||
|     +code-old. | ||||
|         nlp.save_to_directory('/model') | ||||
|         nlp.vocab.dump('/vocab') | ||||
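| 
 | ||||
|     p | ||||
|         |  The same API also works with byte strings, which is handy if you're | ||||
|         |  storing state somewhere other than the file system. As a rough | ||||
|         |  sketch, assuming an #[code nlp] object of the #[code English] class: | ||||
| 
 | ||||
|     +code. | ||||
|         data = nlp.to_bytes()   # serialize the pipeline to a byte string | ||||
|         nlp2 = English()        # blank pipeline of the same class | ||||
|         nlp2.from_bytes(data)   # restore the saved state | ||||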
| 
 | ||||
|     p | ||||
|         |  If you've trained models with input from v1.x, you'll need to | ||||
|         |  #[strong retrain them] with spaCy v2.0, as none of the previous | ||||
|         |  models are compatible with the new version. | ||||
| 
 | ||||
|     +h(3, "migrating-strings") Strings and hash values | ||||
| 
 | ||||
|     p | ||||
|         |  The change from integer IDs to hash values may not actually affect your | ||||
|         |  code very much. However, if you're adding strings to the vocab manually, | ||||
|         |  you now need to call #[+api("stringstore#add") #[code StringStore.add()]] | ||||
|         |  explicitly. You can also now be sure that the string-to-hash mapping will | ||||
|         |  always match across vocabularies. | ||||
| 
 | ||||
|     +code-new. | ||||
|         nlp.vocab.strings.add(u'coffee') | ||||
|         nlp.vocab.strings[u'coffee']       # 3197928453018144401 | ||||
|         other_nlp.vocab.strings[u'coffee'] # 3197928453018144401 | ||||
| 
 | ||||
|     +code-old. | ||||
|         nlp.vocab.strings[u'coffee']       # 3672 | ||||
|         other_nlp.vocab.strings[u'coffee'] # 40259 | ||||
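| 
 | ||||
|     p | ||||
|         |  Because the new IDs are hashes of the strings themselves, the lookup | ||||
|         |  also works in reverse, provided the string has been added to the | ||||
|         |  store first: | ||||
| 
 | ||||
|     +code. | ||||
|         nlp.vocab.strings.add(u'coffee') | ||||
|         coffee_hash = nlp.vocab.strings[u'coffee']  # 3197928453018144401 | ||||
|         nlp.vocab.strings[coffee_hash]              # u'coffee' | ||||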
| 
 | ||||
|     +h(3, "migrating-languages") Processing pipelines and language data | ||||
| 
 | ||||
|     p | ||||
|         |  If you're importing language data or #[code Language] classes, make sure | ||||
|         |  to change your import statements to import from #[code spacy.lang]. If | ||||
|         |  you've added your own custom language, it needs to be moved to | ||||
|         |  #[code spacy/lang/xx] and adjusted accordingly. | ||||
| 
 | ||||
|     +code-new from spacy.lang.en import English | ||||
|     +code-old from spacy.en import English | ||||
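| 
 | ||||
|     p | ||||
|         |  The same applies to language data you import directly. As an | ||||
|         |  illustrative sketch, stop words and the other language classes now | ||||
|         |  live in submodules of #[code spacy.lang]: | ||||
| 
 | ||||
|     +code. | ||||
|         from spacy.lang.en.stop_words import STOP_WORDS | ||||
|         from spacy.lang.de import German | ||||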
| 
 | ||||
|     p | ||||
|         |  If you've been using custom pipeline components, check out the new | ||||
|         |  guide on #[+a("/usage/language-processing-pipelines") processing pipelines]. | ||||
|         |  Appending functions to the pipeline still works – but the | ||||
|         |  #[+api("language#add_pipe") #[code add_pipe]] method now makes this | ||||
|         |  much more convenient, as sketched below. Components of the | ||||
|         |  processing pipeline can now be disabled by passing a list of their | ||||
|         |  names to the #[code disable] keyword argument on load, or by | ||||
|         |  removing them from the pipeline altogether. | ||||
| 
 | ||||
|     +code-new. | ||||
|         nlp = spacy.load('en', disable=['tagger', 'ner']) | ||||
|         doc = nlp(u"I don't want parsed", disable['parser']) | ||||
|         nlp.remove_pipe('parser') | ||||
|     +code-old. | ||||
|         nlp = spacy.load('en', tagger=False, entity=False) | ||||
|         doc = nlp(u"I don't want parsed", parse=False) | ||||
| 
 | ||||
|     +h(3, "migrating-matcher") Adding patterns and callbacks to the matcher | ||||
| 
 | ||||
|     p | ||||
|         |  If you're using the matcher, you can now add patterns in one step. This | ||||
|         |  should be easy to update – simply merge the ID, callback and patterns | ||||
|         |  into one call to #[+api("matcher#add") #[code Matcher.add()]]. | ||||
| 
 | ||||
|     +code-new. | ||||
|         matcher.add('GoogleNow', merge_phrases, [{ORTH: 'Google'}, {ORTH: 'Now'}]) | ||||
| 
 | ||||
|     +code-old. | ||||
|         matcher.add_entity('GoogleNow', on_match=merge_phrases) | ||||
|         matcher.add_pattern('GoogleNow', [{ORTH: 'Google'}, {ORTH: 'Now'}]) | ||||
| 
 | ||||
|     p | ||||
|         |  If you've been using #[strong acceptor functions], you'll need to | ||||
|         |  move this logic into the | ||||
|         |  #[+a("/usage/rule-based-matching#on_match") #[code on_match] callbacks]. | ||||
|         |  The callback function is invoked on every match and gives you access | ||||
|         |  to the doc, the index of the current match and the list of all | ||||
|         |  matches. This lets you accept or reject the match, and define the | ||||
|         |  actions to be triggered. | ||||
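| 
 | ||||
|     p | ||||
|         |  As a minimal sketch, an #[code on_match] callback receives the | ||||
|         |  matcher, the doc, the position of the current match and the list | ||||
|         |  of all matches: | ||||
| 
 | ||||
|     +code. | ||||
|         def merge_phrases(matcher, doc, i, matches): | ||||
|             match_id, start, end = matches[i]  # the match that fired | ||||
|             span = doc[start:end]              # the matched slice of the doc | ||||
|             span.merge()                       # e.g. merge it into one token | ||||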
|     include _v2/_migrating | ||||
| 
 | ||||
| +section("benchmarks") | ||||
|     +h(2, "benchmarks") Benchmarks | ||||
| 
 | ||||
|     include _facts-figures/_benchmarks-models | ||||