From 0b4b4f1819df2b8a885a2ce6a89a2bfe88249aba Mon Sep 17 00:00:00 2001 From: Sofie Van Landeghem Date: Thu, 12 Sep 2019 11:38:34 +0200 Subject: [PATCH] Documentation for Entity Linking (#4065) * document token ent_kb_id * document span kb_id * update pipeline documentation * prior and context weights as bool's instead * entitylinker api documentation * drop for both models * finish entitylinker documentation * small fixes * documentation for KB * candidate documentation * links to api pages in code * small fix * frequency examples as counts for consistency * consistent documentation about tensors returned by predict * add entity linking to usage 101 * add entity linking infobox and KB section to 101 * entity-linking in linguistic features * small typo corrections * training example and docs for entity_linker * predefined nlp and kb * revert back to similarity encodings for simplicity (for now) * set prior probabilities to 0 when excluded * code clean up * bugfix: deleting kb ID from tokens when entities were removed * refactor train el example to use either model or vocab * pretrain_kb example for example kb generation * add to training docs for KB + EL example scripts * small fixes * error numbering * ensure the language of vocab and nlp stay consistent across serialization * equality with = * avoid conflict in errors file * add error 151 * final adjustements to the train scripts - consistency * update of goldparse documentation * small corrections * push commit * typo fix * add candidate API to kb documentation * update API sidebar with EntityLinker and KnowledgeBase * remove EL from 101 docs * remove entity linker from 101 pipelines / rephrase * custom el model instead of existing model * set version to 2.2 for EL functionality * update documentation for 2 CLI scripts --- examples/pipeline/dummy_entity_linking.py | 0 examples/pipeline/wikidata_entity_linking.py | 0 examples/training/pretrain_kb.py | 5 +- examples/training/train_entity_linker.py | 4 +- 
spacy/kb.pyx | 2 +- website/docs/api/cli.md | 2 +- website/docs/api/entitylinker.md | 297 +++++++++++++++++++ website/docs/api/entityrecognizer.md | 11 +- website/docs/api/goldparse.md | 22 +- website/docs/api/kb.md | 268 +++++++++++++++++ website/docs/api/span.md | 21 +- website/docs/api/tagger.md | 16 +- website/docs/api/textcategorizer.md | 11 +- website/docs/api/token.md | 4 +- website/docs/usage/101/_named-entities.md | 12 +- website/docs/usage/101/_pipelines.md | 20 +- website/docs/usage/101/_training.md | 2 +- website/docs/usage/facts-figures.md | 2 +- website/docs/usage/linguistic-features.md | 48 +++ website/docs/usage/processing-pipelines.md | 1 + website/docs/usage/spacy-101.md | 79 +++++ website/docs/usage/training.md | 83 +++++- website/meta/sidebars.json | 2 + 23 files changed, 847 insertions(+), 65 deletions(-) create mode 100644 examples/pipeline/dummy_entity_linking.py create mode 100644 examples/pipeline/wikidata_entity_linking.py create mode 100644 website/docs/api/entitylinker.md create mode 100644 website/docs/api/kb.md diff --git a/examples/pipeline/dummy_entity_linking.py b/examples/pipeline/dummy_entity_linking.py new file mode 100644 index 000000000..e69de29bb diff --git a/examples/pipeline/wikidata_entity_linking.py b/examples/pipeline/wikidata_entity_linking.py new file mode 100644 index 000000000..e69de29bb diff --git a/examples/training/pretrain_kb.py b/examples/training/pretrain_kb.py index d5281ad42..2c494d5c4 100644 --- a/examples/training/pretrain_kb.py +++ b/examples/training/pretrain_kb.py @@ -8,8 +8,8 @@ For more details, see the documentation: * Knowledge base: https://spacy.io/api/kb * Entity Linking: https://spacy.io/usage/linguistic-features#entity-linking -Compatible with: spaCy vX.X -Last tested with: vX.X +Compatible with: spaCy v2.2 +Last tested with: v2.2 """ from __future__ import unicode_literals, print_function @@ -73,7 +73,6 @@ def main(vocab_path=None, model=None, output_dir=None, n_iter=50): input_dim=INPUT_DIM, 
desc_width=DESC_WIDTH, epochs=n_iter, - threshold=0.001, ) encoder.train(description_list=descriptions, to_print=True) diff --git a/examples/training/train_entity_linker.py b/examples/training/train_entity_linker.py index 12ed531a6..d2b2c2417 100644 --- a/examples/training/train_entity_linker.py +++ b/examples/training/train_entity_linker.py @@ -8,8 +8,8 @@ For more details, see the documentation: * Training: https://spacy.io/usage/training * Entity Linking: https://spacy.io/usage/linguistic-features#entity-linking -Compatible with: spaCy vX.X -Last tested with: vX.X +Compatible with: spaCy v2.2 +Last tested with: v2.2 """ from __future__ import unicode_literals, print_function diff --git a/spacy/kb.pyx b/spacy/kb.pyx index 176ac17de..6cbc06e2c 100644 --- a/spacy/kb.pyx +++ b/spacy/kb.pyx @@ -24,7 +24,7 @@ cdef class Candidate: algorithm which will disambiguate the various candidates to the correct one. Each candidate (alias, entity) pair is assigned to a certain prior probability. - DOCS: https://spacy.io/api/candidate + DOCS: https://spacy.io/api/kb/#candidate_init """ def __init__(self, KnowledgeBase kb, entity_hash, entity_freq, entity_vector, alias_hash, prior_prob): diff --git a/website/docs/api/cli.md b/website/docs/api/cli.md index 32e3623b0..d01637925 100644 --- a/website/docs/api/cli.md +++ b/website/docs/api/cli.md @@ -226,7 +226,7 @@ $ python -m spacy train [lang] [output_path] [train_path] [dev_path] | `--entity-multitasks`, `-et` | option | Side objectives for NER CNN, e.g. `'dep'` or `'dep,tag'` | | `--noise-level`, `-nl` | option | Float indicating the amount of corruption for data augmentation. | | `--gold-preproc`, `-G` | flag | Use gold preprocessing. | -| `--learn-tokens`, `-T` | flag | Make parser learn gold-standard tokenization by merging ] subtokens. Typically used for languages like Chinese. | +| `--learn-tokens`, `-T` | flag | Make parser learn gold-standard tokenization by merging subtokens. Typically used for languages like Chinese. 
| | `--verbose`, `-VV` 2.0.13 | flag | Show more detailed messages during training. | | `--help`, `-h` | flag | Show help message and available arguments. | | **CREATES** | model, pickle | A spaCy model on each epoch. | diff --git a/website/docs/api/entitylinker.md b/website/docs/api/entitylinker.md new file mode 100644 index 000000000..64db50943 --- /dev/null +++ b/website/docs/api/entitylinker.md @@ -0,0 +1,297 @@ +--- +title: EntityLinker +teaser: Functionality to disambiguate a named entity in text to a unique knowledge base identifier. +tag: class +source: spacy/pipeline/pipes.pyx +new: 2.2 +--- + +This class is a subclass of `Pipe` and follows the same API. The pipeline +component is available in the [processing pipeline](/usage/processing-pipelines) +via the ID `"entity_linker"`. + +## EntityLinker.Model {#model tag="classmethod"} + +Initialize a model for the pipe. The model should implement the +`thinc.neural.Model` API, and should contain a field `tok2vec` that contains +the context encoder. Wrappers are under development for most major machine +learning libraries. + +| Name | Type | Description | +| ----------- | ------ | ------------------------------------- | +| `**kwargs` | - | Parameters for initializing the model | +| **RETURNS** | object | The initialized model. | + +## EntityLinker.\_\_init\_\_ {#init tag="method"} + +Create a new pipeline instance. In your application, you would normally use a +shortcut for this and instantiate the component using its string name and +[`nlp.create_pipe`](/api/language#create_pipe). 
+
+> #### Example
+>
+> ```python
+> # Construction via create_pipe
+> entity_linker = nlp.create_pipe("entity_linker")
+>
+> # Construction from class
+> from spacy.pipeline import EntityLinker
+> entity_linker = EntityLinker(nlp.vocab)
+> entity_linker.from_disk("/path/to/model")
+> ```
+
+| Name | Type | Description |
+| --------------- | ----------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `vocab` | `Vocab` | The shared vocabulary. |
+| `model` | `thinc.neural.Model` / `True` | The model powering the pipeline component. If no model is supplied, the model is created when you call `begin_training`, `from_disk` or `from_bytes`. |
+| `hidden_width` | int | Width of the hidden layer of the entity linking model, defaults to 128. |
+| `incl_prior` | bool | Whether or not to include prior probabilities in the model. Defaults to True. |
+| `incl_context` | bool | Whether or not to include the local context in the model (if not: only prior probabilities are used). Defaults to True. |
+| **RETURNS** | `EntityLinker` | The newly constructed object. |
+
+## EntityLinker.\_\_call\_\_ {#call tag="method"}
+
+Apply the pipe to one document. The document is modified in place, and returned.
+This usually happens under the hood when the `nlp` object is called on a text
+and all pipeline components are applied to the `Doc` in order. Both
+[`__call__`](/api/entitylinker#call) and
+[`pipe`](/api/entitylinker#pipe) delegate to the
+[`predict`](/api/entitylinker#predict) and
+[`set_annotations`](/api/entitylinker#set_annotations) methods. 
+ +> #### Example +> +> ```python +> entity_linker = EntityLinker(nlp.vocab) +> doc = nlp(u"This is a sentence.") +> # This usually happens under the hood +> processed = entity_linker(doc) +> ``` + +| Name | Type | Description | +| ----------- | ----- | ------------------------ | +| `doc` | `Doc` | The document to process. | +| **RETURNS** | `Doc` | The processed document. | + +## EntityLinker.pipe {#pipe tag="method"} + +Apply the pipe to a stream of documents. This usually happens under the hood +when the `nlp` object is called on a text and all pipeline components are +applied to the `Doc` in order. Both [`__call__`](/api/entitylinker#call) and +[`pipe`](/api/entitylinker#pipe) delegate to the +[`predict`](/api/entitylinker#predict) and +[`set_annotations`](/api/entitylinker#set_annotations) methods. + +> #### Example +> +> ```python +> entity_linker = EntityLinker(nlp.vocab) +> for doc in entity_linker.pipe(docs, batch_size=50): +> pass +> ``` + +| Name | Type | Description | +| ------------ | -------- | ------------------------------------------------------ | +| `stream` | iterable | A stream of documents. | +| `batch_size` | int | The number of texts to buffer. Defaults to `128`. | +| **YIELDS** | `Doc` | Processed documents in the order of the original text. | + +## EntityLinker.predict {#predict tag="method"} + +Apply the pipeline's model to a batch of docs, without modifying them. + +> #### Example +> +> ```python +> entity_linker = EntityLinker(nlp.vocab) +> kb_ids, tensors = entity_linker.predict([doc1, doc2]) +> ``` + +| Name | Type | Description | +| ----------- | -------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `docs` | iterable | The documents to predict. 
| +| **RETURNS** | tuple | A `(kb_ids, tensors)` tuple where `kb_ids` are the model's predicted KB identifiers for the entities in the `docs`, and `tensors` are the token representations used to predict these identifiers. | + +## EntityLinker.set_annotations {#set_annotations tag="method"} + +Modify a batch of documents, using pre-computed entity IDs for a list of named entities. + +> #### Example +> +> ```python +> entity_linker = EntityLinker(nlp.vocab) +> kb_ids, tensors = entity_linker.predict([doc1, doc2]) +> entity_linker.set_annotations([doc1, doc2], kb_ids, tensors) +> ``` + +| Name | Type | Description | +| ---------- | -------- | --------------------------------------------------------------------------------------------------- | +| `docs` | iterable | The documents to modify. | +| `kb_ids` | iterable | The knowledge base identifiers for the entities in the docs, predicted by `EntityLinker.predict`. | +| `tensors` | iterable | The token representations used to predict the identifiers. | + +## EntityLinker.update {#update tag="method"} + +Learn from a batch of documents and gold-standard information, updating both the +pipe's entity linking model and context encoder. Delegates to [`predict`](/api/entitylinker#predict) and +[`get_loss`](/api/entitylinker#get_loss). + +> #### Example +> +> ```python +> entity_linker = EntityLinker(nlp.vocab) +> losses = {} +> optimizer = nlp.begin_training() +> entity_linker.update([doc1, doc2], [gold1, gold2], losses=losses, sgd=optimizer) +> ``` + +| Name | Type | Description | +| -------- | -------- | ------------------------------------------------------------------------------------------------------------- | +| `docs` | iterable | A batch of documents to learn from. | +| `golds` | iterable | The gold-standard data. Must have the same length as `docs`. | +| `drop` | float | The dropout rate, used both for the EL model and the context encoder. | +| `sgd` | callable | The optimizer for the EL model. 
Should take two arguments `weights` and `gradient`, and an optional ID. | +| `losses` | dict | Optional record of the loss during training. The value keyed by the model's name is updated. | + +## EntityLinker.get_loss {#get_loss tag="method"} + +Find the loss and gradient of loss for the entities in a batch of documents and their +predicted scores. + +> #### Example +> +> ```python +> entity_linker = EntityLinker(nlp.vocab) +> kb_ids, tensors = entity_linker.predict(docs) +> loss, d_loss = entity_linker.get_loss(docs, [gold1, gold2], kb_ids, tensors) +> ``` + +| Name | Type | Description | +| --------------- | -------- | ------------------------------------------------------------ | +| `docs` | iterable | The batch of documents. | +| `golds` | iterable | The gold-standard data. Must have the same length as `docs`. | +| `kb_ids` | iterable | KB identifiers representing the model's predictions. | +| `tensors` | iterable | The token representations used to predict the identifiers | +| **RETURNS** | tuple | The loss and the gradient, i.e. `(loss, gradient)`. | + +## EntityLinker.set_kb {#set_kb tag="method"} + +Define the knowledge base (KB) used for disambiguating named entities to KB identifiers. + +> #### Example +> +> ```python +> entity_linker = EntityLinker(nlp.vocab) +> entity_linker.set_kb(kb) +> ``` + +| Name | Type | Description | +| --------------- | --------------- | ------------------------------------------------------------ | +| `kb` | `KnowledgeBase` | The [`KnowledgeBase`](/api/kb). | + +## EntityLinker.begin_training {#begin_training tag="method"} + +Initialize the pipe for training, using data examples if available. If no model +has been initialized yet, the model is added. +Before calling this method, a knowledge base should have been defined with [`set_kb`](/api/entitylinker#set_kb). 
+ +> #### Example +> +> ```python +> entity_linker = EntityLinker(nlp.vocab) +> entity_linker.set_kb(kb) +> nlp.add_pipe(entity_linker, last=True) +> optimizer = entity_linker.begin_training(pipeline=nlp.pipeline) +> ``` + +| Name | Type | Description | +| ------------- | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `gold_tuples` | iterable | Optional gold-standard annotations from which to construct [`GoldParse`](/api/goldparse) objects. | +| `pipeline` | list | Optional list of pipeline components that this component is part of. | +| `sgd` | callable | An optional optimizer. Should take two arguments `weights` and `gradient`, and an optional ID. Will be created via [`EntityLinker`](/api/entitylinker#create_optimizer) if not set. | +| **RETURNS** | callable | An optimizer. | + +## EntityLinker.create_optimizer {#create_optimizer tag="method"} + +Create an optimizer for the pipeline component. + +> #### Example +> +> ```python +> entity_linker = EntityLinker(nlp.vocab) +> optimizer = entity_linker.create_optimizer() +> ``` + +| Name | Type | Description | +| ----------- | -------- | -------------- | +| **RETURNS** | callable | The optimizer. | + +## EntityLinker.use_params {#use_params tag="method, contextmanager"} + +Modify the pipe's EL model, to use the given parameter values. + +> #### Example +> +> ```python +> entity_linker = EntityLinker(nlp.vocab) +> with entity_linker.use_params(optimizer.averages): +> entity_linker.to_disk("/best_model") +> ``` + +| Name | Type | Description | +| -------- | ---- | ---------------------------------------------------------------------------------------------------------- | +| `params` | dict | The parameter values to use in the model. At the end of the context, the original parameters are restored. 
| + + +## EntityLinker.to_disk {#to_disk tag="method"} + +Serialize the pipe to disk. + +> #### Example +> +> ```python +> entity_linker = EntityLinker(nlp.vocab) +> entity_linker.to_disk("/path/to/entity_linker") +> ``` + +| Name | Type | Description | +| --------- | ---------------- | --------------------------------------------------------------------------------------------------------------------- | +| `path` | unicode / `Path` | A path to a directory, which will be created if it doesn't exist. Paths may be either strings or `Path`-like objects. | +| `exclude` | list | String names of [serialization fields](#serialization-fields) to exclude. | + +## EntityLinker.from_disk {#from_disk tag="method"} + +Load the pipe from disk. Modifies the object in place and returns it. + +> #### Example +> +> ```python +> entity_linker = EntityLinker(nlp.vocab) +> entity_linker.from_disk("/path/to/entity_linker") +> ``` + +| Name | Type | Description | +| ----------- | ------------------ | -------------------------------------------------------------------------- | +| `path` | unicode / `Path` | A path to a directory. Paths may be either strings or `Path`-like objects. | +| `exclude` | list | String names of [serialization fields](#serialization-fields) to exclude. | +| **RETURNS** | `EntityLinker` | The modified `EntityLinker` object. | + +## Serialization fields {#serialization-fields} + +During serialization, spaCy will export several data fields used to restore +different aspects of the object. If needed, you can exclude them from +serialization by passing in the string names via the `exclude` argument. + +> #### Example +> +> ```python +> data = entity_linker.to_disk("/path", exclude=["vocab"]) +> ``` + +| Name | Description | +| ------- | -------------------------------------------------------------- | +| `vocab` | The shared [`Vocab`](/api/vocab). | +| `cfg` | The config file. You usually don't want to exclude this. | +| `model` | The binary model data. 
You usually don't want to exclude this. | +| `kb` | The knowledge base. You usually don't want to exclude this. | + diff --git a/website/docs/api/entityrecognizer.md b/website/docs/api/entityrecognizer.md index 7279a7f77..46e8b44ee 100644 --- a/website/docs/api/entityrecognizer.md +++ b/website/docs/api/entityrecognizer.md @@ -99,7 +99,7 @@ Apply the pipeline's model to a batch of docs, without modifying them. > > ```python > ner = EntityRecognizer(nlp.vocab) -> scores = ner.predict([doc1, doc2]) +> scores, tensors = ner.predict([doc1, doc2]) > ``` | Name | Type | Description | @@ -115,14 +115,15 @@ Modify a batch of documents, using pre-computed scores. > > ```python > ner = EntityRecognizer(nlp.vocab) -> scores = ner.predict([doc1, doc2]) -> ner.set_annotations([doc1, doc2], scores) +> scores, tensors = ner.predict([doc1, doc2]) +> ner.set_annotations([doc1, doc2], scores, tensors) > ``` | Name | Type | Description | | -------- | -------- | ---------------------------------------------------------- | | `docs` | iterable | The documents to modify. | | `scores` | - | The scores to set, produced by `EntityRecognizer.predict`. | +| `tensors`| iterable | The token representations used to predict the scores. | ## EntityRecognizer.update {#update tag="method"} @@ -210,13 +211,13 @@ Modify the pipe's model, to use the given parameter values. > > ```python > ner = EntityRecognizer(nlp.vocab) -> with ner.use_params(): +> with ner.use_params(optimizer.averages): > ner.to_disk("/best_model") > ``` | Name | Type | Description | | -------- | ---- | ---------------------------------------------------------------------------------------------------------- | -| `params` | - | The parameter values to use in the model. At the end of the context, the original parameters are restored. | +| `params` | dict | The parameter values to use in the model. At the end of the context, the original parameters are restored. 
| ## EntityRecognizer.add_label {#add_label tag="method"} diff --git a/website/docs/api/goldparse.md b/website/docs/api/goldparse.md index 5a2d8a110..db7d07795 100644 --- a/website/docs/api/goldparse.md +++ b/website/docs/api/goldparse.md @@ -23,6 +23,7 @@ gradient for those labels will be zero. | `deps` | iterable | A sequence of strings, representing the syntactic relation types. | | `entities` | iterable | A sequence of named entity annotations, either as BILUO tag strings, or as `(start_char, end_char, label)` tuples, representing the entity positions. If BILUO tag strings, you can specify missing values by setting the tag to None. | | `cats` | dict | Labels for text classification. Each key in the dictionary may be a string or an int, or a `(start_char, end_char, label)` tuple, indicating that the label is applied to only part of the document (usually a sentence). | +| `links` | dict | Labels for entity linking. A dict with `(start_char, end_char)` keys, and the values being dicts with `kb_id:value` entries, representing external KB IDs mapped to either 1.0 (positive) or 0.0 (negative). | | **RETURNS** | `GoldParse` | The newly constructed object. | ## GoldParse.\_\_len\_\_ {#len tag="method"} @@ -43,16 +44,17 @@ Whether the provided syntactic annotations form a projective dependency tree. ## Attributes {#attributes} -| Name | Type | Description | -| --------------------------------- | ---- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `words` | list | The words. | -| `tags` | list | The part-of-speech tag annotations. | -| `heads` | list | The syntactic head annotations. | -| `labels` | list | The syntactic relation-type annotations. | -| `ner` | list | The named entity annotations as BILUO tags. | -| `cand_to_gold` | list | The alignment from candidate tokenization to gold tokenization. 
| -| `gold_to_cand` | list | The alignment from gold tokenization to candidate tokenization. | -| `cats` 2 | list | Entries in the list should be either a label, or a `(start, end, label)` triple. The tuple form is used for categories applied to spans of the document. | +| Name | Type | Description | +| ------------------------------------ | ---- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `words` | list | The words. | +| `tags` | list | The part-of-speech tag annotations. | +| `heads` | list | The syntactic head annotations. | +| `labels` | list | The syntactic relation-type annotations. | +| `ner` | list | The named entity annotations as BILUO tags. | +| `cand_to_gold` | list | The alignment from candidate tokenization to gold tokenization. | +| `gold_to_cand` | list | The alignment from gold tokenization to candidate tokenization. | +| `cats` 2 | list | Entries in the list should be either a label, or a `(start, end, label)` triple. The tuple form is used for categories applied to spans of the document. | +| `links` 2.2 | dict | Keys in the dictionary are `(start_char, end_char)` triples, and the values are dictionaries with `kb_id:value` entries. | ## Utilities {#util} diff --git a/website/docs/api/kb.md b/website/docs/api/kb.md new file mode 100644 index 000000000..639ababb6 --- /dev/null +++ b/website/docs/api/kb.md @@ -0,0 +1,268 @@ +--- +title: KnowledgeBase +teaser: A storage class for entities and aliases of a specific knowledge base (ontology) +tag: class +source: spacy/kb.pyx +new: 2.2 +--- + +The `KnowledgeBase` object provides a method to generate [`Candidate`](/api/kb/#candidate_init) +objects, which are plausible external identifiers given a certain textual mention. +Each such `Candidate` holds information from the relevant KB entities, +such as its frequency in text and possible aliases. 
+Each entity in the knowledge base also has a pre-trained entity vector of a fixed size. + +## KnowledgeBase.\_\_init\_\_ {#init tag="method"} + +Create the knowledge base. + +> #### Example +> +> ```python +> from spacy.kb import KnowledgeBase +> vocab = nlp.vocab +> kb = KnowledgeBase(vocab=vocab, entity_vector_length=64) +> ``` + +| Name | Type | Description | +| ----------------------- | ---------------- | ----------------------------------------- | +| `vocab` | `Vocab` | A `Vocab` object. | +| `entity_vector_length` | int | Length of the fixed-size entity vectors. | +| **RETURNS** | `KnowledgeBase` | The newly constructed object. | + + +## KnowledgeBase.entity_vector_length {#entity_vector_length tag="property"} + +The length of the fixed-size entity vectors in the knowledge base. + +| Name | Type | Description | +| ----------- | ---- | ----------------------------------------- | +| **RETURNS** | int | Length of the fixed-size entity vectors. | + +## KnowledgeBase.add_entity {#add_entity tag="method"} + +Add an entity to the knowledge base, specifying its corpus frequency +and entity vector, which should be of length [`entity_vector_length`](/api/kb#entity_vector_length). + +> #### Example +> +> ```python +> kb.add_entity(entity="Q42", freq=32, entity_vector=vector1) +> kb.add_entity(entity="Q463035", freq=111, entity_vector=vector2) +> ``` + +| Name | Type | Description | +| --------------- | ------------- | ------------------------------------------------- | +| `entity` | unicode | The unique entity identifier | +| `freq` | float | The frequency of the entity in a typical corpus | +| `entity_vector` | vector | The pre-trained vector of the entity | + +## KnowledgeBase.set_entities {#set_entities tag="method"} + +Define the full list of entities in the knowledge base, specifying the corpus frequency +and entity vector for each entity. 
+ +> #### Example +> +> ```python +> kb.set_entities(entity_list=["Q42", "Q463035"], freq_list=[32, 111], vector_list=[vector1, vector2]) +> ``` + +| Name | Type | Description | +| ------------- | ------------- | ------------------------------------------------- | +| `entity_list` | iterable | List of unique entity identifiers | +| `freq_list` | iterable | List of entity frequencies | +| `vector_list` | iterable | List of entity vectors | + +## KnowledgeBase.add_alias {#add_alias tag="method"} + +Add an alias or mention to the knowledge base, specifying its potential KB identifiers +and their prior probabilities. The entity identifiers should refer to entities previously +added with [`add_entity`](/api/kb#add_entity) or [`set_entities`](/api/kb#set_entities). +The sum of the prior probabilities should not exceed 1. + +> #### Example +> +> ```python +> kb.add_alias(alias="Douglas", entities=["Q42", "Q463035"], probabilities=[0.6, 0.3]) +> ``` + +| Name | Type | Description | +| -------------- | ------------- | -------------------------------------------------- | +| `alias` | unicode | The textual mention or alias | +| `entities` | iterable | The potential entities that the alias may refer to | +| `probabilities`| iterable | The prior probabilities of each entity | + +## KnowledgeBase.\_\_len\_\_ {#len tag="method"} + +Get the total number of entities in the knowledge base. + +> #### Example +> +> ```python +> total_entities = len(kb) +> ``` + +| Name | Type | Description | +| ----------- | ---- | --------------------------------------------- | +| **RETURNS** | int | The number of entities in the knowledge base. | + +## KnowledgeBase.get_entity_strings {#get_entity_strings tag="method"} + +Get a list of all entity IDs in the knowledge base. 
+ +> #### Example +> +> ```python +> all_entities = kb.get_entity_strings() +> ``` + +| Name | Type | Description | +| ----------- | ---- | --------------------------------------------- | +| **RETURNS** | list | The list of entities in the knowledge base. | + +## KnowledgeBase.get_size_aliases {#get_size_aliases tag="method"} + +Get the total number of aliases in the knowledge base. + +> #### Example +> +> ```python +> total_aliases = kb.get_size_aliases() +> ``` + +| Name | Type | Description | +| ----------- | ---- | --------------------------------------------- | +| **RETURNS** | int | The number of aliases in the knowledge base. | + +## KnowledgeBase.get_alias_strings {#get_alias_strings tag="method"} + +Get a list of all aliases in the knowledge base. + +> #### Example +> +> ```python +> all_aliases = kb.get_alias_strings() +> ``` + +| Name | Type | Description | +| ----------- | ---- | --------------------------------------------- | +| **RETURNS** | list | The list of aliases in the knowledge base. | + +## KnowledgeBase.get_candidates {#get_candidates tag="method"} + +Given a certain textual mention as input, retrieve a list of candidate entities +of type [`Candidate`](/api/kb/#candidate_init). + +> #### Example +> +> ```python +> candidates = kb.get_candidates("Douglas") +> ``` + +| Name | Type | Description | +| ------------- | ------------- | -------------------------------------------------- | +| `alias` | unicode | The textual mention or alias | +| **RETURNS** | iterable | The list of relevant `Candidate` objects | + +## KnowledgeBase.get_vector {#get_vector tag="method"} + +Given a certain entity ID, retrieve its pre-trained entity vector. 
+ +> #### Example +> +> ```python +> vector = kb.get_vector("Q42") +> ``` + +| Name | Type | Description | +| ------------- | ------------- | -------------------------------------------------- | +| `entity` | unicode | The entity ID | +| **RETURNS** | vector | The entity vector | + +## KnowledgeBase.get_prior_prob {#get_prior_prob tag="method"} + +Given a certain entity ID and a certain textual mention, retrieve +the prior probability of the fact that the mention links to the entity ID. + +> #### Example +> +> ```python +> probability = kb.get_prior_prob("Q42", "Douglas") +> ``` + +| Name | Type | Description | +| ------------- | ------------- | --------------------------------------------------------------- | +| `entity` | unicode | The entity ID | +| `alias` | unicode | The textual mention or alias | +| **RETURNS** | float | The prior probability of the `alias` referring to the `entity` | + +## KnowledgeBase.dump {#dump tag="method"} + +Save the current state of the knowledge base to a directory. + +> #### Example +> +> ```python +> kb.dump(loc) +> ``` + +| Name | Type | Description | +| ------------- | ---------------- | ------------------------------------------------------------------------------------------------------------------------ | +| `loc` | unicode / `Path` | A path to a directory, which will be created if it doesn't exist. Paths may be either strings or `Path`-like objects. | + +## KnowledgeBase.load_bulk {#load_bulk tag="method"} + +Restore the state of the knowledge base from a given directory. Note that the [`Vocab`](/api/vocab) +should also be the same as the one used to create the KB. 
+ +> #### Example +> +> ```python +> from spacy.kb import KnowledgeBase +> from spacy.vocab import Vocab +> vocab = Vocab().from_disk("/path/to/vocab") +> kb = KnowledgeBase(vocab=vocab, entity_vector_length=64) +> kb.load_bulk("/path/to/kb") +> ``` + + +| Name | Type | Description | +| ----------- | ---------------- | ----------------------------------------------------------------------------------------- | +| `loc` | unicode / `Path` | A path to a directory. Paths may be either strings or `Path`-like objects. | +| **RETURNS** | `KnowledgeBase` | The modified `KnowledgeBase` object. | + + +## Candidate.\_\_init\_\_ {#candidate_init tag="method"} + +Construct a `Candidate` object. Usually this constructor is not called directly, +but instead these objects are returned by the [`get_candidates`](/api/kb#get_candidates) method +of a `KnowledgeBase`. + +> #### Example +> +> ```python +> from spacy.kb import Candidate +> candidate = Candidate(kb, entity_hash, entity_freq, entity_vector, alias_hash, prior_prob) +> ``` + +| Name | Type | Description | +| ------------- | --------------- | -------------------------------------------------------------- | +| `kb` | `KnowledgeBase` | The knowledge base that defined this candidate. | +| `entity_hash` | int | The hash of the entity's KB ID. | +| `entity_freq` | float | The entity frequency as recorded in the KB. | +| `alias_hash` | int | The hash of the textual mention or alias. | +| `prior_prob` | float | The prior probability of the `alias` referring to the `entity` | +| **RETURNS** | `Candidate` | The newly constructed object. 
|
+
+## Candidate attributes {#candidate_attributes}
+
+| Name | Type | Description |
+| ---------------------- | ------------ | ------------------------------------------------------------------ |
+| `entity` | int | The hash of the entity's unique KB identifier |
+| `entity_` | unicode | The entity's unique KB identifier |
+| `alias` | int | The hash of the alias or textual mention |
+| `alias_` | unicode | The alias or textual mention |
+| `prior_prob` | float | The prior probability of the `alias` referring to the `entity` |
+| `entity_freq` | float | The frequency of the entity in a typical corpus |
+| `entity_vector` | vector | The pre-trained vector of the entity |
diff --git a/website/docs/api/span.md b/website/docs/api/span.md
index 0af305b37..79be81ef8 100644
--- a/website/docs/api/span.md
+++ b/website/docs/api/span.md
@@ -18,14 +18,15 @@ Create a Span object from the slice `doc[start : end]`.
> assert [t.text for t in span] == [u"it", u"back", u"!"]
> ```
-| Name | Type | Description |
-| ----------- | ---------------------------------------- | ----------------------------------------------------------------------------------------------------------- |
-| `doc` | `Doc` | The parent document. |
-| `start` | int | The index of the first token of the span. |
-| `end` | int | The index of the first token after the span. |
-| `label` | int / unicode | A label to attach to the span, e.g. for named entities. As of v2.1, the label can also be a unicode string. |
-| `vector` | `numpy.ndarray[ndim=1, dtype='float32']` | A meaning representation of the span. |
-| **RETURNS** | `Span` | The newly constructed object. |
+| Name | Type | Description |
+| ----------- | ---------------------------------------- | ------------------------------------------------------------------------------------------------------------------|
+| `doc` | `Doc` | The parent document. |
+| `start` | int | The index of the first token of the span. |
+| `end` | int | The index of the first token after the span. 
| +| `label` | int / unicode | A label to attach to the span, e.g. for named entities. As of v2.1, the label can also be a unicode string. | +| `kb_id` | int / unicode | A knowledge base ID to attach to the span, e.g. for named entities. The ID can be an integer or a unicode string. | +| `vector` | `numpy.ndarray[ndim=1, dtype='float32']` | A meaning representation of the span. | +| **RETURNS** | `Span` | The newly constructed object. | ## Span.\_\_getitem\_\_ {#getitem tag="method"} @@ -477,9 +478,11 @@ The L2 norm of the span's vector representation. | `text_with_ws` | unicode | The text content of the span with a trailing whitespace character if the last token has one. | | `orth` | int | ID of the verbatim text content. | | `orth_` | unicode | Verbatim text content (identical to `Span.text`). Exists mostly for consistency with the other attributes. | -| `label` | int | The span's label. | +| `label` | int | The hash value of the span's label. | | `label_` | unicode | The span's label. | | `lemma_` | unicode | The span's lemma. | +| `kb_id` | int | The hash value of the knowledge base ID referred to by the span. | +| `kb_id_` | unicode | The knowledge base ID referred to by the span. | | `ent_id` | int | The hash value of the named entity the token is an instance of. | | `ent_id_` | unicode | The string ID of the named entity the token is an instance of. | | `sentiment` | float | A scalar value indicating the positivity or negativity of the span. | diff --git a/website/docs/api/tagger.md b/website/docs/api/tagger.md index a1d921b41..fc6fc67a6 100644 --- a/website/docs/api/tagger.md +++ b/website/docs/api/tagger.md @@ -97,7 +97,7 @@ Apply the pipeline's model to a batch of docs, without modifying them. > > ```python > tagger = Tagger(nlp.vocab) -> scores = tagger.predict([doc1, doc2]) +> scores, tensors = tagger.predict([doc1, doc2]) > ``` | Name | Type | Description | @@ -113,14 +113,16 @@ Modify a batch of documents, using pre-computed scores. 
> > ```python > tagger = Tagger(nlp.vocab) -> scores = tagger.predict([doc1, doc2]) -> tagger.set_annotations([doc1, doc2], scores) +> scores, tensors = tagger.predict([doc1, doc2]) +> tagger.set_annotations([doc1, doc2], scores, tensors) > ``` -| Name | Type | Description | -| -------- | -------- | ------------------------------------------------ | -| `docs` | iterable | The documents to modify. | -| `scores` | - | The scores to set, produced by `Tagger.predict`. | +| Name | Type | Description | +| -------- | -------- | ----------------------------------------------------- | +| `docs` | iterable | The documents to modify. | +| `scores` | - | The scores to set, produced by `Tagger.predict`. | +| `tensors`| iterable | The token representations used to predict the scores. | + ## Tagger.update {#update tag="method"} diff --git a/website/docs/api/textcategorizer.md b/website/docs/api/textcategorizer.md index 310122b9c..f7158541b 100644 --- a/website/docs/api/textcategorizer.md +++ b/website/docs/api/textcategorizer.md @@ -116,7 +116,7 @@ Apply the pipeline's model to a batch of docs, without modifying them. > > ```python > textcat = TextCategorizer(nlp.vocab) -> scores = textcat.predict([doc1, doc2]) +> scores, tensors = textcat.predict([doc1, doc2]) > ``` | Name | Type | Description | @@ -132,14 +132,15 @@ Modify a batch of documents, using pre-computed scores. > > ```python > textcat = TextCategorizer(nlp.vocab) -> scores = textcat.predict([doc1, doc2]) -> textcat.set_annotations([doc1, doc2], scores) +> scores, tensors = textcat.predict([doc1, doc2]) +> textcat.set_annotations([doc1, doc2], scores, tensors) > ``` | Name | Type | Description | | -------- | -------- | --------------------------------------------------------- | | `docs` | iterable | The documents to modify. | | `scores` | - | The scores to set, produced by `TextCategorizer.predict`. | +| `tensors`| iterable | The token representations used to predict the scores. 
| ## TextCategorizer.update {#update tag="method"} @@ -227,13 +228,13 @@ Modify the pipe's model, to use the given parameter values. > > ```python > textcat = TextCategorizer(nlp.vocab) -> with textcat.use_params(): +> with textcat.use_params(optimizer.averages): > textcat.to_disk("/best_model") > ``` | Name | Type | Description | | -------- | ---- | ---------------------------------------------------------------------------------------------------------- | -| `params` | - | The parameter values to use in the model. At the end of the context, the original parameters are restored. | +| `params` | dict | The parameter values to use in the model. At the end of the context, the original parameters are restored. | ## TextCategorizer.add_label {#add_label tag="method"} diff --git a/website/docs/api/token.md b/website/docs/api/token.md index 24816b401..8da13454b 100644 --- a/website/docs/api/token.md +++ b/website/docs/api/token.md @@ -425,8 +425,10 @@ The L2 norm of the token's vector representation. | `i` | int | The index of the token within the parent document. | | `ent_type` | int | Named entity type. | | `ent_type_` | unicode | Named entity type. | -| `ent_iob` | int | IOB code of named entity tag. `3` means the token begins an entity, `2` means it is outside an entity, `1` means it is inside an entity, and `0` means no entity tag is set. | | +| `ent_iob` | int | IOB code of named entity tag. `3` means the token begins an entity, `2` means it is outside an entity, `1` means it is inside an entity, and `0` means no entity tag is set. | | `ent_iob_` | unicode | IOB code of named entity tag. "B" means the token begins an entity, "I" means it is inside an entity, "O" means it is outside an entity, and "" means no entity tag is set. | +| `ent_kb_id` 2.2 | int | Knowledge base ID that refers to the named entity this token is a part of, if any. | +| `ent_kb_id_` 2.2 | unicode | Knowledge base ID that refers to the named entity this token is a part of, if any. 
| | `ent_id` | int | ID of the entity the token is an instance of, if any. Currently not used, but potentially for coreference resolution. | | `ent_id_` | unicode | ID of the entity the token is an instance of, if any. Currently not used, but potentially for coreference resolution. | | `lemma` | int | Base form of the token, with no inflectional suffixes. | diff --git a/website/docs/usage/101/_named-entities.md b/website/docs/usage/101/_named-entities.md index 54db6dbe8..a282ec370 100644 --- a/website/docs/usage/101/_named-entities.md +++ b/website/docs/usage/101/_named-entities.md @@ -1,5 +1,5 @@ A named entity is a "real-world object" that's assigned a name – for example, a -person, a country, a product or a book title. spaCy can **recognize** +person, a country, a product or a book title. spaCy can **recognize** [various types](/api/annotation#named-entities) of named entities in a document, by asking the model for a **prediction**. Because models are statistical and strongly depend on the examples they were trained on, this doesn't always work @@ -21,12 +21,12 @@ for ent in doc.ents: > - **Text:** The original entity text. > - **Start:** Index of start of entity in the `Doc`. > - **End:** Index of end of entity in the `Doc`. -> - **LabeL:** Entity label, i.e. type. +> - **Label:** Entity label, i.e. type. -| Text | Start | End | Label | Description | -| ----------- | :---: | :-: | ------- | ---------------------------------------------------- | -| Apple | 0 | 5 | `ORG` | Companies, agencies, institutions. | -| U.K. | 27 | 31 | `GPE` | Geopolitical entity, i.e. countries, cities, states. | +| Text | Start | End | Label | Description | +| ----------- | :---: | :-: | ------- | ---------------------------------------------------- | +| Apple | 0 | 5 | `ORG` | Companies, agencies, institutions. | +| U.K. | 27 | 31 | `GPE` | Geopolitical entity, i.e. countries, cities, states. | | \$1 billion | 44 | 54 | `MONEY` | Monetary values, including unit. 
| Using spaCy's built-in [displaCy visualizer](/usage/visualizers), here's what diff --git a/website/docs/usage/101/_pipelines.md b/website/docs/usage/101/_pipelines.md index 68308a381..d33ea45fd 100644 --- a/website/docs/usage/101/_pipelines.md +++ b/website/docs/usage/101/_pipelines.md @@ -12,14 +12,14 @@ passed on to the next component. > - **Creates:** Objects, attributes and properties modified and set by the > component. -| Name | Component | Creates | Description | -| ------------- | ------------------------------------------------------------------ | ----------------------------------------------------------- | ------------------------------------------------ | -| **tokenizer** | [`Tokenizer`](/api/tokenizer) | `Doc` | Segment text into tokens. | -| **tagger** | [`Tagger`](/api/tagger) | `Doc[i].tag` | Assign part-of-speech tags. | -| **parser** | [`DependencyParser`](/api/dependencyparser) | `Doc[i].head`, `Doc[i].dep`, `Doc.sents`, `Doc.noun_chunks` | Assign dependency labels. | -| **ner** | [`EntityRecognizer`](/api/entityrecognizer) | `Doc.ents`, `Doc[i].ent_iob`, `Doc[i].ent_type` | Detect and label named entities. | -| **textcat** | [`TextCategorizer`](/api/textcategorizer) | `Doc.cats` | Assign document labels. | -| ... | [custom components](/usage/processing-pipelines#custom-components) | `Doc._.xxx`, `Token._.xxx`, `Span._.xxx` | Assign custom attributes, methods or properties. | +| Name | Component | Creates | Description | +| ----------------- | ------------------------------------------------------------------ | ----------------------------------------------------------- | ------------------------------------------------ | +| **tokenizer** | [`Tokenizer`](/api/tokenizer) | `Doc` | Segment text into tokens. | +| **tagger** | [`Tagger`](/api/tagger) | `Doc[i].tag` | Assign part-of-speech tags. | +| **parser** | [`DependencyParser`](/api/dependencyparser) | `Doc[i].head`, `Doc[i].dep`, `Doc.sents`, `Doc.noun_chunks` | Assign dependency labels. 
| +| **ner** | [`EntityRecognizer`](/api/entityrecognizer) | `Doc.ents`, `Doc[i].ent_iob`, `Doc[i].ent_type` | Detect and label named entities. | +| **textcat** | [`TextCategorizer`](/api/textcategorizer) | `Doc.cats` | Assign document labels. | +| ... | [custom components](/usage/processing-pipelines#custom-components) | `Doc._.xxx`, `Token._.xxx`, `Span._.xxx` | Assign custom attributes, methods or properties. | The processing pipeline always **depends on the statistical model** and its capabilities. For example, a pipeline can only include an entity recognizer @@ -49,6 +49,10 @@ them, its dependency predictions may be different. Similarly, it matters if you add the [`EntityRuler`](/api/entityruler) before or after the statistical entity recognizer: if it's added before, the entity recognizer will take the existing entities into account when making predictions. +The [`EntityLinker`](/api/entitylinker), which resolves named entities to +knowledge base IDs, should be preceded by +a pipeline component that recognizes entities such as the +[`EntityRecognizer`](/api/entityrecognizer). diff --git a/website/docs/usage/101/_training.md b/website/docs/usage/101/_training.md index 61e047748..baf3a1891 100644 --- a/website/docs/usage/101/_training.md +++ b/website/docs/usage/101/_training.md @@ -20,7 +20,7 @@ difference, the more significant the gradient and the updates to our model. ![The training process](../../images/training.svg) When training a model, we don't just want it to memorize our examples – we want -it to come up with theory that can be **generalized across other examples**. +it to come up with a theory that can be **generalized across other examples**. After all, we don't just want the model to learn that this one instance of "Amazon" right here is a company – we want it to learn that "Amazon", in contexts _like this_, is most likely a company. 
That's why the training data diff --git a/website/docs/usage/facts-figures.md b/website/docs/usage/facts-figures.md index a3683b668..40b39d871 100644 --- a/website/docs/usage/facts-figures.md +++ b/website/docs/usage/facts-figures.md @@ -26,7 +26,7 @@ Here's a quick comparison of the functionalities offered by spaCy, | Sentence segmentation | ✅ | ✅ | ✅ | | Dependency parsing | ✅ | ❌ | ✅ | | Entity recognition | ✅ | ✅ | ✅ | -| Entity linking | ❌ | ❌ | ❌ | +| Entity linking | ✅ | ❌ | ❌ | | Coreference resolution | ❌ | ❌ | ✅ | ### When should I use what? {#comparison-usage} diff --git a/website/docs/usage/linguistic-features.md b/website/docs/usage/linguistic-features.md index 66ad816f5..fc1f159ce 100644 --- a/website/docs/usage/linguistic-features.md +++ b/website/docs/usage/linguistic-features.md @@ -576,6 +576,54 @@ import DisplacyEntHtml from 'images/displacy-ent2.html'