From bd8f9b188bd423db79da6be60206e6e7f6711871 Mon Sep 17 00:00:00 2001
From: svlandeg
Date: Tue, 8 Sep 2020 17:24:36 +0200
Subject: [PATCH] small fixes

---
 spacy/ml/models/tok2vec.py        | 4 ++--
 website/docs/api/architectures.md | 8 ++++----
 website/docs/usage/training.md    | 2 +-
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/spacy/ml/models/tok2vec.py b/spacy/ml/models/tok2vec.py
index b9c9b8766..2e5f8a802 100644
--- a/spacy/ml/models/tok2vec.py
+++ b/spacy/ml/models/tok2vec.py
@@ -176,8 +176,8 @@ def CharacterEmbed(width: int, rows: int, nM: int, nC: int):
     ensures that the final character is always in the last position, instead
     of being in an arbitrary position depending on the word length.
 
-    The characters are embedded in a embedding table with 256 rows, and the
-    vectors concatenated. A hash-embedded vector of the NORM of the word is
+    The characters are embedded in an embedding table with a given number of rows,
+    and the vectors concatenated. A hash-embedded vector of the NORM of the word is
     also concatenated on, and the result is then passed through a feed-forward
     network to construct a single vector to represent the information.
 
diff --git a/website/docs/api/architectures.md b/website/docs/api/architectures.md
index ee844d961..bcca97324 100644
--- a/website/docs/api/architectures.md
+++ b/website/docs/api/architectures.md
@@ -181,10 +181,10 @@ characters would be `"jumpping"`: 4 from the start, 4 from the end. This
 ensures that the final character is always in the last position, instead of
 being in an arbitrary position depending on the word length.
 
-The characters are embedded in a embedding table with 256 rows, and the vectors
-concatenated. A hash-embedded vector of the `NORM` of the word is also
-concatenated on, and the result is then passed through a feed-forward network to
-construct a single vector to represent the information.
+The characters are embedded in an embedding table with a given number of rows,
+and the vectors concatenated. A hash-embedded vector of the `NORM` of the word
+is also concatenated on, and the result is then passed through a feed-forward
+network to construct a single vector to represent the information.
 
 | Name | Description |
 | ----------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------- |
diff --git a/website/docs/usage/training.md b/website/docs/usage/training.md
index 9c18e4606..3f0141d72 100644
--- a/website/docs/usage/training.md
+++ b/website/docs/usage/training.md
@@ -683,7 +683,7 @@ You can also implement your own batch size schedule to use during training. The
 import spacy
 
 @spacy.registry.schedules("my_custom_schedule.v1")
-def my_custom_schedule(start: int = 1, factor: int = 1.001):
+def my_custom_schedule(start: int = 1, factor: float = 1.001):
     while True:
         yield start
         start = start * factor
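
As a quick sanity check on the last hunk (not part of the patch itself): a minimal standalone sketch of how the documented schedule behaves once `factor` is annotated as `float`. The `@spacy.registry.schedules` decorator is omitted so the snippet runs without spaCy installed, and `start=100` is just an illustrative value.

```python
from typing import Iterator


def my_custom_schedule(start: int = 1, factor: float = 1.001) -> Iterator[float]:
    # Yield a compounding batch size: start, start*factor, start*factor**2, ...
    while True:
        yield start
        start = start * factor


# Illustrative values only: compounding by 0.1% per step.
sizes = my_custom_schedule(start=100, factor=1.001)
print([round(next(sizes), 4) for _ in range(4)])
# -> [100, 100.1, 100.2001, 100.3003] (approximately, due to float rounding)
```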