From c608baeecc2e7af749f1b6d418154f8f338c0da3 Mon Sep 17 00:00:00 2001
From: maurice
Date: Tue, 16 Jan 2024 21:54:54 +0100
Subject: [PATCH 1/7] Fix typo in method name

---
 spacy/pipeline/_parser_internals/stateclass.pyx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/spacy/pipeline/_parser_internals/stateclass.pyx b/spacy/pipeline/_parser_internals/stateclass.pyx
index e3b063b7d..24b9f1adc 100644
--- a/spacy/pipeline/_parser_internals/stateclass.pyx
+++ b/spacy/pipeline/_parser_internals/stateclass.pyx
@@ -29,7 +29,7 @@ cdef class StateClass:
         return [self.B(i) for i in range(self.c.buffer_length())]
 
     @property
-    def token_vector_lenth(self):
+    def token_vector_length(self):
         return self.doc.tensor.shape[1]
 
     @property

From afac7fb650ffa32c146d4107d653f8f711c71cce Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Dani=C3=ABl=20de=20Kok?=
Date: Tue, 23 Jan 2024 20:11:16 +0100
Subject: [PATCH 2/7] test_find_available_port: use port 5001 (#13255)

macOS now uses port 5000 for the AirPlay receiver functionality, so this
test will always fail on a macOS desktop (unless AirPlay receiver
functionality is disabled like in CI).
---
 spacy/tests/test_misc.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/spacy/tests/test_misc.py b/spacy/tests/test_misc.py
index b1b4faa88..d2a41ff0f 100644
--- a/spacy/tests/test_misc.py
+++ b/spacy/tests/test_misc.py
@@ -486,8 +486,8 @@ def test_to_ternary_int():
 
 
 def test_find_available_port():
     host = "0.0.0.0"
-    port = 5000
-    assert find_available_port(port, host) == port, "Port 5000 isn't free"
+    port = 5001
+    assert find_available_port(port, host) == port, "Port 5001 isn't free"
 
     from wsgiref.simple_server import demo_app, make_server

From a493981163002d0cd2409950512eeeccb6fa4690 Mon Sep 17 00:00:00 2001
From: Sofie Van Landeghem
Date: Wed, 24 Jan 2024 09:29:57 +0100
Subject: [PATCH 3/7] fix typo (#13254)

---
 website/docs/api/large-language-models.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/docs/api/large-language-models.mdx b/website/docs/api/large-language-models.mdx
index b0ef4c9f9..cefd5c66e 100644
--- a/website/docs/api/large-language-models.mdx
+++ b/website/docs/api/large-language-models.mdx
@@ -1507,7 +1507,7 @@ These models all take the same parameters:
 > ```ini
 > [components.llm.model]
 > @llm_models = "spacy.Llama2.v1"
-> name = "llama2-7b-hf"
+> name = "Llama-2-7b-hf"
 > ```
 
 Currently, these models are provided as part of the core library:

From 7496e03a2c18c24454af924347af667e6df0ac70 Mon Sep 17 00:00:00 2001
From: Sofie Van Landeghem
Date: Fri, 26 Jan 2024 10:58:48 +0100
Subject: [PATCH 4/7] Clarify vocab docs (#13273)

* add line to ensure that apple is in fact in the vocab

* add that the vocab may be empty
---
 website/docs/api/vocab.mdx | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/website/docs/api/vocab.mdx b/website/docs/api/vocab.mdx
index fe774d1a8..57618397d 100644
--- a/website/docs/api/vocab.mdx
+++ b/website/docs/api/vocab.mdx
@@ -13,7 +13,7 @@ between `Doc` objects.
 
 Note that a `Vocab` instance is not static. It increases in size as texts with
-new tokens are processed.
+new tokens are processed. Some models may have an empty vocab at initialization.
@@ -93,6 +93,7 @@ given string, you need to look it up in
 > #### Example
 >
 > ```python
+> nlp("I'm eating an apple")
 > apple = nlp.vocab.strings["apple"]
 > oov = nlp.vocab.strings["dskfodkfos"]
 > assert apple in nlp.vocab

From 68b85ea950492e4f83d9b1552806ab4a9631236e Mon Sep 17 00:00:00 2001
From: Sofie Van Landeghem
Date: Fri, 26 Jan 2024 12:10:05 +0100
Subject: [PATCH 5/7] Clarify data_path loading for apply CLI command (#13272)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* attempt to clarify additional annotations on .spacy file

* suggestion by Daniël

* pipeline instead of pipe
---
 website/docs/api/cli.mdx | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/website/docs/api/cli.mdx b/website/docs/api/cli.mdx
index 51cae960b..db91e1062 100644
--- a/website/docs/api/cli.mdx
+++ b/website/docs/api/cli.mdx
@@ -1296,6 +1296,9 @@ input formats are:
 
 When a directory is provided it is traversed recursively to collect all files.
 
+When loading a .spacy file, any potential annotations stored on the `Doc` that are not overwritten by the pipeline will be preserved.
+If you want to evaluate the pipeline on raw text only, make sure that the .spacy file does not contain any annotations.
+
 ```bash
 $ python -m spacy apply [model] [data-path] [output-file] [--code] [--text-key] [--force-overwrite] [--gpu-id] [--batch-size] [--n-process]
 ```

From 00e938a7c3a74c559d0cc5c33437b698f3b3e770 Mon Sep 17 00:00:00 2001
From: Eliana Vornov
Date: Fri, 26 Jan 2024 07:29:22 -0500
Subject: [PATCH 6/7] add custom code support to CLI speed benchmark (#13247)

* add custom code support to CLI speed benchmark

* sort imports

* better copying for warmup docs
---
 spacy/cli/benchmark_speed.py |  6 ++++--
 website/docs/api/cli.mdx     | 25 +++++++++++++------------
 2 files changed, 17 insertions(+), 14 deletions(-)

diff --git a/spacy/cli/benchmark_speed.py b/spacy/cli/benchmark_speed.py
index c7fd771c3..4dd10049c 100644
--- a/spacy/cli/benchmark_speed.py
+++ b/spacy/cli/benchmark_speed.py
@@ -13,7 +13,7 @@ from .. import util
 from ..language import Language
 from ..tokens import Doc
 from ..training import Corpus
-from ._util import Arg, Opt, benchmark_cli, setup_gpu
+from ._util import Arg, Opt, benchmark_cli, import_code, setup_gpu
 
 
 @benchmark_cli.command(
@@ -30,12 +30,14 @@ def benchmark_speed_cli(
     use_gpu: int = Opt(-1, "--gpu-id", "-g", help="GPU ID or -1 for CPU"),
     n_batches: int = Opt(50, "--batches", help="Minimum number of batches to benchmark", min=30,),
     warmup_epochs: int = Opt(3, "--warmup", "-w", min=0, help="Number of iterations over the data for warmup"),
+    code_path: Optional[Path] = Opt(None, "--code", "-c", help="Path to Python file with additional code (registered functions) to be imported"),
     # fmt: on
 ):
     """
     Benchmark a pipeline. Expects a loadable spaCy pipeline and benchmark
     data in the binary .spacy format.
     """
+    import_code(code_path)
     setup_gpu(use_gpu=use_gpu, silent=False)
 
     nlp = util.load_model(model)
@@ -171,5 +173,5 @@ def print_outliers(sample: numpy.ndarray):
 def warmup(
     nlp: Language, docs: List[Doc], warmup_epochs: int, batch_size: Optional[int]
 ) -> numpy.ndarray:
-    docs = warmup_epochs * docs
+    docs = [doc.copy() for doc in docs * warmup_epochs]
     return annotate(nlp, docs, batch_size)

diff --git a/website/docs/api/cli.mdx b/website/docs/api/cli.mdx
index db91e1062..950d98c1f 100644
--- a/website/docs/api/cli.mdx
+++ b/website/docs/api/cli.mdx
@@ -1268,20 +1268,21 @@ the [binary `.spacy` format](/api/data-formats#binary-training).
 The pipeline is warmed up before any measurements are taken.
 
 ```cli
-$ python -m spacy benchmark speed [model] [data_path] [--batch_size] [--no-shuffle] [--gpu-id] [--batches] [--warmup]
+$ python -m spacy benchmark speed [model] [data_path] [--code] [--batch_size] [--no-shuffle] [--gpu-id] [--batches] [--warmup]
 ```
 
-| Name                 | Description                                                                                                |
-| -------------------- | ---------------------------------------------------------------------------------------------------------- |
-| `model`              | Pipeline to benchmark the speed of. Can be a package or a path to a data directory. ~~str (positional)~~    |
-| `data_path`          | Location of benchmark data in spaCy's [binary format](/api/data-formats#training). ~~Path (positional)~~    |
-| `--batch-size`, `-b` | Set the batch size. If not set, the pipeline's batch size is used. ~~Optional[int] \(option)~~              |
-| `--no-shuffle`       | Do not shuffle documents in the benchmark data. ~~bool (flag)~~                                             |
-| `--gpu-id`, `-g`     | GPU to use, if any. Defaults to `-1` for CPU. ~~int (option)~~                                              |
-| `--batches`          | Number of batches to benchmark on. Defaults to `50`. ~~Optional[int] \(option)~~                            |
-| `--warmup`, `-w`     | Iterations over the benchmark data for warmup. Defaults to `3` ~~Optional[int] \(option)~~                  |
-| `--help`, `-h`       | Show help message and available arguments. ~~bool (flag)~~                                                  |
-| **PRINTS**           | Pipeline speed in words per second with a 95% confidence interval.                                          |
+| Name                 | Description                                                                                                                                                                           |
+| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| `model`              | Pipeline to benchmark the speed of. Can be a package or a path to a data directory. ~~str (positional)~~                                                                              |
+| `data_path`          | Location of benchmark data in spaCy's [binary format](/api/data-formats#training). ~~Path (positional)~~                                                                              |
+| `--code`, `-c`       | Path to Python file with additional code to be imported. Allows [registering custom functions](/usage/training#custom-functions) for new architectures. ~~Optional[Path] \(option)~~ |
+| `--batch-size`, `-b` | Set the batch size. If not set, the pipeline's batch size is used. ~~Optional[int] \(option)~~                                                                                        |
+| `--no-shuffle`       | Do not shuffle documents in the benchmark data. ~~bool (flag)~~                                                                                                                       |
+| `--gpu-id`, `-g`     | GPU to use, if any. Defaults to `-1` for CPU. ~~int (option)~~                                                                                                                        |
+| `--batches`          | Number of batches to benchmark on. Defaults to `50`. ~~Optional[int] \(option)~~                                                                                                      |
+| `--warmup`, `-w`     | Iterations over the benchmark data for warmup. Defaults to `3` ~~Optional[int] \(option)~~                                                                                            |
+| `--help`, `-h`       | Show help message and available arguments. ~~bool (flag)~~                                                                                                                            |
+| **PRINTS**           | Pipeline speed in words per second with a 95% confidence interval.                                                                                                                    |
 
 ## apply {id="apply", version="3.5", tag="command"}

From 68d7841df593986655d07f9840fcd35e79b28c7a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Dani=C3=ABl=20de=20Kok?=
Date: Mon, 29 Jan 2024 13:51:56 +0100
Subject: [PATCH 7/7] Extension serialization attr tests: add teardown (#13284)

The doc/token extension serialization tests add extensions that are not
serializable with pickle. This didn't cause issues before due to the
implicit run order of tests. However, test ordering has changed with
pytest 8.0.0, leading to failed tests in test_language.

Update the fixtures in the extension serialization tests to do proper
teardown and remove the extensions.
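For reference, a minimal sketch of the yield-based fixture teardown
pattern applied below (the fixture and extension names here are
illustrative, not the ones from the test suite):

```python
import pytest
import spacy
from spacy.tokens import Doc


@pytest.fixture
def doc_with_ext():
    # setup: register a throwaway extension before the test runs
    Doc.set_extension("_tmp_attr", default=None)
    nlp = spacy.blank("en")
    yield nlp("hello world")
    # teardown: runs after the test finishes, even if it failed, so the
    # globally registered extension is removed for tests that run later
    Doc.remove_extension("_tmp_attr")


def test_uses_ext(doc_with_ext):
    assert doc_with_ext._._tmp_attr is None
```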
---
 spacy/tests/serialize/test_serialize_extension_attrs.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/spacy/tests/serialize/test_serialize_extension_attrs.py b/spacy/tests/serialize/test_serialize_extension_attrs.py
index f3b6cb000..2fb56c848 100644
--- a/spacy/tests/serialize/test_serialize_extension_attrs.py
+++ b/spacy/tests/serialize/test_serialize_extension_attrs.py
@@ -15,7 +15,12 @@ def doc_w_attrs(en_tokenizer):
     Token.set_extension("_test_token", default="t0")
     doc[1]._._test_token = "t1"
 
-    return doc
+    yield doc
+
+    Doc.remove_extension("_test_attr")
+    Doc.remove_extension("_test_prop")
+    Doc.remove_extension("_test_method")
+    Token.remove_extension("_test_token")
 
 
 def test_serialize_ext_attrs_from_bytes(doc_w_attrs):
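A usage sketch for the new `--code` option from PATCH 6/7: `import_code(code_path)`
runs before `util.load_model(model)`, so a pipeline that depends on custom
registered functions can be benchmarked. The module below is a hypothetical
example; only the CLI flags shown in the patch above are real.

```python
# custom_functions.py (hypothetical file name), passed on the command line as:
#   python -m spacy benchmark speed ./my_pipeline ./data.spacy --code ./custom_functions.py
# (the pipeline and data paths are placeholders)
from spacy.language import Language


@Language.component("my_noop_component")
def my_noop_component(doc):
    # registered no-op component; importing this module via --code makes the
    # name resolvable when the pipeline config refers to it
    return doc
```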