From 9cdd23e4f467190c7a640a1c9625a07a99669735 Mon Sep 17 00:00:00 2001
From: DeathMadeForMe <166260318+DeathMadeForMe@users.noreply.github.com>
Date: Mon, 8 Apr 2024 01:15:11 +0000
Subject: [PATCH] hello

---
 .github/workflows/gputests.yml       |  2 +-
 extra/DEVELOPER_DOCS/ExplosionBot.md | 44 ++++++++++++++++++++++++++++
 2 files changed, 45 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/gputests.yml b/.github/workflows/gputests.yml
index 66e0707e0..caa7f8a84 100644
--- a/.github/workflows/gputests.yml
+++ b/.github/workflows/gputests.yml
@@ -19,4 +19,4 @@ jobs:
           PIPELINE: explosion-ai/spacy-slow-gpu-tests
           BRANCH: ${{ matrix.branch }}
           MESSAGE: ":github: Weekly GPU + slow tests - triggered from a GitHub Action"
-          BUILDKITE_API_ACCESS_TOKEN: ${{ secrets.BUILDKITE_SECRET }}
+          BUILDKITE_API_ACCESS_TOKEN: ${{ secrets.BUILDKITE_SECRET }}
\ No newline at end of file
diff --git a/extra/DEVELOPER_DOCS/ExplosionBot.md b/extra/DEVELOPER_DOCS/ExplosionBot.md
index 606fe93a0..471011d4a 100644
--- a/extra/DEVELOPER_DOCS/ExplosionBot.md
+++ b/extra/DEVELOPER_DOCS/ExplosionBot.md
@@ -54,3 +54,47 @@ Some things to note:
 If the robot isn't responding to commands as expected, you can check its logs in the [Github Action](https://github.com/explosion/spaCy/actions/workflows/explosionbot.yml).
 
 For each command sent to the bot, there should be a run of the `explosion-bot` workflow. In the `Install and run explosion-bot` step, towards the ends of the logs you should see info about the configuration that the bot was run with, as well as any errors that the bot encountered.
+
+```python
+import tiktoken
+
+def num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613"):
+    """Return the number of tokens used by a list of messages."""
+    try:
+        encoding = tiktoken.encoding_for_model(model)
+    except KeyError:
+        print("Warning: model not found. Using cl100k_base encoding.")
+        encoding = tiktoken.get_encoding("cl100k_base")
+    if model in {
+        "gpt-3.5-turbo-0613",
+        "gpt-3.5-turbo-16k-0613",
+        "gpt-4-0314",
+        "gpt-4-32k-0314",
+        "gpt-4-0613",
+        "gpt-4-32k-0613",
+    }:
+        tokens_per_message = 3
+        tokens_per_name = 1
+    elif model == "gpt-3.5-turbo-0301":
+        tokens_per_message = 4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
+        tokens_per_name = -1  # if there's a name, the role is omitted
+    elif "gpt-3.5-turbo" in model:
+        print("Warning: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.")
+        return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613")
+    elif "gpt-4" in model:
+        print("Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
+        return num_tokens_from_messages(messages, model="gpt-4-0613")
+    else:
+        raise NotImplementedError(
+            f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
+        )
+    num_tokens = 0
+    for message in messages:
+        num_tokens += tokens_per_message
+        for key, value in message.items():
+            num_tokens += len(encoding.encode(value))
+            if key == "name":
+                num_tokens += tokens_per_name
+    num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
+    return num_tokens
+```
\ No newline at end of file
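
A minimal usage sketch for the `num_tokens_from_messages` helper added above, assuming `tiktoken` is installed (`pip install tiktoken`) and the function is defined as in the patch; the example messages below are illustrative only:

```python
# Hypothetical example messages in the OpenAI chat format.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "How many tokens does this conversation use?"},
]

# Counts the per-message overhead plus the encoded length of each field value.
print(num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613"))
```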