From 8fa2ef195970a1455b78389fa0840da3380b77b3 Mon Sep 17 00:00:00 2001
From: Andrei Betlen
Date: Fri, 26 May 2023 03:00:35 -0400
Subject: [PATCH] Format

---
 llama_cpp/llama.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py
index b43dfe7..0978e1e 100644
--- a/llama_cpp/llama.py
+++ b/llama_cpp/llama.py
@@ -696,9 +696,7 @@ class Llama:
         llama_cpp.llama_reset_timings(self.ctx)
 
         if len(prompt_tokens) + max_tokens > self._n_ctx:
-            raise ValueError(
-                f"Requested tokens exceed context window of {self._n_ctx}"
-            )
+            raise ValueError(f"Requested tokens exceed context window of {self._n_ctx}")
 
         if stop != []:
             stop_sequences = [s.encode("utf-8") for s in stop]
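
The hunk above only reflows the ValueError call onto one line; the guard's
behavior is unchanged. For context, a minimal standalone sketch of that
guard follows (the helper name check_context_window and the numeric values
are illustrative, not part of llama_cpp):

    def check_context_window(prompt_tokens, max_tokens, n_ctx):
        # Mirrors the patched guard: a completion of max_tokens new tokens
        # cannot fit if the prompt already consumes the rest of the window.
        if len(prompt_tokens) + max_tokens > n_ctx:
            raise ValueError(f"Requested tokens exceed context window of {n_ctx}")

    try:
        # 2000 prompt tokens + 100 requested tokens > 2048-token window
        check_context_window(prompt_tokens=list(range(2000)), max_tokens=100, n_ctx=2048)
    except ValueError as e:
        print(e)  # -> Requested tokens exceed context window of 2048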