From 3cd67c7bd730a721dcea915042ad8568afe76111 Mon Sep 17 00:00:00 2001
From: Andrei Betlen
Date: Sat, 15 Apr 2023 11:39:21 -0400
Subject: [PATCH] Add type annotations

---
 llama_cpp/llama.py | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py
index 0754a8d..54a2f4a 100644
--- a/llama_cpp/llama.py
+++ b/llama_cpp/llama.py
@@ -332,13 +332,15 @@ class Llama:
         stream: bool = False,
     ) -> Union[Iterator[Completion], Iterator[CompletionChunk]]:
         assert self.ctx is not None
-        completion_id = f"cmpl-{str(uuid.uuid4())}"
-        created = int(time.time())
+        completion_id: str = f"cmpl-{str(uuid.uuid4())}"
+        created: int = int(time.time())
         completion_tokens: List[llama_cpp.llama_token] = []
         # Add blank space to start of prompt to match OG llama tokenizer
-        prompt_tokens = self.tokenize(b" " + prompt.encode("utf-8"))
-        text = b""
-        returned_characters = 0
+        prompt_tokens: List[llama_cpp.llama_token] = self.tokenize(
+            b" " + prompt.encode("utf-8")
+        )
+        text: bytes = b""
+        returned_characters: int = 0
         stop = stop if stop is not None else []
         if self.verbose:
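
Note for reviewers: the sketch below is not part of the patch; it illustrates
the annotation pattern being applied, assuming only that llama_cpp.llama_token
is an int-like alias (as in the ctypes bindings). The tokenize stub is
hypothetical and stands in for Llama.tokenize.

    from typing import List

    # Hypothetical stand-in for llama_cpp.llama_token (an int alias in the
    # real bindings); the tokenizer below is illustrative only.
    llama_token = int

    def tokenize(text: bytes) -> List[llama_token]:
        # One token per byte, purely for demonstration.
        return [llama_token(b) for b in text]

    prompt: str = "Hello"
    # Annotated locals, mirroring the patch: a checker such as mypy can now
    # flag misuse (e.g. concatenating `text` with a str) before runtime.
    prompt_tokens: List[llama_token] = tokenize(b" " + prompt.encode("utf-8"))
    text: bytes = b""
    returned_characters: int = 0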