From cefc69ea43ee62eb563f534297b3f935bdc77cb8 Mon Sep 17 00:00:00 2001
From: Andrei Betlen
Date: Wed, 5 Apr 2023 03:25:37 -0400
Subject: [PATCH] Add runtime check to ensure embedding is enabled if trying to
 generate embeddings

---
 llama_cpp/llama.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py
index b7811c6..182f855 100644
--- a/llama_cpp/llama.py
+++ b/llama_cpp/llama.py
@@ -242,6 +242,11 @@ class Llama:
         """
         assert self.ctx is not None

+        if self.params.embedding == False:
+            raise RuntimeError(
+                "Llama model must be created with embedding=True to call this method"
+            )
+
         if self.verbose:
             llama_cpp.llama_reset_timings(self.ctx)