From c854c2564b8cd97c87702480c106a86ca1828d31 Mon Sep 17 00:00:00 2001
From: Andrei Betlen
Date: Wed, 12 Apr 2023 14:07:14 -0400
Subject: [PATCH] Don't serialize stateful parameters

---
 llama_cpp/llama.py | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py
index 45e09d1..c545420 100644
--- a/llama_cpp/llama.py
+++ b/llama_cpp/llama.py
@@ -763,8 +763,6 @@ class Llama:
             use_mlock=self.params.use_mlock,
             embedding=self.params.embedding,
             last_n_tokens_size=self.last_n_tokens_size,
-            last_n_tokens_data=self.last_n_tokens_data,
-            tokens_consumed=self.tokens_consumed,
             n_batch=self.n_batch,
             n_threads=self.n_threads,
         )
@@ -786,9 +784,6 @@ class Llama:
             last_n_tokens_size=state["last_n_tokens_size"],
             verbose=state["verbose"],
         )
-        self.last_n_tokens_data = state["last_n_tokens_data"]
-        self.tokens_consumed = state["tokens_consumed"]
-
     @staticmethod
     def token_eos() -> llama_cpp.llama_token: