From 8dbb5449db259a9c24796e7927d89bee98b6c8f5 Mon Sep 17 00:00:00 2001
From: Bruce MacDonald <brucewmacdonald@gmail.com>
Date: Thu, 5 Oct 2023 11:21:12 -0400
Subject: [PATCH] remove warm up logging
---
 common/common.cpp | 2 --
 1 file changed, 2 deletions(-)

diff --git a/common/common.cpp b/common/common.cpp
index 7370017..c4433fe 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -839,8 +839,6 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
     }
 
     {
-        LOG("warming up the model with an empty run\n");
-
         std::vector<llama_token> tmp = { llama_token_bos(lctx), llama_token_eos(lctx), };
         llama_decode(lctx, llama_batch_get_one(tmp.data(), std::min(tmp.size(), (size_t) params.n_batch), 0, 0));
         llama_kv_cache_tokens_rm(lctx, -1, -1);
-- 
2.39.2 (Apple Git-143)