From c9167494cbc3de3771ab7b7c10b15caa795229d0 Mon Sep 17 00:00:00 2001
From: Michael Yang
Date: Mon, 23 Oct 2023 10:41:18 -0700
Subject: [PATCH] update default log target

---
 llm/llama.cpp/generate_darwin_amd64.go        |  2 +-
 llm/llama.cpp/generate_darwin_arm64.go        |  2 +-
 llm/llama.cpp/generate_linux.go               |  2 +-
 llm/llama.cpp/generate_windows.go             |  2 +-
 .../patches/0001-remove-warm-up-logging.patch | 25 -------------------
 .../0001-update-default-log-target.patch      | 25 +++++++++++++++++++
 6 files changed, 29 insertions(+), 29 deletions(-)
 delete mode 100644 llm/llama.cpp/patches/0001-remove-warm-up-logging.patch
 create mode 100644 llm/llama.cpp/patches/0001-update-default-log-target.patch

diff --git a/llm/llama.cpp/generate_darwin_amd64.go b/llm/llama.cpp/generate_darwin_amd64.go
index 72ea715a..dfa1eac3 100644
--- a/llm/llama.cpp/generate_darwin_amd64.go
+++ b/llm/llama.cpp/generate_darwin_amd64.go
@@ -12,7 +12,7 @@ package llm
 //go:generate mv ggml/build/cpu/bin/server ggml/build/cpu/bin/ollama-runner
 
 //go:generate git submodule update --force gguf
-//go:generate git -C gguf apply ../patches/0001-remove-warm-up-logging.patch
+//go:generate git -C gguf apply ../patches/0001-update-default-log-target.patch
 //go:generate cmake -S gguf -B gguf/build/cpu -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DCMAKE_SYSTEM_PROCESSOR=x86_64 -DCMAKE_OSX_ARCHITECTURES=x86_64 -DCMAKE_OSX_DEPLOYMENT_TARGET=11.0
 //go:generate cmake --build gguf/build/cpu --target server --config Release
 //go:generate mv gguf/build/cpu/bin/server gguf/build/cpu/bin/ollama-runner
diff --git a/llm/llama.cpp/generate_darwin_arm64.go b/llm/llama.cpp/generate_darwin_arm64.go
index 3413850d..81fd8914 100644
--- a/llm/llama.cpp/generate_darwin_arm64.go
+++ b/llm/llama.cpp/generate_darwin_arm64.go
@@ -12,7 +12,7 @@ package llm
 //go:generate mv ggml/build/metal/bin/server ggml/build/metal/bin/ollama-runner
 
 //go:generate git submodule update --force gguf
-//go:generate git -C gguf apply ../patches/0001-remove-warm-up-logging.patch
+//go:generate git -C gguf apply ../patches/0001-update-default-log-target.patch
 //go:generate cmake -S gguf -B gguf/build/metal -DLLAMA_METAL=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DCMAKE_SYSTEM_PROCESSOR=arm64 -DCMAKE_OSX_ARCHITECTURES=arm64 -DCMAKE_OSX_DEPLOYMENT_TARGET=11.0
 //go:generate cmake --build gguf/build/metal --target server --config Release
 //go:generate mv gguf/build/metal/bin/server gguf/build/metal/bin/ollama-runner
diff --git a/llm/llama.cpp/generate_linux.go b/llm/llama.cpp/generate_linux.go
index 07849f85..23920265 100644
--- a/llm/llama.cpp/generate_linux.go
+++ b/llm/llama.cpp/generate_linux.go
@@ -13,7 +13,7 @@ package llm
 
 //go:generate git submodule update --force gguf
 //go:generate git -C gguf apply ../patches/0001-copy-cuda-runtime-libraries.patch
-//go:generate git -C gguf apply ../patches/0001-remove-warm-up-logging.patch
+//go:generate git -C gguf apply ../patches/0001-update-default-log-target.patch
 //go:generate cmake -S gguf -B gguf/build/cpu -DLLAMA_K_QUANTS=on
 //go:generate cmake --build gguf/build/cpu --target server --config Release
 //go:generate mv gguf/build/cpu/bin/server gguf/build/cpu/bin/ollama-runner
diff --git a/llm/llama.cpp/generate_windows.go b/llm/llama.cpp/generate_windows.go
index 20353c2c..3db1a3e0 100644
--- a/llm/llama.cpp/generate_windows.go
+++ b/llm/llama.cpp/generate_windows.go
@@ -10,7 +10,7 @@ package llm
 //go:generate cmd /c move ggml\build\cpu\bin\Release\server.exe ggml\build\cpu\bin\Release\ollama-runner.exe
 
 //go:generate git submodule update --force gguf
-//go:generate git -C gguf apply ../patches/0001-remove-warm-up-logging.patch
+//go:generate git -C gguf apply ../patches/0001-update-default-log-target.patch
 //go:generate cmake -S gguf -B gguf/build/cpu -DLLAMA_K_QUANTS=on
 //go:generate cmake --build gguf/build/cpu --target server --config Release
 //go:generate cmd /c move gguf\build\cpu\bin\Release\server.exe gguf\build\cpu\bin\Release\ollama-runner.exe
diff --git a/llm/llama.cpp/patches/0001-remove-warm-up-logging.patch b/llm/llama.cpp/patches/0001-remove-warm-up-logging.patch
deleted file mode 100644
index 662b651e..00000000
--- a/llm/llama.cpp/patches/0001-remove-warm-up-logging.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-From 8dbb5449db259a9c24796e7927d89bee98b6c8f5 Mon Sep 17 00:00:00 2001
-From: Bruce MacDonald
-Date: Thu, 5 Oct 2023 11:21:12 -0400
-Subject: [PATCH] remove warm up logging
-
----
- common/common.cpp | 2 --
- 1 file changed, 2 deletions(-)
-
-diff --git a/common/common.cpp b/common/common.cpp
-index 7370017..c4433fe 100644
---- a/common/common.cpp
-+++ b/common/common.cpp
-@@ -839,8 +839,6 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
-     }
- 
-     {
--        LOG("warming up the model with an empty run\n");
--
-         std::vector<llama_token> tmp = { llama_token_bos(lctx), llama_token_eos(lctx), };
-         llama_decode(lctx, llama_batch_get_one(tmp.data(), std::min(tmp.size(), (size_t) params.n_batch), 0, 0));
-         llama_kv_cache_tokens_rm(lctx, -1, -1);
---
-2.39.2 (Apple Git-143)
-
diff --git a/llm/llama.cpp/patches/0001-update-default-log-target.patch b/llm/llama.cpp/patches/0001-update-default-log-target.patch
new file mode 100644
index 00000000..568ca716
--- /dev/null
+++ b/llm/llama.cpp/patches/0001-update-default-log-target.patch
@@ -0,0 +1,25 @@
+From 6465fec6290f0a7f5d4d0fbe6bcf634e4810dde6 Mon Sep 17 00:00:00 2001
+From: Michael Yang
+Date: Mon, 23 Oct 2023 10:39:34 -0700
+Subject: [PATCH] default log stderr
+
+---
+ common/log.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/common/log.h b/common/log.h
+index b8953fd..25522cd 100644
+--- a/common/log.h
++++ b/common/log.h
+@@ -90,7 +90,7 @@
+ // }
+ //
+ #ifndef LOG_TARGET
+-    #define LOG_TARGET log_handler()
++    #define LOG_TARGET nullptr
+ #endif
+ 
+ #ifndef LOG_TEE_TARGET
+--
+2.42.0
+