Merge pull request #1675 from dhiltgen/less_verbose

Quiet down llama.cpp logging by default
Daniel Hiltgen 2023-12-22 08:57:17 -08:00 committed by GitHub
commit 9db28af84e
5 changed files with 25 additions and 14 deletions


@@ -14,7 +14,13 @@ Install required tools:
 brew install go cmake gcc
 ```
 
-Get the required libraries:
+Optionally enable debugging and more verbose logging:
+
+```bash
+export CGO_CFLAGS="-g"
+```
+
+Get the required libraries and build the native LLM code:
 
 ```bash
 go generate ./...
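In practice the updated instructions describe two build paths. A minimal sketch of both (assuming the surrounding steps in the doc, including the usual `go build .` that follows code generation):

```bash
# Default build: Release native code, quiet llama.cpp server logging.
go generate ./...
go build .

# Optional debug build: a "-g" in CGO_CFLAGS switches the native build to
# RelWithDebInfo and turns verbose llama.cpp server logging back on.
export CGO_CFLAGS="-g"
go generate ./...
go build .
```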


@@ -3,14 +3,14 @@
 init_vars() {
   LLAMACPP_DIR=gguf
   PATCHES="0001-Expose-callable-API-for-server.patch"
-  CMAKE_DEFS="-DLLAMA_ACCELERATE=on -DLLAMA_SERVER_VERBOSE=off"
+  CMAKE_DEFS="-DLLAMA_ACCELERATE=on"
   # TODO - LLAMA_K_QUANTS is stale and needs to be mapped to newer cmake settings
   CMAKE_TARGETS="--target ggml --target ggml_static --target llama --target build_info --target common --target ext_server --target llava_static"
   if echo "${CGO_CFLAGS}" | grep -- '-g' >/dev/null; then
-    CMAKE_DEFS="-DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_VERBOSE_MAKEFILE=on -DLLAMA_GPROF=on ${CMAKE_DEFS}"
+    CMAKE_DEFS="-DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_VERBOSE_MAKEFILE=on -DLLAMA_GPROF=on -DLLAMA_SERVER_VERBOSE=on ${CMAKE_DEFS}"
   else
     # TODO - add additional optimization flags...
-    CMAKE_DEFS="-DCMAKE_BUILD_TYPE=Release ${CMAKE_DEFS}"
+    CMAKE_DEFS="-DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off ${CMAKE_DEFS}"
   fi
 }
 
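The selection above keys off a substring match against CGO_CFLAGS. As an illustration only (the `pick_defs` helper below is hypothetical, not part of the repo), the decision reduces to:

```bash
#!/bin/sh
# Hypothetical reduction of the init_vars logic: any "-g" in CGO_CFLAGS selects
# a debug configure with verbose server logging; otherwise a quiet Release build.
pick_defs() {
    if echo "$1" | grep -- '-g' >/dev/null; then
        echo "-DCMAKE_BUILD_TYPE=RelWithDebInfo -DLLAMA_SERVER_VERBOSE=on"
    else
        echo "-DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off"
    fi
}

pick_defs ""       # prints the Release/quiet flags (the new default)
pick_defs "-g"     # prints the RelWithDebInfo/verbose flags
pick_defs "-g -O2" # still verbose: grep matches "-g" anywhere in the string
```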


@@ -7,9 +7,10 @@ function init_vars {
     $script:cmakeDefs = @("-DBUILD_SHARED_LIBS=on", "-DLLAMA_NATIVE=off", "-DLLAMA_F16C=off", "-DLLAMA_FMA=off", "-DLLAMA_AVX512=off", "-DLLAMA_AVX2=off", "-DLLAMA_AVX=on", "-DLLAMA_K_QUANTS=on", "-DLLAMA_ACCELERATE=on", "-A","x64")
     if ($env:CGO_CFLAGS -contains "-g") {
-        $script:cmakeDefs += @("-DCMAKE_VERBOSE_MAKEFILE=on")
+        $script:cmakeDefs += @("-DCMAKE_VERBOSE_MAKEFILE=on", "-DLLAMA_SERVER_VERBOSE=on")
         $script:config = "RelWithDebInfo"
     } else {
+        $script:cmakeDefs += @("-DLLAMA_SERVER_VERBOSE=off")
         $script:config = "Release"
     }
 }
 


@@ -1,22 +1,22 @@
-From 4c72576c5f6c2217b1ecf7fd8523616acc5526ae Mon Sep 17 00:00:00 2001
+From 90c332fe2ef61149b38561d02836e66715df214d Mon Sep 17 00:00:00 2001
 From: Daniel Hiltgen <daniel@ollama.com>
 Date: Mon, 13 Nov 2023 12:25:58 -0800
 Subject: [PATCH] Expose callable API for server
 
 This adds an extern "C" interface within the example server
 ---
- examples/server/CMakeLists.txt |  24 +++
- examples/server/server.cpp     | 279 +++++++++++++++++++++++++++++++++
+ examples/server/CMakeLists.txt |  27 ++++
+ examples/server/server.cpp     | 280 +++++++++++++++++++++++++++++++++
  examples/server/server.h       |  89 +++++++++++
  ggml-cuda.cu                   |   1 +
- 4 files changed, 393 insertions(+)
+ 4 files changed, 397 insertions(+)
  create mode 100644 examples/server/server.h
 
 diff --git a/examples/server/CMakeLists.txt b/examples/server/CMakeLists.txt
-index 859cd12..4ea47a7 100644
+index 859cd12..da2b9bf 100644
 --- a/examples/server/CMakeLists.txt
 +++ b/examples/server/CMakeLists.txt
-@@ -11,3 +11,27 @@ if (WIN32)
+@@ -11,3 +11,30 @@ if (WIN32)
      TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32)
  endif()
  target_compile_features(${TARGET} PRIVATE cxx_std_11)
@@ -29,6 +29,9 @@ index 859cd12..4ea47a7 100644
 +target_compile_features(${TARGET} PRIVATE cxx_std_11)
 +target_compile_definitions(${TARGET} PUBLIC LLAMA_SERVER_LIBRARY=1)
 +target_link_libraries(${TARGET} PRIVATE common llama llava ${CMAKE_THREAD_LIBS_INIT})
++target_compile_definitions(${TARGET} PRIVATE
++    SERVER_VERBOSE=$<BOOL:${LLAMA_SERVER_VERBOSE}>
++)
 +
 +if (BUILD_SHARED_LIBS)
 +    set_target_properties(ext_server PROPERTIES POSITION_INDEPENDENT_CODE ON)
@@ -46,7 +49,7 @@ index 859cd12..4ea47a7 100644
 +endif()
 \ No newline at end of file
 diff --git a/examples/server/server.cpp b/examples/server/server.cpp
-index 0403853..5e78e4d 100644
+index 0403853..07fb05c 100644
 --- a/examples/server/server.cpp
 +++ b/examples/server/server.cpp
 @@ -5,6 +5,9 @@
@@ -67,7 +70,7 @@ index 0403853..5e78e4d 100644
 int main(int argc, char **argv)
 {
 #if SERVER_VERBOSE != 1
-@@ -3123,3 +3127,278 @@ int main(int argc, char **argv)
+@@ -3123,3 +3127,279 @@ int main(int argc, char **argv)
 llama_backend_free();
 return 0;
 }
@@ -81,6 +84,7 @@ index 0403853..5e78e4d 100644
 +void llama_server_init(ext_server_params *sparams, ext_server_resp_t *err)
 +{
 +#if SERVER_VERBOSE != 1
++    LOG_TEE("disabling verbose llm logging\n");
 +    log_disable();
 +#endif
 +    assert(err != NULL && sparams != NULL);
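The patched CMakeLists.txt now derives the ext_server target's SERVER_VERBOSE preprocessor value from llama.cpp's LLAMA_SERVER_VERBOSE option via a $<BOOL:...> generator expression, and llama_server_init() calls log_disable() when that value is not 1. A hedged sketch of exercising the switch by hand (the gguf source directory and ext_server target come from the generate scripts above; exact cmake invocations may differ):

```bash
# Configure the vendored llama.cpp tree (LLAMACPP_DIR=gguf in the scripts above)
# twice to compare the two logging modes of the patched ext_server target.
cmake -S gguf -B build-quiet   -DLLAMA_SERVER_VERBOSE=off   # SERVER_VERBOSE=0 -> log_disable() at init
cmake -S gguf -B build-verbose -DLLAMA_SERVER_VERBOSE=on    # SERVER_VERBOSE=1 -> verbose logs kept

cmake --build build-quiet   --target ext_server
cmake --build build-verbose --target ext_server
```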


@@ -8,7 +8,7 @@ export GOFLAGS="'-ldflags=-w -s \"-X=github.com/jmorganca/ollama/version.Version
 mkdir -p dist
 
 for TARGETARCH in amd64 arm64; do
-    docker buildx build --load --progress=plain --platform=linux/$TARGETARCH --build-arg=VERSION --build-arg=GOFLAGS -f Dockerfile.build -t builder:$TARGETARCH .
+    docker buildx build --load --platform=linux/$TARGETARCH --build-arg=VERSION --build-arg=GOFLAGS --build-arg=CGO_CFLAGS -f Dockerfile.build -t builder:$TARGETARCH .
     docker create --platform linux/$TARGETARCH --name builder-$TARGETARCH builder:$TARGETARCH
     docker cp builder-$TARGETARCH:/go/src/github.com/jmorganca/ollama/ollama ./dist/ollama-linux-$TARGETARCH
     docker rm builder-$TARGETARCH
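Because `--build-arg=CGO_CFLAGS` is passed without a value, docker buildx forwards the variable from the calling environment into the image build. A hedged usage sketch (the script path and Dockerfile.build accepting a CGO_CFLAGS build arg are assumptions):

```bash
# Release-style build: CGO_CFLAGS unset, so the containers produce quiet,
# Release-configured binaries in dist/ollama-linux-{amd64,arm64}.
./scripts/build_linux.sh

# Debug/verbose build: the exported CGO_CFLAGS reaches the build inside the
# container via --build-arg=CGO_CFLAGS, flipping on RelWithDebInfo and
# LLAMA_SERVER_VERBOSE as shown in the generate scripts above.
CGO_CFLAGS="-g" ./scripts/build_linux.sh
```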