ollama/llama/patches/0004-ggml-metal.patch

From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: jmorganca <jmorganca@gmail.com>
Date: Wed, 12 Jun 2024 12:18:40 -0700
Subject: [PATCH] ggml-metal

---
 ggml/src/ggml-metal.m | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/ggml/src/ggml-metal.m b/ggml/src/ggml-metal.m
index 3a433703..829c5e39 100644
--- a/ggml/src/ggml-metal.m
+++ b/ggml/src/ggml-metal.m
@@ -392,8 +392,8 @@ static void ggml_metal_log(enum ggml_log_level level, const char * format, ...){
 #if GGML_METAL_EMBED_LIBRARY
         GGML_METAL_LOG_INFO("%s: using embedded metal library\n", __func__);
 
-        extern const char ggml_metallib_start[];
-        extern const char ggml_metallib_end[];
+        extern const char *ggml_metallib_start;
+        extern const char *ggml_metallib_end;
 
         NSString * src = [[NSString alloc] initWithBytes:ggml_metallib_start length:(ggml_metallib_end-ggml_metallib_start) encoding:NSUTF8StringEncoding];
 #else
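
Context for the change (not part of the patch): the upstream declarations treat ggml_metallib_start and ggml_metallib_end as array symbols, typically provided when the .metallib is embedded at build/link time, whereas the patched declarations are ordinary pointers that any other compilation unit can define and point at the embedded bytes. The C sketch below is illustrative only; the placeholder array and definitions are hypothetical, assuming the host build supplies the real library contents elsewhere.

/* Illustrative sketch, not part of the patch. With the patched
 * declarations, the embedded Metal library can be supplied by defining
 * the two pointers in a separate compilation unit. The byte array is a
 * hypothetical stand-in for the real .metallib contents. */
static const char embedded_metallib[] = { 0 }; /* placeholder for embedded .metallib bytes */

const char *ggml_metallib_start = embedded_metallib;
const char *ggml_metallib_end   = embedded_metallib + sizeof(embedded_metallib);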