package llm

import (
	"embed"
	"fmt"
	"log"
	"os"

	"github.com/jmorganca/ollama/api"
)
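
// libEmbed carries the ggml-metal.metal shader source produced by the
// llama.cpp gguf build; nativeInit extracts it at startup so the Metal
// backend can locate it at runtime.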
//go:embed llama.cpp/gguf/build/*/bin/ggml-metal.metal
var libEmbed embed.FS
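
// newRocmShimExtServer is the Darwin stub of the ROCm shim constructor;
// there is no ROCm backend on macOS, so it always returns an error.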
func newRocmShimExtServer(model string, adapters, projectors []string, numLayers int64, opts api.Options) (extServer, error) {
	// should never happen: Darwin builds have no ROCm path
	return nil, fmt.Errorf("ROCm GPUs not supported on Mac")
}
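
// nativeInit extracts the embedded Metal shader into workdir and points the
// Metal backend at it via GGML_METAL_PATH_RESOURCES.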
func nativeInit(workdir string) error {
	err := extractLib(workdir, "llama.cpp/gguf/build/*/bin/ggml-metal.metal")
	if err != nil {
		if err == payloadMissing {
			// TODO perhaps consider this a hard failure on arm macs?
			log.Printf("ggml-metal.metal payload missing")
			return nil
		}
		return err
	}
	// tell the Metal backend where to find the extracted shader
	os.Setenv("GGML_METAL_PATH_RESOURCES", workdir)
	return nil
}