Merge pull request #5128 from zhewang1-intc/fix_levelzero_empty_symbol_detect
Fix levelzero empty symbol detect
Commit 2abebb2cbe

3 changed files with 27 additions and 36 deletions
@@ -57,8 +57,6 @@ var (
     SchedSpread bool
     // Set via OLLAMA_TMPDIR in the environment
     TmpDir string
-    // Set via OLLAMA_INTEL_GPU in the environment
-    IntelGpu bool
 
     // Set via CUDA_VISIBLE_DEVICES in the environment
     CudaVisibleDevices string
@@ -103,7 +101,6 @@ func AsMap() map[string]EnvVar {
         ret["ROCR_VISIBLE_DEVICES"] = EnvVar{"ROCR_VISIBLE_DEVICES", RocrVisibleDevices, "Set which AMD devices are visible"}
         ret["GPU_DEVICE_ORDINAL"] = EnvVar{"GPU_DEVICE_ORDINAL", GpuDeviceOrdinal, "Set which AMD devices are visible"}
         ret["HSA_OVERRIDE_GFX_VERSION"] = EnvVar{"HSA_OVERRIDE_GFX_VERSION", HsaOverrideGfxVersion, "Override the gfx used for all detected AMD GPUs"}
-        ret["OLLAMA_INTEL_GPU"] = EnvVar{"OLLAMA_INTEL_GPU", IntelGpu, "Enable experimental Intel GPU detection"}
     }
     return ret
 }
@@ -279,10 +276,6 @@ func LoadConfig() {
         slog.Error("invalid setting", "OLLAMA_HOST", Host, "error", err, "using default port", Host.Port)
     }
 
-    if set, err := strconv.ParseBool(clean("OLLAMA_INTEL_GPU")); err == nil {
-        IntelGpu = set
-    }
-
     CudaVisibleDevices = clean("CUDA_VISIBLE_DEVICES")
     HipVisibleDevices = clean("HIP_VISIBLE_DEVICES")
     RocrVisibleDevices = clean("ROCR_VISIBLE_DEVICES")
gpu/gpu.go (54 changes)
@@ -280,35 +280,33 @@ func GetGPUInfo() GpuInfoList {
         }
 
         // Intel
-        if envconfig.IntelGpu {
-            oHandles = initOneAPIHandles()
-            for d := range oHandles.oneapi.num_drivers {
-                if oHandles.oneapi == nil {
-                    // shouldn't happen
-                    slog.Warn("nil oneapi handle with driver count", "count", int(oHandles.oneapi.num_drivers))
-                    continue
-                }
-                devCount := C.oneapi_get_device_count(*oHandles.oneapi, C.int(d))
-                for i := range devCount {
-                    gpuInfo := OneapiGPUInfo{
-                        GpuInfo: GpuInfo{
-                            Library: "oneapi",
-                        },
-                        driverIndex: int(d),
-                        gpuIndex:    int(i),
-                    }
-                    // TODO - split bootstrapping from updating free memory
-                    C.oneapi_check_vram(*oHandles.oneapi, C.int(d), i, &memInfo)
-                    // TODO - convert this to MinimumMemory based on testing...
-                    var totalFreeMem float64 = float64(memInfo.free) * 0.95 // work-around: leave some reserve vram for mkl lib used in ggml-sycl backend.
-                    memInfo.free = C.uint64_t(totalFreeMem)
-                    gpuInfo.TotalMemory = uint64(memInfo.total)
-                    gpuInfo.FreeMemory = uint64(memInfo.free)
-                    gpuInfo.ID = C.GoString(&memInfo.gpu_id[0])
-                    gpuInfo.Name = C.GoString(&memInfo.gpu_name[0])
-                    // TODO dependency path?
-                    oneapiGPUs = append(oneapiGPUs, gpuInfo)
-                }
+        oHandles = initOneAPIHandles()
+        for d := 0; oHandles.oneapi != nil && d < int(oHandles.oneapi.num_drivers); d++ {
+            if oHandles.oneapi == nil {
+                // shouldn't happen
+                slog.Warn("nil oneapi handle with driver count", "count", int(oHandles.oneapi.num_drivers))
+                continue
+            }
+            devCount := C.oneapi_get_device_count(*oHandles.oneapi, C.int(d))
+            for i := range devCount {
+                gpuInfo := OneapiGPUInfo{
+                    GpuInfo: GpuInfo{
+                        Library: "oneapi",
+                    },
+                    driverIndex: d,
+                    gpuIndex:    int(i),
+                }
+                // TODO - split bootstrapping from updating free memory
+                C.oneapi_check_vram(*oHandles.oneapi, C.int(d), i, &memInfo)
+                // TODO - convert this to MinimumMemory based on testing...
+                var totalFreeMem float64 = float64(memInfo.free) * 0.95 // work-around: leave some reserve vram for mkl lib used in ggml-sycl backend.
+                memInfo.free = C.uint64_t(totalFreeMem)
+                gpuInfo.TotalMemory = uint64(memInfo.total)
+                gpuInfo.FreeMemory = uint64(memInfo.free)
+                gpuInfo.ID = C.GoString(&memInfo.gpu_id[0])
+                gpuInfo.Name = C.GoString(&memInfo.gpu_name[0])
+                // TODO dependency path?
+                oneapiGPUs = append(oneapiGPUs, gpuInfo)
             }
         }
 
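The gpu.go hunk above drops the envconfig.IntelGpu gate and moves the nil check on the oneapi handle into the loop condition itself, so when initOneAPIHandles() leaves oHandles.oneapi nil the driver loop simply runs zero times. Below is a minimal standalone sketch of that guard pattern in C, using a made-up handle struct rather than ollama's real types:

/* Sketch only: hypothetical oneapi_handle_t, not ollama's definition. */
#include <stdio.h>

typedef struct {
  int num_drivers;
} oneapi_handle_t;

int main(void) {
  oneapi_handle_t *oneapi = NULL; /* pretend initialization failed */

  /* The NULL test runs before num_drivers is read on every iteration,
     so a failed init yields zero iterations instead of a crash. */
  for (int d = 0; oneapi != NULL && d < oneapi->num_drivers; d++) {
    printf("enumerating driver %d\n", d);
  }
  printf("done; no drivers enumerated\n");
  return 0;
}

In the diff this corresponds to the new condition oHandles.oneapi != nil && d < int(oHandles.oneapi.num_drivers); the in-loop nil check (commented "shouldn't happen") is kept as a second guard.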
@@ -50,7 +50,7 @@ void oneapi_init(char *oneapi_lib_path, oneapi_init_resp_t *resp) {
     LOG(resp->oh.verbose, "dlsym: %s\n", l[i].s);
 
     *l[i].p = LOAD_SYMBOL(resp->oh.handle, l[i].s);
-    if (!l[i].p) {
+    if (!*(l[i].p)) {
       resp->oh.handle = NULL;
       char *msg = LOAD_ERR();
       LOG(resp->oh.verbose, "dlerr: %s\n", msg);
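This one-line change is the fix the PR title refers to: l[i].p is the address of a slot in the symbol table, so it is never NULL and the old test could not detect a symbol that failed to load; the loaded value *(l[i].p) is what comes back NULL when the lookup fails. A self-contained sketch of the same load-and-check pattern using plain dlopen/dlsym (it assumes a Linux/glibc system with libm.so.6, the bogus symbol name is deliberate, and none of this is ollama's actual loader code):

/* Sketch of the dlsym table pattern; compile with: cc sketch.c -ldl */
#include <stdio.h>
#include <dlfcn.h>

int main(void) {
  void *handle = dlopen("libm.so.6", RTLD_LAZY); /* assumption: glibc Linux */
  if (!handle) {
    fprintf(stderr, "dlopen failed: %s\n", dlerror());
    return 1;
  }

  void *cos_fn = NULL;
  void *missing_fn = NULL;
  struct {
    const char *s; /* symbol name */
    void **p;      /* where the loaded symbol is stored */
  } l[] = {
    {"cos", &cos_fn},
    {"definitely_not_a_real_symbol", &missing_fn},
  };

  for (size_t i = 0; i < sizeof(l) / sizeof(l[0]); i++) {
    *l[i].p = dlsym(handle, l[i].s);
    /* l[i].p is the address of a local pointer and can never be NULL,
       so a check of the form if (!l[i].p) never fires; the loaded value
       *(l[i].p) is what has to be tested, as in the fix above. */
    if (!*(l[i].p)) {
      printf("missing symbol: %s\n", l[i].s);
    } else {
      printf("loaded symbol:  %s\n", l[i].s);
    }
  }

  dlclose(handle);
  return 0;
}

Run on a typical Linux box this prints one loaded and one missing symbol; with the pre-fix style of check the missing entry would go unnoticed and the NULL function pointer would only surface later, at call time.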