This commit is contained in:
Andrei Betlen 2024-06-04 00:49:26 -04:00
commit 951e39caf9

View file

@@ -41,14 +41,16 @@ if (LLAMA_BUILD)
         RESOURCE DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/llama_cpp
     )
     # Workaround for Windows + CUDA https://github.com/abetlen/llama-cpp-python/issues/563
-    install(
-        FILES $<TARGET_RUNTIME_DLLS:llama>
-        DESTINATION ${SKBUILD_PLATLIB_DIR}/llama_cpp
-    )
-    install(
-        FILES $<TARGET_RUNTIME_DLLS:llama>
-        DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/llama_cpp
-    )
+    if (WIN32 AND (LLAMA_CUDA OR LLAMA_CUBLAS))
+        install(
+            FILES $<TARGET_RUNTIME_DLLS:llama>
+            DESTINATION ${SKBUILD_PLATLIB_DIR}/llama_cpp
+        )
+        install(
+            FILES $<TARGET_RUNTIME_DLLS:llama>
+            DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/llama_cpp
+        )
+    endif()
 if (LLAVA_BUILD)
     if (LLAMA_CUBLAS OR LLAMA_CUDA)