/**
 * llama.cpp - commit 8962422b1c6f9b8b15f5aeaea42600bcc2d44177 - do not edit this file
 *
 * MIT License
 *
 * Copyright (c) 2023-2024 The ggml authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "common.cuh"
// Dequantize two 4-bit quants from a q4_0 block into v.
// Each byte of qs holds two quants (low/high nibble); both are offset by 8
// before being scaled by the block's d.
static __device__ __forceinline__ void dequantize_q4_0(const void * vx, const int64_t ib, const int iqs, dfloat2 & v){
    const block_q4_0 * blocks = (const block_q4_0 *) vx;

    const dfloat scale  = blocks[ib].d;
    const int    packed = blocks[ib].qs[iqs];

    // Low nibble -> v.x, high nibble -> v.y.
    v.x = packed & 0xF;
    v.y = packed >> 4;

#ifdef GGML_CUDA_F16
    // half2 path: remove the +8 offset and apply the scale, two lanes at once.
    v = __hsub2(v, {8.0f, 8.0f});
    v = __hmul2(v, {scale, scale});
#else
    v.x = (v.x - 8.0f) * scale;
    v.y = (v.y - 8.0f) * scale;
#endif // GGML_CUDA_F16
}
// Dequantize two 4-bit quants from a q4_1 block into v.
// q4_1 stores scale and min packed in dm (low half = scale, high half = min);
// result is quant * scale + min, so no fixed offset is subtracted.
static __device__ __forceinline__ void dequantize_q4_1(const void * vx, const int64_t ib, const int iqs, dfloat2 & v){
    const block_q4_1 * blocks = (const block_q4_1 *) vx;

    const dfloat scale = __low2half(blocks[ib].dm);
    const dfloat mn    = __high2half(blocks[ib].dm);

    const int packed = blocks[ib].qs[iqs];

    // Low nibble -> v.x, high nibble -> v.y.
    v.x = packed & 0xF;
    v.y = packed >> 4;

#ifdef GGML_CUDA_F16
    // half2 path: scale first, then add the minimum, two lanes at once.
    v = __hmul2(v, {scale, scale});
    v = __hadd2(v, {mn, mn});
#else
    v.x = (v.x * scale) + mn;
    v.y = (v.y * scale) + mn;
#endif // GGML_CUDA_F16
}
// Dequantize two 5-bit quants from a q5_0 block into v.
// The low 4 bits come from the nibbles of qs[iqs]; the 5th bit of each quant
// is taken from the qh bitfield. Values are offset by 16 before scaling by d.
static __device__ __forceinline__ void dequantize_q5_0(const void * vx, const int64_t ib, const int iqs, dfloat2 & v){
    const block_q5_0 * blocks = (const block_q5_0 *) vx;

    const dfloat scale = blocks[ib].d;

    // qh may be unaligned in the packed block; copy it out byte-wise.
    uint32_t high_bits;
    memcpy(&high_bits, blocks[ib].qh, sizeof(high_bits));

    // Extract bit iqs (for v.x) and bit iqs+16 (for v.y), positioned as bit 4.
    const int hi_x = ((high_bits >> (iqs + 0)) << 4) & 0x10;
    const int hi_y = ((high_bits >> (iqs + 12))     ) & 0x10;

    v.x = ((blocks[ib].qs[iqs] & 0xf) | hi_x);
    v.y = ((blocks[ib].qs[iqs] >> 4) | hi_y);

#ifdef GGML_CUDA_F16
    // half2 path: remove the +16 offset and apply the scale, two lanes at once.
    v = __hsub2(v, {16.0f, 16.0f});
    v = __hmul2(v, {scale, scale});
#else
    v.x = (v.x - 16.0f) * scale;
    v.y = (v.y - 16.0f) * scale;
#endif // GGML_CUDA_F16
}
// Dequantize two 5-bit quants from a q5_1 block into v.
// Like q5_0 the 5th bit of each quant comes from qh, but like q4_1 the block
// carries scale and min in dm (low half = scale, high half = min).
static __device__ __forceinline__ void dequantize_q5_1(const void * vx, const int64_t ib, const int iqs, dfloat2 & v){
    const block_q5_1 * blocks = (const block_q5_1 *) vx;

    const dfloat scale = __low2half(blocks[ib].dm);
    const dfloat mn    = __high2half(blocks[ib].dm);

    // qh may be unaligned in the packed block; copy it out byte-wise.
    uint32_t high_bits;
    memcpy(&high_bits, blocks[ib].qh, sizeof(high_bits));

    // Extract bit iqs (for v.x) and bit iqs+16 (for v.y), positioned as bit 4.
    const int hi_x = ((high_bits >> (iqs + 0)) << 4) & 0x10;
    const int hi_y = ((high_bits >> (iqs + 12))     ) & 0x10;

    v.x = ((blocks[ib].qs[iqs] & 0xf) | hi_x);
    v.y = ((blocks[ib].qs[iqs] >> 4) | hi_y);

#ifdef GGML_CUDA_F16
    // half2 path: scale first, then add the minimum, two lanes at once.
    v = __hmul2(v, {scale, scale});
    v = __hadd2(v, {mn, mn});
#else
    v.x = (v.x * scale) + mn;
    v.y = (v.y * scale) + mn;
#endif // GGML_CUDA_F16
}
// Dequantize two 8-bit quants from a q8_0 block into v.
// q8_0 stores one signed byte per quant; two consecutive values are loaded
// and multiplied by the block scale d (no offset, no minimum).
static __device__ __forceinline__ void dequantize_q8_0(const void * vx, const int64_t ib, const int iqs, dfloat2 & v){
    const block_q8_0 * blocks = (const block_q8_0 *) vx;

    const dfloat scale = blocks[ib].d;

    v.x = blocks[ib].qs[iqs + 0];
    v.y = blocks[ib].qs[iqs + 1];

#ifdef GGML_CUDA_F16
    // half2 path: scale both lanes in one op.
    v = __hmul2(v, {scale, scale});
#else
    v.x *= scale;
    v.y *= scale;
#endif // GGML_CUDA_F16
}