/**
 * llama.cpp - commit 8962422b1c6f9b8b15f5aeaea42600bcc2d44177 - do not edit this file
 *
 * MIT License
 *
 * Copyright (c) 2023-2024 The ggml authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "acc.cuh"

// Element-wise accumulate: dst[i] = x[i] + y[...] where y is a (ne10, ne11, ne12)
// sub-window placed inside x at `offset` (all sizes/strides in float32 elements,
// not bytes). Elements of x outside the window are copied through unchanged.
// Expected launch: 1D grid, one thread per dst element; threads past `ne` exit early.
static __global__ void acc_f32(const float * x, const float * y, float * dst, const int ne,
    const int ne10, const int ne11, const int ne12,
    const int nb1, const int nb2, int offset) {
    const int dst_idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (dst_idx >= ne) {
        return;
    }

    // Position of this element relative to the start of the accumulation window.
    // Truncating integer division is safe here: negative src1_idx (i.e. before the
    // window) is rejected by the `src1_idx >= 0` check below.
    const int src1_idx = dst_idx - offset;
    const int i2 = src1_idx / nb2;
    const int i1 = (src1_idx - i2 * nb2) / nb1;
    const int i0 = src1_idx % nb1;

    const bool in_window = src1_idx >= 0 && i0 < ne10 && i1 < ne11 && i2 < ne12;
    if (in_window) {
        dst[dst_idx] = x[dst_idx] + y[i0 + i1 * ne10 + i2 * ne10 * ne11];
    } else {
        dst[dst_idx] = x[dst_idx];
    }
}
// Host-side launcher for acc_f32: covers all n_elements with a 1D grid of
// CUDA_ACC_BLOCK_SIZE-thread blocks on the given stream (asynchronous launch).
static void acc_f32_cuda(const float * x, const float * y, float * dst, const int n_elements,
    const int ne10, const int ne11, const int ne12,
    const int nb1, const int nb2, const int offset, cudaStream_t stream) {
    // Ceiling division so the grid covers every element; the kernel guards the tail.
    const int blocks_needed = (n_elements + CUDA_ACC_BLOCK_SIZE - 1) / CUDA_ACC_BLOCK_SIZE;

    acc_f32<<<blocks_needed, CUDA_ACC_BLOCK_SIZE, 0, stream>>>(
        x, y, dst, n_elements, ne10, ne11, ne12, nb1, nb2, offset);
}
// ggml backend entry point for the ACC op: dst = src0 with src1 accumulated
// into the sub-view described by dst->op_params. F32 only; tensors must be
// at most 3D (dst->ne[3] == 1).
void ggml_cuda_op_acc(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    const ggml_tensor * src0 = dst->src[0];
    const ggml_tensor * src1 = dst->src[1];

    const float * src0_d = (const float *) src0->data;
    const float * src1_d = (const float *) src1->data;
    float       * dst_d  = (float *) dst->data;

    cudaStream_t stream = ctx.stream();

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);
    GGML_ASSERT(dst->ne[3] == 1); // just 3D tensors supported

    // op_params hold byte strides and a byte offset; divide by sizeof(float)
    // to get float32-element units expected by the kernel.
    const int nb1    = dst->op_params[0] / 4;
    const int nb2    = dst->op_params[1] / 4;
    // dst->op_params[2] (nb3) is unused here since only 3D tensors are supported.
    const int offset = dst->op_params[3] / 4;

    acc_f32_cuda(src0_d, src1_d, dst_d, ggml_nelements(dst),
                 src1->ne[0], src1->ne[1], src1->ne[2], nb1, nb2, offset, stream);
}