/**
 * llama.cpp - commit 8962422b1c6f9b8b15f5aeaea42600bcc2d44177 - do not edit this file
 *
 * MIT License
 *
 * Copyright (c) 2023-2024 The ggml authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "common.cuh"
#include "cross-entropy-loss.cuh"
#include "sumrows.cuh"

#include <cmath>
#include <cstdint>
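
// Kernel overview: each block handles up to CUDA_CROSS_ENTROPY_LOSS_BLOCK_SIZE rows.
// Every warp stages WARP_SIZE rows of logits and labels into shared memory, each lane
// then computes the loss of one row via a numerically stable log-softmax, and the
// per-row losses are reduced within the warp and then across warps. Each block writes
// a single partial sum to dst; the host combines the per-block results afterwards.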
static __global__ void cross_entropy_loss_f32(const float * logits, const float * labels, float * dst, const int nclasses, const int k) {
    const int warp_id = threadIdx.x / WARP_SIZE;
    const int lane_id = threadIdx.x % WARP_SIZE;
    const int i0 = blockDim.x*blockIdx.x + warp_id*WARP_SIZE; // first row handled by this warp

    const int ne_tmp = WARP_SIZE*nclasses; // elements staged per warp, per buffer

    extern __shared__ float tmp_all[];
    float * tmp_logits = tmp_all + (2*warp_id + 0)*ne_tmp;
    float * tmp_labels = tmp_all + (2*warp_id + 1)*ne_tmp;

    // Each warp first loads ne_tmp logits/labels into shared memory:
    for (int i = lane_id; i < ne_tmp; i += WARP_SIZE) {
        const int ig = i0*nclasses + i; // ig == i global

        // Out-of-range rows (ig >= k*nclasses) are zero-padded:
        tmp_logits[i] = ig < k*nclasses ? logits[ig] : 0.0f;
        tmp_labels[i] = ig < k*nclasses ? labels[ig] : 0.0f;
    }

    // Each thread in the warp then calculates the cross-entropy loss for a single row.
    // TODO: pad in order to avoid shared memory bank conflicts.
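
    // For one row of logits x and label distribution p, the loss computed below is:
    //   loss(x, p) = -sum_i p_i * log(softmax(x)_i)
    //              = -sum_i p_i * ((x_i - max(x)) - log(sum_j exp(x_j - max(x))))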

    // Find the row maximum for the softmax (subtracting it keeps expf from overflowing):
    float max = -INFINITY;
    for (int i = 0; i < nclasses; ++i) {
        max = fmaxf(max, tmp_logits[lane_id*nclasses + i]);
    }

    // Shift the logits by the maximum and accumulate the softmax normalizer:
    float sum = 0.0f;
    for (int i = 0; i < nclasses; ++i) {
        float val = tmp_logits[lane_id*nclasses + i] - max;
        sum += expf(val);
        tmp_logits[lane_id*nclasses + i] = val;
    }
    sum = logf(sum);

    // log(exp(logits - max) / sum) = (logits - max) - log(sum)
    float loss = 0.0f;
    for (int i = 0; i < nclasses; ++i) {
        loss += (tmp_logits[lane_id*nclasses + i] - sum) * tmp_labels[lane_id*nclasses + i];
    }
    // Sum the warp's per-row losses and normalize by the total row count k:
    loss = -warp_reduce_sum(loss) / (float)k;

    // All warps must be done reading shared memory before it is reused for the reduction:
    __syncthreads();

    // Lane 0 of each warp publishes its warp's partial loss:
    if (lane_id == 0) {
        tmp_all[warp_id] = loss;
    }

    __syncthreads();

    // Only the first warp combines the per-warp partial losses:
    if (warp_id != 0) {
        return;
    }

    loss = lane_id < CUDA_CROSS_ENTROPY_LOSS_BLOCK_SIZE/WARP_SIZE ? tmp_all[lane_id] : 0.0f;
    loss = warp_reduce_sum(loss);

    if (lane_id != 0) {
        return;
    }

    dst[blockIdx.x] = loss;
}

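// Launches cross_entropy_loss_f32 over the F32 tensors dst->src[0] (logits) and
// dst->src[1] (labels), then reduces the per-block partial sums into the scalar dst.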
void ggml_cuda_cross_entropy_loss(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    const ggml_tensor * src0 = dst->src[0];
    const ggml_tensor * src1 = dst->src[1];

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(src1));
    GGML_ASSERT(ggml_is_contiguous(dst));

    const int64_t ne00  = src0->ne[0];
    const int64_t nrows = ggml_nrows(src0);

    const float * src0_d = (const float *) src0->data;
    const float * src1_d = (const float *) src1->data;
    float       * dst_d  = (float       *) dst->data;

    ggml_cuda_pool & pool = ctx.pool();
    cudaStream_t stream = ctx.stream();

    const dim3 blocks_dim(CUDA_CROSS_ENTROPY_LOSS_BLOCK_SIZE, 1, 1);
    const dim3 blocks_num((nrows + CUDA_CROSS_ENTROPY_LOSS_BLOCK_SIZE - 1) / CUDA_CROSS_ENTROPY_LOSS_BLOCK_SIZE, 1, 1);
    // Shared memory: two staging buffers (logits and labels), one row of ne00 floats per thread:
    const int shmem = 2*CUDA_CROSS_ENTROPY_LOSS_BLOCK_SIZE*ne00*sizeof(float);

    ggml_cuda_pool_alloc<float> dst_tmp(pool, blocks_num.x);

    cross_entropy_loss_f32<<<blocks_num, blocks_dim, shmem, stream>>>(src0_d, src1_d, dst_tmp.ptr, ne00, nrows);

    // Combine results from individual blocks:
    sum_rows_f32_cuda(dst_tmp.ptr, dst_d, blocks_num.x, 1, stream);
}
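
// Usage sketch (illustrative only; `ctx`, `nclasses`, and `nrows` are assumptions,
// not part of this file): at the graph level the op is built with ggml's public API,
// and the CUDA backend dispatches it to the function above.
//
//   struct ggml_tensor * logits = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, nclasses, nrows);
//   struct ggml_tensor * labels = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, nclasses, nrows);
//   struct ggml_tensor * loss   = ggml_cross_entropy_loss(ctx, logits, labels);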