#include "acc.cuh"

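// Adds src1 (y) into a strided view of dst while copying src0 (x) through:
// dst[i] = x[i], plus the matching y element where i falls inside the view.
// The view starts at element `offset` and has element strides s11/s12/s13;
// src1 is contiguous with shape (ne10, ne11, ne12, ne13).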
static __global__ void acc_f32(const float * x, const float * y, float * dst, const int64_t ne,
    const int64_t ne10, const int64_t ne11, const int64_t ne12, const int64_t ne13,
    const int64_t s11, const int64_t s12, const int64_t s13, const int64_t offset) {
    const int64_t i = blockDim.x * blockIdx.x + threadIdx.x;

    if (i >= ne) {
        return;
    }

    int64_t src1_idx = i - offset;

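    // Decompose the view-relative index into the view's 4D coordinates
    // (i10, i11, i12, i13) using its element strides.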
    int64_t tmp = src1_idx;
    const int64_t i13 = tmp / s13;
    tmp -= i13 * s13;
    const int64_t i12 = tmp / s12;
    tmp -= i12 * s12;
    const int64_t i11 = tmp / s11;
    tmp -= i11 * s11;
    const int64_t i10 = tmp;

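    // Pass src0 through unchanged; src1 contributes only where this element
    // lies inside the view (index not before its start, all coordinates in range).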
    float val = x[i];
    if (src1_idx >= 0 && i10 < ne10 && i11 < ne11 && i12 < ne12 && i13 < ne13) {
        val += y[((i13*ne12 + i12) * ne11 + i11) * ne10 + i10];
    }
    dst[i] = val;
}

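// Launches one thread per dst element, rounding the grid size up;
// CUDA_ACC_BLOCK_SIZE comes from acc.cuh.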
static void acc_f32_cuda(const float * x, const float * y, float * dst, const int64_t n_elements,
    const int64_t ne10, const int64_t ne11, const int64_t ne12, const int64_t ne13,
    const int64_t s1, const int64_t s2, const int64_t s3, const int64_t offset, cudaStream_t stream) {
    const int num_blocks = (n_elements + CUDA_ACC_BLOCK_SIZE - 1) / CUDA_ACC_BLOCK_SIZE;
    acc_f32<<<num_blocks, CUDA_ACC_BLOCK_SIZE, 0, stream>>>(x, y, dst, n_elements, ne10, ne11, ne12, ne13, s1, s2, s3, offset);
}

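// Backend entry point: dst = src0 with src1 accumulated into a view of dst.
// The view is described by dst->op_params: byte strides (nb1, nb2, nb3) and a
// byte offset.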
void ggml_cuda_op_acc(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    const ggml_tensor * src0 = dst->src[0];
    const ggml_tensor * src1 = dst->src[1];

    const float * src0_d = (const float *) src0->data;
    const float * src1_d = (const float *) src1->data;
    float * dst_d = (float *) dst->data;

    cudaStream_t stream = ctx.stream();

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    GGML_ASSERT(ggml_is_contiguous(src1));
    GGML_ASSERT(dst->nb[0] == ggml_element_size(dst));
    GGML_ASSERT(ggml_is_contiguously_allocated(dst));

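    // op_params hold the view's strides and offset in bytes; convert them to
    // float-element units for the kernel.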
    const int64_t s1 = dst->op_params[0] / sizeof(float);
    const int64_t s2 = dst->op_params[1] / sizeof(float);
    const int64_t s3 = dst->op_params[2] / sizeof(float);
    const int64_t offset = dst->op_params[3] / sizeof(float);

    acc_f32_cuda(src0_d, src1_d, dst_d, ggml_nelements(dst), src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3], s1, s2, s3, offset, stream);
}
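
// Usage sketch (illustrative, not part of this file): at the graph level this
// op is created with ggml_acc(), which takes the view's byte strides and byte
// offset, e.g.:
//
//   struct ggml_tensor * dst = ggml_acc(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], offset_bytes);
//
// Here `ctx`, `a`, `b` and `offset_bytes` are placeholder names.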