whisper.cpp/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-cpb8.cu
CUDA: use mma PTX instructions for FlashAttention (llama/11583)
// This file has been autogenerated by generate_cu_files.py, do not edit manually.
#include "../fattn-mma-f16.cuh"
DECL_FATTN_MMA_F16_CASE(64, 8);
DECL_FATTN_MMA_F16_CASE(80, 8);
DECL_FATTN_MMA_F16_CASE(96, 8);
DECL_FATTN_MMA_F16_CASE(112, 8);
DECL_FATTN_MMA_F16_CASE(128, 8);
DECL_FATTN_MMA_F16_CASE(256, 8);
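
Each DECL_FATTN_MMA_F16_CASE(D, ncols) line appears to force an explicit template instantiation of the f16 MMA FlashAttention kernel for one head size D at 8 columns per block (the "cpb8" in the filename); spreading these instantiations across many autogenerated translation units lets them build in parallel instead of in one huge file. Below is a minimal sketch of that pattern with hypothetical function and parameter names; the real template and macro live in fattn-mma-f16.cuh and may differ.

// Hypothetical sketch of the explicit-instantiation pattern (assumed names,
// not the real ggml API). A shared header would declare a function template
// over head size D and columns per block, plus a macro to instantiate it.
#include <cstdio>

template <int D, int cols_per_block>
void flash_attn_mma_f16_case(const float * q, const float * k, const float * v,
                             float * dst, int n_tokens) {
    // The real implementation would launch an MMA-based FlashAttention
    // kernel specialized for D and cols_per_block; this stub only marks
    // where that specialization happens.
    (void) q; (void) k; (void) v; (void) dst;
    std::printf("instantiated: D=%d, cols_per_block=%d, n_tokens=%d\n",
                D, cols_per_block, n_tokens);
}

// The macro emits one explicit instantiation, so each generated .cu file
// compiles only the (D, cols_per_block) cases listed in it.
#define DECL_FATTN_MMA_F16_CASE(D, cols_per_block)                      \
    template void flash_attn_mma_f16_case<D, cols_per_block>(           \
        const float * q, const float * k, const float * v, float * dst, \
        int n_tokens)

DECL_FATTN_MMA_F16_CASE(64, 8);
DECL_FATTN_MMA_F16_CASE(128, 8);

Splitting the instances this way bounds per-file compile time and memory, which matters because each (D, cols_per_block) pair expands a heavy templated kernel.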