Fix SYCL `im2col` and `convert` Overflow with Large Dims (llama/9052)
* sycl: fix im2col overflow and sync with cuda
Signed-off-by: zhentaoyu <[email protected]>
* sycl: fix convert overflow
Signed-off-by: zhentaoyu <[email protected]>
* sycl: fix convert and dequantize
Signed-off-by: zhentaoyu <[email protected]>
* sycl: fix ib in dmmv
Signed-off-by: zhentaoyu <[email protected]>
* sycl: refine convert
Signed-off-by: zhentaoyu <[email protected]>
* sycl: move downsample global_range into common
Signed-off-by: zhentaoyu <[email protected]>
* test: add im2col and convert test cases
Signed-off-by: zhentaoyu <[email protected]>
* test: make new cases only in sycl
Signed-off-by: zhentaoyu <[email protected]>
* test: comment new test_cases for only local testing
Signed-off-by: zhentaoyu <[email protected]>
---------
Signed-off-by: zhentaoyu <[email protected]>
- ggml/src/ggml-sycl.cpp +0 -104
- ggml/src/ggml-sycl/backend.hpp +1 -0
- ggml/src/ggml-sycl/common.cpp +11 -0
- ggml/src/ggml-sycl/common.hpp +2 -0
- ggml/src/ggml-sycl/convert.cpp +57 -57
- ggml/src/ggml-sycl/convert.hpp +1 -1
- ggml/src/ggml-sycl/dequantize.hpp +98 -98
- ggml/src/ggml-sycl/dmmv.cpp +2 -2
- ggml/src/ggml-sycl/im2col.cpp +125 -0
- ggml/src/ggml-sycl/im2col.hpp +23 -0
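
All of these changes target the same failure mode: index arithmetic and SYCL launch ranges built from 32-bit `int`, which overflow once the element count or the nd-range exceeds INT_MAX (2^31 - 1). The patch widens the affected indices to `int64_t` and, where a launch would still be too wide, shrinks the work-group size and lets each work-item cover several elements with a grid-stride loop. A minimal standalone sketch of that pattern (illustrative only; `shrink_block_size` and the sample loop are not code from the patch):

#include <cstdint>
#include <limits>

// Halve the work-group size until blocks * block_size fits in a 32-bit int,
// mirroring the idea behind the downsample_sycl_global_range() helper added here.
static int64_t shrink_block_size(int64_t blocks, int64_t block_size) {
    const int64_t max_range = std::numeric_limits<int>::max();
    while (blocks * block_size > max_range) {
        block_size /= 2;
    }
    return block_size;
}

// With the smaller launch, a kernel still covers all k elements by striding:
//   for (int64_t i = global_id; i < k; i += work_group_size * group_count) {
//       y[i] = x[i];
//   }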
ggml/src/ggml-sycl.cpp

@@ -893,43 +893,6 @@ static void clamp_f32(const float * x, float * dst, const float min, const float
     dst[i] = x[i] < min ? min : (x[i] > max ? max : x[i]);
 }
 
-template <typename T>
-static void im2col_kernel(const float *x, T *dst, int offset_delta,
-                          int IW, int IH, int OW, int KW, int KH,
-                          int pelements, int CHW, int s0, int s1, int p0,
-                          int p1, int d0, int d1,
-                          const sycl::nd_item<3> &item_ct1) {
-    const int i = item_ct1.get_local_id(2) +
-                  item_ct1.get_group(2) * item_ct1.get_local_range(2);
-    if (i >= pelements) {
-        return;
-    }
-
-    const int ksize = OW * (KH > 1 ? KW : 1);
-    const int kx = i / ksize;
-    const int kd = kx * ksize;
-    const int ky = (i - kd) / OW;
-    const int ix = i % OW;
-
-    const int64_t iiw = ix * s0 + kx * d0 - p0;
-    const int64_t iih = item_ct1.get_group(1) * s1 + ky * d1 - p1;
-
-    const int64_t offset_dst =
-        (item_ct1.get_group(1) * OW + ix) * CHW +
-        (item_ct1.get_group(0) * (KW * KH) + ky * KW + kx);
-
-    if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) {
-        dst[offset_dst] =
-            sycl::vec<float, 1>(0.0f)
-                .convert<sycl::half, sycl::rounding_mode::automatic>()[0];
-    } else {
-        const int64_t offset_src = item_ct1.get_group(0) * offset_delta;
-        dst[offset_dst] =
-            sycl::vec<float, 1>(x[offset_src + iih * IW + iiw])
-                .convert<sycl::half, sycl::rounding_mode::automatic>()[0];
-    }
-}
-
 template <typename Ti, typename To>
 static void pool2d_nchw_kernel(
         const int ih, const int iw, const int oh, const int ow,

@@ -1742,32 +1705,6 @@ static void diag_mask_inf_f32_sycl(const float *x, float *dst,
         });
 }
 
-template <typename T>
-static void im2col_sycl(const float *x, T *dst, int IW, int IH,
-                        int OW, int OH, int KW, int KH, int IC,
-                        int offset_delta, int s0, int s1, int p0,
-                        int p1, int d0, int d1,
-                        queue_ptr stream) {
-    const int parallel_elements = OW * KW * KH;
-    const int num_blocks = (parallel_elements + SYCL_IM2COL_BLOCK_SIZE - 1) / SYCL_IM2COL_BLOCK_SIZE;
-    sycl::range<3> block_nums(IC, OH, num_blocks);
-    {
-        dpct::has_capability_or_fail(stream->get_device(),
-                                     {sycl::aspect::fp16});
-
-        stream->parallel_for(
-            sycl::nd_range<3>(block_nums *
-                                  sycl::range<3>(1, 1, SYCL_IM2COL_BLOCK_SIZE),
-                              sycl::range<3>(1, 1, SYCL_IM2COL_BLOCK_SIZE)),
-            [=](sycl::nd_item<3> item_ct1) {
-                im2col_kernel(x, dst, offset_delta, IW, IH, OW, KW, KH,
-                              parallel_elements, (IC * KH * KW), s0, s1, p0,
-                              p1, d0, d1, item_ct1);
-            });
-    }
-}
-
-
 static bool g_sycl_loaded = false;
 
 bool ggml_sycl_loaded(void) {

@@ -2636,47 +2573,6 @@ static void ggml_sycl_op_pool2d(ggml_backend_sycl_context & ctx, const ggml_tens
     (void) src1_dd;
 }
 
-inline void ggml_sycl_op_im2col(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
-                                const ggml_tensor *src1, ggml_tensor *dst,
-                                const float *src0_dd, const float *src1_dd,
-                                float *dst_dd,
-                                const queue_ptr &main_stream) {
-
-    GGML_ASSERT(src0->type == GGML_TYPE_F16);
-    GGML_ASSERT(src1->type == GGML_TYPE_F32);
-    GGML_ASSERT( dst->type == GGML_TYPE_F16 || dst->type == GGML_TYPE_F32);
-
-    const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
-    const int32_t s1 = ((const int32_t*)(dst->op_params))[1];
-    const int32_t p0 = ((const int32_t*)(dst->op_params))[2];
-    const int32_t p1 = ((const int32_t*)(dst->op_params))[3];
-    const int32_t d0 = ((const int32_t*)(dst->op_params))[4];
-    const int32_t d1 = ((const int32_t*)(dst->op_params))[5];
-
-    const bool is_2D = ((const int32_t*)(dst->op_params))[6] == 1;
-
-    const int64_t IC = src1->ne[is_2D ? 2 : 1];
-    const int64_t IH = is_2D ? src1->ne[1] : 1;
-    const int64_t IW = src1->ne[0];
-
-    const int64_t KH = is_2D ? src0->ne[1] : 1;
-    const int64_t KW = src0->ne[0];
-
-    const int64_t OH = is_2D ? dst->ne[2] : 1;
-    const int64_t OW = dst->ne[1];
-
-    const size_t delta_offset = src1->nb[is_2D ? 2 : 1] / 4; // nb is byte offset, src is type float32
-
-    if (dst->type == GGML_TYPE_F16) {
-        im2col_sycl(src1_dd, (sycl::half *)dst_dd, IW, IH, OW, OH, KW, KH, IC, delta_offset, s0, s1, p0, p1, d0, d1, main_stream);
-    } else {
-        im2col_sycl(src1_dd, (float *)dst_dd, IW, IH, OW, OH, KW, KH, IC, delta_offset, s0, s1, p0, p1, d0, d1, main_stream);
-    }
-
-    (void) src0;
-    (void) src0_dd;
-}
-
 inline void ggml_sycl_op_sum_rows(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
                                   const ggml_tensor *src1, ggml_tensor *dst,
                                   const float *src0_dd, const float *src1_dd,
ggml/src/ggml-sycl/backend.hpp

@@ -25,5 +25,6 @@
 #include "norm.hpp"
 #include "softmax.hpp"
 #include "tsembd.hpp"
+#include "im2col.hpp"
 
 #endif // GGML_SYCL_BACKEND_HPP
ggml/src/ggml-sycl/common.cpp

@@ -51,3 +51,14 @@ void ggml_sycl_host_free(void* ptr) try {
               << ", line:" << __LINE__ << std::endl;
     std::exit(1);
 }
+
+int64_t downsample_sycl_global_range(int64_t accumulate_block_num, int64_t block_size) {
+    const int64_t max_range = std::numeric_limits<int>::max();
+    int64_t sycl_down_blk_size = block_size;
+    int64_t global_range = accumulate_block_num * sycl_down_blk_size;
+    while(global_range > max_range) {
+        sycl_down_blk_size /= 2;
+        global_range = accumulate_block_num * sycl_down_blk_size;
+    }
+    return sycl_down_blk_size;
+}
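For illustration, a short usage sketch of the new helper (hypothetical values, not taken from the patch), showing how a caller shrinks the work-group size before building an nd-range:

// Assume 10,000,000 blocks of 256 work-items: 2,560,000,000 > INT_MAX,
// so the helper halves the block size once and returns 128.
const int64_t num_blocks = 10000000;   // hypothetical count
int64_t local_size = downsample_sycl_global_range(num_blocks, 256);
sycl::range<3> block_nums(1, 1, num_blocks);
sycl::range<3> local_range(1, 1, local_size);
// The nd_range global size (block_nums * local_range) now fits in a 32-bit int.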
ggml/src/ggml-sycl/common.hpp

@@ -352,4 +352,6 @@ static __dpct_inline__ Tp* get_pointer(sycl::local_accessor<Tp, dim> acc) {
     return acc.template get_multi_ptr<sycl::access::decorated::no>().get();
 }
 
+int64_t downsample_sycl_global_range(int64_t accumulate_block_num, int64_t block_size);
+
 #endif // GGML_SYCL_COMMON_HPP
ggml/src/ggml-sycl/convert.cpp

@@ -3,19 +3,19 @@
 #include "presets.hpp"
 
 template <int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t>
+static void dequantize_block(const void * __restrict__ vx, dst_t * __restrict__ y, const int64_t k,
                              const sycl::nd_item<3> &item_ct1) {
+    const int64_t i = 2 * (item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                            item_ct1.get_local_id(2));
 
     if (i >= k) {
         return;
     }
 
+    const int64_t ib = i/qk; // block index
+    const int64_t iqs = (i%qk)/qr; // quant index
+    const int64_t iybs = i - i%qk; // y block start index
+    const int64_t y_offset = qr == 1 ? 1 : qk/2;
 
     // dequantize
     dfloat2 v;

@@ -27,9 +27,9 @@ static void dequantize_block(const void * __restrict__ vx, dst_t * __restrict__
 
 template <int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t>
 static void dequantize_block_sycl(const void *__restrict__ vx,
+                                  dst_t *__restrict__ y, const int64_t k,
                                   dpct::queue_ptr stream) {
+    const int64_t num_blocks = (k + 2*SYCL_DEQUANTIZE_BLOCK_SIZE - 1) / (2*SYCL_DEQUANTIZE_BLOCK_SIZE);
     {
         dpct::has_capability_or_fail(stream->get_device(),
                                      {sycl::aspect::fp16});

@@ -45,9 +45,9 @@ static void dequantize_block_sycl(const void *__restrict__ vx,
 }
 
 template <typename dst_t>
+static void dequantize_row_q2_K_sycl(const void *vx, dst_t *y, const int64_t k,
                                      dpct::queue_ptr stream) {
+    const int64_t nb = k / QK_K;
 #if QK_K == 256
     {
         dpct::has_capability_or_fail(stream->get_device(),

@@ -77,9 +77,9 @@ static void dequantize_row_q2_K_sycl(const void *vx, dst_t *y, const int k,
 }
 
 template <typename dst_t>
+static void dequantize_row_q3_K_sycl(const void *vx, dst_t *y, const int64_t k,
                                      dpct::queue_ptr stream) {
+    const int64_t nb = k / QK_K;
 #if QK_K == 256
     {
         dpct::has_capability_or_fail(stream->get_device(),

@@ -108,10 +108,10 @@ static void dequantize_row_q3_K_sycl(const void *vx, dst_t *y, const int k,
 }
 
 template <typename dst_t>
+static void dequantize_row_q4_0_sycl(const void *vx, dst_t *y, const int64_t k,
                                      dpct::queue_ptr stream) {
+    const int64_t nb32 = k / 32;
+    const int64_t nb = (k + 255) / 256;
     {
         dpct::has_capability_or_fail(stream->get_device(),
                                      {sycl::aspect::fp16});

@@ -126,10 +126,10 @@ static void dequantize_row_q4_0_sycl(const void *vx, dst_t *y, const int k,
 }
 
 template <typename dst_t>
+static void dequantize_row_q4_1_sycl(const void *vx, dst_t *y, const int64_t k,
                                      dpct::queue_ptr stream) {
+    const int64_t nb32 = k / 32;
+    const int64_t nb = (k + 255) / 256;
     {
         dpct::has_capability_or_fail(stream->get_device(),
                                      {sycl::aspect::fp16});

@@ -145,9 +145,9 @@ static void dequantize_row_q4_1_sycl(const void *vx, dst_t *y, const int k,
 
 
 template <typename dst_t>
+static void dequantize_row_q4_K_sycl(const void *vx, dst_t *y, const int64_t k,
                                      dpct::queue_ptr stream) {
+    const int64_t nb = k / QK_K;
     {
         dpct::has_capability_or_fail(stream->get_device(),
                                      {sycl::aspect::fp16});

@@ -165,9 +165,9 @@ static void dequantize_row_q4_K_sycl(const void *vx, dst_t *y, const int k,
 }
 
 template <typename dst_t>
+static void dequantize_row_q5_K_sycl(const void *vx, dst_t *y, const int64_t k,
                                      dpct::queue_ptr stream) {
+    const int64_t nb = k / QK_K;
 #if QK_K == 256
     {
         dpct::has_capability_or_fail(stream->get_device(),

@@ -197,9 +197,9 @@ static void dequantize_row_q5_K_sycl(const void *vx, dst_t *y, const int k,
 }
 
 template <typename dst_t>
+static void dequantize_row_q6_K_sycl(const void *vx, dst_t *y, const int64_t k,
                                      dpct::queue_ptr stream) {
+    const int64_t nb = k / QK_K;
 #if QK_K == 256
     {
         dpct::has_capability_or_fail(stream->get_device(),

@@ -229,9 +229,9 @@ static void dequantize_row_q6_K_sycl(const void *vx, dst_t *y, const int k,
 }
 
 template <typename dst_t>
+static void dequantize_row_iq1_s_sycl(const void *vx, dst_t *y, const int64_t k,
                                       dpct::queue_ptr stream) {
+    const int64_t nb = k / QK_K;
     {
         dpct::has_capability_or_fail(stream->get_device(),
                                      {sycl::aspect::fp16});

@@ -250,9 +250,9 @@ static void dequantize_row_iq1_s_sycl(const void *vx, dst_t *y, const int k,
 }
 
 template <typename dst_t>
+static void dequantize_row_iq1_m_sycl(const void *vx, dst_t *y, const int64_t k,
                                       dpct::queue_ptr stream) {
+    const int64_t nb = k / QK_K;
     {
         dpct::has_capability_or_fail(stream->get_device(),
                                      {sycl::aspect::fp16});

@@ -271,9 +271,9 @@ static void dequantize_row_iq1_m_sycl(const void *vx, dst_t *y, const int k,
 }
 
 template <typename dst_t>
+static void dequantize_row_iq2_xxs_sycl(const void *vx, dst_t *y, const int64_t k,
                                         dpct::queue_ptr stream) {
+    const int64_t nb = k / QK_K;
     {
         dpct::has_capability_or_fail(stream->get_device(),
                                      {sycl::aspect::fp16});

@@ -292,9 +292,9 @@ static void dequantize_row_iq2_xxs_sycl(const void *vx, dst_t *y, const int k,
 }
 
 template <typename dst_t>
+static void dequantize_row_iq2_xs_sycl(const void *vx, dst_t *y, const int64_t k,
                                        dpct::queue_ptr stream) {
+    const int64_t nb = k / QK_K;
     {
         dpct::has_capability_or_fail(stream->get_device(),
                                      {sycl::aspect::fp16});

@@ -313,9 +313,9 @@ static void dequantize_row_iq2_xs_sycl(const void *vx, dst_t *y, const int k,
 }
 
 template <typename dst_t>
+static void dequantize_row_iq2_s_sycl(const void *vx, dst_t *y, const int64_t k,
                                       dpct::queue_ptr stream) {
+    const int64_t nb = k / QK_K;
     {
         dpct::has_capability_or_fail(stream->get_device(),
                                      {sycl::aspect::fp16});

@@ -333,9 +333,9 @@ static void dequantize_row_iq2_s_sycl(const void *vx, dst_t *y, const int k,
 
 
 template <typename dst_t>
+static void dequantize_row_iq3_xxs_sycl(const void *vx, dst_t *y, const int64_t k,
                                         dpct::queue_ptr stream) {
+    const int64_t nb = k / QK_K;
     {
         dpct::has_capability_or_fail(stream->get_device(),
                                      {sycl::aspect::fp16});

@@ -354,9 +354,9 @@ static void dequantize_row_iq3_xxs_sycl(const void *vx, dst_t *y, const int k,
 }
 
 template <typename dst_t>
+static void dequantize_row_iq3_s_sycl(const void *vx, dst_t *y, const int64_t k,
                                       dpct::queue_ptr stream) {
+    const int64_t nb = k / QK_K;
     {
         dpct::has_capability_or_fail(stream->get_device(),
                                      {sycl::aspect::fp16});

@@ -374,9 +374,9 @@ static void dequantize_row_iq3_s_sycl(const void *vx, dst_t *y, const int k,
 }
 
 template <typename dst_t>
+static void dequantize_row_iq4_xs_sycl(const void *vx, dst_t *y, const int64_t k,
                                        dpct::queue_ptr stream) {
+    const int64_t nb = (k + QK_K - 1) / QK_K;
 #if QK_K == 64
     dequantize_row_iq4_nl_sycl(vx, y, k, stream);
 #else

@@ -398,9 +398,9 @@ static void dequantize_row_iq4_xs_sycl(const void *vx, dst_t *y, const int k,
 }
 
 template <typename dst_t>
+static void dequantize_row_iq4_nl_sycl(const void *vx, dst_t *y, const int64_t k,
                                        dpct::queue_ptr stream) {
+    const int64_t nb = (k + QK_K - 1) / QK_K;
     {
         dpct::has_capability_or_fail(stream->get_device(),
                                      {sycl::aspect::fp16});

@@ -418,34 +418,34 @@ static void dequantize_row_iq4_nl_sycl(const void *vx, dst_t *y, const int k,
 }
 
 template <typename src_t, typename dst_t>
+static void convert_unary(const void * __restrict__ vx, dst_t * __restrict__ y, const int64_t k,
                           const sycl::nd_item<3> &item_ct1) {
+    const int64_t work_group_size = item_ct1.get_local_range(2);
+    const int64_t global_id = item_ct1.get_local_id(2) + work_group_size * item_ct1.get_group(2);
 
+    // make each work-item deal with more elements since sycl global range can not exceed max int
     const src_t * x = (src_t *) vx;
+    for (int64_t i = global_id; i < k; i += work_group_size * item_ct1.get_group_range(2)) {
+        y[i] = x[i];
+    }
 }
 
 template <typename src_t, typename dst_t>
 static void convert_unary_sycl(const void *__restrict__ vx,
+                               dst_t *__restrict__ y, const int64_t k,
                                dpct::queue_ptr stream) {
+    const int64_t num_blocks = (k + SYCL_DEQUANTIZE_BLOCK_SIZE - 1) / SYCL_DEQUANTIZE_BLOCK_SIZE;
+
+    // decrease global range when it exceeds the max int
+    int64_t local_size = downsample_sycl_global_range(num_blocks, SYCL_DEQUANTIZE_BLOCK_SIZE);
+    sycl::range<3> block_nums(1, 1, num_blocks);
+    sycl::range<3> local_range(1, 1, local_size);
     {
         dpct::has_capability_or_fail(stream->get_device(),
                                      {sycl::aspect::fp16});
 
         stream->parallel_for(
+            sycl::nd_range<3>(block_nums * local_range, local_range),
             [=](sycl::nd_item<3> item_ct1) {
                 convert_unary<src_t>(vx, y, k, item_ct1);
             });
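The two halves of this change work together: `convert_unary_sycl` keeps `num_blocks` work-groups but may launch them with a smaller `local_size`, and `convert_unary` compensates with its grid-stride loop, so every index below `k` is still visited exactly once. A compact plain C++ model of that traversal (illustrative only, not code from the patch):

#include <cstdint>

// Sequential model of the grid-stride loop in convert_unary: every element
// i in [0, k) is handled by exactly one (group, lane) pair.
void grid_stride_copy(const float *x, float *y, int64_t k,
                      int64_t group_count, int64_t work_group_size) {
    for (int64_t group = 0; group < group_count; ++group) {
        for (int64_t lane = 0; lane < work_group_size; ++lane) {
            const int64_t global_id = lane + work_group_size * group;
            for (int64_t i = global_id; i < k; i += work_group_size * group_count) {
                y[i] = x[i];   // same body as the SYCL kernel
            }
        }
    }
}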
ggml/src/ggml-sycl/convert.hpp

@@ -17,7 +17,7 @@
 
 template <typename T>
 using to_t_sycl_t = void (*)(const void *__restrict__ x, T *__restrict__ y,
+                             int64_t k, dpct::queue_ptr stream);
 typedef to_t_sycl_t<float> to_fp32_sycl_t;
 typedef to_t_sycl_t<sycl::half> to_fp16_sycl_t;
 
ggml/src/ggml-sycl/dequantize.hpp

@@ -15,9 +15,9 @@
 
 #include "common.hpp"
 
+typedef void (*dequantize_kernel_t)(const void * vx, const int64_t ib, const int iqs, dfloat2 & v);
 
+static __dpct_inline__ void dequantize_q4_0(const void *vx, const int64_t ib,
                                             const int iqs, dfloat2 &v) {
     const block_q4_0 * x = (const block_q4_0 *) vx;
 
@@ -40,7 +40,7 @@ static __dpct_inline__ void dequantize_q4_0(const void *vx, const int ib,
 #endif // GGML_SYCL_F16
 }
 
+static __dpct_inline__ void dequantize_q4_1(const void *vx, const int64_t ib,
                                             const int iqs, dfloat2 &v) {
     const block_q4_1 * x = (const block_q4_1 *) vx;
 
@@ -64,7 +64,7 @@ static __dpct_inline__ void dequantize_q4_1(const void *vx, const int ib,
 #endif // GGML_SYCL_F16
 }
 
+static __dpct_inline__ void dequantize_q5_0(const void *vx, const int64_t ib,
                                             const int iqs, dfloat2 &v) {
     const block_q5_0 * x = (const block_q5_0 *) vx;
 
@@ -91,7 +91,7 @@ static __dpct_inline__ void dequantize_q5_0(const void *vx, const int ib,
 #endif // GGML_SYCL_F16
 }
 
+static __dpct_inline__ void dequantize_q5_1(const void *vx, const int64_t ib,
                                             const int iqs, dfloat2 &v) {
     const block_q5_1 * x = (const block_q5_1 *) vx;
 
@@ -118,7 +118,7 @@ static __dpct_inline__ void dequantize_q5_1(const void *vx, const int ib,
 #endif // GGML_SYCL_F16
 }
 
+static __dpct_inline__ void dequantize_q8_0(const void *vx, const int64_t ib,
                                             const int iqs, dfloat2 &v) {
     const block_q8_0 * x = (const block_q8_0 *) vx;
 
@@ -138,16 +138,16 @@ static __dpct_inline__ void dequantize_q8_0(const void *vx, const int ib,
 }
 
 template<typename dst_t>
+static void dequantize_block_q4_0(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t nb32,
                                   const sycl::nd_item<3> &item_ct1) {
 
+    const int64_t i = item_ct1.get_group(2);
 
     // assume 32 threads
+    const int64_t tid = item_ct1.get_local_id(2);
+    const int64_t il  = tid/8;
+    const int64_t ir  = tid%8;
+    const int64_t ib = 8*i + ir;
     if (ib >= nb32) {
         return;
     }
 
@@ -168,16 +168,16 @@ static void dequantize_block_q4_0(const void * __restrict__ vx, dst_t * __restri
 }
 
 template<typename dst_t>
+static void dequantize_block_q4_1(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t nb32,
                                   const sycl::nd_item<3> &item_ct1) {
 
+    const int64_t i = item_ct1.get_group(2);
 
     // assume 32 threads
+    const int64_t tid = item_ct1.get_local_id(2);
+    const int64_t il  = tid/8;
+    const int64_t ir  = tid%8;
+    const int64_t ib = 8*i + ir;
     if (ib >= nb32) {
         return;
     }
 
@@ -203,14 +203,14 @@ template<typename dst_t>
 static void dequantize_block_q2_K(const void * __restrict__ vx, dst_t * __restrict__ yy,
                                   const sycl::nd_item<3> &item_ct1) {
 
+    const int64_t i   = item_ct1.get_group(2);
     const block_q2_K * x = (const block_q2_K *) vx;
 
+    const int64_t tid = item_ct1.get_local_id(2);
 #if QK_K == 256
+    const int64_t n   = tid/32;
+    const int64_t l   = tid - 32*n;
+    const int64_t is  = 8*n + l/16;
 
     const uint8_t q = x[i].qs[32*n + l];
     dst_t * y = yy + i*QK_K + 128*n;
 
@@ -222,8 +222,8 @@ static void dequantize_block_q2_K(const void * __restrict__ vx, dst_t * __restri
     y[l+64] = dall * (x[i].scales[is+4] & 0xF) * ((q >> 4) & 3) - dmin * (x[i].scales[is+4] >> 4);
     y[l+96] = dall * (x[i].scales[is+6] & 0xF) * ((q >> 6) & 3) - dmin * (x[i].scales[is+6] >> 4);
 #else
+    const int64_t is = tid/16;  // 0 or 1
+    const int64_t il = tid%16;  // 0...15
     const uint8_t q = x[i].qs[il] >> (2*is);
     dst_t * y = yy + i*QK_K + 16*is + il;
 
@@ -239,19 +239,19 @@ template<typename dst_t>
 static void dequantize_block_q3_K(const void * __restrict__ vx, dst_t * __restrict__ yy,
                                   const sycl::nd_item<3> &item_ct1) {
 
+    const int64_t i = item_ct1.get_group(2);
     const block_q3_K * x = (const block_q3_K *) vx;
 
 #if QK_K == 256
+    const int64_t r = item_ct1.get_local_id(2) / 4;
+    const int64_t tid = r/2;
+    const int64_t is0 = r%2;
+    const int64_t l0 = 16 * is0 + 4 * (item_ct1.get_local_id(2) % 4);
+    const int64_t n = tid / 4;
+    const int64_t j = tid - 4*n;
 
     uint8_t m = 1 << (4*n + j);
+    int64_t is = 8*n + 2*j + is0;
     int shift = 2*j;
 
     int8_t us = is < 4 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+8] >> 0) & 3) << 4) :
 
@@ -267,11 +267,11 @@ static void dequantize_block_q3_K(const void * __restrict__ vx, dst_t * __restri
 
     for (int l = l0; l < l0+4; ++l) y[l] = dl * ((int8_t)((q[l] >> shift) & 3) - ((hm[l] & m) ? 0 : 4));
 #else
+    const int64_t tid = item_ct1.get_local_id(2);
+    const int64_t is  = tid/16;  // 0 or 1
+    const int64_t il  = tid%16;  // 0...15
+    const int64_t im  = il/8;    // 0...1
+    const int64_t in  = il%8;    // 0...7
 
     dst_t * y = yy + i*QK_K + 16*is + il;
 
@@ -307,15 +307,15 @@ static void dequantize_block_q4_K(const void * __restrict__ vx, dst_t * __restri
                                   uint8_t* scales_local, const sycl::nd_item<3> &item_ct1) {
     const block_q4_K * x = (const block_q4_K *) vx;
 
+    const int64_t i = item_ct1.get_group(2);
 
 #if QK_K == 256
     // assume 32 threads
+    const int64_t tid = item_ct1.get_local_id(2);
+    const int64_t il  = tid/8;
+    const int64_t ir  = tid%8;
+    const int64_t is  = 2*il;
+    const int64_t n   = 4;
 
     dst_t * y = yy + i*QK_K + 64*il + n*ir;
 
@@ -341,7 +341,7 @@ static void dequantize_block_q4_K(const void * __restrict__ vx, dst_t * __restri
         y[l +32] = d2 * (q_vec[l] >> 4) - m2;
     }
 #else
+    const int64_t tid = item_ct1.get_local_id(2);
     const uint8_t * q = x[i].qs;
     dst_t * y = yy + i*QK_K;
     const float d = (float)x[i].dm[0];
 
@@ -356,14 +356,14 @@ static void dequantize_block_q5_K(const void * __restrict__ vx, dst_t * __restri
                                   const sycl::nd_item<3> &item_ct1) {
     const block_q5_K * x = (const block_q5_K *) vx;
 
+    const int64_t i = item_ct1.get_group(2);
 
 #if QK_K == 256
     // assume 64 threads - this is very slightly better than the one below
+    const int64_t tid = item_ct1.get_local_id(2);
+    const int64_t il  = tid/16;   // il is in 0...3
+    const int64_t ir  = tid%16;   // ir is in 0...15
+    const int64_t is  = 2*il;     // is is in 0...6
 
     dst_t * y = yy + i*QK_K + 64*il + 2*ir;
 
@@ -386,11 +386,11 @@ static void dequantize_block_q5_K(const void * __restrict__ vx, dst_t * __restri
     y[32] = d2 * ((ql[ 0] >> 4) + (qh[ 0] & hm ? 16 : 0)) - m2;
     y[33] = d2 * ((ql[ 1] >> 4) + (qh[ 1] & hm ? 16 : 0)) - m2;
 #else
+    const int64_t tid = item_ct1.get_local_id(2);
     const uint8_t q = x[i].qs[tid];
+    const int64_t im = tid/8;  // 0...3
+    const int64_t in = tid%8;  // 0...7
+    const int64_t is = tid/16; // 0 or 1
     const uint8_t h = x[i].qh[in] >> im;
     const float d = x[i].d;
     dst_t * y = yy + i*QK_K + tid;
 
@@ -404,14 +404,14 @@ static void dequantize_block_q6_K(const void * __restrict__ vx, dst_t * __restri
                                   const sycl::nd_item<3> &item_ct1) {
     const block_q6_K * x = (const block_q6_K *) vx;
 
+    const int64_t i = item_ct1.get_group(2);
 #if QK_K == 256
 
     // assume 64 threads - this is very slightly better than the one below
+    const int64_t tid = item_ct1.get_local_id(2);
+    const int64_t ip  = tid/32;      // ip is 0 or 1
+    const int64_t il  = tid - 32*ip; // 0...32
+    const int64_t is  = 8*ip + il/16;
 
     dst_t * y = yy + i*QK_K + 128*ip + il;
 
@@ -428,9 +428,9 @@ static void dequantize_block_q6_K(const void * __restrict__ vx, dst_t * __restri
 #else
 
     // assume 32 threads
+    const int64_t tid = item_ct1.get_local_id(2);
+    const int64_t ip  = tid/16;         // 0 or 1
+    const int64_t il  = tid - 16*ip;    // 0...15
 
     dst_t * y = yy + i*QK_K + 16*ip + il;
 
@@ -452,13 +452,13 @@ static void dequantize_block_iq2_xxs(const void * __restrict__ vx, dst_t * __res
                                      const uint8_t *ksigns_iq2xs_ptr,
                                      const uint8_t *kmask_iq2xs_ptr) {
 
+    const int64_t i = item_ct1.get_group(2);
     const block_iq2_xxs * x = (const block_iq2_xxs *) vx;
 
+    const int64_t tid = item_ct1.get_local_id(2);
 #if QK_K == 256
+    const int64_t il = tid/8; // 0...3
+    const int64_t ib = tid%8; // 0...7
     dst_t * y = yy + i*QK_K + 32*ib + 8*il;
     const uint16_t * q2 = x[i].qs + 4*ib;
     const uint8_t  * aux8 = (const uint8_t *)q2;
 
@@ -480,13 +480,13 @@ static void dequantize_block_iq2_xs(const void * __restrict__ vx, dst_t * __rest
                                     const uint8_t *ksigns_iq2xs,
                                     const uint8_t *kmask_iq2xs) {
 
+    const int64_t i = item_ct1.get_group(2);
     const block_iq2_xs * x = (const block_iq2_xs *) vx;
 
+    const int64_t tid = item_ct1.get_local_id(2);
 #if QK_K == 256
+    const int64_t il = tid/8; // 0...3
+    const int64_t ib = tid%8; // 0...7
     dst_t * y = yy + i*QK_K + 32*ib + 8*il;
     const uint16_t * q2 = x[i].qs + 4*ib;
     const uint8_t  * grid = (const uint8_t *)(iq2xs_grid + (q2[il] & 511));
 
@@ -504,13 +504,13 @@ __dpct_inline__ static void
 dequantize_block_iq2_s(const void *__restrict__ vx, dst_t *__restrict__ yy,
                        const sycl::nd_item<3> &item_ct1) {
 
+    const int64_t i = item_ct1.get_group(2);
     const block_iq2_s * x = (const block_iq2_s *) vx;
 
+    const int64_t tid = item_ct1.get_local_id(2);
 #if QK_K == 256
+    const int64_t il = tid/8; // 0...3
+    const int64_t ib = tid%8; // 0...7
     dst_t * y = yy + i*QK_K + 32*ib + 8*il;
     const uint8_t * grid = (const uint8_t *)(iq2s_grid + (x[i].qs[4*ib+il] | ((x[i].qh[ib] << (8-2*il)) & 0x300)));
     const float d = (float)x[i].d * (0.5f + ((x[i].scales[ib] >> 4*(il/2)) & 0xf)) * 0.25f;
 
@@ -532,13 +532,13 @@ static void dequantize_block_iq3_xxs(const void * __restrict__ vx, dst_t * __res
                                      const uint8_t *ksigns_iq2xs,
                                      const uint8_t *kmask_iq2xs) {
 
+    const int64_t i = item_ct1.get_group(2);
     const block_iq3_xxs * x = (const block_iq3_xxs *) vx;
 
+    const int64_t tid = item_ct1.get_local_id(2);
 #if QK_K == 256
+    const int64_t il = tid/8; // 0...3
+    const int64_t ib = tid%8; // 0...7
     dst_t * y = yy + i*QK_K + 32*ib + 8*il;
     const uint8_t  * q3 = x[i].qs + 8*ib;
     const uint16_t * gas = (const uint16_t *)(x[i].qs + QK_K/4) + 2*ib;
 
@@ -563,13 +563,13 @@ dequantize_block_iq3_s(const void *__restrict__ vx, dst_t *__restrict__ yy,
                        const sycl::nd_item<3> &item_ct1,
                        const uint8_t *kmask_iq2xs, const uint32_t *iq3s_grid) {
 
+    const int64_t i = item_ct1.get_group(2);
     const block_iq3_s * x = (const block_iq3_s *) vx;
 
+    const int64_t tid = item_ct1.get_local_id(2);
 #if QK_K == 256
+    const int64_t il = tid/8; // 0...3
+    const int64_t ib = tid%8; // 0...7
     dst_t * y = yy + i*QK_K + 32*ib + 8*il;
     const uint8_t * qs = x[i].qs + 8*ib;
     const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*il+0] | ((x[i].qh[ib] << (8-2*il)) & 256)));
 
@@ -593,13 +593,13 @@ dequantize_block_iq1_s(const void *__restrict__ vx, dst_t *__restrict__ yy,
                        const sycl::nd_item<3> &item_ct1,
                        const uint32_t *iq1s_grid_gpu) {
 
+    const int64_t i = item_ct1.get_group(2);
     const block_iq1_s * x = (const block_iq1_s *) vx;
 
+    const int64_t tid = item_ct1.get_local_id(2);
 #if QK_K == 256
+    const int64_t il = tid/8; // 0...3
+    const int64_t ib = tid%8; // 0...7
     dst_t * y = yy + i*QK_K + 32*ib + 8*il;
     const float delta = x[i].qh[ib] & 0x8000 ? -1 - IQ1S_DELTA : -1 + IQ1S_DELTA;
     const float d = (float)x[i].d * (2*((x[i].qh[ib] >> 12) & 7) + 1);
 
@@ -623,13 +623,13 @@ dequantize_block_iq1_m(const void *__restrict__ vx, dst_t *__restrict__ yy,
                        const sycl::nd_item<3> &item_ct1,
                        const uint32_t *iq1s_grid_gpu) {
 
+    const int64_t i = item_ct1.get_group(2);
     const block_iq1_m * x = (const block_iq1_m *) vx;
 
+    const int64_t tid = item_ct1.get_local_id(2);
 #if QK_K == 256
+    const int64_t il = tid/8; // 0...3
+    const int64_t ib = tid%8; // 0...7
     dst_t * y = yy + i*QK_K + 32*ib + 8*il;
     const uint16_t * sc = (const uint16_t *)x[i].scales;
     iq1m_scale_t scale;
 
@@ -656,12 +656,12 @@ __dpct_inline__ static void
 dequantize_block_iq4_nl(const void *__restrict__ vx, dst_t *__restrict__ yy,
                         const sycl::nd_item<3> &item_ct1) {
 
+    const int64_t i = item_ct1.get_group(2);
     const block_iq4_nl * x = (const block_iq4_nl *) vx + i*(QK_K/QK4_NL);
 
+    const int64_t tid = item_ct1.get_local_id(2);
+    const int64_t il = tid/8; // 0...3
+    const int64_t ib = tid%8; // 0...7
     dst_t * y = yy + i*QK_K + 32*ib + 4*il;
     const uint8_t  * q4 = x[ib].qs + 4*il;
     const float d = (float)x[ib].d;
 
@@ -678,12 +678,12 @@ template <typename dst_t>
 __dpct_inline__ static void
 dequantize_block_iq4_xs(const void *__restrict__ vx, dst_t *__restrict__ yy,
                         const sycl::nd_item<3> &item_ct1) {
+    const int64_t i = item_ct1.get_group(2);
     const block_iq4_xs * x = (const block_iq4_xs *)vx;
 
+    const int64_t tid = item_ct1.get_local_id(2);
+    const int64_t il = tid/8; // 0...3
+    const int64_t ib = tid%8; // 0...7
     dst_t * y = yy + i*QK_K + 32*ib + 4*il;
     const uint8_t  * q4 = x[i].qs + 16*ib + 4*il;
     const float d = (float)x[i].d * ((((x[i].scales_l[ib/2] >> 4*(ib%2)) & 0xf) | (((x[i].scales_h >> 2*ib) & 3) << 4)) - 32);
ggml/src/ggml-sycl/dmmv.cpp

@@ -4,7 +4,7 @@
 #include "presets.hpp"
 
 
+static void convert_f16(const void * vx, const int64_t ib, const int iqs, dfloat2 & v){
     const sycl::half *x = (const sycl::half *)vx;
 
     // automatic half -> float type cast if dfloat == float

@@ -12,7 +12,7 @@ static void convert_f16(const void * vx, const int ib, const int iqs, dfloat2 &
     v.y() = x[ib + iqs + 1];
 }
 
+static void convert_f32(const void * vx, const int64_t ib, const int iqs, dfloat2 & v){
     const float * x = (const float *) vx;
 
     // automatic half -> float type cast if dfloat == float
ggml/src/ggml-sycl/im2col.cpp (new file)

@@ -0,0 +1,125 @@
+//
+// MIT license
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#include "im2col.hpp"
+
+template <typename T>
+static void im2col_kernel(
+        const float *x, T *dst, int64_t batch_offset, int64_t offset_delta,
+        int64_t IC, int64_t IW, int64_t IH, int64_t OH, int64_t OW, int64_t KW, int64_t KH,
+        int64_t pelements, int64_t CHW, int s0, int s1, int p0, int p1, int d0, int d1,
+        const sycl::nd_item<3> &item_ct1) {
+    const int64_t work_group_size = item_ct1.get_local_range(2);
+    const int64_t global_id = item_ct1.get_local_id(2) + work_group_size * item_ct1.get_group(2);
+
+    // make each work-item deal with more elements since sycl global range can not exceed max int
+    for (int64_t i = global_id; i < pelements; i += work_group_size * item_ct1.get_group_range(2)) {
+
+        const int64_t ksize = OW * (KH > 1 ? KW : 1);
+        const int64_t kx = i / ksize;
+        const int64_t kd = kx * ksize;
+        const int64_t ky = (i - kd) / OW;
+        const int64_t ix = i % OW;
+
+        const int64_t oh = item_ct1.get_group(1);
+        const int64_t batch = item_ct1.get_group(0) / IC;
+        const int64_t ic = item_ct1.get_group(0) % IC;
+
+        const int64_t iiw = ix * s0 + kx * d0 - p0;
+        const int64_t iih = oh * s1 + ky * d1 - p1;
+
+        const int64_t offset_dst =
+            ((batch * OH + oh) * OW + ix) * CHW +
+            (ic * (KW * KH) + ky * KW + kx);
+
+        if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) {
+            dst[offset_dst] =
+                sycl::vec<float, 1>(0.0f)
+                    .convert<sycl::half, sycl::rounding_mode::automatic>()[0];
+        } else {
+            const int64_t offset_src = ic * offset_delta + batch * batch_offset;
+            dst[offset_dst] =
+                sycl::vec<float, 1>(x[offset_src + iih * IW + iiw])
+                    .convert<sycl::half, sycl::rounding_mode::automatic>()[0];
+        }
+    }
+}
+
+template <typename T>
+static void im2col_sycl(
+        const float *x, T *dst, int64_t IW, int64_t IH, int64_t OW, int64_t OH, int64_t KW,
+        int64_t KH, int64_t IC, int64_t batch, int64_t batch_offset, int64_t offset_delta,
+        int s0, int s1, int p0, int p1, int d0, int d1,
+        queue_ptr stream) {
+    const int64_t parallel_elements = OW * KW * KH;
+    const int64_t num_blocks = (parallel_elements + SYCL_IM2COL_BLOCK_SIZE - 1) / SYCL_IM2COL_BLOCK_SIZE;
+
+    // decrease global range when it exceeds the max int
+    int64_t local_size = downsample_sycl_global_range(batch * IC * OH * num_blocks, SYCL_IM2COL_BLOCK_SIZE);
+    sycl::range<3> block_nums(batch * IC, OH, num_blocks);
+    sycl::range<3> local_range(1, 1, local_size);
+
+    {
+        dpct::has_capability_or_fail(stream->get_device(),
+                                     {sycl::aspect::fp16});
+
+        stream->parallel_for(
+            sycl::nd_range<3>(block_nums * local_range, local_range),
+            [=](sycl::nd_item<3> item_ct1) {
+                im2col_kernel(x, dst, batch_offset, offset_delta, IC, IW, IH, OH, OW, KW, KH,
+                              parallel_elements, (IC * KH * KW), s0, s1, p0,
+                              p1, d0, d1, item_ct1);
+            });
+    }
+}
+
+void ggml_sycl_op_im2col(
+        ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1,
+        ggml_tensor *dst, const float *src0_dd, const float *src1_dd, float *dst_dd,
+        const queue_ptr &main_stream) {
+
+    GGML_ASSERT(src0->type == GGML_TYPE_F16);
+    GGML_ASSERT(src1->type == GGML_TYPE_F32);
+    GGML_ASSERT(dst->type == GGML_TYPE_F16 || dst->type == GGML_TYPE_F32);
+
+    const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
+    const int32_t s1 = ((const int32_t*)(dst->op_params))[1];
+    const int32_t p0 = ((const int32_t*)(dst->op_params))[2];
+    const int32_t p1 = ((const int32_t*)(dst->op_params))[3];
+    const int32_t d0 = ((const int32_t*)(dst->op_params))[4];
+    const int32_t d1 = ((const int32_t*)(dst->op_params))[5];
+
+    const bool is_2D = ((const int32_t*)(dst->op_params))[6] == 1;
+
+    const int64_t IC = src1->ne[is_2D ? 2 : 1];
+    const int64_t IH = is_2D ? src1->ne[1] : 1;
+    const int64_t IW = src1->ne[0];
+
+    const int64_t KH = is_2D ? src0->ne[1] : 1;
+    const int64_t KW = src0->ne[0];
+
+    const int64_t OH = is_2D ? dst->ne[2] : 1;
+    const int64_t OW = dst->ne[1];
+
+    const size_t delta_offset = src1->nb[is_2D ? 2 : 1] / 4; // nb is byte offset, src is type float32
+    const int64_t batch = src1->ne[3];
+    const size_t batch_offset = src1->nb[3] / 4; // nb is byte offset, src is type float32
+
+    if (dst->type == GGML_TYPE_F16) {
+        im2col_sycl(src1_dd, (sycl::half *)dst_dd, IW, IH, OW, OH, KW, KH, IC, batch, batch_offset, delta_offset, s0, s1, p0, p1, d0, d1, main_stream);
+    } else {
+        im2col_sycl(src1_dd, (float *)dst_dd, IW, IH, OW, OH, KW, KH, IC, batch, batch_offset, delta_offset, s0, s1, p0, p1, d0, d1, main_stream);
+    }
+
+    (void) src0;
+    (void) src0_dd;
+}
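Compared with the removed kernel, the group index along dimension 0 now spans batch * IC rather than IC, so the kernel splits it back into a batch index and a channel index. A small worked example (hypothetical sizes, only to illustrate the split used in im2col_kernel):

// With IC = 3, group id 7 along dimension 0 maps to batch 2, channel 1.
const int64_t IC = 3;
const int64_t group0 = 7;           // plays the role of item_ct1.get_group(0)
const int64_t batch = group0 / IC;  // = 2
const int64_t ic    = group0 % IC;  // = 1
// The source offset then becomes ic * offset_delta + batch * batch_offset.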
ggml/src/ggml-sycl/im2col.hpp (new file)

@@ -0,0 +1,23 @@
+//
+// MIT license
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#ifndef GGML_SYCL_IM2COL_HPP
+#define GGML_SYCL_IM2COL_HPP
+
+#include "common.hpp"
+
+void ggml_sycl_op_im2col(
+    ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1,
+    ggml_tensor *dst, const float *src0_dd, const float *src1_dd, float *dst_dd,
+    const queue_ptr &main_stream);
+
+#endif // GGML_SYCL_IM2COL_HPP