Commit 31edd77 · Parent(s): 3020711
Alberto Cabrera Pérez committed

sycl: refactor quantization to q8_1 (llama/14815)

* sycl: quantization to q8_1 refactor
* Refactored src1 copy logic in op_mul_mat

Files changed:
- ggml/src/ggml-sycl/backend.hpp   +1 -0
- ggml/src/ggml-sycl/ggml-sycl.cpp +50 -206
- ggml/src/ggml-sycl/quantize.hpp  +133 -0
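For context: ggml's Q8_1 format stores each run of QK8_1 floats as a block of int8 quants plus two fp16 metadata values, the scale d and the sum of the source values (the sum lets Q8_1 dot-product kernels fold in the other operand's offset cheaply). Below is a minimal host-side sketch of the math the kernels in this commit implement, assuming QK8_1 == 32; the struct is a simplified stand-in, not ggml's actual block_q8_1 definition.

#include <algorithm>
#include <cmath>
#include <cstdint>

constexpr int QK8_1 = 32;

// Simplified stand-in: ggml packs d and sum as fp16 in a half2.
struct q8_1_block_sketch {
    float  d;          // scale: amax / 127
    float  sum;        // sum of the 32 source floats
    int8_t qs[QK8_1];  // quantized values: round(x[i] / d)
};

void quantize_block_q8_1_ref(const float * x, q8_1_block_sketch & b) {
    float amax = 0.0f, sum = 0.0f;
    for (int i = 0; i < QK8_1; ++i) {
        sum  += x[i];
        amax  = std::max(amax, std::fabs(x[i]));
    }
    const float d = (amax == 0.0f) ? 0.0f : amax / 127.0f;
    for (int i = 0; i < QK8_1; ++i) {
        b.qs[i] = (amax == 0.0f) ? 0 : (int8_t) std::lround(x[i] / d);
    }
    b.d   = d;
    b.sum = sum;
}

Since |x[i]| <= amax, x[i] / d lies in [-127, 127], so the rounded value always fits an int8 and the per-value reconstruction error is at most d / 2.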
ggml/src/ggml-sycl/backend.hpp CHANGED

@@ -28,6 +28,7 @@
 #include "mmvq.hpp"
 #include "norm.hpp"
 #include "outprod.hpp"
+#include "quantize.hpp"
 #include "quants.hpp"
 #include "rope.hpp"
 #include "set_rows.hpp"
ggml/src/ggml-sycl/ggml-sycl.cpp CHANGED

@@ -44,6 +44,7 @@
 #include "ggml-sycl/set_rows.hpp"
 #include "ggml-sycl/sycl_hw.hpp"
 #include "ggml-sycl/getrows.hpp"
+#include "ggml-sycl/quantize.hpp"
 #include "ggml.h"
 
 static bool g_sycl_loaded = false;
@@ -1373,120 +1374,6 @@ typedef void (*ggml_sycl_op_mul_mat_t)(
 
 
 
-template<int QUANT_BLOCK_TILE>
-static void quantize_q8_1(const float * __restrict__ x, void * __restrict__ vy, const int kx, const int kx_padded,
-                          const sycl::nd_item<3> &item_ct1) {
-    const int ix = (item_ct1.get_local_range(2) * item_ct1.get_group(2) +
-                    item_ct1.get_local_id(2)) * QUANT_BLOCK_TILE;
-
-    if (ix >= kx_padded) {
-        return;
-    }
-
-    const int iy = item_ct1.get_local_range(1) * item_ct1.get_group(1) +
-                   item_ct1.get_local_id(1);
-
-    const int i_padded = iy*kx_padded + ix;
-
-    block_q8_1 * y = (block_q8_1 *) vy;
-
-    const int ib = i_padded / QK8_1;  // block index
-    const int iqs = i_padded % QK8_1; // quant index
-    typedef sycl::vec<float, QUANT_BLOCK_TILE> TC;
-    typedef sycl::vec<int8_t, QUANT_BLOCK_TILE> TQ;
-    TC zeros;
-    TQ qzeros;
-#pragma unroll
-    for (int i = 0; i < QUANT_BLOCK_TILE; i++)
-    {
-        zeros[i] = 0.f;
-        qzeros[i] = 0;
-    }
-    const TC xi = ix < kx ? *(const TC *)&x[iy * kx + ix] : zeros;
-    float sum = xi[0];
-    float amax = sycl::fabs(xi[0]);
-#pragma unroll
-    for (int i = 1; i < QUANT_BLOCK_TILE; i++)
-    {
-        sum += xi[i];
-        amax = sycl::fmax(sycl::fabs(xi[i]), amax);
-    }
-    sum = warp_reduce_sum(sum, item_ct1);
-    amax = warp_reduce_max(amax, item_ct1);
-
-    const float d = amax / 127;
-    TQ q = qzeros;
-    if (amax != 0.0f)
-    {
-#pragma unroll
-        for (int i = 0; i < QUANT_BLOCK_TILE; i++) {
-            q[i] = sycl::round(xi[i] / d);
-        }
-    }
-
-    *(TQ *)&y[ib].qs[iqs] = q;
-
-    if (iqs > 0) {
-        return;
-    }
-
-    reinterpret_cast<sycl::half &>(y[ib].ds.x()) = d;
-    reinterpret_cast<sycl::half &>(y[ib].ds.y()) = sum;
-}
-
-template <int ElementsPerWI>
-static __dpct_inline__ void quantize_and_reorder_q8_1(const float * __restrict__ x, void * reordered_q8_tensor,
-                                                      const int kx, const int kx_padded, const sycl::nd_item<1> & it) {
-    /*
-        Quantizes and reorders the resultant q8 tensor in a per row fashion
-        Each sub-group calculates one quant block. i.e. QK8_1 quant values and the d and sum values
-    */
-
-    auto subgroup_id = it.get_group(0);
-    auto wi_id = it.get_local_id(0);
-
-    const int num_blocks_per_row = kx / QK8_1;
-    auto row = subgroup_id / num_blocks_per_row;
-    auto col = subgroup_id % num_blocks_per_row;
-
-    auto row_offset = row * (kx_padded / QK8_1) * sizeof(block_q8_1);
-    auto col_offset = QK8_1 * col + wi_id * ElementsPerWI;
-
-    auto quant_ptr = (int8_t *) ((char *) reordered_q8_tensor + row_offset + col_offset);
-    auto ds_ptr = (sycl::half2 *) ((char *) reordered_q8_tensor + row_offset + kx + col * sizeof(sycl::half2));
-
-    sycl::vec<float, ElementsPerWI> wi_f32_vals;
-    sycl::vec<int8_t, ElementsPerWI> quantized_values;
-
-    auto float_ptr_offset = subgroup_id * QK8_1 + ElementsPerWI * wi_id;
-    wi_f32_vals = *reinterpret_cast<const sycl::vec<float, ElementsPerWI> *>(x + float_ptr_offset);
-
-    float sum = 0.0f;
-    float amax = 0.0f;
-
-#pragma unroll(ElementsPerWI)
-    for (int i = 0; i < ElementsPerWI; i++) {
-        sum += wi_f32_vals[i];
-        amax = sycl::fmax(amax, sycl::fabs(wi_f32_vals[i]));
-        quantized_values[i] = 0;
-    }
-    sum = sycl::reduce_over_group(it.get_group(), sum, sycl::plus<float>());
-    amax = sycl::reduce_over_group(it.get_group(), amax, sycl::maximum<float>());
-    float d = amax == 0 ? 1 : amax / 127;
-
-#pragma unroll(ElementsPerWI)
-    for (int i = 0; i < ElementsPerWI; i++) {
-        quantized_values[i] = sycl::round(wi_f32_vals[i] / d);
-    }
-
-    d = amax == 0 ? 0 : d;
-
-    *reinterpret_cast<sycl::vec<int8_t, ElementsPerWI> *>(quant_ptr) = quantized_values;
-    if (wi_id == 0) {
-        *ds_ptr = sycl::half2(sycl::half(d), sycl::half(sum));
-    }
-}
-
 static void mul_mat_p021_f16_f32(
     const void * __restrict__ vx, const float * __restrict__ y, float * __restrict__ dst,
     const int ncols_x, const int nrows_x, const int nchannels_x, const int nchannels_y,
@@ -1770,32 +1657,6 @@ static void pool2d_nchw_kernel(
     o_ptr[cur_oh * ow + cur_ow] = res;
 }
 
-static void quantize_row_q8_1_sycl(const float * x, void * vy, const int kx, const int ky, const int kx_padded,
-                                   bool reorder_q8_tensor, queue_ptr stream) {
-    if (reorder_q8_tensor) {
-        auto local_range = std::size_t(WARP_SIZE);
-        auto num_quant_blocks = ky * (kx / QK8_1);
-        auto global_range = num_quant_blocks * local_range;
-        stream->parallel_for(sycl::nd_range<1>({ global_range }, { local_range }),
-                             [=](sycl::nd_item<1> it) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
-                                 quantize_and_reorder_q8_1<QK8_1 / WARP_SIZE>(x, vy, kx, kx_padded, it);
-                             });
-    } else {
-        const int block_num_x = (kx_padded + SYCL_QUANTIZE_BLOCK_SIZE - 1) / SYCL_QUANTIZE_BLOCK_SIZE;
-        const sycl::range<3> num_blocks(1, ky, block_num_x);
-        int constexpr QUANT_BLOCK_TILE = QK8_1 / WARP_SIZE;
-        static_assert(QK8_1 % WARP_SIZE == 0);
-        const sycl::range<3> block_size(1, 1, SYCL_QUANTIZE_BLOCK_SIZE / QUANT_BLOCK_TILE);
-        {
-            dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 });
-
-            stream->parallel_for(sycl::nd_range<3>(num_blocks * block_size, block_size),
-                                 [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
-                                     quantize_q8_1<QUANT_BLOCK_TILE>(x, vy, kx, kx_padded, item_ct1);
-                                 });
-        }
-    }
-}
 
 static void ggml_mul_mat_p021_f16_f32_sycl(const void *vx, const float *y,
                                            float *dst, const int ncols_x,
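With the hunk above, the two launch geometries the old launcher switched between at runtime (a 3-D nd_range tiled by SYCL_QUANTIZE_BLOCK_SIZE, and a 1-D range with one WARP_SIZE-wide sub-group per quant block) collapse into the single 1-D geometry kept by the new quantize.hpp launcher shown later in this diff. A plain-arithmetic sketch of that surviving geometry, assuming WARP_SIZE == 32 and QK8_1 == 32 (both are build-time constants in the backend):

#include <cstdio>

int main() {
    const int  WARP_SIZE = 32, QK8_1 = 32;  // assumed values
    const int  kx = 4096, ky = 2;           // example row length and row count
    const int  elements_per_wi  = QK8_1 / WARP_SIZE;         // work per item (1 here)
    const long num_quant_blocks = (long) ky * (kx / QK8_1);  // one sub-group per block
    const long global_range     = num_quant_blocks * WARP_SIZE;
    std::printf("%ld blocks -> %ld work-items, %d element(s) each\n",
                num_quant_blocks, global_range, elements_per_wi);
    return 0;
}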
@@ -2372,10 +2233,10 @@ static void ggml_sycl_set_peer_access(const int n_tokens, int main_device) {
     peer_access_enabled = enable_peer_access;
 }
 
+template <template <int> typename quantize_f>
 static void ggml_sycl_op_mul_mat(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
                                  const ggml_tensor *src1, ggml_tensor *dst,
-                                 ggml_sycl_op_mul_mat_t op,
-                                 const bool convert_src1_to_q8_1) try {
+                                 ggml_sycl_op_mul_mat_t op) try {
 
     GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne);
 
@@ -2470,6 +2331,8 @@ static void ggml_sycl_op_mul_mat(ggml_backend_sycl_context & ctx, const ggml_ten
         }
     }
 
+    constexpr bool quantize_enabled = !std::is_same_v<quantize_f<QK8_1 / WARP_SIZE>,
+                                                      no_quantize_q8_1<QK8_1 / WARP_SIZE>>;
    for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
        if ((!split && i != ctx.device) || dev[i].row_low == dev[i].row_high) {
            continue;
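The no-op functor plus the std::is_same_v test above replaces the old runtime convert_src1_to_q8_1 flag with a compile-time switch: instantiating the operator with no_quantize_q8_1 makes quantize_enabled false, and if constexpr discards every quantization branch from that instantiation. A self-contained toy of the pattern; the names and signatures here are simplified stand-ins, not the backend's:

#include <cstdio>
#include <type_traits>

template <int N> struct no_quantize { void operator()(const float *) const {} };
template <int N> struct quantize {
    void operator()(const float *) const { std::printf("quantizing %d elements\n", N); }
};

template <template <int> typename quantize_f>
void op_mul_mat_sketch(const float * src1) {
    constexpr bool quantize_enabled = !std::is_same_v<quantize_f<8>, no_quantize<8>>;
    if constexpr (quantize_enabled) {
        quantize_f<8>()(src1);  // only compiled into the quantizing instantiation
    }
    // ... shared mul-mat logic would follow here ...
}

int main() {
    float x[8] = {};
    op_mul_mat_sketch<no_quantize>(x);  // quantization code discarded at compile time
    op_mul_mat_sketch<quantize>(x);     // runs the quantizer
    return 0;
}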
@@ -2495,20 +2358,19 @@ static void ggml_sycl_op_mul_mat(ggml_backend_sycl_context & ctx, const ggml_ten
             dev[i].src1_ddf = dev[i].src1_ddf_alloc.alloc(ctx.pool(i), ggml_nelements(src1));
         }
 
-        if (convert_src1_to_q8_1) {
+        if constexpr(quantize_enabled) {
             dev[i].src1_ddq = dev[i].src1_ddq_alloc.alloc(ctx.pool(i), nrows1*src1_padded_col_size*q8_1_ts/q8_1_bs);
 
             if (src1_on_device && src1_is_contiguous) {
-                bool reorder_q8_tensor = src0->extra && ((ggml_tensor_extra_gpu *)src0->extra)->optimized_feature.reorder;
                 scope_op_debug_print scope_dbg_print(__func__, "/quantize_row_q8_1_sycl", dst,
                                                      /*num_src=*/2, " : converting src1 to Q8_1");
-                [deleted quantize_row_q8_1_sycl call and its error handling, not legible in the page render]
+                try {
+                    quantize_row_q8_1_sycl<quantize_f>(dev[i].src1_ddf, dev[i].src1_ddq, ne10, nrows1, src1_padded_col_size, stream);
+                } catch (sycl::exception const &exc) {
+                    std::cerr << "Quantize_row_q8_1_sycl error" << exc.what() << "Exception caught at file:" << __FILE__
+                              << ", line:" << __LINE__ << std::endl;
+                    std::exit(1);
+                }
             }
         }
 
@@ -2524,11 +2386,6 @@ static void ggml_sycl_op_mul_mat(ggml_backend_sycl_context & ctx, const ggml_ten
         // here an event is recorded that signals that the main device has finished calculating the input data
         if (split && used_devices > 1) {
             ggml_sycl_set_device(ctx.device);
-            /*
-            DPCT1024:91: The original code returned the error code that was further
-            consumed by the program logic. This original code was replaced with 0.
-            You may need to rewrite the program logic consuming the error code.
-            */
             SYCL_CHECK(CHECK_TRY_ERROR(
                 *src0_extra->events[ctx.device][0] =
                     ctx.stream()->ext_oneapi_submit_barrier()));
@@ -2552,11 +2409,6 @@ static void ggml_sycl_op_mul_mat(ggml_backend_sycl_context & ctx, const ggml_ten
 
         // wait for main GPU data if necessary
         if (split && (i != ctx.device || is != 0)) {
-            /*
-            DPCT1009:163: SYCL uses exceptions to report errors and does not
-            use the error codes. The original code was commented out and a
-            warning string was inserted. You need to rewrite this code.
-            */
             SYCL_CHECK(CHECK_TRY_ERROR(stream->ext_oneapi_submit_barrier(
                 {*src0_extra->events[ctx.device][0]})));
         }
@@ -2582,39 +2434,42 @@ static void ggml_sycl_op_mul_mat(ggml_backend_sycl_context & ctx, const ggml_ten
                 // copy src0, src1 to device if necessary
                 if (src1_is_contiguous) {
                     if (i != ctx.device) {
-                        if (convert_src1_to_q8_1) {
+                        if constexpr (quantize_enabled) {
                             char * src1_ddq_i_source = dev[ctx.device].src1_ddq + src1_ddq_i_offset;
-                            [deleted memcpy lines, not legible in the page render]
+                            SYCL_CHECK(
+                                CHECK_TRY_ERROR(stream
+                                                    ->memcpy(src1_ddq_i, src1_ddq_i_source,
+                                                             src1_ncols * src1_padded_col_size * q8_1_ts / q8_1_bs)
+                                                    .wait()));
                         } else {
-
                             float * src1_ddf_i_source = (float *) src1_extra->data_device[ctx.device];
-                            src1_ddf_i_source += (i0*ne11 + src1_col_0) * ne10;
+                            src1_ddf_i_source += (i0 * ne11 + src1_col_0) * ne10;
 
-                            SYCL_CHECK(
-                                src1_ddf_i, src1_ddf_i_source,
-                                [deleted continuation, not legible in the page render]
+                            SYCL_CHECK(
+                                CHECK_TRY_ERROR(dev2dev_memcpy(*stream, *main_stream, src1_ddf_i, src1_ddf_i_source,
+                                                               src1_ncols * ne10 * sizeof(float))));
                         }
                     }
-                } else if (src1_on_device && !src1_is_contiguous) {
-                    SYCL_CHECK(ggml_sycl_cpy_tensor_2d(
-                        src1_ddf_i, src1, i03, i02, src1_col_0, src1_col_0+src1_ncols, stream));
                 } else {
-                    [deleted lines, not legible in the page render]
+                    if (src1_on_device) {
+                        SYCL_CHECK(ggml_sycl_cpy_tensor_2d(src1_ddf_i, src1, i03, i02, src1_col_0,
+                                                           src1_col_0 + src1_ncols, stream));
+                    } else {
+                        GGML_ABORT("src1 is non-contiguous and not on device");
+                    }
 
-                    [deleted lines, not legible in the page render]
+                    if constexpr (quantize_enabled) {
+                        scope_op_debug_print scope_dbg_print(__func__, "/quantize_row_q8_1_sycl", dst,
+                                                             /*num_src=*/2, " : converting src1 to Q8_1");
+                        try {
+                            quantize_row_q8_1_sycl<quantize_q8_1>(src1_ddf_i, src1_ddq_i, ne10, src1_ncols,
+                                                                  src1_padded_col_size, stream);
+                        } catch (const sycl::exception & exc) {
+                            std::cerr << "Quantize_row_q8_1_sycl error" << exc.what()
+                                      << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl;
+                            std::exit(1);
+                        }
+                    }
                 }
 
                 if (src1_col_0 == 0 && !src0_is_contiguous && i02 % i02_divisor == 0) {
@@ -2626,12 +2481,6 @@ static void ggml_sycl_op_mul_mat(ggml_backend_sycl_context & ctx, const ggml_ten
                 // do the computation
                 SYCL_CHECK(CHECK_TRY_ERROR(op(ctx, src0, src1, dst, src0_dd_i, src1_ddf_i, src1_ddq_i, dst_dd_i,
                                               dev[i].row_low, dev[i].row_high, src1_ncols, src1_padded_col_size, stream)));
-                /*
-                DPCT1010:93: SYCL uses exceptions to report errors and does not
-                use the error codes. The call was replaced with 0. You need to
-                rewrite this code.
-                */
-                SYCL_CHECK(0);
 
                 // copy dst to host or other device if necessary
                 if (!dst_on_device) {
@@ -2662,12 +2511,6 @@ static void ggml_sycl_op_mul_mat(ggml_backend_sycl_context & ctx, const ggml_ten
 
                 // add event for the main device to wait on until other device is done
                 if (split && (i != ctx.device || is != 0)) {
-                    /*
-                    DPCT1024:94: The original code returned the error code that
-                    was further consumed by the program logic. This original
-                    code was replaced with 0. You may need to rewrite the
-                    program logic consuming the error code.
-                    */
                     SYCL_CHECK(CHECK_TRY_ERROR(
                         *src0_extra->events[i][is] =
                             stream->ext_oneapi_submit_barrier()));
@@ -3351,19 +3194,20 @@ static void ggml_sycl_mul_mat(ggml_backend_sycl_context & ctx, const ggml_tensor
         // KQ + KQV multi-batch
         ggml_sycl_mul_mat_batched_sycl(ctx, src0, src1, dst);
     } else if (use_dequantize_mul_mat_vec) {
-        constexpr bool convert_src1_to_q8_1 = false;
         opt_for_reorder(&ctx, src0, src1, dst, mul_mat_algo::DMMV);
-        ggml_sycl_op_mul_mat(ctx, src0, src1, dst, ggml_sycl_op_dequantize_mul_mat_vec, convert_src1_to_q8_1);
+        ggml_sycl_op_mul_mat<no_quantize_q8_1>(ctx, src0, src1, dst, ggml_sycl_op_dequantize_mul_mat_vec);
     } else if (use_mul_mat_vec_q) {
-        constexpr bool convert_src1_to_q8_1 = true;
         opt_for_reorder(&ctx, src0, src1, dst, mul_mat_algo::MMVQ);
-        ggml_sycl_op_mul_mat(ctx, src0, src1, dst, ggml_sycl_op_mul_mat_vec_q, convert_src1_to_q8_1);
+        ggml_tensor_extra_gpu * extra = static_cast<ggml_tensor_extra_gpu *>(src0->extra);
+        if (extra && extra->optimized_feature.reorder) {
+            ggml_sycl_op_mul_mat<quantize_and_reorder_q8_1_soa>(ctx, src0, src1, dst, ggml_sycl_op_mul_mat_vec_q);
+        } else {
+            ggml_sycl_op_mul_mat<quantize_q8_1>(ctx, src0, src1, dst, ggml_sycl_op_mul_mat_vec_q);
+        }
     } else if (use_mul_mat_q) {
-        constexpr bool convert_src1_to_q8_1 = true;
-        ggml_sycl_op_mul_mat(ctx, src0, src1, dst, ggml_sycl_op_mul_mat_q, convert_src1_to_q8_1);
+        ggml_sycl_op_mul_mat<quantize_q8_1>(ctx, src0, src1, dst, ggml_sycl_op_mul_mat_q);
     } else {
-        constexpr bool convert_src1_to_q8_1 = false;
-        ggml_sycl_op_mul_mat(ctx, src0, src1, dst, ggml_sycl_op_mul_mat_sycl, convert_src1_to_q8_1);
+        ggml_sycl_op_mul_mat<no_quantize_q8_1>(ctx, src0, src1, dst, ggml_sycl_op_mul_mat_sycl);
     }
 }
 
ggml/src/ggml-sycl/quantize.hpp ADDED (new file, 133 lines)

/***************************************************************************
 *
 *  Copyright (C) 2025 Codeplay Software Ltd.
 *  Copyright (C) 2025 Intel Corporation
 *
 *  MIT License
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 *
 *  quantize.hpp
 *
 *  Description:
 *    Sycl backend specific quantization functions
 **************************************************************************/

#pragma once

#include <sycl/nd_item.hpp>

#include "ggml-sycl/dpct/helper.hpp"

template <int ElementsPerWI>
__dpct_inline__ static void quantize_q8_1_impl(const float * __restrict__ x,
                                               sycl::vec<int8_t, ElementsPerWI> & quantized_values, float & d,
                                               float & sum, const sycl::nd_item<1> & it) {
    auto subgroup_id = it.get_group(0);
    auto wi_id       = it.get_local_id(0);

    sycl::vec<float, ElementsPerWI> wi_f32_vals;

    auto float_ptr_offset = subgroup_id * QK8_1 + ElementsPerWI * wi_id;
    wi_f32_vals           = *reinterpret_cast<const sycl::vec<float, ElementsPerWI> *>(x + float_ptr_offset);

    float amax = 0.0f;

#pragma unroll(ElementsPerWI)
    for (int i = 0; i < ElementsPerWI; i++) {
        sum += wi_f32_vals[i];
        amax                = sycl::fmax(amax, sycl::fabs(wi_f32_vals[i]));
        quantized_values[i] = 0;
    }
    sum  = sycl::reduce_over_group(it.get_sub_group(), sum, sycl::plus<float>());
    amax = sycl::reduce_over_group(it.get_sub_group(), amax, sycl::maximum<float>());
    d    = amax == 0 ? 1 : amax / 127;

#pragma unroll(ElementsPerWI)
    for (int i = 0; i < ElementsPerWI; i++) {
        quantized_values[i] = sycl::round(wi_f32_vals[i] / d);
    }

    d = amax == 0 ? 0 : d;
}

// No op to control codepath in ggml_sycl_op_mul_mat
template <int ElementsPerWI> struct no_quantize_q8_1 {
    void operator()(const float *, void *, int, int, const sycl::nd_item<1> &) const {}
};

template <int ElementsPerWI> struct quantize_and_reorder_q8_1_soa {
    __dpct_inline__ void operator()(const float * __restrict__ x, void * reordered_q8_tensor, const int kx,
                                    const int kx_padded, const sycl::nd_item<1> & it) const {
        /*
        Quantizes and reorders the resultant q8 tensor in a per row fashion
        Each sub-group calculates one quant block. i.e. QK8_1 quant values and the d and sum values
        */
        auto subgroup_id = it.get_group(0);
        auto wi_id       = it.get_local_id(0);

        sycl::vec<int8_t, ElementsPerWI> quantized_values;
        float d   = 0.0f;
        float sum = 0.0f;
        quantize_q8_1_impl<ElementsPerWI>(x, quantized_values, d, sum, it);

        const int num_blocks_per_row = kx / QK8_1;
        auto      row                = subgroup_id / num_blocks_per_row;
        auto      col                = subgroup_id % num_blocks_per_row;
        auto      row_offset         = row * (kx_padded / QK8_1) * sizeof(block_q8_1);
        auto      col_offset         = QK8_1 * col + wi_id * ElementsPerWI;

        auto quant_ptr = (int8_t *) ((char *) reordered_q8_tensor + row_offset + col_offset);
        *reinterpret_cast<sycl::vec<int8_t, ElementsPerWI> *>(quant_ptr) = quantized_values;

        auto ds_ptr = (sycl::half2 *) ((char *) reordered_q8_tensor + row_offset + kx + col * sizeof(sycl::half2));
        if (wi_id == 0) {
            *ds_ptr = sycl::half2(sycl::half(d), sycl::half(sum));
        }
    }
};

template <int ElementsPerWI> struct quantize_q8_1 {
    __dpct_inline__ void operator()(const float * __restrict__ x, void * q8_tensor, const int kx, const int kx_padded,
                                    const sycl::nd_item<1> & it) const {
        auto subgroup_id = it.get_group(0);
        auto wi_id       = it.get_local_id(0);

        const int num_blocks_per_row = kx / QK8_1;
        auto      row                = subgroup_id / num_blocks_per_row;
        const int pitch              = kx_padded / QK8_1;

        sycl::vec<int8_t, ElementsPerWI> quantized_values;
        float d   = 0.0f;
        float sum = 0.0f;
        quantize_q8_1_impl<ElementsPerWI>(x, quantized_values, d, sum, it);

        block_q8_1 * quant_ptr = (block_q8_1 *) q8_tensor;
        auto         block_id  = subgroup_id % num_blocks_per_row + row * pitch;

        int8_t * qs = &(quant_ptr[block_id].qs[wi_id * ElementsPerWI]);
        *reinterpret_cast<sycl::vec<int8_t, ElementsPerWI> *>(qs) = quantized_values;
        if (wi_id == 0) {
            quant_ptr[block_id].ds = sycl::half2(sycl::half(d), sycl::half(sum));
        }
    }
};

template <template <int> typename quantize_f>
void quantize_row_q8_1_sycl(const float * x, void * vy, const int kx, const int ky, const int kx_padded,
                            dpct::queue_ptr stream) {
    static_assert(QK8_1 % WARP_SIZE == 0);
    auto local_range      = std::size_t(WARP_SIZE);
    auto num_quant_blocks = ky * (kx / QK8_1);
    auto global_range     = num_quant_blocks * local_range;
    dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 });

    stream->parallel_for(sycl::nd_range<1>({ global_range }, { local_range }),
                         [=](sycl::nd_item<1> it) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
                             quantize_f<QK8_1 / WARP_SIZE>()(x, vy, kx, kx_padded, it);
                         });
}
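The pointer arithmetic in quantize_and_reorder_q8_1_soa above implies the reordered ("structure of arrays") layout: each padded row stores its kx int8 quants contiguously, followed by one {d, sum} half2 per block. A host-side sketch of the byte offsets, assuming QK8_1 == 32 and a packed 36-byte block_q8_1 (32 quant bytes plus 4 bytes of fp16 metadata):

#include <cstddef>
#include <cstdio>

constexpr int         QK8_1       = 32;         // assumed block size
constexpr std::size_t BLOCK_BYTES = QK8_1 + 4;  // qs[32] + fp16 {d, sum}

// Byte offsets of block `col` of row `row`, mirroring the kernel's arithmetic.
void reordered_offsets(int row, int col, int kx, int kx_padded,
                       std::size_t & quant_off, std::size_t & ds_off) {
    const std::size_t row_bytes = (std::size_t)(kx_padded / QK8_1) * BLOCK_BYTES;
    quant_off = row * row_bytes + (std::size_t) QK8_1 * col;   // quants packed first
    ds_off    = row * row_bytes + (std::size_t) kx + col * 4;  // ds pairs after the kx quant bytes
}

int main() {
    std::size_t q = 0, ds = 0;
    reordered_offsets(/*row=*/1, /*col=*/2, /*kx=*/128, /*kx_padded=*/128, q, ds);
    std::printf("row 1, block 2: quants at byte %zu, {d,sum} at byte %zu\n", q, ds);
    return 0;
}

Keeping a whole row's quants contiguous is what lets the reorder-optimized mul_mat_vec_q path read them with wide, coalesced loads, while the plain quantize_q8_1 functor keeps the interleaved block_q8_1 layout the other kernels expect.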