diff --git a/ggml/src/ggml-sycl/backend.hpp b/ggml/src/ggml-sycl/backend.hpp
index f839a42bc..410a67b01 100644
--- a/ggml/src/ggml-sycl/backend.hpp
+++ b/ggml/src/ggml-sycl/backend.hpp
@@ -28,6 +28,7 @@
 #include "mmvq.hpp"
 #include "norm.hpp"
 #include "outprod.hpp"
+#include "quantize.hpp"
 #include "quants.hpp"
 #include "rope.hpp"
 #include "set_rows.hpp"
diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp
index a023d6fb4..b08941c32 100644
--- a/ggml/src/ggml-sycl/ggml-sycl.cpp
+++ b/ggml/src/ggml-sycl/ggml-sycl.cpp
@@ -44,6 +44,7 @@
 #include "ggml-sycl/set_rows.hpp"
 #include "ggml-sycl/sycl_hw.hpp"
 #include "ggml-sycl/getrows.hpp"
+#include "ggml-sycl/quantize.hpp"
 #include "ggml.h"
 
 static bool g_sycl_loaded = false;
@@ -1373,120 +1374,6 @@ typedef void (*ggml_sycl_op_mul_mat_t)(
 
-template <int QUANT_BLOCK_TILE>
-static void quantize_q8_1(const float * __restrict__ x, void * __restrict__ vy, const int kx, const int kx_padded,
-                          const sycl::nd_item<3> &item_ct1) {
-    const int ix = (item_ct1.get_local_range(2) * item_ct1.get_group(2) +
-                    item_ct1.get_local_id(2)) * QUANT_BLOCK_TILE;
-
-    if (ix >= kx_padded) {
-        return;
-    }
-
-    const int iy = item_ct1.get_local_range(1) * item_ct1.get_group(1) +
-                   item_ct1.get_local_id(1);
-
-    const int i_padded = iy*kx_padded + ix;
-
-    block_q8_1 * y = (block_q8_1 *) vy;
-
-    const int ib = i_padded / QK8_1; // block index
-    const int iqs = i_padded % QK8_1; // quant index
-    typedef sycl::vec<float, QUANT_BLOCK_TILE> TC;
-    typedef sycl::vec<int8_t, QUANT_BLOCK_TILE> TQ;
-    TC zeros;
-    TQ qzeros;
-#pragma unroll
-    for (int i = 0; i < QUANT_BLOCK_TILE; i++)
-    {
-        zeros[i] = 0.f;
-        qzeros[i] = 0;
-    }
-    const TC xi = ix < kx ? *(const TC *)&x[iy * kx + ix] : zeros;
-    float sum = xi[0];
-    float amax = sycl::fabs(xi[0]);
-#pragma unroll
-    for (int i = 1; i < QUANT_BLOCK_TILE; i++)
-    {
-        sum += xi[i];
-        amax = sycl::fmax(sycl::fabs(xi[i]), amax);
-    }
-    sum = warp_reduce_sum(sum, item_ct1);
-    amax = warp_reduce_max(amax, item_ct1);
-
-    const float d = amax / 127;
-    TQ q = qzeros;
-    if (amax != 0.0f)
-    {
-#pragma unroll
-        for (int i = 0; i < QUANT_BLOCK_TILE; i++) {
-            q[i] = sycl::round(xi[i] / d);
-        }
-    }
-
-    *(TQ *)&y[ib].qs[iqs] = q;
-
-    if (iqs > 0) {
-        return;
-    }
-
-    reinterpret_cast<sycl::half &>(y[ib].ds.x()) = d;
-    reinterpret_cast<sycl::half &>(y[ib].ds.y()) = sum;
-}
-
-template <int ElementsPerWI>
-static __dpct_inline__ void quantize_and_reorder_q8_1(const float * __restrict__ x, void * reordered_q8_tensor,
-                                                      const int kx, const int kx_padded, const sycl::nd_item<1> & it) {
-    /*
-        Quantizes and reorders the resultant q8 tensor in a per row fashion
-        Each sub-group calculates one quant block. i.e. QK8_1 quant values and the d and sum values
-    */
-
-    auto subgroup_id = it.get_group(0);
-    auto wi_id = it.get_local_id(0);
-
-    const int num_blocks_per_row = kx / QK8_1;
-    auto row = subgroup_id / num_blocks_per_row;
-    auto col = subgroup_id % num_blocks_per_row;
-
-    auto row_offset = row * (kx_padded / QK8_1) * sizeof(block_q8_1);
-    auto col_offset = QK8_1 * col + wi_id * ElementsPerWI;
-
-    auto quant_ptr = (int8_t *) ((char *) reordered_q8_tensor + row_offset + col_offset);
-    auto ds_ptr = (sycl::half2 *) ((char *) reordered_q8_tensor + row_offset + kx + col * sizeof(sycl::half2));
-
-    sycl::vec<float, ElementsPerWI> wi_f32_vals;
-    sycl::vec<int8_t, ElementsPerWI> quantized_values;
-
-    auto float_ptr_offset = subgroup_id * QK8_1 + ElementsPerWI * wi_id;
-    wi_f32_vals = *reinterpret_cast<const sycl::vec<float, ElementsPerWI> *>(x + float_ptr_offset);
-
-    float sum = 0.0f;
-    float amax = 0.0f;
-
-#pragma unroll(ElementsPerWI)
-    for (int i = 0; i < ElementsPerWI; i++) {
-        sum += wi_f32_vals[i];
-        amax = sycl::fmax(amax, sycl::fabs(wi_f32_vals[i]));
-        quantized_values[i] = 0;
-    }
-    sum = sycl::reduce_over_group(it.get_group(), sum, sycl::plus<float>());
-    amax = sycl::reduce_over_group(it.get_group(), amax, sycl::maximum<float>());
-    float d = amax == 0 ? 1 : amax / 127;
-
-#pragma unroll(ElementsPerWI)
-    for (int i = 0; i < ElementsPerWI; i++) {
-        quantized_values[i] = sycl::round(wi_f32_vals[i] / d);
-    }
-
-    d = amax == 0 ? 0 : d;
-
-    *reinterpret_cast<sycl::vec<int8_t, ElementsPerWI> *>(quant_ptr) = quantized_values;
-    if (wi_id == 0) {
-        *ds_ptr = sycl::half2(sycl::half(d), sycl::half(sum));
-    }
-}
-
 static void mul_mat_p021_f16_f32(
     const void * __restrict__ vx, const float * __restrict__ y, float * __restrict__ dst,
     const int ncols_x, const int nrows_x, const int nchannels_x, const int nchannels_y,
@@ -1770,32 +1657,6 @@ static void pool2d_nchw_kernel(
         o_ptr[cur_oh * ow + cur_ow] = res;
 }
 
-static void quantize_row_q8_1_sycl(const float * x, void * vy, const int kx, const int ky, const int kx_padded,
-                                   bool reorder_q8_tensor, queue_ptr stream) {
-    if (reorder_q8_tensor) {
-        auto local_range = std::size_t(WARP_SIZE);
-        auto num_quant_blocks = ky * (kx / QK8_1);
-        auto global_range = num_quant_blocks * local_range;
-        stream->parallel_for(sycl::nd_range<1>({ global_range }, { local_range }),
-                             [=](sycl::nd_item<1> it) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
-                                 quantize_and_reorder_q8_1<QK8_1 / WARP_SIZE>(x, vy, kx, kx_padded, it);
-                             });
-    } else {
-        const int block_num_x = (kx_padded + SYCL_QUANTIZE_BLOCK_SIZE - 1) / SYCL_QUANTIZE_BLOCK_SIZE;
-        const sycl::range<3> num_blocks(1, ky, block_num_x);
-        int constexpr QUANT_BLOCK_TILE = QK8_1 / WARP_SIZE;
-        static_assert(QK8_1 % WARP_SIZE == 0);
-        const sycl::range<3> block_size(1, 1, SYCL_QUANTIZE_BLOCK_SIZE / QUANT_BLOCK_TILE);
-        {
-            dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 });
-
-            stream->parallel_for(sycl::nd_range<3>(num_blocks * block_size, block_size),
-                                 [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
-                                     quantize_q8_1<QUANT_BLOCK_TILE>(x, vy, kx, kx_padded, item_ct1);
-                                 });
-        }
-    }
-}
 
 static void ggml_mul_mat_p021_f16_f32_sycl(const void *vx, const float *y,
                                            float *dst, const int ncols_x,
@@ -2372,10 +2233,10 @@ static void ggml_sycl_set_peer_access(const int n_tokens, int main_device) {
     peer_access_enabled = enable_peer_access;
 }
 
+template
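For reviewers unfamiliar with the q8_1 format, here is a minimal scalar sketch of what both removed kernels compute per 32-value block: the scale d = amax / 127, the rounded int8 quants, and the block sum consumed later by the dot-product kernels. The `BlockQ81` struct is an illustrative stand-in only; ggml's real `block_q8_1` packs d and sum into a `sycl::half2`, and the kernels do the reductions per sub-group rather than serially.

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <vector>

constexpr int QK8_1 = 32;  // quant values per block, as in ggml

// Illustrative stand-in for ggml's block_q8_1 (which stores d/sum as half2).
struct BlockQ81 {
    float  d;           // scale: amax / 127, or 0 for an all-zero block
    float  sum;         // sum of the 32 source floats
    int8_t qs[QK8_1];   // round(x[i] / d)
};

static BlockQ81 quantize_block_q8_1(const float * x) {
    BlockQ81 out{};
    float sum = 0.0f, amax = 0.0f;
    for (int i = 0; i < QK8_1; i++) {  // done via sub-group reductions on device
        sum  += x[i];
        amax  = std::fmax(amax, std::fabs(x[i]));
    }
    const float d = amax == 0.0f ? 0.0f : amax / 127.0f;
    for (int i = 0; i < QK8_1; i++) {
        out.qs[i] = d == 0.0f ? int8_t(0) : (int8_t) std::round(x[i] / d);
    }
    out.d   = d;
    out.sum = sum;
    return out;
}

int main() {
    std::vector<float> x(QK8_1);
    for (int i = 0; i < QK8_1; i++) {
        x[i] = 0.1f * (float)(i - 16);
    }
    const BlockQ81 b = quantize_block_q8_1(x.data());
    std::printf("d = %f, sum = %f, qs[0] = %d\n", b.d, b.sum, (int) b.qs[0]);
    return 0;
}
```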
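The reorder path differs only in where the results land: within each row, all int8 quants are stored contiguously first, followed by the row's (d, sum) pairs, which is what the `row_offset`/`col_offset` arithmetic in `quantize_and_reorder_q8_1` encodes. A host-side sketch of that byte arithmetic, under stated assumptions (no row padding, arbitrary example sizes, 2-byte halves):

```cpp
#include <cstddef>
#include <cstdio>

constexpr int         QK8_1            = 32;
// 32 qs bytes + one half2 (d, sum) pair; matches sizeof(block_q8_1) = 36
constexpr std::size_t BLOCK_Q8_1_BYTES = QK8_1 + 4;

int main() {
    const int kx        = 4096;  // row width in floats (multiple of QK8_1)
    const int kx_padded = kx;    // assume no padding for this sketch
    const int row = 3, col = 7;  // which quant block to locate

    // Each row occupies (kx_padded / QK8_1) blocks' worth of bytes.
    const std::size_t row_offset = row * (kx_padded / QK8_1) * BLOCK_Q8_1_BYTES;

    // Within a row: kx bytes of int8 quants first, then the (d, sum) pairs.
    const std::size_t quants_offset = row_offset + (std::size_t) QK8_1 * col;
    const std::size_t ds_offset     = row_offset + kx + col * 4;  // 4 = sizeof(half2)

    std::printf("block (%d, %d): quants at byte %zu, ds at byte %zu\n",
                row, col, quants_offset, ds_offset);
    return 0;
}
```

The intent of this layout, as I read the kernel, is that a consumer can stream a whole row's quants as one contiguous run (coalesced sub-group loads) instead of striding over interleaved 36-byte blocks.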