SYCL: Add fp16 type support to unary op kernels (#12788)

* SYCL: Add fp16 support to some elementwise OP kernels

* remove comment

ggml-ci

* Use static_cast directly

* remove not needed cast from tanh

* Use static_cast and remove unneeded casts

* Adjust device_support_op for unary OPs

* Use cast_data and typed_data struct to deduplicate casting code
This commit is contained in:
Akarshan Biswas
2025-04-11 13:33:50 +05:30
committed by GitHub
parent ec6c09d0fa
commit fccf9cae83
3 changed files with 778 additions and 297 deletions

View File

@@ -2,6 +2,13 @@
#define GGML_SYCL_ELEMENTWISE_HPP
#include "common.hpp"
#include "ggml.h"
#include <limits>    // std::numeric_limits — required; <limits.h> only provides the C macros (INT_MAX, ...)
#include <limits.h>

// Returns negative infinity for floating-point type T.
// T must be a type for which std::numeric_limits<T>::infinity() is
// meaningful (has_infinity == true), e.g. float or sycl::half.
// NOTE(review): presumably used as an initial/mask value by the op
// kernels — confirm against callers.
template <typename T>
T neg_infinity() {
    return -std::numeric_limits<T>::infinity();
}
static __dpct_inline__ float op_repeat(const float a, const float b) {
return b;
@@ -24,6 +31,19 @@ static __dpct_inline__ float op_div(const float a, const float b) {
return a / b;
}
// Typed view over a tensor operation's buffers: the input buffer
// (read-only) and the output buffer, both reinterpreted as element
// type T. Deduplicates the per-kernel casting boilerplate.
// NOTE: this is an aggregate and member order matters — cast_data
// initializes it positionally with braced-init.
template<typename T>
struct typed_data {
const T * src;
T * dst;
};
// Builds a typed_data<T> for `dst`: reinterprets the raw data pointer
// of its first source tensor as the (const) input buffer and the raw
// data pointer of `dst` itself as the output buffer.
template<typename T>
typed_data<T> cast_data(ggml_tensor * dst) {
    typed_data<T> bufs;
    bufs.src = static_cast<const T *>(dst->src[0]->data);
    bufs.dst = static_cast<T *>(dst->data);
    return bufs;
}
// Op launcher declarations: each takes the backend context and the
// destination tensor (whose src[] holds the inputs); defined elsewhere.
void ggml_sycl_sqrt(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
@@ -65,6 +85,10 @@ void ggml_sycl_upscale(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_pad(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_clamp(ggml_backend_sycl_context & ctx, ggml_tensor * dst);

// --------- (separator: the declarations below were grouped apart
// from the ones above in the original header)
void ggml_sycl_add(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_sub(ggml_backend_sycl_context & ctx, ggml_tensor * dst);