From f1b1d98e8df06aabaca23d30a08056a8656e131a Mon Sep 17 00:00:00 2001
From: Aaron Teo
Date: Sat, 21 Jun 2025 17:51:55 +0800
Subject: [PATCH] ggml-cpu: activate nnpa fp32->fp16 or fp16->fp32 compute

Signed-off-by: Aaron Teo
---
 ggml/src/ggml-impl.h | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/ggml/src/ggml-impl.h b/ggml/src/ggml-impl.h
index 6dc5ce0d9..4df0eeac7 100644
--- a/ggml/src/ggml-impl.h
+++ b/ggml/src/ggml-impl.h
@@ -417,6 +417,28 @@ GGML_API void ggml_aligned_free(void * ptr, size_t size);
     #define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x)
     #define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)
 
+#elif defined(__NNPA__)
+
+    #define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
+    #define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
+
+    #define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x)
+    #define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)
+
+    static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
+        uint16x8_t v_h = vec_splats(h);
+        uint16x8_t v_hd = vec_convert_from_fp16(v_h, 0);
+        return vec_extend_to_fp32_hi(v_hd, 0)[0];
+    }
+
+    static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
+        float32x4_t v_f = vec_splats(f);
+        float32x4_t v_zero = vec_splats(0.0f);
+        uint16x8_t v_hd = vec_round_from_fp32(v_f, v_zero, 0);
+        uint16x8_t v_h = vec_convert_to_fp16(v_hd, 0);
+        return vec_extract(v_h, 0);
+    }
+
 #else // FP16 <-> FP32
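
Not part of the patch itself: a minimal sketch of how the new __NNPA__ branch is exercised once it is active, i.e. a scalar FP32 -> FP16 -> FP32 round trip through the macros it defines. The ggml_fp16_t type and both macros come from ggml-impl.h; the standalone test program, its include path, and the printed format are assumptions for illustration only.

    /* Sketch only: round-trip a float through the NNPA-backed conversion macros.   */
    /* Assumes an s390x build where __NNPA__ is defined and ggml-impl.h is on the   */
    /* include path; this file is not added by the patch.                           */
    #include <stdio.h>
    #include "ggml-impl.h"

    int main(void) {
        float       src = 3.14159f;
        ggml_fp16_t h   = GGML_FP32_TO_FP16(src); /* vec_round_from_fp32 + vec_convert_to_fp16    */
        float       dst = GGML_FP16_TO_FP32(h);   /* vec_convert_from_fp16 + vec_extend_to_fp32_hi */
        printf("fp32 %f -> fp16 0x%04x -> fp32 %f\n", src, (unsigned) h, dst);
        return 0;
    }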