From ae9c5f928a9da40a8d2d3b7901b9ca1cba8f30ad Mon Sep 17 00:00:00 2001
From: Aaron Teo <aaron.teo1@ibm.com>
Date: Fri, 20 Jun 2025 19:54:38 +0800
Subject: [PATCH] ggml-cpu: add ggml fp16->fp32 and fp32->fp16 scalar simd

Signed-off-by: Aaron Teo <aaron.teo1@ibm.com>
---
 ggml/src/ggml-cpu/simd-mappings.h |  4 ++--
 ggml/src/ggml-impl.h              | 24 ++++++++++++++++++++++++
 2 files changed, 26 insertions(+), 2 deletions(-)

diff --git a/ggml/src/ggml-cpu/simd-mappings.h b/ggml/src/ggml-cpu/simd-mappings.h
index 773ed3de4..c3b7bcfe1 100644
--- a/ggml/src/ggml-cpu/simd-mappings.h
+++ b/ggml/src/ggml-cpu/simd-mappings.h
@@ -982,8 +982,8 @@ static inline float32x4_t __lzs_f16cx4_load(const ggml_fp16_t * x) {
 
 static inline void __lzs_f16cx4_store(ggml_fp16_t * x, float32x4_t v_y) {
 #ifdef __NNPA__
-    float32x4_t zero = vec_splats(0.0f);
-    uint16x8_t v_x = vec_round_from_fp32(v_y, zero, 0);
+    float32x4_t v_zero = vec_splats(0.0f);
+    uint16x8_t v_x = vec_round_from_fp32(v_y, v_zero, 0);
     x[0] = vec_extract(v_x, 0);
     x[1] = vec_extract(v_x, 1);
     x[2] = vec_extract(v_x, 2);
diff --git a/ggml/src/ggml-impl.h b/ggml/src/ggml-impl.h
index 6dc5ce0d9..d28b3cce5 100644
--- a/ggml/src/ggml-impl.h
+++ b/ggml/src/ggml-impl.h
@@ -322,6 +322,7 @@ GGML_API void ggml_aligned_free(void * ptr, size_t size);
 // 16-bit float
 // on Arm, we use __fp16
 // on x86, we use uint16_t
+// on s390x, we use ZDNN_DLFLOAT16 with NNPA
 //
 // for old CUDA compilers (<= 11), we use uint16_t: ref https://github.com/ggml-org/llama.cpp/pull/10616
 // for MUSA compilers           , we use uint16_t: ref https://github.com/ggml-org/llama.cpp/pull/11843
@@ -417,6 +418,29 @@ GGML_API void ggml_aligned_free(void * ptr, size_t size);
     #define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x)
     #define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)
 
+#elif defined(__NNPA__)
+
+    #define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
+    #define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
+
+    #define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x)
+    #define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)
+
+    // TODO: Determine if inline assembly is faster
+    static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
+        uint16x8_t v_h = vec_splats(h);
+        uint16x8_t nnpa_dlf16 = vec_convert_from_fp16(v_h, 0);
+        return vec_extend_to_fp32_hi(nnpa_dlf16, 0)[0];
+    }
+
+    // TODO: Determine if inline assembly is faster
+    static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
+        float32x4_t v_f = vec_splats(f);
+        float32x4_t v_zero = vec_splats(0.0f);
+        uint16x8_t v_h = vec_round_from_fp32(v_f, v_zero, 0);
+        return vec_extract(v_h, 0);
+    }
+
 #else
 
 // FP16 <-> FP32