ggml-cpu: activate nnpa for ggml_cpu_fp32_to_fp16
Signed-off-by: Aaron Teo <aaron.teo1@ibm.com>
@@ -3142,6 +3142,16 @@ void ggml_cpu_fp32_to_fp16(const float * x, ggml_fp16_t * y, int64_t n) {
         _mm_storel_epi64((__m128i *)(y + i), y_vec);
     }
 #endif
+
+#if defined(__NNPA__)
+    for (; i + 7 < n; i += 8) {
+        float32x4_t v_xh = vec_xl(0, (const float *)(x + i + 0));
+        float32x4_t v_xl = vec_xl(0, (const float *)(x + i + 4));
+        uint16x8_t v_xd = vec_round_from_fp32(v_xh, v_xl, 0);
+        vec_xst(v_xd, 0, (ggml_fp16_t *)(y + i));
+    }
+#endif
+
     for (; i < n; ++i) {
         y[i] = GGML_FP32_TO_FP16(x[i]);
     }
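For context, the hunk adds an NNPA (IBM z Neural Network Processing Assist) fast path that converts eight FP32 values to FP16 per iteration, while the existing scalar tail loop handles the remaining 0..7 elements. Below is a minimal standalone C sketch of that overall pattern, not the ggml implementation itself: fp16_from_fp32() is a hypothetical stand-in for GGML_FP32_TO_FP16 that only does a crude truncating conversion.

/*
 * Minimal standalone sketch (assumption: not the ggml code itself) of the
 * structure of ggml_cpu_fp32_to_fp16 above: SIMD fast paths such as the NNPA
 * block convert 8 floats per iteration, and a scalar tail loop handles the
 * remainder.  fp16_from_fp32() is a hypothetical helper standing in for
 * GGML_FP32_TO_FP16; it truncates instead of rounding and does not handle
 * NaN or subnormals, which the real macro does.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

typedef uint16_t fp16_t;

static fp16_t fp16_from_fp32(float f) {
    uint32_t bits;
    memcpy(&bits, &f, sizeof(bits));
    uint16_t sign = (uint16_t)((bits >> 16) & 0x8000);
    int32_t  exp  = (int32_t)((bits >> 23) & 0xff) - 127 + 15; /* rebias exponent */
    uint16_t mant = (uint16_t)((bits >> 13) & 0x03ff);         /* keep top 10 mantissa bits */
    if (exp <= 0)  return sign;            /* underflow: flush to signed zero */
    if (exp >= 31) return sign | 0x7c00;   /* overflow: saturate to infinity  */
    return sign | (uint16_t)(exp << 10) | mant;
}

static void fp32_to_fp16_row(const float * x, fp16_t * y, int64_t n) {
    int64_t i = 0;
    /* vectorized blocks (F16C, NNPA, ...) would sit here, each advancing i by 8 */
    for (; i < n; ++i) {
        y[i] = fp16_from_fp32(x[i]);
    }
}

int main(void) {
    const float x[5] = { 0.0f, 1.0f, -2.0f, 0.5f, 65504.0f };
    fp16_t      y[5];
    fp32_to_fp16_row(x, y, 5);
    for (int i = 0; i < 5; ++i) {
        printf("%g -> 0x%04x\n", (double) x[i], y[i]);
    }
    return 0;
}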