From 220860aa0c15f665a8225424064f6951e3696037 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Thu, 14 Aug 2025 16:08:31 +0300
Subject: [PATCH] graph : use F32 accumulators for gpt-oss

ggml-ci
---
 src/llama-graph.cpp | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/src/llama-graph.cpp b/src/llama-graph.cpp
index 053c72d6d..b0b1c7323 100644
--- a/src/llama-graph.cpp
+++ b/src/llama-graph.cpp
@@ -1566,6 +1566,11 @@ ggml_tensor * llm_graph_context::build_attn_with_sinks(
 
     if (wo) {
         cur = build_lora_mm(wo, cur);
+        if (arch == LLM_ARCH_OPENAI_MOE) {
+            // similar to the original build_attn
+            // TODO: this is tmp until we refactor and remove the build_attn_with_sinks() path
+            ggml_mul_mat_set_prec(cur, GGML_PREC_F32);
+        }
     }
 
     if (wo_b) {
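
Note (not part of the patch): the change above calls ggml_mul_mat_set_prec() on the result of the attention output projection so that this particular matmul accumulates in F32. The following is a minimal standalone sketch of how that precision hint is attached to a matmul node in a ggml graph; the context size and tensor shapes are illustrative assumptions, not values taken from llama.cpp.

    #include "ggml.h"

    int main(void) {
        // illustrative context size (assumption)
        struct ggml_init_params params = {
            /*.mem_size   =*/ 16*1024*1024,
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ false,
        };
        struct ggml_context * ctx = ggml_init(params);

        // toy weight and activation tensors (shapes are assumptions)
        struct ggml_tensor * w = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, 64, 64);
        struct ggml_tensor * x = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64, 4);

        // the matmul node whose accumulation precision we want to control
        struct ggml_tensor * y = ggml_mul_mat(ctx, w, x);

        // request F32 accumulators for this node, analogous to what the patch
        // does for the gpt-oss (LLM_ARCH_OPENAI_MOE) output projection
        ggml_mul_mat_set_prec(y, GGML_PREC_F32);

        ggml_free(ctx);
        return 0;
    }

The hint is per-node, so only the marked matmul pays the cost of higher-precision accumulation; other matmuls in the graph keep the backend's default precision.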