mirror of
https://github.com/ggml-org/llama.cpp.git
synced 2025-07-19 09:08:04 +00:00
graph : Pass the graph placeholder message in debug mode (#14748)
Without this condition, the debug log clutters the screen for every batch processed during prompt processing, and for every token generated in Kobold.cpp.
This commit is contained in:
@ -467,7 +467,9 @@ bool llm_graph_result::can_reuse(const llm_graph_params & params) {
|
||||
// Fold each input's reusability verdict into the overall result:
// the graph can only be reused when every input agrees it can be.
for (auto & input : inputs) {
    const bool input_ok = input->can_reuse(params);

    // Only emit the per-input verdict at elevated debug levels, so the
    // log is not flooded on every batch / generated token.
    if (debug > 1) {
        LLAMA_LOG_DEBUG("%s: can_reuse = %d\n", "placeholder", input_ok);
    }

    // A single failing input makes the whole graph non-reusable.
    if (!input_ok) {
        res = false;
    }
}
|
||||
|
Reference in New Issue
Block a user