ggml : better PERF prints + support "LLAMA_PERF=1 make"
@@ -1250,9 +1250,11 @@ static bool llama_eval_internal(
     ggml_build_forward_expand(&gf, inpL);
     ggml_graph_compute (ctx0, &gf);
 
+#ifdef GGML_PERF
     // print timing information per ggml operation (for debugging purposes)
     // requires GGML_PERF to be defined
-    //ggml_graph_print(&gf);
+    ggml_graph_print(&gf);
+#endif
 
     // plot the computation graph in dot format (for debugging purposes)
     //if (n_past%100 == 0) {
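The "LLAMA_PERF=1 make" part of the commit title refers to toggling the GGML_PERF define from the build command line rather than editing the source. A minimal sketch of how such a toggle is typically wired into the project Makefile; the exact lines below are an assumption for illustration, not quoted from this commit:

ifdef LLAMA_PERF
# assumption: pass the define to both C and C++ translation units,
# so ggml.c collects per-op timings and the ggml_graph_print(&gf) call above is compiled in
CFLAGS   += -DGGML_PERF
CXXFLAGS += -DGGML_PERF
endif

With a toggle like this, a plain "make" leaves the timing code compiled out, while "LLAMA_PERF=1 make" defines GGML_PERF so the per-operation timing report is printed after each graph computation.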