llama.cpp/ggml/src/ggml-cuda/argmax.cuh

#include "common.cuh"
// CUDA backend entry point for ggml's argmax operator; following the ggml-cuda
// convention, the input tensor is dst->src[0] and the result is written to dst.
void ggml_cuda_argmax(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
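
For context, here is a rough standalone sketch (not the llama.cpp implementation, which lives in argmax.cu) of the kind of per-row argmax reduction kernel an entry point like this typically launches: one thread block per row, each thread scans a strided slice of the row, and a shared-memory reduction picks the winning column index. All names and the fixed 256-thread block size are illustrative assumptions.

// argmax_rows_sketch.cu -- hypothetical, self-contained illustration
#include <cuda_runtime.h>
#include <cstdio>
#include <cfloat>

// One block per row; assumes a 256-thread launch to match the shared arrays.
__global__ void argmax_rows(const float * x, int32_t * out, int ncols) {
    const int row = blockIdx.x;
    const float * row_x = x + (size_t) row * ncols;

    // each thread scans a strided slice of the row
    float best_val = -FLT_MAX;
    int   best_idx = 0;
    for (int col = threadIdx.x; col < ncols; col += blockDim.x) {
        if (row_x[col] > best_val) {
            best_val = row_x[col];
            best_idx = col;
        }
    }

    // reduce the per-thread candidates through shared memory
    __shared__ float s_val[256];
    __shared__ int   s_idx[256];
    s_val[threadIdx.x] = best_val;
    s_idx[threadIdx.x] = best_idx;
    __syncthreads();

    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (threadIdx.x < stride && s_val[threadIdx.x + stride] > s_val[threadIdx.x]) {
            s_val[threadIdx.x] = s_val[threadIdx.x + stride];
            s_idx[threadIdx.x] = s_idx[threadIdx.x + stride];
        }
        __syncthreads();
    }

    if (threadIdx.x == 0) {
        out[row] = s_idx[0];
    }
}

int main() {
    const int nrows = 2, ncols = 8;
    float h_x[nrows * ncols] = {
        0.f, 1.f, 5.f, 2.f, 3.f, 4.f, 0.f, 1.f,   // argmax = 2
        9.f, 1.f, 0.f, 2.f, 3.f, 4.f, 0.f, 1.f,   // argmax = 0
    };
    int32_t h_out[nrows];

    float * d_x; int32_t * d_out;
    cudaMalloc(&d_x,   sizeof(h_x));
    cudaMalloc(&d_out, sizeof(h_out));
    cudaMemcpy(d_x, h_x, sizeof(h_x), cudaMemcpyHostToDevice);

    argmax_rows<<<nrows, 256>>>(d_x, d_out, ncols);
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    printf("argmax: %d %d\n", h_out[0], h_out[1]);   // expected: 2 0

    cudaFree(d_x);
    cudaFree(d_out);
    return 0;
}

The real backend dispatches on the tensor metadata carried in dst (types, dimensions, strides) rather than raw pointers, but the per-row reduction shape is the same idea.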