mirror of https://github.com/ggml-org/llama.cpp.git
synced 2025-07-04 18:16:58 +00:00
CUDA: use MMQ instead of cuBLAS by default (#8075)
@@ -1,5 +1,7 @@
#include "common.cuh"

#define MMVQ_MAX_BATCH_SIZE 8 // Max. batch size for which to use MMVQ kernels.

void ggml_cuda_op_mul_mat_vec_q(
    ggml_backend_cuda_context & ctx,
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i,
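The comment on MMVQ_MAX_BATCH_SIZE says it is the largest batch size for which the MMVQ (quantized mat-vec) kernels are used; per the commit title, larger batches now default to MMQ rather than cuBLAS. Below is a minimal sketch of that kind of threshold-based kernel selection, assuming the batch size is the number of columns of src1. The pick_kernel helper and the mul_mat_kernel enum are hypothetical illustrations, not the actual llama.cpp dispatch code.

// Hypothetical sketch of threshold-based kernel selection (not llama.cpp code).
#include <cstdint>

#define MMVQ_MAX_BATCH_SIZE 8 // Max. batch size for which to use MMVQ kernels.

enum class mul_mat_kernel { MMVQ, MMQ, CUBLAS };

// Choose a kernel family from the batch size (number of src1 columns) and
// whether the MMQ path is enabled (the default after this commit).
static mul_mat_kernel pick_kernel(int64_t src1_ncols, bool mmq_enabled) {
    if (src1_ncols <= MMVQ_MAX_BATCH_SIZE) {
        return mul_mat_kernel::MMVQ; // small batches: quantized mat-vec kernels
    }
    return mmq_enabled ? mul_mat_kernel::MMQ : mul_mat_kernel::CUBLAS;
}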