Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-08-01 15:09:32 -04:00)
CUDA: fix logic for V100 + GGML_CUDA_FORCE_MMQ (#12098)
@@ -109,9 +109,9 @@ static constexpr __device__ int get_mmq_x_max_device() {
 #if __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA
 #ifdef GGML_CUDA_FORCE_MMQ
-    return MMQ_DP4A_MAX_BATCH_SIZE;
-#else // GGML_CUDA_FORCE_MMQ
     return 128;
+#else // GGML_CUDA_FORCE_MMQ
+    return MMQ_DP4A_MAX_BATCH_SIZE;
 #endif // GGML_CUDA_FORCE_MMQ
 #else // __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA
     return 64;
 #endif // __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA
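In effect the patch swaps the two return statements under the GGML_CUDA_FORCE_MMQ check for Volta and newer: when MMQ is forced, the MMQ tile width along the batch dimension is allowed to go up to 128, and only when it is not forced is it capped at MMQ_DP4A_MAX_BATCH_SIZE, which brings the device-side value in line with its host-side counterpart, get_mmq_x_max_host. Below is a minimal sketch of how the branch reads after this patch; the enclosing NEW_MMA_AVAILABLE and HIP guards from the surrounding file are omitted, and the comments are an interpretation rather than part of the commit:

static constexpr __device__ int get_mmq_x_max_device() {
#if __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA
#ifdef GGML_CUDA_FORCE_MMQ
    // MMQ is forced for every batch size, so allow the full 128-wide tile.
    return 128;
#else // GGML_CUDA_FORCE_MMQ
    // Without the flag, larger batches are handled by the FP16 tensor-core path,
    // so the MMQ tile only needs to cover batches up to MMQ_DP4A_MAX_BATCH_SIZE.
    return MMQ_DP4A_MAX_BATCH_SIZE;
#endif // GGML_CUDA_FORCE_MMQ
#else // __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA
    return 64;
#endif // __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA
}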