Files
llama.cpp/ggml/src/ggml-cuda/template-instances/mmq-instance-iq4_nl.cu
2024-07-05 09:06:31 +02:00

6 lines
140 B
CUDA C++

// This file has been autogenerated by generate_cu_files.py, do not edit manually.
#include "../mmq.cuh"
// Instantiate the MMQ (quantized matrix-multiplication) kernel case for the
// GGML_TYPE_IQ4_NL quantization type. DECL_MMQ_CASE is presumably defined in
// mmq.cuh (included above) — the macro body is not visible from this file.
// NOTE(review): one such tiny .cu file appears to exist per quant type,
// presumably so the heavy template instantiations compile in parallel as
// separate translation units — confirm against generate_cu_files.py.
DECL_MMQ_CASE(GGML_TYPE_IQ4_NL);