llama.cpp/ggml/src/ggml-cuda/count-equal.cuh


#include "common.cuh"
#define CUDA_COUNT_EQUAL_CHUNK_SIZE 128
void ggml_cuda_count_equal(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
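// The header only exposes the launcher; the kernel itself lives in the matching
// count-equal.cu. The snippet below is a minimal, hypothetical sketch of how a
// chunked count-equal kernel could be written against this interface. It is not
// the llama.cpp implementation: it assumes int32 inputs, an int64_t result in
// dst that was zero-initialized before launch, and an illustrative kernel name
// and block size.

#include <cstdint>

static __global__ void count_equal_i32_sketch(
        const int32_t * __restrict__ x, const int32_t * __restrict__ y,
        int64_t * dst, const int64_t ne) {
    // Each thread scans one chunk of CUDA_COUNT_EQUAL_CHUNK_SIZE elements and
    // accumulates its matches locally, so only one atomic update hits dst.
    const int64_t i0 = (int64_t) (blockIdx.x*blockDim.x + threadIdx.x) * CUDA_COUNT_EQUAL_CHUNK_SIZE;
    const int64_t i1 = i0 + CUDA_COUNT_EQUAL_CHUNK_SIZE < ne ? i0 + CUDA_COUNT_EQUAL_CHUNK_SIZE : ne;

    int64_t nequal = 0;
    for (int64_t i = i0; i < i1; ++i) {
        nequal += x[i] == y[i];
    }

    // 64-bit atomicAdd is only defined for unsigned long long, hence the cast.
    atomicAdd((unsigned long long *) dst, (unsigned long long) nequal);
}

// Illustrative launch: one thread per chunk, 256 threads per block.
// const int64_t nchunks = (ne + CUDA_COUNT_EQUAL_CHUNK_SIZE - 1) / CUDA_COUNT_EQUAL_CHUNK_SIZE;
// const int64_t nblocks = (nchunks + 256 - 1) / 256;
// count_equal_i32_sketch<<<nblocks, 256, 0, stream>>>(x, y, dst, ne);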