mirror of https://github.com/ggml-org/llama.cpp.git
synced 2025-08-15 12:42:40 -04:00
* move ggml-cpu-aarch64 to repack
* split quantize_row_q8_0/1
* split helper functions
* split ggml_vec_dot_q4_0_q8_0
* split ggml_vec_dot_q4_1_q8_1
* split ggml_vec_dot_q5_0_q8_0
* split ggml_vec_dot_q5_1_q8_1
* split ggml_vec_dot_q8_0_q8_0
* split ggml_vec_dot_tq1_0_q8_K
* split ggml_vec_dot_tq2_0_q8_K
* split ggml_vec_dot_q2_K_q8_K
* split ggml_vec_dot_q3_K_q8_K
* split ggml_vec_dot_q4_K_q8_K
* split ggml_vec_dot_q5_K_q8_K
* split ggml_vec_dot_q6_K_q8_K
* split ggml_vec_dot_iq2_xxs_q8_K
* split ggml_vec_dot_iq2_xs_q8_K
* split ggml_vec_dot_iq2_s_q8_K
* split ggml_vec_dot_iq3_xxs_q8_K
* split ggml_vec_dot_iq3_s_q8_K
* split ggml_vec_dot_iq1_s_q8_K
* split ggml_vec_dot_iq1_m_q8_K
* split ggml_vec_dot_iq4_nl_q8_0
* split ggml_vec_dot_iq4_xs_q8_K
* fix typos
* fix missing prototypes
* rename ggml-cpu-quants.c
* rename ggml-cpu-traits
* rename arm folder
* move cpu-feats-x86.cpp
* rename ggml-cpu-hbm
* update arm detection macro in quants.c
* move iq quant tables
* split ggml_quantize_mat_q8_0/K
* split ggml_gemv_*
* split ggml_gemm_*
* rename namespace aarch64 to repack
* use weak aliases to replace test macros (see the sketch after this list)
* rename GGML_CPU_AARCH64 to GGML_CPU_REPACK
* rename more aarch64 to repack
* clean up rebase leftover
* fix compilation errors
* remove trailing spaces
* try to fix clang compilation errors
* try to fix clang compilation errors again
* try to fix clang compilation errors, 3rd attempt
* try to fix clang compilation errors, 4th attempt
* try to fix clang compilation errors, 5th attempt
* try to fix clang compilation errors, 6th attempt
* try to fix clang compilation errors, 7th attempt
* try to fix clang compilation errors, 8th attempt
* try to fix clang compilation errors, 9th attempt
* more cleanup
* fix compilation errors
* fix apple targets
* fix a typo in arm version of ggml_vec_dot_q4_K_q8_K

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
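The "weak aliases" bullet refers to the general linker technique of giving a generic fallback a weak symbol that an architecture-specific object can override with a strong definition, instead of selecting implementations with compile-time test macros. A minimal, hedged sketch of that pattern follows; the function names are hypothetical and not symbols from the llama.cpp tree, and the attribute syntax assumes GCC/Clang on an ELF target.

// Hedged illustration of the weak-alias pattern (hypothetical names).
// extern "C" keeps the assembler names unmangled so the alias() target
// string matches the generic definition below.
#include <cstddef>

extern "C" {

// Reference implementation, always compiled.
void vec_scale_generic(float * dst, const float * src, float k, size_t n) {
    for (size_t i = 0; i < n; i++) {
        dst[i] = src[i] * k;
    }
}

// Weak alias: vec_scale resolves to the generic version unless an
// architecture-specific object file provides a strong definition,
// which removes the need for #ifdef-style test macros at call sites.
void vec_scale(float * dst, const float * src, float k, size_t n)
    __attribute__((weak, alias("vec_scale_generic")));

}  // extern "C"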
56 lines
2.0 KiB
C++
#ifdef GGML_USE_CPU_HBM

#include "ggml-backend.h"
#include "ggml-backend-impl.h"
#include "ggml-cpu.h"
#include "ggml-impl.h"

#include "hbm.h"

// buffer type HBM

#include <hbwmalloc.h>

static const char * ggml_backend_cpu_hbm_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
    return "CPU_HBM";

    GGML_UNUSED(buft);
}

static void ggml_backend_cpu_hbm_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    hbw_free(buffer->context);
}

static ggml_backend_buffer_t ggml_backend_cpu_hbm_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft,
                                                                           size_t size) {
    void * ptr;
    int result = hbw_posix_memalign(&ptr, ggml_backend_cpu_buffer_type_get_alignment(buft), size);
    if (result != 0) {
        GGML_LOG_ERROR("failed to allocate HBM buffer of size %zu\n", size);
        return NULL;
    }

    ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size);
    buffer->buft = buft;
    buffer->iface.free_buffer = ggml_backend_cpu_hbm_buffer_free_buffer;

    return buffer;
}

ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type(void) {
    static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_hbm = {
        /* .iface = */ {
            /* .get_name         = */ ggml_backend_cpu_hbm_buffer_type_get_name,
            /* .alloc_buffer     = */ ggml_backend_cpu_hbm_buffer_type_alloc_buffer,
            /* .get_alignment    = */ ggml_backend_cpu_buffer_type_get_alignment,
            /* .get_max_size     = */ nullptr, // defaults to SIZE_MAX
            /* .get_alloc_size   = */ nullptr, // defaults to ggml_nbytes
            /* .is_host          = */ ggml_backend_cpu_buffer_type_is_host,
        },
        /* .context = */ nullptr,
    };

    return &ggml_backend_cpu_buffer_type_hbm;
}

#endif
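For context, a hedged usage sketch (not part of hbm.cpp): allocating a buffer from this HBM buffer type through the generic ggml-backend API. It assumes a build configured with GGML_USE_CPU_HBM and the memkind library available, and that the translation unit can see the declaration of ggml_backend_cpu_hbm_buffer_type() (in the current tree it lives in the internal ggml-cpu/hbm.h header); the surrounding setup is illustrative, not a prescribed workflow.

// Hedged usage sketch: allocate and release an HBM-backed buffer.
#include "ggml-backend.h"
#include "hbm.h"

#include <cstdio>

int main() {
    ggml_backend_buffer_type_t buft = ggml_backend_cpu_hbm_buffer_type();

    // 1 MiB buffer; under the hood this goes through hbw_posix_memalign().
    ggml_backend_buffer_t buf = ggml_backend_buft_alloc_buffer(buft, 1024 * 1024);
    if (buf == NULL) {
        fprintf(stderr, "HBM allocation failed (no HBM nodes available?)\n");
        return 1;
    }

    void * base = ggml_backend_buffer_get_base(buf);  // host-visible pointer
    printf("allocated %zu bytes at %p from %s\n",
           ggml_backend_buffer_get_size(buf), base,
           ggml_backend_buft_name(buft));

    ggml_backend_buffer_free(buf);  // routed to hbw_free() via iface.free_buffer
    return 0;
}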