rpc : use backend registry, support dl backends (#13304)
@@ -18,17 +18,19 @@
 #    include "kleidiai/kleidiai.h"
 #endif
 
-#if defined(__APPLE__)
-#include <sys/types.h>
-#include <sys/sysctl.h>
-#endif
-
 #if defined(_WIN32)
 #    define WIN32_LEAN_AND_MEAN
 #    ifndef NOMINMAX
 #        define NOMINMAX
 #    endif
 #    include <windows.h>
+#else
+#    include <unistd.h>
+#endif
+
+#if defined(__APPLE__)
+#    include <sys/sysctl.h>
+#    include <sys/types.h>
 #endif
 
 // ggml-backend interface
@@ -70,8 +72,10 @@ static ggml_backend_buffer_type_t * ggml_backend_cpu_device_get_extra_buffers_ty
 }
 
 static bool ggml_backend_cpu_is_extra_buffer_type(ggml_backend_buffer_type_t buft) {
-    for (auto extra : ggml_backend_cpu_get_extra_buffers_type()) {
-        if (extra && extra == buft) return true;
+    for (auto * extra : ggml_backend_cpu_get_extra_buffers_type()) {
+        if (extra && extra == buft) {
+            return true;
+        }
     }
     return false;
 }
@@ -330,9 +334,18 @@ static const char * ggml_backend_cpu_device_get_description(ggml_backend_dev_t d
 }
 
 static void ggml_backend_cpu_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) {
-    // TODO
-    *free = 0;
-    *total = 0;
+#ifdef _WIN32
+    MEMORYSTATUSEX status;
+    status.dwLength = sizeof(status);
+    GlobalMemoryStatusEx(&status);
+    *total = status.ullTotalPhys;
+    *free = status.ullAvailPhys;
+#else
+    long pages = sysconf(_SC_PHYS_PAGES);
+    long page_size = sysconf(_SC_PAGE_SIZE);
+    *total = pages * page_size;
+    *free = *total;
+#endif
 
     GGML_UNUSED(dev);
 }
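Before this hunk the CPU device reported zero free and total memory (the // TODO stub); now ggml_backend_dev_memory() on the CPU device returns physical RAM, using GlobalMemoryStatusEx on Windows and sysconf elsewhere, with free approximated as total on the non-Windows path. A minimal sketch of how a caller sees this (not part of the patch, assuming a build where the CPU backend is registered):

// Sketch (not from the patch): query the CPU device's memory through the
// generic device API that the hunk above implements.
#include <cstdio>
#include "ggml-backend.h"

int main() {
    ggml_backend_load_all(); // register built-in and dynamically loaded backends

    ggml_backend_dev_t dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
    if (!dev) {
        fprintf(stderr, "no CPU device registered\n");
        return 1;
    }

    size_t free_mem  = 0;
    size_t total_mem = 0;
    ggml_backend_dev_memory(dev, &free_mem, &total_mem);
    printf("%s: %zu MiB free / %zu MiB total\n", ggml_backend_dev_name(dev),
           free_mem / (1024 * 1024), total_mem / (1024 * 1024));
    return 0;
}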
@@ -1594,6 +1594,14 @@ static void rpc_serve_client(ggml_backend_t backend, const char * cache_dir,
 void ggml_backend_rpc_start_server(ggml_backend_t backend, const char * endpoint,
                                    const char * cache_dir,
                                    size_t free_mem, size_t total_mem) {
+    printf("Starting RPC server v%d.%d.%d\n",
+        RPC_PROTO_MAJOR_VERSION,
+        RPC_PROTO_MINOR_VERSION,
+        RPC_PROTO_PATCH_VERSION);
+    printf("  endpoint       : %s\n", endpoint);
+    printf("  local cache    : %s\n", cache_dir ? cache_dir : "n/a");
+    printf("  backend memory : %zu MB\n", free_mem / (1024 * 1024));
+
     std::string host;
     int port;
     if (!parse_endpoint(endpoint, host, port)) {
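This moves the startup banner (protocol version, endpoint, cache path, advertised memory) into ggml_backend_rpc_start_server() itself, so it is printed regardless of how the server is launched; the matching printf calls are dropped from the example's main() in a later hunk.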
@@ -1753,6 +1761,9 @@ static void * ggml_backend_rpc_get_proc_address(ggml_backend_reg_t reg, const ch
     if (std::strcmp(name, "ggml_backend_rpc_add_device") == 0) {
         return (void *)ggml_backend_rpc_add_device;
     }
+    if (std::strcmp(name, "ggml_backend_rpc_start_server") == 0) {
+        return (void *)ggml_backend_rpc_start_server;
+    }
     return NULL;
 
     GGML_UNUSED(reg);
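Exposing ggml_backend_rpc_start_server through the proc-address table lets callers reach it without linking the RPC backend directly, which is what makes a dynamically loaded RPC backend usable. A minimal sketch of the same lookup pattern, shown for the already exported ggml_backend_rpc_add_device (an illustration, not code from the patch):

// Sketch (not from the patch): resolve an RPC backend entry point by name,
// as a client would when the RPC backend is loaded dynamically.
#include "ggml-backend.h"
#include "ggml-rpc.h"   // declaration used by decltype below

static ggml_backend_dev_t add_rpc_device(const char * endpoint) {
    ggml_backend_reg_t reg = ggml_backend_reg_by_name("RPC");
    if (!reg) {
        return nullptr; // RPC backend not built in and not loaded
    }
    auto add_device_fn = (decltype(ggml_backend_rpc_add_device) *)
        ggml_backend_reg_get_proc_address(reg, "ggml_backend_rpc_add_device");
    if (!add_device_fn) {
        return nullptr;
    }
    return add_device_fn(endpoint); // e.g. "192.168.1.10:50052"
}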
@@ -28,12 +28,12 @@ else()
     add_subdirectory(tokenize)
     add_subdirectory(tts)
     add_subdirectory(llava)
+    if (GGML_RPC)
+        add_subdirectory(rpc)
+    endif()
     if (NOT GGML_BACKEND_DL)
         # these examples use the backends directly and cannot be built with dynamic loading
         add_subdirectory(cvector-generator)
         add_subdirectory(export-lora)
-        if (GGML_RPC)
-            add_subdirectory(rpc)
-        endif()
     endif()
 endif()
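The rpc example moves out of the if (NOT GGML_BACKEND_DL) block because, with the changes below, rpc-server no longer includes or calls any backend-specific API; everything goes through the ggml-backend registry, so the example can be built even when backends are compiled as dynamically loadable modules.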
@@ -2,24 +2,6 @@
 #define _SILENCE_CXX17_CODECVT_HEADER_DEPRECATION_WARNING
 #endif
 
-#include "ggml-cpu.h"
-
-#ifdef GGML_USE_CUDA
-#include "ggml-cuda.h"
-#endif
-
-#ifdef GGML_USE_METAL
-#include "ggml-metal.h"
-#endif
-
-#ifdef GGML_USE_VULKAN
-#include "ggml-vulkan.h"
-#endif
-
-#ifdef GGML_USE_SYCL
-#include "ggml-sycl.h"
-#endif
-
 #include "ggml-rpc.h"
 #ifdef _WIN32
 #  define NOMINMAX
@@ -154,6 +136,7 @@ struct rpc_server_params {
     size_t      backend_mem = 0;
     bool        use_cache   = false;
     int         n_threads   = std::max(1U, std::thread::hardware_concurrency()/2);
+    std::string device;
 };
 
 static void print_usage(int /*argc*/, char ** argv, rpc_server_params params) {
@@ -161,6 +144,7 @@ static void print_usage(int /*argc*/, char ** argv, rpc_server_params params) {
     fprintf(stderr, "options:\n");
     fprintf(stderr, "  -h, --help                show this help message and exit\n");
     fprintf(stderr, "  -t,      --threads        number of threads for the CPU backend (default: %d)\n", params.n_threads);
+    fprintf(stderr, "  -d DEV,  --device         device to use\n");
     fprintf(stderr, "  -H HOST, --host HOST      host to bind to (default: %s)\n", params.host.c_str());
     fprintf(stderr, "  -p PORT, --port PORT      port to bind to (default: %d)\n", params.port);
     fprintf(stderr, "  -m MEM,  --mem MEM        backend memory size (in MB)\n");
@@ -186,6 +170,22 @@ static bool rpc_server_params_parse(int argc, char ** argv, rpc_server_params &
                 fprintf(stderr, "error: invalid number of threads: %d\n", params.n_threads);
                 return false;
             }
+        } else if (arg == "-d" || arg == "--device") {
+            if (++i >= argc) {
+                return false;
+            }
+            params.device = argv[i];
+            if (ggml_backend_dev_by_name(params.device.c_str()) == nullptr) {
+                fprintf(stderr, "error: unknown device: %s\n", params.device.c_str());
+                fprintf(stderr, "available devices:\n");
+                for (size_t i = 0; i < ggml_backend_dev_count(); i++) {
+                    auto * dev = ggml_backend_dev_get(i);
+                    size_t free, total;
+                    ggml_backend_dev_memory(dev, &free, &total);
+                    printf("  %s: %s (%zu MiB, %zu MiB free)\n", ggml_backend_dev_name(dev), ggml_backend_dev_description(dev), total / 1024 / 1024, free / 1024 / 1024);
+                }
+                return false;
+            }
         } else if (arg == "-p" || arg == "--port") {
             if (++i >= argc) {
                 return false;
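The new -d/--device option selects a device by its registry name; an unknown name makes the server list the available devices (name, description, memory) and exit. Device names depend on which backends are actually loaded, so an invocation might look like rpc-server -d CUDA0 or rpc-server -d CPU (names here are illustrative).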
@@ -214,66 +214,53 @@ static bool rpc_server_params_parse(int argc, char ** argv, rpc_server_params &
 }
 
 static ggml_backend_t create_backend(const rpc_server_params & params) {
-    ggml_backend_t backend = NULL;
-#ifdef GGML_USE_CUDA
-    fprintf(stderr, "%s: using CUDA backend\n", __func__);
-    backend = ggml_backend_cuda_init(0); // init device 0
-    if (!backend) {
-        fprintf(stderr, "%s: ggml_backend_cuda_init() failed\n", __func__);
-    }
-#elif GGML_USE_METAL
-    fprintf(stderr, "%s: using Metal backend\n", __func__);
-    backend = ggml_backend_metal_init();
-    if (!backend) {
-        fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__);
-    }
-#elif GGML_USE_VULKAN
-    fprintf(stderr, "%s: using Vulkan backend\n", __func__);
-    backend = ggml_backend_vk_init(0); // init device 0
-    if (!backend) {
-        fprintf(stderr, "%s: ggml_backend_vulkan_init() failed\n", __func__);
-    }
-#elif GGML_USE_SYCL
-    fprintf(stderr, "%s: using SYCL backend\n", __func__);
-    backend = ggml_backend_sycl_init(0); // init device 0
-    if (!backend) {
-        fprintf(stderr, "%s: ggml_backend_sycl_init() failed\n", __func__);
-    }
-#endif
-
-    // if there aren't GPU Backends fallback to CPU backend
-    if (!backend) {
-        fprintf(stderr, "%s: using CPU backend\n", __func__);
-        backend = ggml_backend_cpu_init();
-        ggml_backend_cpu_set_n_threads(backend, params.n_threads);
-    }
+    ggml_backend_t backend = nullptr;
+
+    if (!params.device.empty()) {
+        ggml_backend_dev_t dev = ggml_backend_dev_by_name(params.device.c_str());
+        if (dev) {
+            backend = ggml_backend_dev_init(dev, nullptr);
+            if (!backend) {
+                fprintf(stderr, "Failed to create backend for device %s\n", params.device.c_str());
+                return nullptr;
+            }
+        }
+    }
+
+    // try to initialize a GPU backend first
+    if (!backend) {
+        backend = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_GPU, nullptr);
+    }
+
+    // if there aren't GPU backends fallback to CPU backend
+    if (!backend) {
+        backend = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr);
+    }
+
+    fprintf(stderr, "%s: using %s backend\n", __func__, ggml_backend_name(backend));
+
+    // set the number of threads
+    ggml_backend_dev_t dev = ggml_backend_get_device(backend);
+    ggml_backend_reg_t reg = dev ? ggml_backend_dev_backend_reg(dev) : nullptr;
+    if (reg) {
+        auto ggml_backend_set_n_threads_fn = (ggml_backend_set_n_threads_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads");
+        if (ggml_backend_set_n_threads_fn) {
+            ggml_backend_set_n_threads_fn(backend, params.n_threads);
+        }
+    }
+
     return backend;
 }
 
-static void get_backend_memory(size_t * free_mem, size_t * total_mem) {
-#ifdef GGML_USE_CUDA
-    ggml_backend_cuda_get_device_memory(0, free_mem, total_mem);
-#elif GGML_USE_VULKAN
-    ggml_backend_vk_get_device_memory(0, free_mem, total_mem);
-#elif GGML_USE_SYCL
-    ggml_backend_sycl_get_device_memory(0, free_mem, total_mem);
-#else
-#ifdef _WIN32
-    MEMORYSTATUSEX status;
-    status.dwLength = sizeof(status);
-    GlobalMemoryStatusEx(&status);
-    *total_mem = status.ullTotalPhys;
-    *free_mem = status.ullAvailPhys;
-#else
-    long pages = sysconf(_SC_PHYS_PAGES);
-    long page_size = sysconf(_SC_PAGE_SIZE);
-    *total_mem = pages * page_size;
-    *free_mem = *total_mem;
-#endif
-#endif
+static void get_backend_memory(ggml_backend_t backend, size_t * free_mem, size_t * total_mem) {
+    ggml_backend_dev_t dev = ggml_backend_get_device(backend);
+    GGML_ASSERT(dev != nullptr);
+    ggml_backend_dev_memory(dev, free_mem, total_mem);
 }
 
 int main(int argc, char * argv[]) {
+    ggml_backend_load_all();
+
     rpc_server_params params;
     if (!rpc_server_params_parse(argc, argv, params)) {
         fprintf(stderr, "Invalid parameters\n");
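create_backend() drops all per-backend #ifdefs: an explicitly requested device is initialized through ggml_backend_dev_init(), otherwise the registry picks a GPU device and falls back to CPU, and the thread count is applied via the generic ggml_backend_set_n_threads proc address. get_backend_memory() likewise asks the backend's device instead of hard-coding CUDA/Vulkan/SYCL/host paths. A standalone sketch of the same registry-driven flow (an illustration assuming at least one backend is available, not code from the patch):

// Sketch (not from the patch): registry-driven backend selection,
// mirroring the flow of create_backend() above.
#include <cstdio>
#include "ggml-backend.h"

int main() {
    ggml_backend_load_all(); // discover built-in and dynamically loaded backends

    // prefer a GPU device, fall back to the CPU backend
    ggml_backend_t backend = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_GPU, nullptr);
    if (!backend) {
        backend = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr);
    }
    if (!backend) {
        fprintf(stderr, "no usable backend\n");
        return 1;
    }
    fprintf(stderr, "using %s backend\n", ggml_backend_name(backend));

    // set the thread count through the backend's registry, if it exposes the hook
    ggml_backend_dev_t dev = ggml_backend_get_device(backend);
    ggml_backend_reg_t reg = dev ? ggml_backend_dev_backend_reg(dev) : nullptr;
    if (reg) {
        auto set_n_threads_fn = (ggml_backend_set_n_threads_t)
            ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads");
        if (set_n_threads_fn) {
            set_n_threads_fn(backend, 4); // 4 threads chosen arbitrarily for the example
        }
    }

    ggml_backend_free(backend);
    return 0;
}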
@@ -301,7 +288,7 @@ int main(int argc, char * argv[]) {
         free_mem = params.backend_mem;
         total_mem = params.backend_mem;
     } else {
-        get_backend_memory(&free_mem, &total_mem);
+        get_backend_memory(backend, &free_mem, &total_mem);
     }
     const char * cache_dir = nullptr;
     std::string cache_dir_str;
@@ -313,14 +300,21 @@ int main(int argc, char * argv[]) {
         }
         cache_dir = cache_dir_str.c_str();
     }
-    printf("Starting RPC server v%d.%d.%d\n",
-        RPC_PROTO_MAJOR_VERSION,
-        RPC_PROTO_MINOR_VERSION,
-        RPC_PROTO_PATCH_VERSION);
-    printf("  endpoint       : %s\n", endpoint.c_str());
-    printf("  local cache    : %s\n", cache_dir ? cache_dir : "n/a");
-    printf("  backend memory : %zu MB\n", free_mem / (1024 * 1024));
-    ggml_backend_rpc_start_server(backend, endpoint.c_str(), cache_dir, free_mem, total_mem);
+
+    ggml_backend_reg_t reg = ggml_backend_reg_by_name("RPC");
+    if (!reg) {
+        fprintf(stderr, "Failed to find RPC backend\n");
+        return 1;
+    }
+
+    auto start_server_fn = (decltype(ggml_backend_rpc_start_server)*) ggml_backend_reg_get_proc_address(reg, "ggml_backend_rpc_start_server");
+    if (!start_server_fn) {
+        fprintf(stderr, "Failed to obtain RPC backend start server function\n");
+        return 1;
+    }
+
+    start_server_fn(backend, endpoint.c_str(), cache_dir, free_mem, total_mem);
+
     ggml_backend_free(backend);
     return 0;
 }
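Taken together with ggml_backend_load_all() at the top of main(), resolving ggml_backend_rpc_start_server through ggml_backend_reg_get_proc_address means the example no longer needs the RPC backend at link time: when ggml is built with GGML_BACKEND_DL, the backend is discovered and its entry points are looked up at run time, which is the "support dl backends" part of this change.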