Mirror of https://github.com/ggml-org/llama.cpp.git
sycl : Overcoming workaround for mmap() allocation on Windows (#13482)
* Remove mmap workaround on Windows

  After some testing I found that mmap is supported on Windows and for many GPUs on Linux. Therefore I removed the workaround for Windows, since it is not necessary.

* Update llama-bench README

  The SYCL backend introduced a workaround that allows llama-bench to run without specifying the `--mmp 0` flag.
@@ -385,16 +385,17 @@ static void ggml_backend_sycl_buffer_set_tensor(ggml_backend_buffer_t buffer,
     ggml_backend_sycl_buffer_context * ctx = ( ggml_backend_sycl_buffer_context *)buffer->context;
     ggml_sycl_set_device(ctx->device);
     auto stream = &(dpct::dev_mgr::instance().get_device(ctx->device).default_queue());
-    SYCL_CHECK(
-        CHECK_TRY_ERROR(dpct::dev_mgr::instance().get_device(ctx->device).queues_wait_and_throw()));
-#ifndef _WIN32
+    SYCL_CHECK(CHECK_TRY_ERROR(dpct::dev_mgr::instance().get_device(ctx->device).queues_wait_and_throw()));
     // Note: Use host buffer to save the data from mmap(), then copy to device. It's workaround for mmap() issue on PVC GPU.
     // This function will be called during load model from disk. Use memory buffer replace dynamic won't save more time and brings potential memory leak risk here.
-    char* host_buf = (char*)malloc(size);
+    char * host_buf = (char *) malloc(size);
     memcpy(host_buf, data, size);
-    SYCL_CHECK(
-        CHECK_TRY_ERROR((*stream).memcpy((char *)tensor->data + offset, host_buf, size)
-                             .wait()));
+    SYCL_CHECK(CHECK_TRY_ERROR((*stream).memcpy((char *) tensor->data + offset, host_buf, size).wait()));
     free(host_buf);
-#else
-    SYCL_CHECK(CHECK_TRY_ERROR((*stream).memcpy((char *) tensor->data + offset, data, size).wait()));
-#endif
 }
 catch (sycl::exception const &exc) {
     std::cerr << exc.what() << "Exception caught at file:" << __FILE__
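In effect, the host-buffer staging path that used to be guarded by `#ifndef _WIN32` is now taken on every platform: bytes coming from the mmap()-ed model file are first copied into an ordinary heap allocation, and only that buffer is handed to the device copy, so the GPU transfer never reads the mapped pages directly. Below is a minimal standalone sketch of that pattern using a bare `sycl::queue` instead of the backend's `dpct`/`SYCL_CHECK` plumbing; the function name and signature are illustrative, not the backend's API.

```cpp
#include <sycl/sycl.hpp>
#include <cstdlib>
#include <cstring>

// Sketch of the staging copy this change keeps: route mmap()-ed bytes
// through a malloc'ed buffer so the device transfer never touches the
// mapped pages directly (the PVC mmap() issue noted in the hunk above).
// Illustrative helper, not the actual ggml-sycl entry point.
static void set_tensor_staged(sycl::queue & q, void * dst, const void * mmapped_src, size_t size) {
    char * host_buf = (char *) malloc(size);
    memcpy(host_buf, mmapped_src, size);   // read from the mapped file region on the host
    q.memcpy(dst, host_buf, size).wait();  // blocking host-to-device copy from the staging buffer
    free(host_buf);
}
```

As the original comment notes, this path runs while the model is loaded from disk, where the extra host memcpy is cheap next to the I/O; keeping a persistent buffer instead of malloc/free would not save measurable time and would add leak risk.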
@@ -80,10 +80,6 @@ Using the `-d <n>` option, each test can be run at a specified context depth, pr
 
 For a description of the other options, see the [main example](../main/README.md).
 
-Note:
-
-- When using SYCL backend, there would be hang issue in some cases. Please set `--mmp 0`.
-
 ## Examples
 
 ### Text generation with different models
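For context, the removed note was why SYCL users had to disable memory mapping. Roughly, with a placeholder model path and the flag spelled as in the removed note (a sketch of usage, not taken from the README's examples):

```sh
# previously required on the SYCL backend to avoid the hang
./llama-bench -m models/model.gguf --mmp 0

# after this change, the default (mmap enabled) is expected to work
./llama-bench -m models/model.gguf
```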