mirror of https://github.com/ggml-org/llama.cpp.git
Vulkan Shader Refactor, Memory Debugging Option (#7947)
* Refactor shaders, extract GLSL code from ggml_vk_generate_shaders.py into vulkan-shaders directory

* Improve debug log code

* Add memory debug output option

* Fix flake8

* Fix unnecessary high llama-3 VRAM use
vulkan-shaders/generic_head.comp (new file, +9 lines)
@@ -0,0 +1,9 @@
+#extension GL_EXT_shader_16bit_storage : require
+
+layout (push_constant) uniform parameter
+{
+    uint KX;
+    uint KY;
+    float param1;
+    float param2;
+} p;
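For context, a minimal sketch of how a compute shader in the vulkan-shaders directory might consume this header. The include line, binding layout, and the scale-and-bias body are illustrative assumptions for this sketch, not code from this commit; include resolution is assumed to happen in the shader build step (ggml_vk_generate_shaders.py).

#version 450

// Assumed include of the push-constant header added in this commit.
#include "generic_head.comp"

layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in;

// Hypothetical bindings for a simple element-wise kernel (illustration only).
layout (binding = 0) readonly  buffer X { float data_x[]; };
layout (binding = 1) writeonly buffer D { float data_d[]; };

void main() {
    const uint i = gl_GlobalInvocationID.x;
    if (i >= p.KX) {
        return;
    }
    // Example use of the push constants from generic_head.comp:
    // scale each element by param1 and add param2.
    data_d[i] = data_x[i] * p.param1 + p.param2;
}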