Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-08-18 05:56:00 -04:00)
mtmd : add vision support for Mistral Small 3.1 (#13231)
* convert ok
* load ok, missing patch merger
* ah sheet it works
* update llava/readme
* add test
* fix test
@@ -1001,6 +1001,10 @@ class TensorNameMap:
             "multi_modal_projector.mm_input_projection",
         ),

+        MODEL_TENSOR.V_MM_INP_NORM: (
+            "multi_modal_projector.norm",
+        ),
+
         MODEL_TENSOR.V_MM_SOFT_EMB_NORM: (
             "multi_modal_projector.mm_soft_emb_norm",
         ),
@@ -1052,6 +1056,10 @@ class TensorNameMap:
         MODEL_TENSOR.V_TOK_EMBD_IMG_BREAK: (
             "v.token_embd.img_break", # for pixtral, this is a generated vector
         ),
+
+        MODEL_TENSOR.V_MM_PATCH_MERGER: (
+            "multi_modal_projector.patch_merger.merging_layer", # mistral small 3.1
+        ),
     }

     # architecture-specific block mappings
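These hunks extend the shared tensor-name table so the Hugging Face to GGUF conversion can recognize Mistral Small 3.1's multimodal-projector tensors (the projector input norm, the soft-embedding norm, and the patch-merger merging layer) by their checkpoint names. Below is a minimal sketch of how such a reverse lookup works, using a trimmed-down stand-in for gguf-py's TensorNameMap; the enum member names and Hugging Face names are taken from the diff, while the helper function and variable names here are illustrative only, not the real gguf-py API:

from enum import Enum, auto

# Hypothetical, trimmed-down stand-in for gguf-py's MODEL_TENSOR enum.
class MODEL_TENSOR(Enum):
    V_MM_INP_NORM      = auto()
    V_MM_SOFT_EMB_NORM = auto()
    V_MM_PATCH_MERGER  = auto()

# Forward table: GGUF tensor type -> candidate Hugging Face names (values from the diff).
block_mappings_cfg: dict[MODEL_TENSOR, tuple[str, ...]] = {
    MODEL_TENSOR.V_MM_INP_NORM:      ("multi_modal_projector.norm",),
    MODEL_TENSOR.V_MM_SOFT_EMB_NORM: ("multi_modal_projector.mm_soft_emb_norm",),
    MODEL_TENSOR.V_MM_PATCH_MERGER:  ("multi_modal_projector.patch_merger.merging_layer",),
}

# Reverse index, the direction a converter actually needs: HF name -> tensor type.
hf_to_tensor: dict[str, MODEL_TENSOR] = {
    hf_name: tensor
    for tensor, hf_names in block_mappings_cfg.items()
    for hf_name in hf_names
}

def map_tensor(hf_name: str) -> MODEL_TENSOR | None:
    # Return the canonical tensor type for a checkpoint name, or None if unmapped.
    return hf_to_tensor.get(hf_name)

print(map_tensor("multi_modal_projector.patch_merger.merging_layer"))
# MODEL_TENSOR.V_MM_PATCH_MERGER

In the real TensorNameMap the converter typically also asks the lookup to try suffixes such as ".weight" and ".bias" on the base name, so a single table entry covers every parameter of that module; the sketch above omits this for brevity.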