Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-06-28 20:25:20 +00:00)
fix 32B model
This commit is contained in:
@@ -2579,6 +2579,11 @@ class Qwen2VLVisionModel(VisionModel):
         elif self.global_config['model_type'] == 'qwen2_5_vl':
             self.gguf_writer.add_vision_projector_type(gguf.VisionProjectorType.QWEN25VL)
             self.gguf_writer.add_vision_use_silu(True)
             out_hidden_size = hparams.get("out_hidden_size")
             if out_hidden_size == 5120:
                 # 32B model does not have n_wa_pattern, the other models do
                 self.gguf_writer.add_vision_n_wa_pattern(0)
             else:
                 # find n_wa_pattern (window attention pattern)
                 fullatt_block_indexes = hparams.get("fullatt_block_indexes")
                 assert fullatt_block_indexes is not None, "fullatt_block_indexes is required for qwen2_5_vl"
|
Reference in New Issue
Block a user