fix 32B model

This commit is contained in:
Xuan Son Nguyen
2025-04-30 22:04:06 +02:00
parent 474933e252
commit 651752f1ae

View File

@ -2579,6 +2579,11 @@ class Qwen2VLVisionModel(VisionModel):
elif self.global_config['model_type'] == 'qwen2_5_vl':
self.gguf_writer.add_vision_projector_type(gguf.VisionProjectorType.QWEN25VL)
self.gguf_writer.add_vision_use_silu(True)
out_hidden_size = hparams.get("out_hidden_size")
if out_hidden_size == 5120:
# 32B model does not have n_wa_pattern, the other models do
self.gguf_writer.add_vision_n_wa_pattern(0)
else:
# find n_wa_pattern (window attention pattern)
fullatt_block_indexes = hparams.get("fullatt_block_indexes")
assert fullatt_block_indexes is not None, "fullatt_block_indexes is required for qwen2_5_vl"