diff --git a/src/llama-hparams.cpp b/src/llama-hparams.cpp
index ac05db46e..1499eb08a 100644
--- a/src/llama-hparams.cpp
+++ b/src/llama-hparams.cpp
@@ -2,10 +2,6 @@
 
 #include "ggml.h"
 
-llama_hparams::llama_hparams() {
-    swa_layers.fill(false);
-}
-
 void llama_hparams::set_swa_pattern(uint32_t n_pattern) {
     for (uint32_t il = 0; il < n_layer; ++il) {
         swa_layers[il] = n_pattern == 0 || (il % n_pattern < (n_pattern - 1));
diff --git a/src/llama-hparams.h b/src/llama-hparams.h
index fb638dce7..2d72eab18 100644
--- a/src/llama-hparams.h
+++ b/src/llama-hparams.h
@@ -145,8 +145,6 @@ struct llama_hparams {
     enum llama_rope_type         rope_type               = LLAMA_ROPE_TYPE_NONE;
     enum llama_rope_scaling_type rope_scaling_type_train = LLAMA_ROPE_SCALING_TYPE_NONE;
 
-    llama_hparams();
-
     // this value n_pattern means that every nth layer is dense (i.e. non-SWA)
     // note that if n_pattern == 0, all layers are SWA
     //           if n_pattern == 1, all layers are dense
diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index 227a1bcba..81b052e1b 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -463,11 +463,14 @@ void llama_model::load_hparams(llama_model_loader & ml) {
         GGML_ASSERT(hparams.n_expert_used == 0);
     }
 
-    // zero-out the array hparams
     std::fill(hparams.n_head_arr.begin(),    hparams.n_head_arr.end(),    0);
     std::fill(hparams.n_head_kv_arr.begin(), hparams.n_head_kv_arr.end(), 0);
     std::fill(hparams.n_ff_arr.begin(),      hparams.n_ff_arr.end(),      0);
 
+    std::fill(hparams.rope_sections.begin(), hparams.rope_sections.end(), 0);
+
+    std::fill(hparams.swa_layers.begin(), hparams.swa_layers.end(), 0);
+
     ml.get_key_or_arr(LLM_KV_FEED_FORWARD_LENGTH,  hparams.n_ff_arr,      hparams.n_layer, false);
     ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT, hparams.n_head_arr,    hparams.n_layer, false);
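
For reference, the net effect of the patch is to drop the `llama_hparams` constructor (whose only job was `swa_layers.fill(false)`) and instead zero the per-layer arrays explicitly in `load_hparams`, next to the existing `std::fill` calls. Below is a minimal, self-contained sketch of that pattern; `hparams_sketch`, `load_hparams_sketch`, and `MAX_LAYERS` are hypothetical stand-ins for illustration, not names from the repository.

```cpp
#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for llama_hparams: the struct defines no constructor,
// so its per-layer arrays start out uninitialized and must be reset by the loader.
constexpr std::size_t MAX_LAYERS = 512;

struct hparams_sketch {
    uint32_t n_layer = 0;

    std::array<uint32_t, MAX_LAYERS> n_head_arr;
    std::array<bool,     MAX_LAYERS> swa_layers;
};

// Mirrors the pattern used in load_hparams(): zero every array up front,
// then let the per-architecture key reads overwrite the defaults.
static void load_hparams_sketch(hparams_sketch & hp) {
    std::fill(hp.n_head_arr.begin(), hp.n_head_arr.end(), 0);
    std::fill(hp.swa_layers.begin(), hp.swa_layers.end(), false);

    // ... key/array reads would follow here in the real loader ...
}

int main() {
    hparams_sketch hp;
    load_hparams_sketch(hp);
    std::printf("swa_layers[0] = %d\n", hp.swa_layers[0] ? 1 : 0); // prints 0
    return 0;
}
```

One way to read the design choice: with all resets grouped in the loader, the newly zeroed `rope_sections` and `swa_layers` are handled the same way as `n_head_arr`, `n_head_kv_arr`, and `n_ff_arr`, rather than relying on a constructor that covered only one of the arrays.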