mirror of https://github.com/ggml-org/llama.cpp.git
vocab : minor [no ci]
@@ -1357,7 +1357,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
 
         // read vocab size from metadata
         uint32_t n_tokens = 0;
         if (ml.get_key(LLM_KV_VOCAB_SIZE, n_tokens, false)) {
-            LLAMA_LOG_WARN("%s: adding %d dummy tokens\n", __func__, n_tokens);
+            LLAMA_LOG_WARN("%s: adding %u dummy tokens\n", __func__, n_tokens);
             id_to_token.resize(n_tokens);
         }
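The only functional change is the printf-style format specifier in the warning: n_tokens is a uint32_t, so the unsigned %u conversion matches it rather than the signed %d. A minimal standalone sketch (not part of the commit; the values are made up) illustrating the distinction:

// Sketch: printing a uint32_t with printf-style formatting.
// "%d" expects a signed int; for an unsigned 32-bit value, "%u"
// (or the fixed-width PRIu32 macro) is the matching specifier.
#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
    uint32_t n_tokens = 32000;
    std::printf("adding %u dummy tokens\n", n_tokens);          // plain unsigned specifier
    std::printf("adding %" PRIu32 " dummy tokens\n", n_tokens); // strictly portable form
    return 0;
}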