mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-06-30 12:55:17 +00:00)
metal : add f16 support
@@ -961,7 +961,6 @@ static void llama_model_load_internal(
     model.hparams = ml->file_loaders.at(0)->hparams;
     llama_file_version file_version = ml->file_loaders.at(0)->file_version;
     auto & hparams = model.hparams;
-    uint32_t n_ff = ((2*(4*hparams.n_embd)/3 + hparams.n_mult - 1)/hparams.n_mult)*hparams.n_mult;

     {
         switch (hparams.n_layer) {
@@ -975,6 +974,8 @@ static void llama_model_load_internal(
         hparams.n_ctx = n_ctx;
     }

+    const uint32_t n_ff = ((2*(4*hparams.n_embd)/3 + hparams.n_mult - 1)/hparams.n_mult)*hparams.n_mult;
+
     {
         fprintf(stderr, "%s: format = %s\n", __func__, llama_file_version_name(file_version));
         fprintf(stderr, "%s: n_vocab = %u\n", __func__, hparams.n_vocab);
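For reference, a minimal standalone sketch of the n_ff arithmetic this diff moves below the hparams block. The helper name compute_n_ff is hypothetical, and the example values (n_embd = 4096, n_mult = 256, the LLaMA-7B defaults) are illustrative rather than taken from the commit:

#include <cstdint>
#include <cstdio>

// Same integer arithmetic as the diff's n_ff line: take 2/3 of 4*n_embd,
// then round the result up to the nearest multiple of n_mult.
static uint32_t compute_n_ff(uint32_t n_embd, uint32_t n_mult) {
    return ((2*(4*n_embd)/3 + n_mult - 1)/n_mult)*n_mult;
}

int main() {
    // 2*(4*4096)/3 = 10922, rounded up to a multiple of 256 -> 11008
    printf("n_ff = %u\n", (unsigned) compute_n_ff(4096, 256));
    return 0;
}

Rounding up to a multiple of n_mult keeps the feed-forward width aligned; with the values above it yields 11008, the familiar LLaMA-7B feed-forward size. Moving the computation after the switch on hparams.n_layer and marking it const reads as tidy-up: the value is derived once the hparams block has finished and can no longer be reassigned.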