context : initial abstraction

ggml-ci
commit 02ef4be975
parent 2cd8a903c8
Author: Georgi Gerganov
Date: 2025-02-11 11:25:18 +02:00
3 changed files with 1471 additions and 1128 deletions

File diff suppressed because it is too large


@ -16,38 +16,245 @@
using llama_loras = std::unordered_map<struct llama_adapter_lora *, float>;
struct llama_batch_manager_i;
// TODO: make implementation details private
// TODO: become abstract base class, split the current implementation into different child classes
struct llama_context {
// TODO: tmp until llama-model starts implementing the graph build function
typedef std::function<ggml_cgraph *(llama_context &, const llama_ubatch &, bool worst_case)> build_graph_callback;
llama_context(
const llama_model & model,
const llama_context_params & params,
build_graph_callback && cb_build_graph);
virtual ~llama_context() = default;
const struct llama_model & model;
llama_context(const llama_model & model);
virtual ~llama_context();
virtual void synchronize();
virtual uint32_t n_ctx() const = 0;
virtual uint32_t n_batch() const = 0;
virtual uint32_t n_ubatch() const = 0;
virtual uint32_t n_seq_max() const = 0;
virtual llama_kv_cache * get_kv_self() = 0;
virtual const llama_kv_cache * get_kv_self() const = 0;
virtual void kv_self_update() = 0;
virtual enum llama_pooling_type pooling_type() const = 0;
virtual float * get_logits() = 0;
virtual float * get_logits_ith(int32_t i) = 0;
virtual float * get_embeddings() = 0;
virtual float * get_embeddings_ith(int32_t i) = 0;
virtual float * get_embeddings_seq(llama_seq_id seq_id) = 0;
int64_t n_pos_per_token() const; // vision
virtual ggml_context_ptr init();
virtual int decode(llama_batch & inp_batch) = 0;
virtual int encode(llama_batch & inp_batch) = 0;
// graph build API (generic)
// do mat_mul, while optionally apply lora
virtual ggml_tensor * build_lora_mm(
ggml_context * ctx0,
ggml_tensor * w,
ggml_tensor * cur);
// do mat_mul_id, while optionally apply lora
virtual ggml_tensor * build_lora_mm_id(
ggml_context * ctx0,
ggml_tensor * w, // struct ggml_tensor * as
ggml_tensor * cur, // struct ggml_tensor * b
ggml_tensor * ids);
// graph build API (context-specific)
virtual ggml_tensor * build_inp_embd(
ggml_context * ctx0,
ggml_tensor * tok_embd,
const llama_ubatch & ubatch) = 0;
virtual ggml_tensor * build_inp_pos(
ggml_context * ctx0,
int32_t n_tokens) = 0;
virtual ggml_tensor * build_inp_out_ids(
ggml_context * ctx0,
int32_t n_tokens,
bool worst_case) = 0;
virtual ggml_tensor * build_inp_mean(
ggml_context * ctx0,
int32_t n_tokens) = 0;
virtual ggml_tensor * build_inp_cls(
ggml_context * ctx0,
int32_t n_tokens) = 0;
virtual void build_attn_inp(
ggml_context * ctx0,
int32_t n_tokens,
bool causal,
bool swa,
bool worst_case) = 0;
virtual void build_attn_kv_store(
ggml_context * ctx0,
ggml_cgraph * graph,
ggml_tensor * k_cur,
ggml_tensor * v_cur,
int32_t n_tokens,
int64_t il,
bool worst_case) = 0;
virtual ggml_tensor * build_attn_qkv(
ggml_context * ctx0,
ggml_cgraph * graph,
ggml_tensor * wo,
ggml_tensor * wo_b,
ggml_tensor * q_cur,
int32_t n_tokens,
float kq_scale,
int il,
bool worst_case) = 0;
virtual ggml_tensor * build_soft_max_ext(
ggml_context * ctx0,
ggml_tensor * kq,
float kq_scale) = 0;
virtual ggml_tensor * get_rope_factors(int il) = 0;
virtual void build_k_shift(
ggml_context * ctx0,
ggml_cgraph * graph) = 0;
// find holes from the beginning of the KV cache and fill them by moving data from the end of the cache
virtual void build_defrag(
ggml_context * ctx0,
ggml_cgraph * graph) = 0;
virtual ggml_tensor * build_inp_embd_enc(
ggml_context * ctx0,
int32_t n_tokens,
bool worst_case) = 0;
virtual ggml_tensor * build_inp_KQ_mask_cross(
ggml_context * ctx0,
int32_t n_tokens,
bool worst_case) = 0;
virtual ggml_tensor * build_inp_s_copy(
ggml_context * ctx0,
bool worst_case) = 0;
virtual ggml_tensor * build_inp_s_mask(
ggml_context * ctx0,
bool worst_case) = 0;
virtual ggml_tensor * build_copy_mask_state(
ggml_context * ctx0,
ggml_cgraph * graph,
ggml_tensor * s,
ggml_tensor * state_copy,
ggml_tensor * state_mask,
int32_t n_tokens,
int32_t n_state,
int32_t n_seqs,
bool worst_case) = 0;
virtual ggml_tensor * build_mamba_layer(
ggml_context * ctx0,
ggml_cgraph * graph,
ggml_tensor * cur,
ggml_tensor * state_copy,
ggml_tensor * state_mask,
const llama_ubatch & ubatch,
int il,
bool worst_case) = 0;
virtual ggml_tensor * build_rwkv_token_shift_load(
ggml_context * ctx0,
ggml_cgraph * graph,
ggml_tensor * state_copy,
ggml_tensor * state_mask,
const llama_ubatch & ubatch,
int il,
bool worst_case) = 0;
virtual ggml_tensor * build_rwkv_token_shift_store(
ggml_context * ctx0,
ggml_tensor * token_shift,
const llama_ubatch & ubatch,
int il,
bool worst_case) = 0;
virtual ggml_tensor * build_rwkv6_time_mix(
ggml_context * ctx0,
ggml_cgraph * graph,
ggml_tensor * cur,
ggml_tensor * x_prev,
ggml_tensor * state_copy,
ggml_tensor * state_mask,
const llama_ubatch & ubatch,
int il,
bool worst_case) = 0;
// state save/load
virtual size_t state_get_size() = 0;
virtual size_t state_get_data( uint8_t * dst, size_t size) = 0;
virtual size_t state_set_data(const uint8_t * src, size_t size) = 0;
virtual size_t state_seq_get_size(llama_seq_id seq_id) = 0;
virtual size_t state_seq_get_data(llama_seq_id seq_id, uint8_t * dst, size_t size) = 0;
virtual size_t state_seq_set_data(llama_seq_id seq_id, const uint8_t * src, size_t size) = 0;
virtual bool state_load_file(
const char * filepath,
llama_token * tokens_out,
size_t n_token_capacity,
size_t * n_token_count_out) = 0;
virtual bool state_save_file(
const char * filepath,
const llama_token * tokens,
size_t n_token_count) = 0;
virtual size_t state_seq_load_file(
llama_seq_id seq_id,
const char * filepath,
llama_token * tokens_out,
size_t n_token_capacity,
size_t * n_token_count_out) = 0;
virtual size_t state_seq_save_file(
llama_seq_id seq_id,
const char * filepath,
const llama_token * tokens,
size_t n_token_count) = 0;
// members
const llama_model & model;
llama_cparams cparams;
llama_sbatch sbatch;
// TODO: revisit if needed
llama_adapter_cvec cvec;
llama_loras loras;
build_graph_callback cb_build_graph;
ggml_threadpool_t threadpool = nullptr;
ggml_threadpool_t threadpool_batch = nullptr;
ggml_abort_callback abort_callback = nullptr;
void * abort_callback_data = nullptr;
std::vector<ggml_backend_ptr> backends;
std::vector<std::pair<ggml_backend_t, ggml_backend_set_n_threads_t>> set_n_threads_fns;
ggml_backend_t backend_cpu = nullptr;
ggml_threadpool_t threadpool = nullptr;
ggml_threadpool_t threadpool_batch = nullptr;
ggml_backend_sched_ptr sched;
// memory buffers used to evaluate the model
std::vector<uint8_t> buf_compute_meta;
// perf
bool has_evaluated_once = false;
mutable int64_t t_start_us;
@ -60,6 +267,49 @@ struct llama_context {
mutable int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
mutable int32_t n_eval = 0; // number of eval calls
};
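Aside, for orientation (illustrative, not part of this commit): call sites are expected to drive a context purely through the virtual interface declared above. A minimal sketch, assuming a concrete implementation was already constructed by the library factory and that batch holds prepared input; the helper name is made up and error handling is elided:

// illustrative sketch only: exercises the virtuals of the abstract base class
static void decode_once(llama_context * ctx, llama_batch & batch) {
    if (ctx->decode(batch) != 0) {
        return; // real code would report the error
    }
    // logits for the last token of the batch
    float * logits = ctx->get_logits_ith(batch.n_tokens - 1);
    (void) logits;
    // deferred KV-cache maintenance (K-shift, defrag) also goes through the interface
    ctx->kv_self_update();
}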
// TODO: make implementation details private
struct llama_context_unified : public llama_context {
struct batch_manager;
// TODO: tmp until llama-model starts implementing the graph build function
typedef std::function<ggml_cgraph *(llama_context &, const llama_ubatch &, bool worst_case)> build_graph_callback;
llama_context_unified(
const llama_model & model,
const llama_context_params & params,
build_graph_callback && cb_build_graph);
virtual ~llama_context_unified();
virtual uint32_t n_ctx() const override;
virtual uint32_t n_batch() const override;
virtual uint32_t n_ubatch() const override;
virtual uint32_t n_seq_max() const override;
virtual llama_kv_cache * get_kv_self() override;
virtual const llama_kv_cache * get_kv_self() const override;
virtual void kv_self_update() override;
virtual enum llama_pooling_type pooling_type() const override;
virtual float * get_logits() override;
virtual float * get_logits_ith(int32_t i) override;
virtual float * get_embeddings() override;
virtual float * get_embeddings_ith(int32_t i) override;
virtual float * get_embeddings_seq(llama_seq_id seq_id) override;
virtual ggml_context_ptr init() override;
virtual int decode(llama_batch & inp_batch) override;
virtual int encode(llama_batch & inp_batch) override;
llama_sbatch sbatch;
build_graph_callback cb_build_graph;
// host buffer for the model output (logits and embeddings)
ggml_backend_buffer_ptr buf_output;
@ -72,7 +322,7 @@ struct llama_context {
size_t output_size = 0; // capacity (of tokens positions) for the output buffers
int32_t n_outputs = 0; // number of actually-used outputs in the current ubatch or last logical batch
bool logits_all = false;
bool need_reserve = false;
// embeddings output (2-dimensional array: [n_outputs][n_embd])
@ -84,17 +334,7 @@ struct llama_context {
// populated only when pooling_type != LLAMA_POOLING_TYPE_NONE
std::map<llama_seq_id, std::vector<float>> embd_seq;
// memory buffers used to evaluate the model
std::vector<uint8_t> buf_compute_meta;
ggml_backend_sched_ptr sched;
ggml_abort_callback abort_callback = nullptr;
void * abort_callback_data = nullptr;
virtual std::unique_ptr<llama_batch_manager_i> prepare_batch(const llama_batch & batch);
virtual int decode(llama_batch & inp_batch);
virtual int encode(llama_batch & inp_batch);
virtual std::unique_ptr<batch_manager> prepare_batch(const llama_batch & batch);
// returns the result of ggml_backend_sched_graph_compute_async execution
enum ggml_status compute_graph(
@ -107,32 +347,19 @@ struct llama_context {
// certain implementations could require a padding for the context size
uint32_t get_ctx_padding(const llama_cparams & cparams) const;
void reset();
void prepare_k_shift();
void prepare_defrag();
void set_inputs(const llama_ubatch & ubatch);
// make the outputs have the same order they had in the user-provided batch
// TODO: maybe deprecate this
// TODO: maybe remove this
void reorder_outputs();
// Make sure enough space is available for outputs.
// Returns max number of outputs for which space was reserved.
size_t reserve_outputs(size_t n_outputs);
ggml_tensor * build_lora_mm(
ggml_context * ctx0,
ggml_tensor * w,
ggml_tensor * cur);
ggml_tensor * build_lora_mm_id(
ggml_context * ctx0,
ggml_tensor * w, // struct ggml_tensor * as
ggml_tensor * cur, // struct ggml_tensor * b
ggml_tensor * ids);
// input tensors
struct ggml_tensor * inp_tokens; // I32 [n_batch]
struct ggml_tensor * inp_embd; // F32 [n_embd, n_batch]
@ -141,6 +368,81 @@ struct llama_context {
struct ggml_tensor * inp_mean; // F32 [n_batch, n_batch]
struct ggml_tensor * inp_cls; // I32 [n_batch]
// === unified KV cache ===
llama_kv_cache kv_self;
struct ggml_tensor * inp_KQ_mask; // F32 [kv_size, n_batch]
struct ggml_tensor * inp_KQ_mask_cnv; // [kv_size, n_batch]
struct ggml_tensor * inp_KQ_mask_swa; // F32 [kv_size, n_batch]
struct ggml_tensor * inp_KQ_mask_swa_cnv; // [kv_size, n_batch]
struct ggml_tensor * inp_K_shift; // I32 [kv_size]
virtual ggml_tensor * build_inp_embd(
ggml_context * ctx0,
ggml_tensor * tok_embd,
const llama_ubatch & ubatch) override;
virtual ggml_tensor * build_inp_pos(
ggml_context * ctx0,
int32_t n_tokens) override;
virtual ggml_tensor * build_inp_out_ids(
ggml_context * ctx0,
int32_t n_tokens,
bool worst_case) override;
virtual ggml_tensor * build_inp_mean(
ggml_context * ctx0,
int32_t n_tokens) override;
virtual ggml_tensor * build_inp_cls(
ggml_context * ctx0,
int32_t n_tokens) override;
virtual void build_attn_inp(
ggml_context * ctx0,
int32_t n_tokens,
bool causal,
bool swa,
bool worst_case) override;
virtual void build_attn_kv_store(
ggml_context * ctx0,
ggml_cgraph * graph,
ggml_tensor * k_cur,
ggml_tensor * v_cur,
int32_t n_tokens,
int64_t il,
bool worst_case) override;
virtual ggml_tensor * build_attn_qkv(
ggml_context * ctx0,
ggml_cgraph * graph,
ggml_tensor * wo,
ggml_tensor * wo_b,
ggml_tensor * q_cur,
int32_t n_tokens,
float kq_scale,
int il,
bool worst_case) override;
virtual ggml_tensor * build_soft_max_ext(
ggml_context * ctx0,
ggml_tensor * kq,
float kq_scale) override;
virtual ggml_tensor * get_rope_factors(int il) override;
virtual void build_k_shift(
ggml_context * ctx0,
ggml_cgraph * graph) override;
// find holes from the beginning of the KV cache and fill them by moving data from the end of the cache
virtual void build_defrag(
ggml_context * ctx0,
ggml_cgraph * graph) override;
// === encoder-decoder ===
// whether we are computing encoder output or decoder output
@ -152,79 +454,36 @@ struct llama_context {
struct ggml_tensor * inp_embd_enc; // F32 [n_embd, n_outputs_enc]
struct ggml_tensor * inp_pos_bucket; // I32 [n_batch|n_kv, n_batch]
struct ggml_tensor * inp_KQ_mask_cross; // F32 [n_outputs_enc, n_batch]
// === unified KV cache ===
llama_kv_cache kv_self;
struct ggml_tensor * inp_KQ_mask; // F32 [kv_size, n_batch]
struct ggml_tensor * inp_KQ_mask_cnv; // [kv_size, n_batch]
struct ggml_tensor * inp_KQ_mask_swa; // F32 [kv_size, n_batch]
struct ggml_tensor * inp_KQ_mask_swa_cnv; // [kv_size, n_batch]
struct ggml_tensor * inp_KQ_mask_cross; // F32 [n_outputs_enc, n_batch]
struct ggml_tensor * inp_K_shift; // I32 [kv_size]
// return true if need to reserve new worst-case graph
void kv_self_update();
void build_attn_inp(
ggml_context * ctx0,
int32_t n_tokens,
bool causal,
bool swa,
bool worst_case);
void build_attn_kv_store(
ggml_context * ctx0,
ggml_cgraph * graph,
ggml_tensor * k_cur,
ggml_tensor * v_cur,
int32_t n_tokens,
int64_t il,
bool worst_case);
ggml_tensor * build_attn_qkv(
ggml_context * ctx0,
ggml_cgraph * graph,
ggml_tensor * wo,
ggml_tensor * wo_b,
ggml_tensor * q_cur,
int32_t n_tokens,
float kq_scale,
int il,
bool worst_case);
ggml_tensor * build_soft_max_ext(
ggml_context * ctx0,
ggml_tensor * kq,
float kq_scale);
ggml_tensor * get_rope_factors(int il);
void build_k_shift(
ggml_context * ctx0,
ggml_cgraph * graph);
// find holes from the beginning of the KV cache and fill them by moving data from the end of the cache
void build_defrag(
ggml_context * ctx0,
ggml_cgraph * graph);
virtual ggml_tensor * build_inp_embd_enc(
ggml_context * ctx0,
int32_t n_tokens,
bool worst_case) override;
virtual ggml_tensor * build_inp_KQ_mask_cross(
ggml_context * ctx0,
int32_t n_tokens,
bool worst_case) override;
// === recurrent ===
struct ggml_tensor * inp_s_copy; // I32 [kv_size]
struct ggml_tensor * inp_s_mask; // F32 [1, n_kv]
// TODO: add recurrent cache
// TODO: add mamba-specific llama_context
// TODO: change these to build_mamba_inp and hide `state_copy` and `state_mask` inside the llama_context impl
ggml_tensor * build_inp_s_copy(
virtual ggml_tensor * build_inp_s_copy(
ggml_context * ctx0,
bool worst_case);
bool worst_case) override;
ggml_tensor * build_inp_s_mask(
virtual ggml_tensor * build_inp_s_mask(
ggml_context * ctx0,
bool worst_case);
bool worst_case) override;
ggml_tensor * build_copy_mask_state(
virtual ggml_tensor * build_copy_mask_state(
ggml_context * ctx0,
ggml_cgraph * graph,
ggml_tensor * s,
@ -233,9 +492,9 @@ struct llama_context {
int32_t n_tokens,
int32_t n_state,
int32_t n_seqs,
bool worst_case);
bool worst_case) override;
ggml_tensor * build_mamba_layer(
virtual ggml_tensor * build_mamba_layer(
ggml_context * ctx0,
ggml_cgraph * graph,
ggml_tensor * cur,
@ -243,25 +502,25 @@ struct llama_context {
ggml_tensor * state_mask,
const llama_ubatch & ubatch,
int il,
bool worst_case);
bool worst_case) override;
ggml_tensor * build_rwkv_token_shift_load(
virtual ggml_tensor * build_rwkv_token_shift_load(
ggml_context * ctx0,
ggml_cgraph * graph,
ggml_tensor * state_copy,
ggml_tensor * state_mask,
const llama_ubatch & ubatch,
int il,
bool worst_case);
bool worst_case) override;
ggml_tensor * build_rwkv_token_shift_store(
virtual ggml_tensor * build_rwkv_token_shift_store(
ggml_context * ctx0,
ggml_tensor * token_shift,
const llama_ubatch & ubatch,
int il,
bool worst_case);
bool worst_case) override;
ggml_tensor * build_rwkv6_time_mix(
virtual ggml_tensor * build_rwkv6_time_mix(
ggml_context * ctx0,
ggml_cgraph * graph,
ggml_tensor * cur,
@ -270,17 +529,48 @@ struct llama_context {
ggml_tensor * state_mask,
const llama_ubatch & ubatch,
int il,
bool worst_case);
bool worst_case) override;
struct ggml_tensor * inp_s_copy; // I32 [kv_size]
struct ggml_tensor * inp_s_mask; // F32 [1, n_kv]
// === vision ===
// TODO: find a better way to accommodate mutli-dimension position encoding methods
// number of position id each token get, 1 for each token in most cases.
// when using m-rope, it will be 3 position ids per token to representing 3 dimension coordinate.
int n_pos_per_token = 1;
// state save/load
virtual size_t state_get_size() override;
virtual size_t state_get_data( uint8_t * dst, size_t size) override;
virtual size_t state_set_data(const uint8_t * src, size_t size) override;
virtual size_t state_seq_get_size(llama_seq_id seq_id) override;
virtual size_t state_seq_get_data(llama_seq_id seq_id, uint8_t * dst, size_t size) override;
virtual size_t state_seq_set_data(llama_seq_id seq_id, const uint8_t * src, size_t size) override;
virtual bool state_load_file(
const char * filepath,
llama_token * tokens_out,
size_t n_token_capacity,
size_t * n_token_count_out) override;
virtual bool state_save_file(
const char * filepath,
const llama_token * tokens,
size_t n_token_count) override;
virtual size_t state_seq_load_file(
llama_seq_id seq_id,
const char * filepath,
llama_token * tokens_out,
size_t n_token_capacity,
size_t * n_token_count_out) override;
virtual size_t state_seq_save_file(
llama_seq_id seq_id,
const char * filepath,
const llama_token * tokens,
size_t n_token_count) override;
private:
size_t state_get_data(struct llama_data_write & data_ctx);
size_t state_set_data(struct llama_data_read & data_ctx);
size_t state_seq_get_data(struct llama_data_write & data_ctx, llama_seq_id seq_id);
size_t state_seq_set_data(struct llama_data_read & data_ctx, llama_seq_id seq_id);
};
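Aside (illustrative, not part of this commit): the point of the split is that a model-specific graph builder no longer needs to know which cache or scheduler sits behind the context. A minimal sketch under that assumption, using only signatures declared above; the helper name and the flag values are placeholders:

// illustrative sketch only: common graph inputs obtained through the base-class virtuals
static void build_common_inputs(llama_context & lctx, ggml_context * ctx0, int32_t n_tokens, bool worst_case) {
    ggml_tensor * inp_pos     = lctx.build_inp_pos    (ctx0, n_tokens);
    ggml_tensor * inp_out_ids = lctx.build_inp_out_ids(ctx0, n_tokens, worst_case);
    // attention inputs (masks etc.) are prepared by the concrete context; causal, no SWA here
    lctx.build_attn_inp(ctx0, n_tokens, /*causal=*/true, /*swa=*/false, worst_case);
    (void) inp_pos;
    (void) inp_out_ids;
}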
// For internal test use // For internal test use


@ -8,7 +8,6 @@
#include "llama-model.h" #include "llama-model.h"
#include "ggml.h" #include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h" #include "ggml-backend.h"
#include "ggml-cpp.h" #include "ggml-cpp.h"
@ -86,8 +85,6 @@ struct llm_build_context {
const float norm_rms_eps;
const int32_t n_tokens;
const int32_t n_outputs;
const int32_t n_outputs_enc;
const int32_t n_ctx_orig;
const bool worst_case;
@ -98,9 +95,8 @@ struct llm_build_context {
const llm_build_cb & cb;
std::vector<uint8_t> & buf_compute_meta;
struct ggml_context * ctx0 = nullptr;
const ggml_context_ptr ctx = nullptr;
ggml_context * ctx0 = nullptr;
// TODO: consider making the entire interface noexcept
llm_build_context(
@ -136,132 +132,37 @@ struct llm_build_context {
norm_eps (hparams.f_norm_eps),
norm_rms_eps (hparams.f_norm_rms_eps),
n_tokens (ubatch.n_tokens),
n_outputs (worst_case ? n_tokens : lctx.n_outputs),
n_outputs_enc (worst_case ? n_tokens : lctx.embd_enc.size() / hparams.n_embd),
n_ctx_orig (cparams.n_ctx_orig_yarn),
worst_case (worst_case),
flash_attn (cparams.flash_attn),
pooling_type (cparams.pooling_type),
rope_type (hparams.rope_type),
cb (cb),
buf_compute_meta (lctx.buf_compute_meta) {
// all initializations should be done in init()
ctx (lctx.init()),
ctx0 (ctx.get()) {
}
void init() {
struct ggml_init_params params = {
/*.mem_size =*/ buf_compute_meta.size(),
/*.mem_buffer =*/ buf_compute_meta.data(),
/*.no_alloc =*/ true,
};
ctx0 = ggml_init(params);
lctx.reset();
}
void free() {
ggml_free(ctx0);
ctx0 = nullptr;
}
// TODO: tmp
struct ggml_tensor * build_inp_embd(struct ggml_tensor * tok_embd) {
struct ggml_tensor * inpL;
struct ggml_tensor * inpL = lctx.build_inp_embd(ctx0, tok_embd, ubatch);
if (ubatch.token) {
lctx.inp_tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ubatch.n_tokens);
cb(lctx.inp_tokens, "inp_tokens", -1);
ggml_set_input(lctx.inp_tokens);
inpL = ggml_get_rows(ctx0, tok_embd, lctx.inp_tokens);
// apply lora for embedding tokens if needed
for (const auto & lora : loras) {
struct llama_adapter_lora_weight * lw = lora.first->get_weight(tok_embd);
if (lw == nullptr) {
continue;
}
const float adapter_scale = lora.second;
const float scale = lw->get_scale(lora.first->alpha, adapter_scale);
struct ggml_tensor * inpL_delta = ggml_scale(ctx0, ggml_mul_mat(
ctx0, lw->b, // non-transposed lora_b
ggml_get_rows(ctx0, lw->a, lctx.inp_tokens)
), scale);
inpL = ggml_add(ctx0, inpL, inpL_delta);
}
} else {
lctx.inp_embd = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, ubatch.n_tokens);
inpL = lctx.inp_embd;
ggml_set_input(lctx.inp_embd);
}
// For Granite architecture
if (hparams.f_embedding_scale != 0.0f) {
inpL = ggml_scale(ctx0, inpL, hparams.f_embedding_scale);
}
cb(inpL, "inp_embd", -1); cb(inpL, "inp_embd", -1);
return inpL; return inpL;
} }
// do mat_mul, while optionally apply lora // TODO: tmp
struct ggml_tensor * build_lora_mm( struct ggml_tensor * build_lora_mm(
struct ggml_tensor * w, struct ggml_tensor * w,
struct ggml_tensor * cur) { struct ggml_tensor * cur) {
struct ggml_tensor * res = ggml_mul_mat(ctx0, w, cur); return lctx.build_lora_mm(ctx0, w, cur);
for (const auto & lora : loras) {
struct llama_adapter_lora_weight * lw = lora.first->get_weight(w);
if (lw == nullptr) {
continue;
}
const float adapter_scale = lora.second;
const float scale = lw->get_scale(lora.first->alpha, adapter_scale);
struct ggml_tensor * ab_cur = ggml_mul_mat(
ctx0, lw->b,
ggml_mul_mat(ctx0, lw->a, cur)
);
ab_cur = ggml_scale(ctx0, ab_cur, scale);
res = ggml_add(ctx0, res, ab_cur);
}
return res;
}
// do mat_mul_id, while optionally apply lora
// TODO: tmp
struct ggml_tensor * build_lora_mm_id(
struct ggml_tensor * w, // struct ggml_tensor * as
struct ggml_tensor * cur, // struct ggml_tensor * b
struct ggml_tensor * ids) {
struct ggml_tensor * res = ggml_mul_mat_id(ctx0, w, cur, ids);
return lctx.build_lora_mm_id(ctx0, w, cur, ids);
for (const auto & lora : loras) {
struct llama_adapter_lora_weight * lw = lora.first->get_weight(w);
if (lw == nullptr) {
continue;
}
const float alpha = lora.first->alpha;
const float rank = (float) lw->b->ne[0];
const float scale = alpha ? lora.second * alpha / rank : lora.second;
struct ggml_tensor * ab_cur = ggml_mul_mat_id(
ctx0, lw->b,
ggml_mul_mat_id(ctx0, lw->a, cur, ids),
ids
);
ab_cur = ggml_scale(ctx0, ab_cur, scale);
res = ggml_add(ctx0, res, ab_cur);
}
return res;
}
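Aside (illustrative, not part of this commit): the two forwarders above keep the semantics of the removed bodies; the implementation presumably moves into the file whose diff is suppressed above. Condensed from the removed build_lora_mm body, assuming a single active adapter with factors lw->a and lw->b and a precomputed scale:

// res = W*cur + scale * B*(A*cur), i.e. the base mat_mul plus a scaled low-rank correction
ggml_tensor * res = ggml_mul_mat(ctx0, w, cur);
ggml_tensor * ab  = ggml_mul_mat(ctx0, lw->b, ggml_mul_mat(ctx0, lw->a, cur));
res = ggml_add(ctx0, res, ggml_scale(ctx0, ab, scale));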
struct ggml_tensor * build_norm(
@ -620,31 +521,31 @@ struct llm_build_context {
}
struct ggml_tensor * build_inp_pos() {
lctx.inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
cb(lctx.inp_pos, "inp_pos", -1);
ggml_set_input(lctx.inp_pos);
return lctx.inp_pos;
ggml_tensor * cur = lctx.build_inp_pos(ctx0, n_tokens);
cb(cur, "inp_pos", -1);
return cur;
}
struct ggml_tensor * build_inp_out_ids() {
lctx.inp_out_ids = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_outputs);
cb(lctx.inp_out_ids, "inp_out_ids", -1);
ggml_set_input(lctx.inp_out_ids);
return lctx.inp_out_ids;
ggml_tensor * cur = lctx.build_inp_out_ids(ctx0, n_tokens, worst_case);
cb(cur, "inp_out_ids", -1);
return cur;
}
struct ggml_tensor * build_inp_mean() {
lctx.inp_mean = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, n_tokens);
cb(lctx.inp_mean, "inp_mean", -1);
ggml_set_input(lctx.inp_mean);
return lctx.inp_mean;
ggml_tensor * cur = lctx.build_inp_mean(ctx0, n_tokens);
cb(cur, "inp_mean", -1);
return cur;
}
struct ggml_tensor * build_inp_cls() {
lctx.inp_cls = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
cb(lctx.inp_cls, "inp_cls", -1);
ggml_set_input(lctx.inp_cls);
return lctx.inp_cls;
ggml_tensor * cur = lctx.build_inp_cls(ctx0, n_tokens);
cb(cur, "inp_cls", -1);
return cur;
}
struct ggml_cgraph * append_pooling(struct ggml_cgraph * gf) {
@ -745,26 +646,22 @@ struct llm_build_context {
//}
struct ggml_tensor * build_inp_embd_enc() {
const int64_t n_embd = hparams.n_embd;
lctx.inp_embd_enc = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, n_outputs_enc);
ggml_set_input(lctx.inp_embd_enc);
cb(lctx.inp_embd_enc, "embd_enc", -1);
return lctx.inp_embd_enc;
ggml_tensor * cur = lctx.build_inp_embd_enc(ctx0, n_tokens, worst_case);
cb(cur, "embd_enc", -1);
return cur;
}
struct ggml_tensor * build_inp_KQ_mask_cross() {
lctx.inp_KQ_mask_cross = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_outputs_enc, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD));
ggml_set_input(lctx.inp_KQ_mask_cross);
cb(lctx.inp_KQ_mask_cross, "KQ_mask_cross", -1);
return lctx.inp_KQ_mask_cross;
ggml_tensor * cur = lctx.build_inp_KQ_mask_cross(ctx0, n_tokens, worst_case);
cb(cur, "KQ_mask_cross", -1);
return cur;
}
struct ggml_cgraph * build_llama() {
struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
// mutable variable, needed during the last layer of the computation to skip unused tokens
int32_t n_tokens = this->n_tokens;
const int64_t n_embd_head = hparams.n_embd_head_v;
GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
GGML_ASSERT(n_embd_head == hparams.n_rot);
@ -838,7 +735,6 @@ struct llm_build_context {
if (il == n_layer - 1) {
// skip computing output for unused tokens
struct ggml_tensor * inp_out_ids = build_inp_out_ids();
n_tokens = n_outputs;
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
@ -927,9 +823,6 @@ struct llm_build_context {
struct ggml_cgraph * build_deci() {
struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
// mutable variable, needed during the last layer of the computation to skip unused tokens
int32_t n_tokens = this->n_tokens;
const int64_t n_embd_head = hparams.n_embd_head_v;
GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
GGML_ASSERT(n_embd_head == hparams.n_rot);
@ -1014,7 +907,6 @@ struct llm_build_context {
if (il == n_layer - 1) {
// skip computing output for unused tokens
struct ggml_tensor * inp_out_ids = build_inp_out_ids();
n_tokens = n_outputs;
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
@ -1422,9 +1314,6 @@ struct llm_build_context {
struct ggml_cgraph * build_grok() {
struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
// mutable variable, needed during the last layer of the computation to skip unused tokens
int32_t n_tokens = this->n_tokens;
const int64_t n_embd_head = hparams.n_embd_head_v;
GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
GGML_ASSERT(n_embd_head == hparams.n_rot);
@ -1498,7 +1387,6 @@ struct llm_build_context {
if (il == n_layer - 1) {
// skip computing output for unused tokens
struct ggml_tensor * inp_out_ids = build_inp_out_ids();
n_tokens = n_outputs;
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
@ -1580,9 +1468,6 @@ struct llm_build_context {
struct ggml_cgraph * build_dbrx() {
struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
// mutable variable, needed during the last layer of the computation to skip unused tokens
int32_t n_tokens = this->n_tokens;
const int64_t n_embd_head = hparams.n_embd_head_v;
const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
@ -1649,7 +1534,6 @@ struct llm_build_context {
if (il == n_layer - 1) {
// skip computing output for unused tokens
struct ggml_tensor * inp_out_ids = build_inp_out_ids();
n_tokens = n_outputs;
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
@ -2716,10 +2600,7 @@ struct llm_build_context {
inpL = build_inp_embd(model.tok_embd);
// inp_pos - contains the positions
lctx.inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens * 4);
cb(lctx.inp_pos, "inp_pos", -1);
ggml_set_input(lctx.inp_pos);
struct ggml_tensor * inp_pos = lctx.inp_pos;
struct ggml_tensor * inp_pos = build_inp_pos();
lctx.build_attn_inp(ctx0, n_tokens, true, false, worst_case);
@ -2825,9 +2706,6 @@ struct llm_build_context {
struct ggml_cgraph * build_qwen2moe() {
struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
// mutable variable, needed during the last layer of the computation to skip unused tokens
int32_t n_tokens = this->n_tokens;
const int64_t n_embd_head = hparams.n_embd_head_v;
GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
GGML_ASSERT(n_embd_head == hparams.n_rot);
@ -2891,7 +2769,6 @@ struct llm_build_context {
if (il == n_layer - 1) {
// skip computing output for unused tokens
struct ggml_tensor * inp_out_ids = build_inp_out_ids();
n_tokens = n_outputs;
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
@ -4685,9 +4562,6 @@ struct llm_build_context {
struct ggml_cgraph * build_olmo() {
struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
// mutable variable, needed during the last layer of the computation to skip unused tokens
int32_t n_tokens = this->n_tokens;
const int64_t n_embd_head = hparams.n_embd_head_v;
GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
GGML_ASSERT(n_embd_head == hparams.n_rot);
@ -4757,7 +4631,6 @@ struct llm_build_context {
if (il == n_layer - 1) {
// skip computing output for unused tokens
struct ggml_tensor * inp_out_ids = build_inp_out_ids();
n_tokens = n_outputs;
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
@ -4808,9 +4681,6 @@ struct llm_build_context {
struct ggml_cgraph * build_olmo2() {
struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
// mutable variable, needed during the last layer of the computation to skip unused tokens
int32_t n_tokens = this->n_tokens;
const int64_t n_embd_head = hparams.n_embd_head_v;
GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
GGML_ASSERT(n_embd_head == hparams.n_rot);
@ -4880,7 +4750,6 @@ struct llm_build_context {
if (il == n_layer - 1) {
// skip computing output for unused tokens
struct ggml_tensor * inp_out_ids = build_inp_out_ids();
n_tokens = n_outputs;
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
@ -4935,9 +4804,6 @@ struct llm_build_context {
struct ggml_cgraph * build_olmoe() {
struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
// mutable variable, needed during the last layer of the computation to skip unused tokens
int32_t n_tokens = this->n_tokens;
const int64_t n_embd_head = hparams.n_embd_head_v;
GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
GGML_ASSERT(n_embd_head == hparams.n_rot);
@ -5006,7 +4872,6 @@ struct llm_build_context {
if (il == n_layer - 1) {
// skip computing output for unused tokens
struct ggml_tensor * inp_out_ids = build_inp_out_ids();
n_tokens = n_outputs;
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
@ -5325,9 +5190,6 @@ struct llm_build_context {
struct ggml_cgraph * build_arctic() {
struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
// mutable variable, needed during the last layer of the computation to skip unused tokens
int32_t n_tokens = this->n_tokens;
const int64_t n_embd_head = hparams.n_embd_head_v;
GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
GGML_ASSERT(n_embd_head == hparams.n_rot);
@ -5385,7 +5247,6 @@ struct llm_build_context {
if (il == n_layer - 1) {
// skip computing output for unused tokens
struct ggml_tensor * inp_out_ids = build_inp_out_ids();
n_tokens = n_outputs;
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
@ -5458,9 +5319,6 @@ struct llm_build_context {
struct ggml_cgraph * build_deepseek() {
struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
// mutable variable, needed during the last layer of the computation to skip unused tokens
int32_t n_tokens = this->n_tokens;
const int64_t n_embd_head = hparams.n_embd_head_v;
GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
GGML_ASSERT(n_embd_head == hparams.n_rot);
@ -5535,7 +5393,6 @@ struct llm_build_context {
if (il == n_layer - 1) {
// skip computing output for unused tokens
struct ggml_tensor * inp_out_ids = build_inp_out_ids();
n_tokens = n_outputs;
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
@ -5616,9 +5473,6 @@ struct llm_build_context {
struct ggml_cgraph * build_deepseek2() {
struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
// mutable variable, needed during the last layer of the computation to skip unused tokens
int32_t n_tokens = this->n_tokens;
bool is_lite = (hparams.n_layer == 27);
// We have to pre-scale kq_scale and attn_factor to make the YaRN RoPE work correctly.
@ -5767,7 +5621,6 @@ struct llm_build_context {
if (il == n_layer - 1) {
// skip computing output for unused tokens
struct ggml_tensor * inp_out_ids = build_inp_out_ids();
n_tokens = n_outputs;
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
@ -5996,9 +5849,6 @@ struct llm_build_context {
//struct ggml_cgraph * build_t5_enc() {
// struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
// // mutable variable, needed during the last layer of the computation to skip unused tokens
// int32_t n_tokens = this->n_tokens;
// const int64_t n_embd_head = hparams.n_embd_head_v;
// const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
// GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
@ -6072,7 +5922,6 @@ struct llm_build_context {
// if (il == n_layer - 1) {
// // skip computing output for unused tokens
// struct ggml_tensor * inp_out_ids = build_inp_out_ids();
// n_tokens = n_outputs;
// cur = ggml_get_rows(ctx0, cur, inp_out_ids);
// inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
// }
@ -6128,9 +5977,6 @@ struct llm_build_context {
//struct ggml_cgraph * build_t5_dec() {
// struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
// // mutable variable, needed during the last layer of the computation to skip unused tokens
// int32_t n_tokens = this->n_tokens;
// const int64_t n_embd_head = hparams.n_embd_head_v;
// const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
// GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
@ -6272,7 +6118,6 @@ struct llm_build_context {
// if (il == n_layer - 1) {
// // skip computing output for unused tokens
// struct ggml_tensor * inp_out_ids = build_inp_out_ids();
// n_tokens = n_outputs;
// cur = ggml_get_rows(ctx0, cur, inp_out_ids);
// inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
// inpCA = ggml_get_rows(ctx0, inpCA, inp_out_ids);
@ -6673,9 +6518,6 @@ struct llm_build_context {
struct ggml_cgraph * build_exaone() {
struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
// mutable variable, needed during the last layer of the computation to skip unused tokens
int32_t n_tokens = this->n_tokens;
const int64_t n_embd_head = hparams.n_embd_head_v;
GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
GGML_ASSERT(n_embd_head == hparams.n_rot);
@ -6748,7 +6590,6 @@ struct llm_build_context {
if (il == n_layer - 1) {
// skip computing output for unused tokens
struct ggml_tensor * inp_out_ids = build_inp_out_ids();
n_tokens = n_outputs;
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
@ -6978,9 +6819,6 @@ struct llm_build_context {
struct ggml_cgraph * build_chameleon() {
struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
// mutable variable, needed during the last layer of the computation to skip unused tokens
int32_t n_tokens = this->n_tokens;
const int64_t n_embd_head = hparams.n_embd_head_v;
GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
GGML_ASSERT(n_embd_head == hparams.n_rot);
@ -7076,7 +6914,6 @@ struct llm_build_context {
if (il == n_layer - 1) {
// skip computing output for unused tokens
struct ggml_tensor * inp_out_ids = build_inp_out_ids();
n_tokens = n_outputs;
cur = ggml_get_rows(ctx0, cur, inp_out_ids);
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}
@ -7341,8 +7178,6 @@ static struct ggml_cgraph * llama_build_graph(
struct llm_build_context llm(lctx, ubatch, cb, worst_case);
llm.init();
switch (model.arch) {
case LLM_ARCH_LLAMA:
case LLM_ARCH_MINICPM:
@ -7403,7 +7238,6 @@ static struct ggml_cgraph * llama_build_graph(
} break;
case LLM_ARCH_QWEN2VL:
{
lctx.n_pos_per_token = 4;
result = llm.build_qwen2vl();
} break;
case LLM_ARCH_QWEN2MOE:
@ -7564,8 +7398,6 @@ static struct ggml_cgraph * llama_build_graph(
result = llm.append_pooling(result);
}
llm.free();
return result;
}
@ -7908,7 +7740,7 @@ struct llama_context * llama_init_from_model(
try {
// TODO: add logic which llama_context implementation to construct
ctx = new llama_context(*model, params,
ctx = new llama_context_unified(*model, params,
[](llama_context & lctx, const llama_ubatch & ubatch, bool worst_case) {
return llama_build_graph(lctx, ubatch, worst_case);
});
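Aside (illustrative, not part of this commit): the TODO above marks where the choice of implementation would eventually live. A hypothetical sketch of that dispatch; the condition and the alternative class name are placeholders that do not exist in this commit, and cb stands for the graph-build lambda shown above:

// hypothetical dispatch: only llama_context_unified exists at this point
llama_context * ctx = nullptr;
if (true /* e.g. the model uses the unified KV cache */) {
    ctx = new llama_context_unified(*model, params, std::move(cb));
} else {
    // ctx = new llama_context_recurrent(*model, params, std::move(cb)); // placeholder for a future variant
}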