llama : refactor llama_context, llama_kv_cache, llm_build_context

* graph : don't mutate the KV cache during defrag
* context : reduce virtuals + remove test function
* context : move interface implementation to source file + factory
* graph : move KV cache build functions to llama_context impl
* graph : remove model reference from build_pooling
* graph : remove llama_model reference
* kv_cache : provide rope factors
* graph : rework inputs to use only unique_ptr, remove attn input abstraction
* context : remove llama_context_i abstraction
* context : clean-up
* graph : clean-up
* llama : remove redundant keywords (struct, enum)
* model : adapt gemma3
* graph : restore same attention ops as on master
* llama : remove TODO + fix indent
36 lines · 788 B · C++
#pragma once

#include <cstddef>
#include <cstdint>
#include <string>

struct ggml_tensor;

class llama_io_write_i {
public:
    llama_io_write_i() = default;
    virtual ~llama_io_write_i() = default;

    virtual void write(const void * src, size_t size) = 0;
    virtual void write_tensor(const ggml_tensor * tensor, size_t offset, size_t size) = 0;

    // bytes written so far
    virtual size_t n_bytes() = 0;

    void write_string(const std::string & str);
};

class llama_io_read_i {
public:
    llama_io_read_i() = default;
    virtual ~llama_io_read_i() = default;

    virtual const uint8_t * read(size_t size) = 0;
    virtual void read_to(void * dst, size_t size) = 0;

    // bytes read so far
    virtual size_t n_bytes() = 0;

    void read_string(std::string & str);
};
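For illustration, a minimal sketch of how these two interfaces might be implemented; the class names below are hypothetical and not part of llama.cpp. It shows an in-memory writer backed by a std::vector<uint8_t> and a reader over a fixed buffer. The only external call assumed is ggml_backend_tensor_get() from ggml-backend.h, which copies a byte range of a (possibly device-resident) tensor into host memory.

// --- Illustrative sketch, not part of llama.cpp ----------------------------
// A minimal in-memory writer: write() appends raw bytes to a std::vector,
// write_tensor() stages the requested tensor range into host memory first
// via ggml_backend_tensor_get(), then appends it like any other blob.

#include <cstring>
#include <vector>

#include "ggml-backend.h"

class llama_io_write_buffer_example : public llama_io_write_i {
public:
    void write(const void * src, size_t size) override {
        const uint8_t * p = static_cast<const uint8_t *>(src);
        buf.insert(buf.end(), p, p + size);
    }

    void write_tensor(const ggml_tensor * tensor, size_t offset, size_t size) override {
        std::vector<uint8_t> tmp(size);
        ggml_backend_tensor_get(tensor, tmp.data(), offset, size);
        write(tmp.data(), size);
    }

    // bytes written so far
    size_t n_bytes() override { return buf.size(); }

    std::vector<uint8_t> buf;
};

// A matching reader over a fixed memory region: read() returns a pointer into
// the region and advances the cursor, read_to() copies out via memcpy.
// NOTE: a real implementation should fail when size exceeds the bytes left.
class llama_io_read_buffer_example : public llama_io_read_i {
public:
    llama_io_read_buffer_example(const uint8_t * data, size_t size) : cur(data), left(size) {}

    const uint8_t * read(size_t size) override {
        const uint8_t * base = cur;
        cur        += size;
        left       -= size;
        read_bytes += size;
        return base;
    }

    void read_to(void * dst, size_t size) override {
        std::memcpy(dst, read(size), size);
    }

    // bytes read so far
    size_t n_bytes() override { return read_bytes; }

private:
    const uint8_t * cur;
    size_t left;
    size_t read_bytes = 0;
};

The non-virtual write_string()/read_string() helpers are declared in the header but defined in the accompanying source file, presumably layered on top of the virtual primitives (e.g. a length prefix followed by the raw bytes), so every concrete backend gets string support without reimplementing it.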