Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-06-27 20:05:20 +00:00)
server : update readme + return json for "meta" field
In the server README:

@@ -1040,7 +1040,7 @@ To know the `id` of the adapter, use GET `/lora-adapters`
 
 Returns information about the loaded model. See [OpenAI Models API documentation](https://platform.openai.com/docs/api-reference/models).
 
-The returned list always has one single element.
+The returned list always has one single element. The `meta` field can be `null` (for example, while the model is still loading).
 
 By default, model `id` field is the path to model file, specified via `-m`. You can set a custom value for model `id` field via `--alias` argument. For example, `--alias gpt-4o-mini`.
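For illustration, here is what a `GET /v1/models` response might look like while the model is still loading. Only the single-element list and the `null` `meta` field come from this change; the surrounding field names follow the OpenAI-style models list and are assumptions rather than output captured from the server (the `id` reuses the `--alias gpt-4o-mini` example above):

```json
{
  "object": "list",
  "data": [
    {
      "id": "gpt-4o-mini",
      "object": "model",
      "owned_by": "llamacpp",
      "meta": null
    }
  ]
}
```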
In the server source (`server.cpp`), the `/models` handler now builds `meta` as JSON instead of a string:

@@ -4368,7 +4368,7 @@ int main(int argc, char ** argv) {
 
     const auto handle_models = [&params, &ctx_server, &state, &res_ok](const httplib::Request &, httplib::Response & res) {
         server_state current_state = state.load();
-        std::string model_meta;
+        json model_meta = nullptr;
         if (current_state == SERVER_STATE_READY) {
             model_meta = ctx_server.model_meta();
         }
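As a rough, self-contained sketch of the pattern this change moves to (a JSON `meta` value that stays `null` until the model is loaded), assuming nlohmann::json and a hypothetical `server_context_stub` with made-up metadata fields; none of the stub types or values are taken from the actual server code:

```cpp
#include <iostream>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

// Hypothetical stand-ins for the server's state machine and context; the real
// llama.cpp types are not reproduced here.
enum server_state { SERVER_STATE_LOADING_MODEL, SERVER_STATE_READY };

struct server_context_stub {
    // Illustrative metadata; the real model_meta() reads values from the loaded model.
    json model_meta() const {
        return json{{"n_ctx_train", 4096}, {"size", 7000000000LL}};
    }
};

int main() {
    server_context_stub ctx_server;
    server_state current_state = SERVER_STATE_LOADING_MODEL; // pretend the model is not ready yet

    // As in the commit: meta starts out as JSON null instead of an empty string,
    // and is only populated once the server reports READY.
    json model_meta = nullptr;
    if (current_state == SERVER_STATE_READY) {
        model_meta = ctx_server.model_meta();
    }

    // Assumed OpenAI-style single-element model list; field names other than
    // "meta" are illustrative, not copied from the server.
    json model_entry = {
        {"id",       "gpt-4o-mini"},   // path from -m, or the value of --alias
        {"object",   "model"},
        {"owned_by", "llamacpp"},
        {"meta",     model_meta}       // serializes as null while loading
    };
    json models = {
        {"object", "list"},
        {"data",   json::array({model_entry})}
    };

    std::cout << models.dump(2) << std::endl;  // prints "meta": null in this run
    return 0;
}
```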