diff --git a/tools/server/README.md b/tools/server/README.md
index 7b944c35b..17ad93df6 100644
--- a/tools/server/README.md
+++ b/tools/server/README.md
@@ -1040,7 +1040,7 @@ To know the `id` of the adapter, use GET `/lora-adapters`
 
 Returns information about the loaded model. See [OpenAI Models API documentation](https://platform.openai.com/docs/api-reference/models).
 
-The returned list always has one single element.
+The returned list always has one single element. The `meta` field can be `null` (for example, while the model is still loading).
 
 By default, model `id` field is the path to model file, specified via `-m`. You can set a custom value for model `id` field via `--alias` argument. For example, `--alias gpt-4o-mini`.
 
diff --git a/tools/server/server.cpp b/tools/server/server.cpp
index 4531dbf93..1ecf048db 100644
--- a/tools/server/server.cpp
+++ b/tools/server/server.cpp
@@ -4368,7 +4368,7 @@ int main(int argc, char ** argv) {
     const auto handle_models = [&params, &ctx_server, &state, &res_ok](const httplib::Request &, httplib::Response & res) {
         server_state current_state = state.load();
-        std::string model_meta;
+        json model_meta = nullptr;
         if (current_state == SERVER_STATE_READY) {
             model_meta = ctx_server.model_meta();
         }
 
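For context, a minimal, standalone sketch (not part of the patch) of why declaring `json model_meta = nullptr;` makes the `/v1/models` response carry `"meta": null` until the server reaches `SERVER_STATE_READY`. The response shape is simplified, and the metadata fields and `gpt-4o-mini` alias are made-up illustrative values, not what `ctx_server.model_meta()` actually returns:

```cpp
#include <nlohmann/json.hpp>
#include <iostream>

using json = nlohmann::ordered_json;

int main() {
    // Pretend the model is still loading, i.e. the server is not yet SERVER_STATE_READY.
    bool model_ready = false;

    // Declared as JSON null, like the patched handler: if it is never filled in,
    // it serializes as `null` rather than as an empty string.
    json model_meta = nullptr;
    if (model_ready) {
        // Hypothetical metadata, standing in for ctx_server.model_meta().
        model_meta = json{{"n_ctx_train", 4096}};
    }

    // Simplified stand-in for the response body built by handle_models.
    json models = {
        {"object", "list"},
        {"data", json::array({
            {
                {"id",       "gpt-4o-mini"},   // --alias value, or the -m model path
                {"object",   "model"},
                {"owned_by", "llamacpp"},
                {"meta",     model_meta},      // -> "meta": null while loading
            },
        })},
    };

    std::cout << models.dump(2) << std::endl;
    return 0;
}
```

This is only meant to show the serialization behaviour the README note describes; see the real `handle_models` handler for the actual fields returned.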