diff --git a/tools/server/server.cpp b/tools/server/server.cpp
index 7169ffdce..4531dbf93 100644
--- a/tools/server/server.cpp
+++ b/tools/server/server.cpp
@@ -3705,6 +3705,9 @@ int main(int argc, char ** argv) {
             if (req.path == "/" || tmp.back() == "html") {
                 res.set_content(reinterpret_cast<const char *>(loading_html), loading_html_len, "text/html; charset=utf-8");
                 res.status = 503;
+            } else if (req.path == "/models" || req.path == "/v1/models") {
+                // allow the models endpoint to be accessed during loading
+                return true;
             } else {
                 res_error(res, format_error_response("Loading model", ERROR_TYPE_UNAVAILABLE));
             }
@@ -4363,7 +4366,13 @@ int main(int argc, char ** argv) {
         res_ok(res, {{ "prompt", std::move(data.at("prompt")) }});
     };
 
-    const auto handle_models = [&params, &ctx_server, &res_ok](const httplib::Request &, httplib::Response & res) {
+    const auto handle_models = [&params, &ctx_server, &state, &res_ok](const httplib::Request &, httplib::Response & res) {
+        server_state current_state = state.load();
+        std::string model_meta;
+        if (current_state == SERVER_STATE_READY) {
+            model_meta = ctx_server.model_meta();
+        }
+
         json models = {
             {"object", "list"},
             {"data", {
@@ -4372,7 +4381,7 @@ int main(int argc, char ** argv) {
                 {"object", "model"},
                 {"created", std::time(0)},
                 {"owned_by", "llamacpp"},
-                {"meta", ctx_server.model_meta()}
+                {"meta", model_meta},
             },
         }}
     };