mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-07-15 23:30:15 +00:00)
eval-callback : check for empty input (#14539)
@@ -136,6 +136,11 @@ static bool run(llama_context * ctx, const common_params & params) {
 
     std::vector<llama_token> tokens = common_tokenize(ctx, params.prompt, add_bos);
 
+    if (tokens.empty()) {
+        LOG_ERR("%s : there are no input tokens to process (try to provide a prompt with '-p')\n", __func__);
+        return false;
+    }
+
     if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size()))) {
         LOG_ERR("%s : failed to eval\n", __func__);
         return false;
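For context, the change guards against handing an empty token list to llama_decode: with no prompt, common_tokenize can return zero tokens, and the run would fail later with a less helpful error. Below is a minimal standalone sketch of the same guard pattern, assuming nothing from llama.cpp: tokenize() and run() here are hypothetical stand-ins for common_tokenize() and the real run(), not the library's API.

#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

using token = int32_t;

// Hypothetical stand-in for common_tokenize(): splits the prompt on spaces
// and assigns each word an arbitrary id. Only meant to exercise the guard.
static std::vector<token> tokenize(const std::string & prompt) {
    std::vector<token> tokens;
    size_t pos = 0;
    while (pos < prompt.size()) {
        while (pos < prompt.size() && prompt[pos] == ' ') pos++;
        size_t end = pos;
        while (end < prompt.size() && prompt[end] != ' ') end++;
        if (end > pos) tokens.push_back((token) tokens.size());
        pos = end;
    }
    return tokens;
}

static bool run(const std::string & prompt) {
    std::vector<token> tokens = tokenize(prompt);

    // The check this commit adds: bail out early with a clear message
    // instead of passing an empty batch to the decode step.
    if (tokens.empty()) {
        fprintf(stderr, "%s : there are no input tokens to process (try to provide a prompt with '-p')\n", __func__);
        return false;
    }

    // ... decode tokens here (llama_decode in the real code) ...
    return true;
}

int main() {
    return run("") ? 0 : 1;  // an empty prompt triggers the new error path
}

Running the sketch with an empty prompt prints the error and exits non-zero, mirroring how the patched eval-callback example returns false before reaching llama_decode.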