Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-08-20 06:36:48 -04:00)
rm redundant llama_batch_ext_set_output_last
@@ -110,7 +110,6 @@ int main(int argc, char ** argv) {
     // prepare a batch for the prompt
     llama_pos n_past = 0;
     llama_batch_ext * batch = llama_batch_ext_init_from_text(prompt_tokens.data(), prompt_tokens.size(), n_past, 0, true);
-    llama_batch_ext_set_output_last(batch);
     n_past += llama_batch_ext_get_n_tokens(batch);

     llama_token new_token_id;
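A minimal before/after sketch of the call site, assuming the trailing boolean of llama_batch_ext_init_from_text already requests output (logits) for the last token of the batch; the parameter-name comments below are guesses for illustration, not taken from the header:

    // Before this commit: the batch is created with the output-last flag set,
    // and then the last token is marked for output a second time.
    llama_batch_ext * batch = llama_batch_ext_init_from_text(
        prompt_tokens.data(), prompt_tokens.size(), n_past, /*seq_id=*/0, /*output_last=*/true);
    llama_batch_ext_set_output_last(batch); // redundant: already requested via the flag above

    // After this commit: only the constructor call remains. Under the same
    // assumption, an explicit llama_batch_ext_set_output_last(batch) would only
    // be needed if the batch had been built with that flag set to false.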