mirror of
https://github.com/ggml-org/llama.cpp.git
synced 2025-08-24 08:49:15 -04:00
main: replace --no-special with --special (#7534)
This also flips the default behavior of the output to not include control tokens by default.
This commit is contained in:
@@ -146,7 +146,7 @@ struct gpt_params {
     bool use_color            = false; // use color to distinguish generations and inputs
     bool interactive          = false; // interactive mode
     bool interactive_specials = false; // whether to allow special tokens from user, during interactive mode
-    bool no_special           = false; // disable control token output
+    bool special              = false; // enable special token output
     bool conversation         = false; // conversation mode (does not print special tokens and suffix/prefix)
     bool chatml               = false; // chatml mode (used for models trained on chatml syntax)
     bool prompt_cache_all     = false; // save user input and generations to prompt cache
Reference in New Issue
Block a user