Mirror of https://github.com/ggml-org/llama.cpp.git, synced 2025-06-27 12:05:03 +00:00
Merge commit from fork

* vocab : prevent integer overflow during load

* Add static cast and GGML_ABORT

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
@@ -19,6 +19,7 @@
 #include <set>
 #include <unordered_map>
 #include <cctype>
+#include <cinttypes>
 
 //
 // helpers
@@ -2572,6 +2573,10 @@ int32_t llama_vocab::impl::token_to_piece(llama_token token, char * buf, int32_t
     // copy piece chars to output text buffer
     // skip up to 'lstrip' leading spaces before copying
     auto _try_copy = [=] (const char * token, size_t size) -> int32_t {
+        if (size >= static_cast<size_t>(std::numeric_limits<int32_t>::max())) {
+            GGML_ABORT("invalid token size: %zu exceeds int32_t limit", size);
+        }
+
         for (int32_t i = 0; i < lstrip && size && *token == ' '; ++i) {
             token++;
             size--;
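For context, below is a minimal standalone sketch of the guard pattern this commit adds: a piece whose length is a size_t is copied into a caller buffer whose capacity is expressed as int32_t, so the length must be range-checked before it is narrowed. The try_copy helper, its buffer-size handling, and the fprintf/abort error path are illustrative stand-ins (the real code lives inside the _try_copy lambda and aborts via GGML_ABORT); only the overflow check itself mirrors the diff above.

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <limits>

// Hypothetical helper mirroring the guarded copy: 'size' comes from the vocab
// (size_t), 'buf_len' is the caller-supplied capacity (int32_t).
static int32_t try_copy(const char * token, size_t size, char * buf, int32_t buf_len) {
    // A size_t can exceed INT32_MAX; casting it straight to int32_t would
    // overflow, so bail out before any narrowing conversion.
    if (size >= static_cast<size_t>(std::numeric_limits<int32_t>::max())) {
        fprintf(stderr, "invalid token size: %zu exceeds int32_t limit\n", size);
        abort();
    }
    if (static_cast<int32_t>(size) > buf_len) {
        // Not enough room: report the required size as a negative value.
        return -static_cast<int32_t>(size);
    }
    memcpy(buf, token, size);
    return static_cast<int32_t>(size);
}

int main() {
    char buf[16];
    const char * piece = " hello";
    int32_t n = try_copy(piece, strlen(piece), buf, static_cast<int32_t>(sizeof(buf)));
    printf("copied %d bytes\n", n);
    return 0;
}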