llama : add llama_sampling API + move grammar in libllama

ggml-ci
Author: Georgi Gerganov
Date:   2024-08-05 10:08:25 +03:00
Parent: b69a480af4
Commit: f648ca2cee

48 changed files with 2481 additions and 2590 deletions
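
The changes follow two consistent renames: the grammar machinery moves into libllama behind "llama-grammar.h" (grammar_parser::parse becomes the llama_grammar_parser struct, llama_grammar_init/free become _impl functions), and the llama_sample_* helpers become llama_sampling_*_impl functions in "llama-sampling.h" that no longer take a llama_context. A rough old-to-new mapping, reconstructed only from the hunks below (a sketch, not the complete new surface):

// grammar_parser::parse(str)                   ->  llama_grammar_parser p; p.parse(str)
// llama_grammar_init(rules, n, root_id)        ->  llama_grammar_init_impl(nullptr, rules, n, root_id)
// llama_grammar_free(g)                        ->  llama_grammar_free_impl(g)
// llama_sample_softmax(ctx, &c)                ->  llama_sampling_softmax_impl(&c)
// llama_sample_top_k(ctx, &c, k, keep)         ->  llama_sampling_top_k_impl(&c, k, keep)
// llama_sample_repetition_penalties(ctx, ...)  ->  llama_sampling_penalties_impl(&c, token_cnt, ...)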

View File

@@ -2,33 +2,18 @@
#undef NDEBUG
#endif
-#define LLAMA_API_INTERNAL
-#include "ggml.h"
-#include "llama.h"
-#include "grammar-parser.h"
-#include "json-schema-to-grammar.h"
#include "unicode.h"
+#include "llama-grammar.h"
+#include "json-schema-to-grammar.h"
#include <cassert>
#include <string>
#include <vector>
using json = nlohmann::ordered_json;
-static llama_grammar* build_grammar(const std::string & grammar_str) {
-    auto parsed_grammar = grammar_parser::parse(grammar_str.c_str());
-    // Ensure we parsed correctly
-    assert(!parsed_grammar.rules.empty());
-    // Ensure we have a root node
-    assert(!(parsed_grammar.symbol_ids.find("root") == parsed_grammar.symbol_ids.end()));
-    std::vector<const llama_grammar_element*> grammar_rules(parsed_grammar.c_rules());
-    llama_grammar* grammar = llama_grammar_init(
-        grammar_rules.data(), grammar_rules.size(), parsed_grammar.symbol_ids.at("root"));
-    return grammar;
+static llama_grammar * build_grammar(const std::string & grammar_str) {
+    return llama_grammar_init_impl(nullptr, grammar_str.c_str(), "root");
}
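
Grammar construction thus collapses from parse + c_rules + init into a single call. A minimal sketch of the new usage (assumes the internal "llama-grammar.h" header from this commit; the GBNF string is illustrative):

#include "llama-grammar.h"

#include <cstdio>

int main() {
    // nullptr vocab: parse/validate only, exactly as the tests above do
    llama_grammar * grammar = llama_grammar_init_impl(nullptr, R"""(root ::= "yes" | "no")""", "root");
    if (grammar == nullptr) {
        fprintf(stderr, "failed to build grammar\n");
        return 1;
    }
    llama_grammar_free_impl(grammar);
    return 0;
}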
static bool test_build_grammar_fails(const std::string & grammar_str) {
@@ -45,17 +30,15 @@ static bool test_build_grammar_fails(const std::string & grammar_str) {
}
static bool match_string(const std::string & input, llama_grammar * grammar) {
-    auto decoded = decode_utf8(input, {});
-    const auto & code_points = decoded.first;
+    const auto cpts = unicode_cpts_from_utf8(input);
    const llama_grammar_rules  & rules      = llama_grammar_get_rules (grammar);
          llama_grammar_stacks & cur_stacks = llama_grammar_get_stacks(grammar);
-    for (auto it = code_points.begin(), end = code_points.end() - 1; it != end; ++it) {
+    for (const auto & cpt : cpts) {
        const llama_grammar_stacks prev_stacks = llama_grammar_get_stacks(grammar); // copy
-        llama_grammar_accept(rules, prev_stacks, *it, cur_stacks);
+        cur_stacks = llama_grammar_accept(rules, prev_stacks, cpt);
        if (cur_stacks.empty()) {
            // no stacks means that the grammar failed to match at this point
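
The notable change here is that llama_grammar_accept now returns the advanced stacks by value instead of writing through an output parameter (and the loop no longer needs the end() - 1 bound, since unicode_cpts_from_utf8 does not append a terminator). Distilled into a standalone matcher, as a sketch under the same internal headers:

#include "llama-grammar.h"
#include "unicode.h"

#include <cstdint>
#include <vector>

static bool match_codepoints(const std::vector<uint32_t> & cpts, llama_grammar * grammar) {
    const llama_grammar_rules  & rules      = llama_grammar_get_rules (grammar);
          llama_grammar_stacks & cur_stacks = llama_grammar_get_stacks(grammar);

    for (const uint32_t cpt : cpts) {
        const llama_grammar_stacks prev_stacks = llama_grammar_get_stacks(grammar); // copy

        cur_stacks = llama_grammar_accept(rules, prev_stacks, cpt);

        if (cur_stacks.empty()) {
            return false; // no surviving stack: the grammar rejected this code point
        }
    }

    return true; // the real test additionally requires an empty (completed) stack
}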
@@ -77,7 +60,7 @@ static void test(const std::string & test_desc, const std::string & grammar_str,
    fprintf(stderr, "⚫ Testing %s\n%s\n", test_desc.c_str(), grammar_str.c_str());
    fflush(stderr);
-    auto grammar = build_grammar(grammar_str);
+    auto * grammar = build_grammar(grammar_str);
    // Save the original grammar stacks so that we can reset after every new string we want to test
    const llama_grammar_stacks original_stacks = llama_grammar_get_stacks(grammar);
@@ -143,7 +126,7 @@ static void test(const std::string & test_desc, const std::string & grammar_str,
    }
    // Clean up allocated memory
-    llama_grammar_free(grammar);
+    llama_grammar_free_impl(grammar);
}
static void test_grammar(const std::string & test_desc, const std::string & grammar_str, const std::vector<std::string> & passing_strings, const std::vector<std::string> & failing_strings) {
test(test_desc + ". Grammar: " + grammar_str, grammar_str, passing_strings, failing_strings);
@@ -683,7 +666,8 @@ static void test_failure_missing_root() {
        term ::= number
        number ::= [0-9]+)""";
-    grammar_parser::parse_state parsed_grammar = grammar_parser::parse(grammar_str.c_str());
+    llama_grammar_parser parsed_grammar;
+    parsed_grammar.parse(grammar_str.c_str());
    // Ensure we parsed correctly
    assert(!parsed_grammar.rules.empty());
@@ -705,7 +689,8 @@ static void test_failure_missing_reference() {
    fprintf(stderr, "    Expected error:  ");
-    grammar_parser::parse_state parsed_grammar = grammar_parser::parse(grammar_str.c_str());
+    llama_grammar_parser parsed_grammar;
+    parsed_grammar.parse(grammar_str.c_str());
    // Ensure we did NOT parse correctly
    assert(parsed_grammar.rules.empty());
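
The parser migration visible in the last two hunks is mechanical: the free function grammar_parser::parse returning a parse_state becomes a llama_grammar_parser object with a parse() method and the same rules/symbol_ids fields. A self-contained sketch (illustrative grammar; assumes "llama-grammar.h"):

#include "llama-grammar.h"

#include <cstdio>

int main() {
    llama_grammar_parser parser;
    parser.parse(R"""(root ::= [0-9]+)""");

    // on failure, rules is left empty - the same condition the tests assert on
    if (parser.rules.empty() || parser.symbol_ids.find("root") == parser.symbol_ids.end()) {
        fprintf(stderr, "parse failed or missing root symbol\n");
        return 1;
    }
    return 0;
}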

View File

@@ -3,7 +3,7 @@
#endif
#include "llama.h"
-#include "grammar-parser.h"
+#include "llama-grammar.h"
#include <cassert>
@@ -22,7 +22,8 @@ static const char * type_str(llama_gretype type) {
static void verify_parsing(const char *grammar_bytes, const std::vector<std::pair<std::string, uint32_t>> expected, const std::vector<llama_grammar_element> &expected_rules) {
    uint32_t index = 0;
-    grammar_parser::parse_state parsed_grammar = grammar_parser::parse(grammar_bytes);
+    llama_grammar_parser parsed_grammar;
+    parsed_grammar.parse(grammar_bytes);
std::map<uint32_t, std::string> symbol_names;
for (auto it = parsed_grammar.symbol_ids.begin(); it != parsed_grammar.symbol_ids.end(); ++it) {
@@ -129,9 +130,10 @@ static void verify_parsing(const char *grammar_bytes, const std::vector<std::pai
    }
}
-static void verify_failure(const char *grammar_bytes) {
+static void verify_failure(const char * grammar_bytes) {
    fprintf(stderr, "Testing expected failure:%s\n", grammar_bytes);
-    auto result = grammar_parser::parse(grammar_bytes);
+    llama_grammar_parser result;
+    result.parse(grammar_bytes);
    assert(result.rules.empty() && "should have failed");
}

View File

@@ -2,14 +2,15 @@
#undef NDEBUG
#endif
+#include "json-schema-to-grammar.h"
+#include "llama-grammar.h"
#include <cassert>
#include <fstream>
#include <sstream>
#include <regex>
-#include "json-schema-to-grammar.h"
-#include "grammar-parser.h"
static std::string trim(const std::string & source) {
std::string s(source);
s.erase(0,s.find_first_not_of(" \n\r\t"));
@@ -40,7 +41,8 @@ struct TestCase {
    }
    void verify_expectation_parseable() const {
        try {
-            auto state = grammar_parser::parse(expected_grammar.c_str());
+            llama_grammar_parser state;
+            state.parse(expected_grammar.c_str());
            if (state.symbol_ids.find("root") == state.symbol_ids.end()) {
                throw std::runtime_error("Grammar failed to parse:\n" + expected_grammar);
            }

View File

@@ -2,16 +2,15 @@
#undef NDEBUG
#endif
-#define LLAMA_API_INTERNAL
#include "llama.h"
-#include "grammar-parser.h"
+#include "llama-grammar.h"
#include <cassert>
#include <stdexcept>

int main()
{
-    grammar_parser::parse_state parsed_grammar;
+    llama_grammar_parser parsed_grammar;
std::vector<std::pair<std::string, uint32_t>> expected = {
{"expr", 2},
@@ -117,7 +116,7 @@ int main()
    llama_grammar * grammar = NULL;
    std::vector<const llama_grammar_element *> grammar_rules(parsed_grammar.c_rules());

-    grammar = llama_grammar_init(grammar_rules.data(), grammar_rules.size(), parsed_grammar.symbol_ids.at("root"));
+    grammar = llama_grammar_init_impl(nullptr, grammar_rules.data(), grammar_rules.size(), parsed_grammar.symbol_ids.at("root"));
    if (grammar == nullptr)
    {
        throw std::runtime_error("Failed to initialize llama_grammar");
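
This is the lower-level overload of llama_grammar_init_impl: callers that need the parsed symbol table keep the manual parse -> c_rules() -> init pipeline. As a compact sketch (assumes the internal header; the grammar is illustrative):

#include "llama-grammar.h"

#include <stdexcept>
#include <vector>

static llama_grammar * build_from_rules() {
    llama_grammar_parser parsed;
    parsed.parse("root ::= [0-9]+");

    // c_rules() flattens the parsed rules into the element-pointer form
    // expected by this overload
    std::vector<const llama_grammar_element *> rules(parsed.c_rules());

    llama_grammar * grammar = llama_grammar_init_impl(
        nullptr, rules.data(), rules.size(), parsed.symbol_ids.at("root"));
    if (grammar == nullptr) {
        throw std::runtime_error("Failed to initialize llama_grammar");
    }
    return grammar;
}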
@@ -174,13 +173,13 @@ int main()
    }};

    auto index = 0;
-    for (auto stack : llama_grammar_get_stacks(grammar))
+    for (const llama_grammar_stack & stack : llama_grammar_get_stacks(grammar))
    {
        // compare stack to expected_stack
        for (uint32_t i = 0; i < stack.size(); i++)
        {
-            auto element = stack[i];
-            auto expected_element = expected_stacks[index][i];
+            const llama_grammar_element * element          = stack[i];
+            const llama_grammar_element & expected_element = expected_stacks[index][i];

            // pretty print error message before asserting
            if (expected_element.type != element->type || expected_element.value != element->value)
@@ -403,6 +402,8 @@ int main()
        delete[] candidate.code_points;
        candidate.code_points = nullptr;
    }
-    llama_grammar_free(grammar);
+    llama_grammar_free_impl(grammar);
return 0;
}
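
The typed loop above also documents the container shapes: llama_grammar_get_stacks returns llama_grammar_stacks (a vector of llama_grammar_stack), and each stack holds const llama_grammar_element pointers carrying a type/value pair. A small helper sketch built on that assumption:

#include "llama-grammar.h"

#include <cstdio>

static void dump_stacks(llama_grammar * grammar) {
    for (const llama_grammar_stack & stack : llama_grammar_get_stacks(grammar)) {
        for (const llama_grammar_element * element : stack) {
            printf("(%d, %u) ", (int) element->type, element->value);
        }
        printf("\n");
    }
}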

View File

@@ -1,5 +1,6 @@
#include "ggml.h"
#include "llama.h"
+#include "llama-sampling.h"
#ifdef NDEBUG
#undef NDEBUG
@@ -20,6 +21,7 @@ static void dump(const llama_token_data_array * candidates) {
static void test_top_k(const std::vector<float> & probs, const std::vector<float> & expected_probs, int k) {
    const size_t n_vocab = probs.size();

    std::vector<llama_token_data> candidates;
+    candidates.reserve(n_vocab);
    for (llama_token token_id = 0; token_id < (llama_token)n_vocab; token_id++) {
@@ -28,9 +30,9 @@ static void test_top_k(const std::vector<float> & probs, const std::vector<float
    }

    llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };

-    llama_sample_softmax(nullptr, &candidates_p);
+    llama_sampling_softmax_impl(&candidates_p);
    DUMP(&candidates_p);
-    llama_sample_top_k(nullptr, &candidates_p, k, 1);
+    llama_sampling_top_k_impl(&candidates_p, k, 1);
    DUMP(&candidates_p);

    GGML_ASSERT(candidates_p.size == expected_probs.size());
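
The sampling hunks all follow the same translation: drop the leading llama_context argument and call the _impl function on the candidate array directly. A self-contained sketch chaining a few of the new calls (assumes "llama-sampling.h" from this commit; the probabilities are illustrative):

#include "llama.h"
#include "llama-sampling.h"

#include <cmath>
#include <vector>

int main() {
    const std::vector<float> probs = { 0.1f, 0.2f, 0.3f, 0.4f };

    std::vector<llama_token_data> candidates;
    candidates.reserve(probs.size());
    for (llama_token id = 0; id < (llama_token) probs.size(); id++) {
        candidates.emplace_back(llama_token_data{ id, logf(probs[id]), 0.0f });
    }

    llama_token_data_array cur_p = { candidates.data(), candidates.size(), false };

    llama_sampling_softmax_impl(&cur_p);                              // sort + normalize
    llama_sampling_top_k_impl  (&cur_p, /*k =*/ 2,    /*min_keep =*/ 1);
    llama_sampling_top_p_impl  (&cur_p, /*p =*/ 0.9f, /*min_keep =*/ 1);

    return cur_p.size >= 1 ? 0 : 1;
}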
@@ -41,6 +43,7 @@ static void test_top_k(const std::vector<float> & probs, const std::vector<float
static void test_top_p(const std::vector<float> & probs, const std::vector<float> & expected_probs, float p) {
    const size_t n_vocab = probs.size();

    std::vector<llama_token_data> candidates;
+    candidates.reserve(n_vocab);
    for (llama_token token_id = 0; token_id < (llama_token)n_vocab; token_id++) {
@@ -49,9 +52,9 @@ static void test_top_p(const std::vector<float> & probs, const std::vector<float
    }

    llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };

-    llama_sample_softmax(nullptr, &candidates_p);
+    llama_sampling_softmax_impl(&candidates_p);
    DUMP(&candidates_p);
-    llama_sample_top_p(nullptr, &candidates_p, p, 1);
+    llama_sampling_top_p_impl(&candidates_p, p, 1);
    DUMP(&candidates_p);

    GGML_ASSERT(candidates_p.size == expected_probs.size());
@@ -62,6 +65,7 @@ static void test_top_p(const std::vector<float> & probs, const std::vector<float
static void test_tfs(const std::vector<float> & probs, const std::vector<float> & expected_probs, float z) {
    const size_t n_vocab = probs.size();

    std::vector<llama_token_data> candidates;
+    candidates.reserve(n_vocab);
    for (llama_token token_id = 0; token_id < (llama_token)n_vocab; token_id++) {
@@ -71,7 +75,7 @@ static void test_tfs(const std::vector<float> & probs, const std::vector<float>
    llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };

    DUMP(&candidates_p);
-    llama_sample_tail_free(nullptr, &candidates_p, z, 1);
+    llama_sampling_tail_free_impl(&candidates_p, z, 1);
    DUMP(&candidates_p);

    GGML_ASSERT(candidates_p.size == expected_probs.size());
@@ -82,6 +86,7 @@ static void test_tfs(const std::vector<float> & probs, const std::vector<float>
static void test_min_p(const std::vector<float> & probs, const std::vector<float> & expected_probs, float p) {
    const size_t n_vocab = probs.size();

    std::vector<llama_token_data> candidates;
+    candidates.reserve(n_vocab);
    for (llama_token token_id = 0; token_id < (llama_token)n_vocab; token_id++) {
@@ -91,9 +96,9 @@ static void test_min_p(const std::vector<float> & probs, const std::vector<float
    llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };

    DUMP(&candidates_p);
-    llama_sample_min_p(nullptr, &candidates_p, p, 1);
+    llama_sampling_min_p_impl(&candidates_p, p, 1);
    DUMP(&candidates_p);
-    llama_sample_softmax(nullptr, &candidates_p);
+    llama_sampling_softmax_impl(&candidates_p);

    GGML_ASSERT(candidates_p.size == expected_probs.size());
    for (size_t i = 0; i < candidates_p.size; i++) {
@@ -103,6 +108,7 @@ static void test_min_p(const std::vector<float> & probs, const std::vector<float
static void test_typical(const std::vector<float> & probs, const std::vector<float> & expected_probs, float p) {
    const size_t n_vocab = probs.size();

    std::vector<llama_token_data> candidates;
+    candidates.reserve(n_vocab);
    for (llama_token token_id = 0; token_id < (llama_token)n_vocab; token_id++) {
@@ -112,7 +118,7 @@ static void test_typical(const std::vector<float> & probs, const std::vector<flo
    llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };

    DUMP(&candidates_p);
-    llama_sample_typical(nullptr, &candidates_p, p, 1);
+    llama_sampling_typical_impl(&candidates_p, p, 1);
    DUMP(&candidates_p);

    GGML_ASSERT(candidates_p.size == expected_probs.size());
@@ -121,13 +127,14 @@ static void test_typical(const std::vector<float> & probs, const std::vector<flo
    }
}

-static void test_repetition_penalties(
+static void test_penalties(
    const std::vector<float> & probs, const std::vector<llama_token> & last_tokens,
    const std::vector<float> & expected_probs, float repeat_penalty, float alpha_frequency, float alpha_presence
) {
    GGML_ASSERT(probs.size() == expected_probs.size());

    const size_t n_vocab = probs.size();

    std::vector<llama_token_data> candidates;
+    candidates.reserve(n_vocab);
    for (llama_token token_id = 0; token_id < (llama_token)n_vocab; token_id++) {
@@ -135,11 +142,16 @@ static void test_repetition_penalties(
        candidates.emplace_back(llama_token_data{token_id, logit, 0.0f});
    }

+    llama_token_cnt token_count;
+    for (size_t i = 0; i < last_tokens.size(); i++) {
+        token_count[last_tokens[i]]++;
+    }

    llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };

-    llama_sample_softmax(nullptr, &candidates_p);
+    llama_sampling_softmax_impl(&candidates_p);
    DUMP(&candidates_p);
-    llama_sample_repetition_penalties(nullptr, &candidates_p, (const llama_token *) last_tokens.data(), last_tokens.size(), repeat_penalty, alpha_frequency, alpha_presence);
-    llama_sample_softmax(nullptr, &candidates_p);
+    llama_sampling_penalties_impl(&candidates_p, token_count, repeat_penalty, alpha_frequency, alpha_presence);
+    llama_sampling_softmax_impl(&candidates_p);
    DUMP(&candidates_p);

    GGML_ASSERT(candidates_p.size == expected_probs.size());
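
Besides the rename, the penalty sampler's interface changes shape: instead of a raw array of recent tokens it now takes a llama_token_cnt map of token frequencies, which the caller builds as in the hunk above. A sketch of that calling pattern (the penalty values are illustrative):

#include "llama.h"
#include "llama-sampling.h"

#include <vector>

static void apply_penalties(llama_token_data_array * cur_p, const std::vector<llama_token> & last_tokens) {
    llama_token_cnt token_count;
    for (const llama_token tok : last_tokens) {
        token_count[tok]++; // occurrence count of each recent token
    }

    llama_sampling_penalties_impl(cur_p, token_count,
        /*penalty_repeat  =*/ 1.1f,
        /*penalty_freq    =*/ 0.0f,
        /*penalty_present =*/ 0.0f);
}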
@@ -148,8 +160,7 @@
    }
}

-static void test_sampler_queue(
-                   const size_t n_vocab, const std::string samplers_sequence, const int top_k, const float top_p, const float min_p
+static void test_sampler_queue(const size_t n_vocab, const std::string & samplers_sequence, const int top_k, const float top_p, const float min_p
) {
std::vector<llama_token_data> candidates;
candidates.reserve(n_vocab);
@@ -165,16 +176,16 @@ static void test_sampler_queue(
    for (auto s : samplers_sequence) {
        switch (s){
-            case 'k': llama_sample_top_k    (nullptr, &candidates_p, top_k, 1); break;
+            case 'k': llama_sampling_top_k_impl(&candidates_p, top_k, 1); break;
            case 'f': GGML_ABORT("tail_free test not implemented");
            case 'y': GGML_ABORT("typical test not implemented");
-            case 'p': llama_sample_top_p    (nullptr, &candidates_p, top_p, 1); break;
-            case 'm': llama_sample_min_p    (nullptr, &candidates_p, min_p, 1); break;
+            case 'p': llama_sampling_top_p_impl(&candidates_p, top_p, 1); break;
+            case 'm': llama_sampling_min_p_impl(&candidates_p, min_p, 1); break;
            case 't': GGML_ABORT("temperature test not implemented");
            default : GGML_ABORT("Unknown sampler");
        }

-        llama_sample_softmax(nullptr, &candidates_p); // make sure tokens are sorted for tests
+        llama_sampling_softmax_impl(&candidates_p); // make sure tokens are sorted for tests
const int size = candidates_p.size;
@@ -259,13 +270,13 @@ int main(void) {
    test_typical({0.97f, 0.01f, 0.01f, 0.01f}, {0.97f}, 0.5f);
    test_typical({0.4f, 0.2f, 0.2f, 0.2f}, {0.2f, 0.2f, 0.2f}, 0.5f);

-    test_repetition_penalties({0.2f, 0.2f, 0.2f, 0.2f, 0.2f}, {0}, {0.25f, 0.25f, 0.25f, 0.25f, 0}, 50.0f, 0.0f, 0.0f);
-    test_repetition_penalties({0.2f, 0.2f, 0.2f, 0.2f, 0.2f}, {0, 1, 2}, {0.5f, 0.5f, 0, 0, 0}, 50.0f, 0.0f, 0.0f);
-    test_repetition_penalties({0.2f, 0.2f, 0.2f, 0.2f, 0.2f}, {0, 1, 2, 0, 0}, {0.5f, 0.5f, 0, 0, 0}, 50.0f, 0.0f, 0.0f);
+    test_penalties({0.2f, 0.2f, 0.2f, 0.2f, 0.2f}, {0}, {0.25f, 0.25f, 0.25f, 0.25f, 0}, 50.0f, 0.0f, 0.0f);
+    test_penalties({0.2f, 0.2f, 0.2f, 0.2f, 0.2f}, {0, 1, 2}, {0.5f, 0.5f, 0, 0, 0}, 50.0f, 0.0f, 0.0f);
+    test_penalties({0.2f, 0.2f, 0.2f, 0.2f, 0.2f}, {0, 1, 2, 0, 0}, {0.5f, 0.5f, 0, 0, 0}, 50.0f, 0.0f, 0.0f);

-    test_repetition_penalties({0.2f, 0.2f, 0.2f, 0.2f, 0.2f}, {0}, {0.249997f, 0.249997f, 0.249997f, 0.249997f, 0.000011f}, 1.0f, 5.0f, 5.0f);
-    test_repetition_penalties({0.2f, 0.2f, 0.2f, 0.2f, 0.2f}, {0, 1, 2}, {0.499966f, 0.499966f, 0.000023f, 0.000023f, 0.000023f}, 1.0f, 5.0f, 5.0f);
-    test_repetition_penalties({0.2f, 0.2f, 0.2f, 0.2f, 0.2f}, {0, 1, 2, 0, 0}, {0.499977f, 0.499977f, 0.000023f, 0.000023f, 0.000000f}, 1.0f, 5.0f, 5.0f);
+    test_penalties({0.2f, 0.2f, 0.2f, 0.2f, 0.2f}, {0}, {0.249997f, 0.249997f, 0.249997f, 0.249997f, 0.000011f}, 1.0f, 5.0f, 5.0f);
+    test_penalties({0.2f, 0.2f, 0.2f, 0.2f, 0.2f}, {0, 1, 2}, {0.499966f, 0.499966f, 0.000023f, 0.000023f, 0.000023f}, 1.0f, 5.0f, 5.0f);
+    test_penalties({0.2f, 0.2f, 0.2f, 0.2f, 0.2f}, {0, 1, 2, 0, 0}, {0.499977f, 0.499977f, 0.000023f, 0.000023f, 0.000000f}, 1.0f, 5.0f, 5.0f);
test_sampler_queue(10000, "k", 10000, 1.0f, 1.0f);
test_sampler_queue(10000, "k", 1, 1.0f, 1.0f);