Mirror of https://github.com/ggml-org/llama.cpp.git, last synced 2025-08-18 14:18:50 -04:00.
download in batches
This commit is contained in:
@@ -4,7 +4,8 @@
|
||||
#include <string>
|
||||
#include <fstream>
|
||||
#include <vector>
|
||||
#include <json.hpp>
|
||||
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
using json = nlohmann::json;
|
||||
|
||||
@@ -109,7 +110,24 @@ int main(void) {
|
||||
}
|
||||
}
|
||||
|
||||
if (common_download_file_multiple(files, {}, false)) {
|
||||
if (!files.empty()) {
|
||||
bool downloaded = false;
|
||||
const size_t batch_size = 6;
|
||||
size_t batches = (files.size() + batch_size - 1) / batch_size;
|
||||
|
||||
for (size_t i = 0; i < batches; i++) {
|
||||
size_t batch_pos = (i * batch_size);
|
||||
size_t batch_step = batch_pos + batch_size;
|
||||
auto batch_begin = files.begin() + batch_pos;
|
||||
auto batch_end = batch_step >= files.size() ? files.end() : files.begin() + batch_step;
|
||||
std::vector<std::pair<std::string, std::string>> batch(batch_begin, batch_end);
|
||||
|
||||
if (!(downloaded = common_download_file_multiple(batch, {}, false))) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (downloaded) {
|
||||
std::string dir_sep(1, DIRECTORY_SEPARATOR);
|
||||
|
||||
for (auto const & item : files) {
|
||||
@@ -136,6 +154,7 @@ int main(void) {
|
||||
} else {
|
||||
printf("test-tokenizers-remote: failed to download files, unable to perform tests...\n");
|
||||
}
|
||||
}
|
||||
} else {
|
||||
printf("test-tokenizers-remote: failed to retrieve repository info, unable to perform tests...\n");
|
||||
}
|
||||
|
Reference in New Issue
Block a user