2011-03-19 06:37:00 +08:00
|
|
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
|
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
|
|
|
2019-05-03 02:01:00 +08:00
|
|
|
#include <sys/types.h>
|
|
|
|
|
2021-01-11 23:32:34 +08:00
|
|
|
#include <atomic>
|
2020-04-30 03:59:39 +08:00
|
|
|
#include <cstdio>
|
|
|
|
#include <cstdlib>
|
|
|
|
|
2011-03-31 02:35:40 +08:00
|
|
|
#include "leveldb/cache.h"
|
2021-01-11 23:32:34 +08:00
|
|
|
#include "leveldb/comparator.h"
|
2011-03-31 02:35:40 +08:00
|
|
|
#include "leveldb/db.h"
|
|
|
|
#include "leveldb/env.h"
|
2018-03-16 21:23:29 +08:00
|
|
|
#include "leveldb/filter_policy.h"
|
2011-03-31 02:35:40 +08:00
|
|
|
#include "leveldb/write_batch.h"
|
2011-03-26 04:27:43 +08:00
|
|
|
#include "port/port.h"
|
|
|
|
#include "util/crc32c.h"
|
2011-03-19 06:37:00 +08:00
|
|
|
#include "util/histogram.h"
|
2011-08-23 05:08:51 +08:00
|
|
|
#include "util/mutexlock.h"
|
2011-03-19 06:37:00 +08:00
|
|
|
#include "util/random.h"
|
|
|
|
#include "util/testutil.h"
|
|
|
|
|
|
|
|
// Comma-separated list of operations to run in the specified order
|
|
|
|
// Actual benchmarks:
|
2011-03-23 02:32:49 +08:00
|
|
|
// fillseq -- write N values in sequential key order in async mode
|
|
|
|
// fillrandom -- write N values in random key order in async mode
|
|
|
|
// overwrite -- overwrite N values in random key order in async mode
|
|
|
|
// fillsync -- write N/100 values in random key order in sync mode
|
|
|
|
// fill100K -- write N/1000 100K values in random order in async mode
|
2012-04-17 23:36:46 +08:00
|
|
|
// deleteseq -- delete N keys in sequential order
|
|
|
|
// deleterandom -- delete N keys in random order
|
2011-05-21 10:17:43 +08:00
|
|
|
// readseq -- read N times sequentially
|
|
|
|
// readreverse -- read N times in reverse order
|
|
|
|
// readrandom -- read N times in random order
|
2012-04-17 23:36:46 +08:00
|
|
|
// readmissing -- read N missing keys in random order
|
2011-05-21 10:17:43 +08:00
|
|
|
// readhot -- read N times in random order from 1% section of DB
|
2012-04-17 23:36:46 +08:00
|
|
|
// seekrandom -- N random seeks
|
2021-01-11 23:32:34 +08:00
|
|
|
// seekordered -- N ordered seeks
|
2014-12-12 00:08:57 +08:00
|
|
|
// open -- cost of opening a DB
|
2011-03-26 04:27:43 +08:00
|
|
|
// crc32c -- repeated crc32c of 4K of data
|
2011-03-19 06:37:00 +08:00
|
|
|
// Meta operations:
|
|
|
|
// compact -- Compact the entire DB
|
2011-04-13 03:38:58 +08:00
|
|
|
// stats -- Print DB stats
|
2012-04-17 23:36:46 +08:00
|
|
|
// sstables -- Print sstable info
|
2011-03-19 06:37:00 +08:00
|
|
|
// heapprofile -- Dump a heap profile (if supported by this port)
|
|
|
|
// Note: the trailing comma yields a final empty benchmark name, which the
// dispatch loop in Benchmark::Run() silently ignores.
static const char* FLAGS_benchmarks =
    "fillseq,"
    "fillsync,"
    "fillrandom,"
    "overwrite,"
    "readrandom,"
    "readrandom,"  // Extra run to allow previous compactions to quiesce
    "readseq,"
    "readreverse,"
    "compact,"
    "readrandom,"
    "readseq,"
    "readreverse,"
    "fill100K,"
    "crc32c,"
    "snappycomp,"
    "snappyuncomp,"
    "zstdcomp,"
    "zstduncomp,";
|
2011-03-19 06:37:00 +08:00
|
|
|
|
|
|
|
// Number of key/values to place in database
static int FLAGS_num = 1000000;

// Number of read operations to do. If negative, do FLAGS_num reads.
static int FLAGS_reads = -1;

// Number of concurrent threads to run.
static int FLAGS_threads = 1;

// Size of each value (bytes)
static int FLAGS_value_size = 100;

// Arrange to generate values that shrink to this fraction of
// their original size after compression
static double FLAGS_compression_ratio = 0.5;

// Print histogram of operation timings
static bool FLAGS_histogram = false;

// Count the number of string comparisons performed
static bool FLAGS_comparisons = false;

// Number of bytes to buffer in memtable before compacting
// (initialized to default value by "main")
static int FLAGS_write_buffer_size = 0;

// Number of bytes written to each file.
// (initialized to default value by "main")
static int FLAGS_max_file_size = 0;

// Approximate size of user data packed per block (before compression).
// (initialized to default value by "main")
static int FLAGS_block_size = 0;

// Number of bytes to use as a cache of uncompressed data.
// Negative means use default settings.
static int FLAGS_cache_size = -1;

// Maximum number of files to keep open at the same time (use default if == 0)
static int FLAGS_open_files = 0;

// Bloom filter bits per key.
// Negative means use default settings.
static int FLAGS_bloom_bits = -1;

// Common key prefix length (bytes of 'a' padding before the numeric key).
static int FLAGS_key_prefix = 0;

// If true, do not destroy the existing database. If you set this
// flag and also specify a benchmark that wants a fresh database, that
// benchmark will fail.
static bool FLAGS_use_existing_db = false;

// If true, reuse existing log/MANIFEST files when re-opening a database.
static bool FLAGS_reuse_logs = false;

// If true, use compression.
static bool FLAGS_compression = true;

// Use the db with the following name.
static const char* FLAGS_db = nullptr;

// ZSTD compression level to try out
static int FLAGS_zstd_compression_level = 1;
|
|
|
|
|
2011-03-19 06:37:00 +08:00
|
|
|
namespace leveldb {

namespace {

// Global environment used for all file-system and thread operations.
// Starts as nullptr; assumed to be assigned before any benchmark runs
// (initialization is not visible in this chunk — presumably done in main).
leveldb::Env* g_env = nullptr;
|
2011-08-23 05:08:51 +08:00
|
|
|
|
2021-01-11 23:32:34 +08:00
|
|
|
class CountComparator : public Comparator {
|
|
|
|
public:
|
|
|
|
CountComparator(const Comparator* wrapped) : wrapped_(wrapped) {}
|
|
|
|
~CountComparator() override {}
|
2021-01-13 05:54:35 +08:00
|
|
|
int Compare(const Slice& a, const Slice& b) const override {
|
2021-01-11 23:32:34 +08:00
|
|
|
count_.fetch_add(1, std::memory_order_relaxed);
|
|
|
|
return wrapped_->Compare(a, b);
|
|
|
|
}
|
|
|
|
const char* Name() const override { return wrapped_->Name(); }
|
|
|
|
void FindShortestSeparator(std::string* start,
|
|
|
|
const Slice& limit) const override {
|
|
|
|
wrapped_->FindShortestSeparator(start, limit);
|
|
|
|
}
|
|
|
|
|
|
|
|
void FindShortSuccessor(std::string* key) const override {
|
|
|
|
return wrapped_->FindShortSuccessor(key);
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t comparisons() const { return count_.load(std::memory_order_relaxed); }
|
|
|
|
|
|
|
|
void reset() { count_.store(0, std::memory_order_relaxed); }
|
|
|
|
|
|
|
|
private:
|
2021-01-13 05:54:35 +08:00
|
|
|
mutable std::atomic<size_t> count_{0};
|
2021-01-11 23:32:34 +08:00
|
|
|
const Comparator* const wrapped_;
|
|
|
|
};
|
|
|
|
|
2011-08-23 05:08:51 +08:00
|
|
|
// Helper for quickly generating random data.
|
2011-03-19 06:37:00 +08:00
|
|
|
class RandomGenerator {
|
|
|
|
private:
|
|
|
|
std::string data_;
|
|
|
|
int pos_;
|
|
|
|
|
|
|
|
public:
|
|
|
|
RandomGenerator() {
|
|
|
|
// We use a limited amount of data over and over again and ensure
|
|
|
|
// that it is larger than the compression window (32KB), and also
|
|
|
|
// large enough to serve all typical value sizes we want to write.
|
|
|
|
Random rnd(301);
|
|
|
|
std::string piece;
|
|
|
|
while (data_.size() < 1048576) {
|
|
|
|
// Add a short fragment that is as compressible as specified
|
|
|
|
// by FLAGS_compression_ratio.
|
|
|
|
test::CompressibleString(&rnd, FLAGS_compression_ratio, 100, &piece);
|
|
|
|
data_.append(piece);
|
|
|
|
}
|
|
|
|
pos_ = 0;
|
|
|
|
}
|
|
|
|
|
2013-12-11 02:36:31 +08:00
|
|
|
Slice Generate(size_t len) {
|
2011-03-19 06:37:00 +08:00
|
|
|
if (pos_ + len > data_.size()) {
|
|
|
|
pos_ = 0;
|
|
|
|
assert(len < data_.size());
|
|
|
|
}
|
|
|
|
pos_ += len;
|
|
|
|
return Slice(data_.data() + pos_ - len, len);
|
|
|
|
}
|
|
|
|
};
|
2011-03-23 02:32:49 +08:00
|
|
|
|
2021-01-11 23:32:34 +08:00
|
|
|
class KeyBuffer {
|
|
|
|
public:
|
|
|
|
KeyBuffer() {
|
|
|
|
assert(FLAGS_key_prefix < sizeof(buffer_));
|
|
|
|
memset(buffer_, 'a', FLAGS_key_prefix);
|
|
|
|
}
|
|
|
|
KeyBuffer& operator=(KeyBuffer& other) = delete;
|
|
|
|
KeyBuffer(KeyBuffer& other) = delete;
|
|
|
|
|
|
|
|
void Set(int k) {
|
|
|
|
std::snprintf(buffer_ + FLAGS_key_prefix,
|
|
|
|
sizeof(buffer_) - FLAGS_key_prefix, "%016d", k);
|
|
|
|
}
|
|
|
|
|
|
|
|
Slice slice() const { return Slice(buffer_, FLAGS_key_prefix + 16); }
|
|
|
|
|
|
|
|
private:
|
|
|
|
char buffer_[1024];
|
|
|
|
};
|
|
|
|
|
2015-07-23 01:20:21 +08:00
|
|
|
#if defined(__linux)
|
2011-03-23 02:32:49 +08:00
|
|
|
static Slice TrimSpace(Slice s) {
|
2013-12-11 02:36:31 +08:00
|
|
|
size_t start = 0;
|
2011-03-23 02:32:49 +08:00
|
|
|
while (start < s.size() && isspace(s[start])) {
|
|
|
|
start++;
|
|
|
|
}
|
2013-12-11 02:36:31 +08:00
|
|
|
size_t limit = s.size();
|
2019-05-03 02:01:00 +08:00
|
|
|
while (limit > start && isspace(s[limit - 1])) {
|
2011-03-23 02:32:49 +08:00
|
|
|
limit--;
|
|
|
|
}
|
|
|
|
return Slice(s.data() + start, limit - start);
|
|
|
|
}
|
2015-07-23 01:20:21 +08:00
|
|
|
#endif
|
2011-03-23 02:32:49 +08:00
|
|
|
|
2011-08-23 05:08:51 +08:00
|
|
|
static void AppendWithSpace(std::string* str, Slice msg) {
|
|
|
|
if (msg.empty()) return;
|
|
|
|
if (!str->empty()) {
|
|
|
|
str->push_back(' ');
|
|
|
|
}
|
|
|
|
str->append(msg.data(), msg.size());
|
|
|
|
}
|
|
|
|
|
|
|
|
// Accumulates timing and throughput statistics for one benchmark thread.
// Per-thread Stats objects are Merge()d together before Report() is called.
class Stats {
 private:
  double start_;           // Wall-clock start of the run, in micros.
  double finish_;          // Wall-clock end of the run, in micros.
  double seconds_;         // Elapsed seconds (summed across Merge()s).
  int done_;               // Operations completed so far.
  int next_report_;        // Print next progress line when done_ hits this.
  int64_t bytes_;          // Payload bytes processed, for MB/s reporting.
  double last_op_finish_;  // Finish time of the previous op (histogram mode).
  Histogram hist_;         // Per-op latency histogram, in micros.
  std::string message_;    // Extra text appended to the report line.

 public:
  Stats() { Start(); }

  // Resets all counters and timestamps; called when a thread begins work.
  void Start() {
    next_report_ = 100;
    hist_.Clear();
    done_ = 0;
    bytes_ = 0;
    seconds_ = 0;
    message_.clear();
    start_ = finish_ = last_op_finish_ = g_env->NowMicros();
  }

  // Folds another thread's stats into this one: counts and byte totals
  // are summed, and the elapsed interval becomes the union of both.
  void Merge(const Stats& other) {
    hist_.Merge(other.hist_);
    done_ += other.done_;
    bytes_ += other.bytes_;
    seconds_ += other.seconds_;
    if (other.start_ < start_) start_ = other.start_;
    if (other.finish_ > finish_) finish_ = other.finish_;

    // Just keep the messages from one thread
    if (message_.empty()) message_ = other.message_;
  }

  // Stamps the end time and computes this thread's elapsed seconds.
  void Stop() {
    finish_ = g_env->NowMicros();
    seconds_ = (finish_ - start_) * 1e-6;
  }

  void AddMessage(Slice msg) { AppendWithSpace(&message_, msg); }

  // Records completion of one operation: feeds the latency histogram when
  // --histogram is set, and prints periodic progress to stderr.
  void FinishedSingleOp() {
    if (FLAGS_histogram) {
      double now = g_env->NowMicros();
      double micros = now - last_op_finish_;
      hist_.Add(micros);
      if (micros > 20000) {
        // Flag unusually slow single operations (>20ms) as they happen.
        std::fprintf(stderr, "long op: %.1f micros%30s\r", micros, "");
        std::fflush(stderr);
      }
      last_op_finish_ = now;
    }

    done_++;
    if (done_ >= next_report_) {
      // Progress cadence grows coarser as the op count climbs, so output
      // volume stays roughly constant across benchmark sizes.
      if (next_report_ < 1000)
        next_report_ += 100;
      else if (next_report_ < 5000)
        next_report_ += 500;
      else if (next_report_ < 10000)
        next_report_ += 1000;
      else if (next_report_ < 50000)
        next_report_ += 5000;
      else if (next_report_ < 100000)
        next_report_ += 10000;
      else if (next_report_ < 500000)
        next_report_ += 50000;
      else
        next_report_ += 100000;
      std::fprintf(stderr, "... finished %d ops%30s\r", done_, "");
      std::fflush(stderr);
    }
  }

  void AddBytes(int64_t n) { bytes_ += n; }

  // Prints the final one-line result (and optional histogram) for `name`.
  void Report(const Slice& name) {
    // Pretend at least one op was done in case we are running a benchmark
    // that does not call FinishedSingleOp().
    if (done_ < 1) done_ = 1;

    std::string extra;
    if (bytes_ > 0) {
      // Rate is computed on actual elapsed time, not the sum of per-thread
      // elapsed times.
      double elapsed = (finish_ - start_) * 1e-6;
      char rate[100];
      std::snprintf(rate, sizeof(rate), "%6.1f MB/s",
                    (bytes_ / 1048576.0) / elapsed);
      extra = rate;
    }
    AppendWithSpace(&extra, message_);

    std::fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
                 name.ToString().c_str(), seconds_ * 1e6 / done_,
                 (extra.empty() ? "" : " "), extra.c_str());
    if (FLAGS_histogram) {
      std::fprintf(stdout, "Microseconds per op:\n%s\n",
                   hist_.ToString().c_str());
    }
    std::fflush(stdout);
  }
};
|
|
|
|
|
|
|
|
// State shared by all concurrent executions of the same benchmark.
|
|
|
|
struct SharedState {
  port::Mutex mu;
  port::CondVar cv GUARDED_BY(mu);  // Signaled on init/start/done changes.
  int total GUARDED_BY(mu);         // Total number of worker threads.

  // Each thread goes through the following states:
  // (1) initializing
  // (2) waiting for others to be initialized
  // (3) running
  // (4) done

  int num_initialized GUARDED_BY(mu);  // Threads past state (1).
  int num_done GUARDED_BY(mu);         // Threads that reached state (4).
  bool start GUARDED_BY(mu);           // Set once all threads may run.

  SharedState(int total)
      : cv(&mu), total(total), num_initialized(0), num_done(0), start(false) {}
};
|
|
|
|
|
|
|
|
// Per-thread state for concurrent executions of the same benchmark.
|
|
|
|
struct ThreadState {
  int tid;              // 0..n-1 when running in n threads
  Random rand;          // Has different seeds for different threads
  Stats stats;          // Per-thread timing/throughput counters.
  SharedState* shared;  // Barrier state shared with sibling threads.

  ThreadState(int index, int seed) : tid(index), rand(seed), shared(nullptr) {}
};
|
|
|
|
|
2023-02-16 08:04:43 +08:00
|
|
|
// Repeatedly compresses one block-sized chunk of generated data until
// roughly 1GB of input has been processed, recording throughput and the
// achieved output ratio in the thread's stats.
void Compress(
    ThreadState* thread, std::string name,
    std::function<bool(const char*, size_t, std::string*)> compress_func) {
  RandomGenerator gen;
  Slice input = gen.Generate(Options().block_size);
  std::string compressed;
  int64_t input_bytes = 0;
  int64_t output_bytes = 0;
  bool succeeded = true;
  while (succeeded && input_bytes < 1024 * 1048576) {  // Compress 1G
    succeeded = compress_func(input.data(), input.size(), &compressed);
    output_bytes += compressed.size();
    input_bytes += input.size();
    thread->stats.FinishedSingleOp();
  }

  if (succeeded) {
    char buf[100];
    std::snprintf(buf, sizeof(buf), "(output: %.1f%%)",
                  (output_bytes * 100.0) / input_bytes);
    thread->stats.AddMessage(buf);
    thread->stats.AddBytes(input_bytes);
  } else {
    thread->stats.AddMessage("(" + name + " failure)");
  }
}
|
|
|
|
|
|
|
|
// Compresses one block-sized chunk once, then repeatedly uncompresses it
// until roughly 1GB of original data has been reproduced, recording
// throughput in the thread's stats.
void Uncompress(
    ThreadState* thread, std::string name,
    std::function<bool(const char*, size_t, std::string*)> compress_func,
    std::function<bool(const char*, size_t, char*)> uncompress_func) {
  RandomGenerator gen;
  Slice input = gen.Generate(Options().block_size);
  std::string compressed;
  bool ok = compress_func(input.data(), input.size(), &compressed);
  int64_t bytes = 0;
  // Output buffer: uncompressing `compressed` yields input.size() bytes.
  char* uncompressed = new char[input.size()];
  while (ok && bytes < 1024 * 1048576) {  // Uncompress ~1G of original data
    ok = uncompress_func(compressed.data(), compressed.size(), uncompressed);
    bytes += input.size();
    thread->stats.FinishedSingleOp();
  }
  delete[] uncompressed;

  if (!ok) {
    thread->stats.AddMessage("(" + name + " failure)");
  } else {
    thread->stats.AddBytes(bytes);
  }
}
|
|
|
|
|
2011-11-01 01:22:06 +08:00
|
|
|
} // namespace
|
2011-03-19 06:37:00 +08:00
|
|
|
|
|
|
|
// Drives the benchmark suite: owns the DB plus per-run knobs that
// individual benchmarks may override (num_, value_size_, ...).
class Benchmark {
 private:
  Cache* cache_;                       // Block cache (nullptr = default).
  const FilterPolicy* filter_policy_;  // Bloom policy (nullptr = none).
  DB* db_;
  int num_;                // Entry count for the current benchmark.
  int value_size_;         // Value size for the current benchmark.
  int entries_per_batch_;  // Writes grouped into each batch.
  WriteOptions write_options_;
  int reads_;        // Read-op count for the current benchmark.
  int heap_counter_;  // Sequence number for heap profile dumps.
  // Wraps BytewiseComparator() to count comparisons (see --comparisons).
  CountComparator count_comparator_;
  int total_thread_count_;  // Threads started across all benchmarks (seeds).
|
2011-03-19 06:37:00 +08:00
|
|
|
|
2011-03-23 02:32:49 +08:00
|
|
|
// Prints the run configuration banner (key/value sizes, entry count,
// estimated raw and on-disk sizes) to stdout before benchmarks start.
void PrintHeader() {
  const int kKeySize = 16 + FLAGS_key_prefix;
  PrintEnvironment();
  std::fprintf(stdout, "Keys: %d bytes each\n", kKeySize);
  std::fprintf(
      stdout, "Values: %d bytes each (%d bytes after compression)\n",
      FLAGS_value_size,
      static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
  std::fprintf(stdout, "Entries: %d\n", num_);
  // int64_t cast guards the multiply against 32-bit overflow for large num_.
  std::fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
               ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
                1048576.0));
  std::fprintf(
      stdout, "FileSize: %.1f MB (estimated)\n",
      (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) /
       1048576.0));
  PrintWarnings();
  std::fprintf(stdout, "------------------------------------------------\n");
}
|
|
|
|
|
|
|
|
// Warns about build configurations (no optimization, assertions on,
// missing/ineffective Snappy) that would skew benchmark results.
void PrintWarnings() {
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
  std::fprintf(
      stdout,
      "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
#endif
#ifndef NDEBUG
  std::fprintf(
      stdout,
      "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
#endif

  // See if snappy is working by attempting to compress a compressible string
  const char text[] = "yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy";
  std::string compressed;
  if (!port::Snappy_Compress(text, sizeof(text), &compressed)) {
    std::fprintf(stdout, "WARNING: Snappy compression is not enabled\n");
  } else if (compressed.size() >= sizeof(text)) {
    std::fprintf(stdout, "WARNING: Snappy compression is not effective\n");
  }
}
|
|
|
|
|
|
|
|
// Prints the LevelDB version and, on Linux, the current date plus CPU
// model and cache size parsed from /proc/cpuinfo — all to stderr.
void PrintEnvironment() {
  std::fprintf(stderr, "LevelDB: version %d.%d\n", kMajorVersion,
               kMinorVersion);

#if defined(__linux)
  time_t now = time(nullptr);
  std::fprintf(stderr, "Date: %s",
               ctime(&now));  // ctime() adds newline

  FILE* cpuinfo = std::fopen("/proc/cpuinfo", "r");
  if (cpuinfo != nullptr) {
    char line[1000];
    int num_cpus = 0;
    std::string cpu_type;
    std::string cache_size;
    while (fgets(line, sizeof(line), cpuinfo) != nullptr) {
      const char* sep = strchr(line, ':');
      if (sep == nullptr) {
        continue;
      }
      // Each line is "key\t: value"; split at ':' and trim both halves.
      Slice key = TrimSpace(Slice(line, sep - 1 - line));
      Slice val = TrimSpace(Slice(sep + 1));
      if (key == "model name") {
        ++num_cpus;  // /proc/cpuinfo has one "model name" per logical CPU.
        cpu_type = val.ToString();
      } else if (key == "cache size") {
        cache_size = val.ToString();
      }
    }
    std::fclose(cpuinfo);
    std::fprintf(stderr, "CPU: %d * %s\n", num_cpus, cpu_type.c_str());
    std::fprintf(stderr, "CPUCache: %s\n", cache_size.c_str());
  }
#endif
}
|
|
|
|
|
2011-03-19 06:37:00 +08:00
|
|
|
public:
|
2011-04-13 03:38:58 +08:00
|
|
|
// Sets up shared state from the FLAGS_* values: optional block cache and
// bloom filter, removal of stale heap-profile dumps, and (unless
// --use_existing_db) destruction of any database left by a previous run.
Benchmark()
    : cache_(FLAGS_cache_size >= 0 ? NewLRUCache(FLAGS_cache_size) : nullptr),
      filter_policy_(FLAGS_bloom_bits >= 0
                         ? NewBloomFilterPolicy(FLAGS_bloom_bits)
                         : nullptr),
      db_(nullptr),
      num_(FLAGS_num),
      value_size_(FLAGS_value_size),
      entries_per_batch_(1),
      reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
      heap_counter_(0),
      count_comparator_(BytewiseComparator()),
      total_thread_count_(0) {
  std::vector<std::string> files;
  g_env->GetChildren(FLAGS_db, &files);
  for (size_t i = 0; i < files.size(); i++) {
    // Remove "heap-" files left behind by earlier heapprofile benchmarks.
    if (Slice(files[i]).starts_with("heap-")) {
      g_env->RemoveFile(std::string(FLAGS_db) + "/" + files[i]);
    }
  }
  if (!FLAGS_use_existing_db) {
    DestroyDB(FLAGS_db, Options());
  }
}
|
|
|
|
|
|
|
|
// Releases the DB plus the cache and filter-policy objects owned by this
// Benchmark (each may be nullptr; delete on nullptr is a no-op).
~Benchmark() {
  delete db_;
  delete cache_;
  delete filter_policy_;
}
|
|
|
|
|
|
|
|
// Main driver: prints the header, opens the DB, then walks the
// comma-separated FLAGS_benchmarks list, mapping each name to a member
// function and running it across FLAGS_threads threads.
void Run() {
  PrintHeader();
  Open();

  const char* benchmarks = FLAGS_benchmarks;
  while (benchmarks != nullptr) {
    // Carve the next comma-separated benchmark name off the list.
    const char* sep = strchr(benchmarks, ',');
    Slice name;
    if (sep == nullptr) {
      name = benchmarks;
      benchmarks = nullptr;
    } else {
      name = Slice(benchmarks, sep - benchmarks);
      benchmarks = sep + 1;
    }

    // Reset parameters that may be overridden below
    num_ = FLAGS_num;
    reads_ = (FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads);
    value_size_ = FLAGS_value_size;
    entries_per_batch_ = 1;
    write_options_ = WriteOptions();

    void (Benchmark::*method)(ThreadState*) = nullptr;
    bool fresh_db = false;  // True for fill* benchmarks needing an empty DB.
    int num_threads = FLAGS_threads;

    if (name == Slice("open")) {
      method = &Benchmark::OpenBench;
      num_ /= 10000;
      if (num_ < 1) num_ = 1;
    } else if (name == Slice("fillseq")) {
      fresh_db = true;
      method = &Benchmark::WriteSeq;
    } else if (name == Slice("fillbatch")) {
      fresh_db = true;
      entries_per_batch_ = 1000;
      method = &Benchmark::WriteSeq;
    } else if (name == Slice("fillrandom")) {
      fresh_db = true;
      method = &Benchmark::WriteRandom;
    } else if (name == Slice("overwrite")) {
      fresh_db = false;
      method = &Benchmark::WriteRandom;
    } else if (name == Slice("fillsync")) {
      fresh_db = true;
      num_ /= 1000;
      write_options_.sync = true;
      method = &Benchmark::WriteRandom;
    } else if (name == Slice("fill100K")) {
      fresh_db = true;
      num_ /= 1000;
      value_size_ = 100 * 1000;
      method = &Benchmark::WriteRandom;
    } else if (name == Slice("readseq")) {
      method = &Benchmark::ReadSequential;
    } else if (name == Slice("readreverse")) {
      method = &Benchmark::ReadReverse;
    } else if (name == Slice("readrandom")) {
      method = &Benchmark::ReadRandom;
    } else if (name == Slice("readmissing")) {
      method = &Benchmark::ReadMissing;
    } else if (name == Slice("seekrandom")) {
      method = &Benchmark::SeekRandom;
    } else if (name == Slice("seekordered")) {
      method = &Benchmark::SeekOrdered;
    } else if (name == Slice("readhot")) {
      method = &Benchmark::ReadHot;
    } else if (name == Slice("readrandomsmall")) {
      reads_ /= 1000;
      method = &Benchmark::ReadRandom;
    } else if (name == Slice("deleteseq")) {
      method = &Benchmark::DeleteSeq;
    } else if (name == Slice("deleterandom")) {
      method = &Benchmark::DeleteRandom;
    } else if (name == Slice("readwhilewriting")) {
      num_threads++;  // Add extra thread for writing
      method = &Benchmark::ReadWhileWriting;
    } else if (name == Slice("compact")) {
      method = &Benchmark::Compact;
    } else if (name == Slice("crc32c")) {
      method = &Benchmark::Crc32c;
    } else if (name == Slice("snappycomp")) {
      method = &Benchmark::SnappyCompress;
    } else if (name == Slice("snappyuncomp")) {
      method = &Benchmark::SnappyUncompress;
    } else if (name == Slice("zstdcomp")) {
      method = &Benchmark::ZstdCompress;
    } else if (name == Slice("zstduncomp")) {
      method = &Benchmark::ZstdUncompress;
    } else if (name == Slice("heapprofile")) {
      // Meta operations run inline (no threads, no stats).
      HeapProfile();
    } else if (name == Slice("stats")) {
      PrintStats("leveldb.stats");
    } else if (name == Slice("sstables")) {
      PrintStats("leveldb.sstables");
    } else {
      if (!name.empty()) {  // No error message for empty name
        std::fprintf(stderr, "unknown benchmark '%s'\n",
                     name.ToString().c_str());
      }
    }

    if (fresh_db) {
      if (FLAGS_use_existing_db) {
        // Caller asked to keep the DB; skip benchmarks that need it empty.
        std::fprintf(stdout, "%-12s : skipped (--use_existing_db is true)\n",
                     name.ToString().c_str());
        method = nullptr;
      } else {
        delete db_;
        db_ = nullptr;
        DestroyDB(FLAGS_db, Options());
        Open();
      }
    }

    if (method != nullptr) {
      RunBenchmark(num_threads, name, method);
    }
  }
}
|
|
|
|
|
2011-03-23 02:32:49 +08:00
|
|
|
private:
|
2011-08-23 05:08:51 +08:00
|
|
|
// Arguments handed to each benchmark thread via ThreadBody().
struct ThreadArg {
  Benchmark* bm;                            // Owning benchmark instance.
  SharedState* shared;                      // Barrier shared by all threads.
  ThreadState* thread;                      // This thread's private state.
  void (Benchmark::*method)(ThreadState*);  // Benchmark method to execute.
};
|
|
|
|
|
|
|
|
static void ThreadBody(void* v) {
|
|
|
|
ThreadArg* arg = reinterpret_cast<ThreadArg*>(v);
|
|
|
|
SharedState* shared = arg->shared;
|
|
|
|
ThreadState* thread = arg->thread;
|
|
|
|
{
|
|
|
|
MutexLock l(&shared->mu);
|
|
|
|
shared->num_initialized++;
|
|
|
|
if (shared->num_initialized >= shared->total) {
|
|
|
|
shared->cv.SignalAll();
|
|
|
|
}
|
|
|
|
while (!shared->start) {
|
|
|
|
shared->cv.Wait();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
thread->stats.Start();
|
|
|
|
(arg->bm->*(arg->method))(thread);
|
|
|
|
thread->stats.Stop();
|
|
|
|
|
|
|
|
{
|
|
|
|
MutexLock l(&shared->mu);
|
|
|
|
shared->num_done++;
|
|
|
|
if (shared->num_done >= shared->total) {
|
|
|
|
shared->cv.SignalAll();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-09-02 03:08:02 +08:00
|
|
|
void RunBenchmark(int n, Slice name,
|
|
|
|
void (Benchmark::*method)(ThreadState*)) {
|
2018-03-24 03:50:14 +08:00
|
|
|
SharedState shared(n);
|
2011-08-23 05:08:51 +08:00
|
|
|
|
|
|
|
ThreadArg* arg = new ThreadArg[n];
|
|
|
|
for (int i = 0; i < n; i++) {
|
|
|
|
arg[i].bm = this;
|
|
|
|
arg[i].method = method;
|
|
|
|
arg[i].shared = &shared;
|
2021-01-11 23:32:34 +08:00
|
|
|
++total_thread_count_;
|
|
|
|
// Seed the thread's random state deterministically based upon thread
|
|
|
|
// creation across all benchmarks. This ensures that the seeds are unique
|
|
|
|
// but reproducible when rerunning the same set of benchmarks.
|
|
|
|
arg[i].thread = new ThreadState(i, /*seed=*/1000 + total_thread_count_);
|
2011-09-02 03:08:02 +08:00
|
|
|
arg[i].thread->shared = &shared;
|
2016-09-27 19:50:38 +08:00
|
|
|
g_env->StartThread(ThreadBody, &arg[i]);
|
2011-08-23 05:08:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
shared.mu.Lock();
|
|
|
|
while (shared.num_initialized < n) {
|
|
|
|
shared.cv.Wait();
|
|
|
|
}
|
|
|
|
|
|
|
|
shared.start = true;
|
|
|
|
shared.cv.SignalAll();
|
|
|
|
while (shared.num_done < n) {
|
|
|
|
shared.cv.Wait();
|
|
|
|
}
|
|
|
|
shared.mu.Unlock();
|
|
|
|
|
|
|
|
for (int i = 1; i < n; i++) {
|
|
|
|
arg[0].thread->stats.Merge(arg[i].thread->stats);
|
|
|
|
}
|
|
|
|
arg[0].thread->stats.Report(name);
|
2021-01-11 23:32:34 +08:00
|
|
|
if (FLAGS_comparisons) {
|
2021-02-18 02:18:12 +08:00
|
|
|
fprintf(stdout, "Comparisons: %zu\n", count_comparator_.comparisons());
|
2021-01-11 23:32:34 +08:00
|
|
|
count_comparator_.reset();
|
|
|
|
fflush(stdout);
|
|
|
|
}
|
2011-08-23 05:08:51 +08:00
|
|
|
|
|
|
|
for (int i = 0; i < n; i++) {
|
|
|
|
delete arg[i].thread;
|
|
|
|
}
|
|
|
|
delete[] arg;
|
|
|
|
}
|
|
|
|
|
|
|
|
void Crc32c(ThreadState* thread) {
|
2011-03-26 04:27:43 +08:00
|
|
|
// Checksum about 500MB of data total
|
2011-08-23 05:08:51 +08:00
|
|
|
const int size = 4096;
|
|
|
|
const char* label = "(4K per op)";
|
2011-03-29 04:43:44 +08:00
|
|
|
std::string data(size, 'x');
|
2011-03-26 04:27:43 +08:00
|
|
|
int64_t bytes = 0;
|
|
|
|
uint32_t crc = 0;
|
|
|
|
while (bytes < 500 * 1048576) {
|
|
|
|
crc = crc32c::Value(data.data(), size);
|
2011-08-23 05:08:51 +08:00
|
|
|
thread->stats.FinishedSingleOp();
|
2011-03-26 04:27:43 +08:00
|
|
|
bytes += size;
|
|
|
|
}
|
|
|
|
// Print so result is not dead
|
2020-04-30 06:31:41 +08:00
|
|
|
std::fprintf(stderr, "... crc=0x%x\r", static_cast<unsigned int>(crc));
|
2011-03-26 04:27:43 +08:00
|
|
|
|
2011-08-23 05:08:51 +08:00
|
|
|
thread->stats.AddBytes(bytes);
|
|
|
|
thread->stats.AddMessage(label);
|
2011-03-26 04:27:43 +08:00
|
|
|
}
|
|
|
|
|
2011-08-23 05:08:51 +08:00
|
|
|
  // Benchmarks Snappy compression throughput via the shared Compress() driver.
  void SnappyCompress(ThreadState* thread) {
    Compress(thread, "snappy", &port::Snappy_Compress);
  }
|
|
|
|
|
2011-08-23 05:08:51 +08:00
|
|
|
  // Benchmarks Snappy decompression. The compress function is passed so the
  // Uncompress() driver can build the compressed input it repeatedly decodes.
  void SnappyUncompress(ThreadState* thread) {
    Uncompress(thread, "snappy", &port::Snappy_Compress,
               &port::Snappy_Uncompress);
  }
|
2011-04-13 03:38:58 +08:00
|
|
|
|
2023-02-16 08:04:43 +08:00
|
|
|
  // Benchmarks zstd compression at FLAGS_zstd_compression_level via the
  // shared Compress() driver; the lambda adapts Zstd_Compress to the
  // (input, length, output) signature Compress() expects.
  void ZstdCompress(ThreadState* thread) {
    Compress(thread, "zstd",
             [](const char* input, size_t length, std::string* output) {
               return port::Zstd_Compress(FLAGS_zstd_compression_level, input,
                                          length, output);
             });
  }
|
|
|
|
|
|
|
|
  // Benchmarks zstd decompression. The compress lambda builds the compressed
  // input once; Uncompress() then repeatedly decodes it with Zstd_Uncompress.
  void ZstdUncompress(ThreadState* thread) {
    Uncompress(
        thread, "zstd",
        [](const char* input, size_t length, std::string* output) {
          return port::Zstd_Compress(FLAGS_zstd_compression_level, input,
                                     length, output);
        },
        &port::Zstd_Uncompress);
  }
|
|
|
|
|
2011-03-23 02:32:49 +08:00
|
|
|
void Open() {
|
2018-04-11 07:18:06 +08:00
|
|
|
assert(db_ == nullptr);
|
2011-03-23 02:32:49 +08:00
|
|
|
Options options;
|
2016-09-27 19:50:38 +08:00
|
|
|
options.env = g_env;
|
2011-05-21 10:17:43 +08:00
|
|
|
options.create_if_missing = !FLAGS_use_existing_db;
|
2011-03-23 02:32:49 +08:00
|
|
|
options.block_cache = cache_;
|
|
|
|
options.write_buffer_size = FLAGS_write_buffer_size;
|
2016-09-27 19:50:38 +08:00
|
|
|
options.max_file_size = FLAGS_max_file_size;
|
|
|
|
options.block_size = FLAGS_block_size;
|
2021-01-11 23:32:34 +08:00
|
|
|
if (FLAGS_comparisons) {
|
|
|
|
options.comparator = &count_comparator_;
|
|
|
|
}
|
2012-10-13 02:53:12 +08:00
|
|
|
options.max_open_files = FLAGS_open_files;
|
2012-04-17 23:36:46 +08:00
|
|
|
options.filter_policy = filter_policy_;
|
2014-12-12 00:13:18 +08:00
|
|
|
options.reuse_logs = FLAGS_reuse_logs;
|
2022-07-19 05:19:56 +08:00
|
|
|
options.compression =
|
|
|
|
FLAGS_compression ? kSnappyCompression : kNoCompression;
|
2011-06-22 10:36:45 +08:00
|
|
|
Status s = DB::Open(options, FLAGS_db, &db_);
|
2011-03-23 02:32:49 +08:00
|
|
|
if (!s.ok()) {
|
2020-04-30 06:31:41 +08:00
|
|
|
std::fprintf(stderr, "open error: %s\n", s.ToString().c_str());
|
|
|
|
std::exit(1);
|
2011-03-23 02:32:49 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-12-12 00:08:57 +08:00
|
|
|
void OpenBench(ThreadState* thread) {
|
|
|
|
for (int i = 0; i < num_; i++) {
|
|
|
|
delete db_;
|
|
|
|
Open();
|
|
|
|
thread->stats.FinishedSingleOp();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-05-03 02:01:00 +08:00
|
|
|
  // Write benchmark with sequential keys (see DoWrite).
  void WriteSeq(ThreadState* thread) { DoWrite(thread, true); }
|
2011-03-23 02:32:49 +08:00
|
|
|
|
2019-05-03 02:01:00 +08:00
|
|
|
  // Write benchmark with uniformly random keys (see DoWrite).
  void WriteRandom(ThreadState* thread) { DoWrite(thread, false); }
|
2011-08-23 05:08:51 +08:00
|
|
|
|
|
|
|
void DoWrite(ThreadState* thread, bool seq) {
|
|
|
|
if (num_ != FLAGS_num) {
|
2011-03-23 02:32:49 +08:00
|
|
|
char msg[100];
|
2020-04-30 06:31:41 +08:00
|
|
|
std::snprintf(msg, sizeof(msg), "(%d ops)", num_);
|
2011-08-23 05:08:51 +08:00
|
|
|
thread->stats.AddMessage(msg);
|
2011-03-23 02:32:49 +08:00
|
|
|
}
|
|
|
|
|
2011-08-23 05:08:51 +08:00
|
|
|
RandomGenerator gen;
|
2011-03-19 06:37:00 +08:00
|
|
|
WriteBatch batch;
|
|
|
|
Status s;
|
2011-08-23 05:08:51 +08:00
|
|
|
int64_t bytes = 0;
|
2021-01-11 23:32:34 +08:00
|
|
|
KeyBuffer key;
|
2011-08-23 05:08:51 +08:00
|
|
|
for (int i = 0; i < num_; i += entries_per_batch_) {
|
2011-03-19 06:37:00 +08:00
|
|
|
batch.Clear();
|
2011-08-23 05:08:51 +08:00
|
|
|
for (int j = 0; j < entries_per_batch_; j++) {
|
2021-01-11 23:32:34 +08:00
|
|
|
const int k = seq ? i + j : thread->rand.Uniform(FLAGS_num);
|
|
|
|
key.Set(k);
|
|
|
|
batch.Put(key.slice(), gen.Generate(value_size_));
|
|
|
|
bytes += value_size_ + key.slice().size();
|
2011-08-23 05:08:51 +08:00
|
|
|
thread->stats.FinishedSingleOp();
|
2011-04-13 03:38:58 +08:00
|
|
|
}
|
2011-08-23 05:08:51 +08:00
|
|
|
s = db_->Write(write_options_, &batch);
|
2011-03-19 06:37:00 +08:00
|
|
|
if (!s.ok()) {
|
2020-04-30 06:31:41 +08:00
|
|
|
std::fprintf(stderr, "put error: %s\n", s.ToString().c_str());
|
|
|
|
std::exit(1);
|
2011-03-19 06:37:00 +08:00
|
|
|
}
|
|
|
|
}
|
2011-08-23 05:08:51 +08:00
|
|
|
thread->stats.AddBytes(bytes);
|
2011-03-19 06:37:00 +08:00
|
|
|
}
|
|
|
|
|
2011-08-23 05:08:51 +08:00
|
|
|
void ReadSequential(ThreadState* thread) {
|
2011-03-23 02:32:49 +08:00
|
|
|
Iterator* iter = db_->NewIterator(ReadOptions());
|
|
|
|
int i = 0;
|
2011-08-23 05:08:51 +08:00
|
|
|
int64_t bytes = 0;
|
2011-05-21 10:17:43 +08:00
|
|
|
for (iter->SeekToFirst(); i < reads_ && iter->Valid(); iter->Next()) {
|
2011-08-23 05:08:51 +08:00
|
|
|
bytes += iter->key().size() + iter->value().size();
|
|
|
|
thread->stats.FinishedSingleOp();
|
2011-03-23 02:32:49 +08:00
|
|
|
++i;
|
|
|
|
}
|
|
|
|
delete iter;
|
2011-08-23 05:08:51 +08:00
|
|
|
thread->stats.AddBytes(bytes);
|
2011-03-23 02:32:49 +08:00
|
|
|
}
|
|
|
|
|
2011-08-23 05:08:51 +08:00
|
|
|
void ReadReverse(ThreadState* thread) {
|
2011-03-23 02:32:49 +08:00
|
|
|
Iterator* iter = db_->NewIterator(ReadOptions());
|
|
|
|
int i = 0;
|
2011-08-23 05:08:51 +08:00
|
|
|
int64_t bytes = 0;
|
2011-05-21 10:17:43 +08:00
|
|
|
for (iter->SeekToLast(); i < reads_ && iter->Valid(); iter->Prev()) {
|
2011-08-23 05:08:51 +08:00
|
|
|
bytes += iter->key().size() + iter->value().size();
|
|
|
|
thread->stats.FinishedSingleOp();
|
2011-03-23 02:32:49 +08:00
|
|
|
++i;
|
|
|
|
}
|
|
|
|
delete iter;
|
2011-08-23 05:08:51 +08:00
|
|
|
thread->stats.AddBytes(bytes);
|
2011-03-23 02:32:49 +08:00
|
|
|
}
|
|
|
|
|
2011-08-23 05:08:51 +08:00
|
|
|
void ReadRandom(ThreadState* thread) {
|
2011-03-19 06:37:00 +08:00
|
|
|
ReadOptions options;
|
2011-03-23 02:32:49 +08:00
|
|
|
std::string value;
|
2012-04-17 23:36:46 +08:00
|
|
|
int found = 0;
|
2021-01-11 23:32:34 +08:00
|
|
|
KeyBuffer key;
|
2011-05-21 10:17:43 +08:00
|
|
|
for (int i = 0; i < reads_; i++) {
|
2021-01-11 23:32:34 +08:00
|
|
|
const int k = thread->rand.Uniform(FLAGS_num);
|
|
|
|
key.Set(k);
|
|
|
|
if (db_->Get(options, key.slice(), &value).ok()) {
|
2012-04-17 23:36:46 +08:00
|
|
|
found++;
|
|
|
|
}
|
|
|
|
thread->stats.FinishedSingleOp();
|
|
|
|
}
|
|
|
|
char msg[100];
|
2020-04-30 06:31:41 +08:00
|
|
|
std::snprintf(msg, sizeof(msg), "(%d of %d found)", found, num_);
|
2012-04-17 23:36:46 +08:00
|
|
|
thread->stats.AddMessage(msg);
|
|
|
|
}
|
|
|
|
|
|
|
|
void ReadMissing(ThreadState* thread) {
|
|
|
|
ReadOptions options;
|
|
|
|
std::string value;
|
2021-01-11 23:32:34 +08:00
|
|
|
KeyBuffer key;
|
2012-04-17 23:36:46 +08:00
|
|
|
for (int i = 0; i < reads_; i++) {
|
2021-01-11 23:32:34 +08:00
|
|
|
const int k = thread->rand.Uniform(FLAGS_num);
|
|
|
|
key.Set(k);
|
|
|
|
Slice s = Slice(key.slice().data(), key.slice().size() - 1);
|
|
|
|
db_->Get(options, s, &value);
|
2011-08-23 05:08:51 +08:00
|
|
|
thread->stats.FinishedSingleOp();
|
2011-03-19 06:37:00 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-08-23 05:08:51 +08:00
|
|
|
void ReadHot(ThreadState* thread) {
|
2011-05-21 10:17:43 +08:00
|
|
|
ReadOptions options;
|
|
|
|
std::string value;
|
|
|
|
const int range = (FLAGS_num + 99) / 100;
|
2021-01-11 23:32:34 +08:00
|
|
|
KeyBuffer key;
|
2011-05-21 10:17:43 +08:00
|
|
|
for (int i = 0; i < reads_; i++) {
|
2021-01-11 23:32:34 +08:00
|
|
|
const int k = thread->rand.Uniform(range);
|
|
|
|
key.Set(k);
|
|
|
|
db_->Get(options, key.slice(), &value);
|
2011-08-23 05:08:51 +08:00
|
|
|
thread->stats.FinishedSingleOp();
|
2011-05-21 10:17:43 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-04-17 23:36:46 +08:00
|
|
|
void SeekRandom(ThreadState* thread) {
|
|
|
|
ReadOptions options;
|
|
|
|
int found = 0;
|
2021-01-11 23:32:34 +08:00
|
|
|
KeyBuffer key;
|
2012-04-17 23:36:46 +08:00
|
|
|
for (int i = 0; i < reads_; i++) {
|
|
|
|
Iterator* iter = db_->NewIterator(options);
|
2021-01-11 23:32:34 +08:00
|
|
|
const int k = thread->rand.Uniform(FLAGS_num);
|
|
|
|
key.Set(k);
|
|
|
|
iter->Seek(key.slice());
|
|
|
|
if (iter->Valid() && iter->key() == key.slice()) found++;
|
2012-04-17 23:36:46 +08:00
|
|
|
delete iter;
|
|
|
|
thread->stats.FinishedSingleOp();
|
|
|
|
}
|
|
|
|
char msg[100];
|
2021-01-11 23:32:34 +08:00
|
|
|
snprintf(msg, sizeof(msg), "(%d of %d found)", found, num_);
|
|
|
|
thread->stats.AddMessage(msg);
|
|
|
|
}
|
|
|
|
|
|
|
|
void SeekOrdered(ThreadState* thread) {
|
|
|
|
ReadOptions options;
|
|
|
|
Iterator* iter = db_->NewIterator(options);
|
|
|
|
int found = 0;
|
|
|
|
int k = 0;
|
|
|
|
KeyBuffer key;
|
|
|
|
for (int i = 0; i < reads_; i++) {
|
|
|
|
k = (k + (thread->rand.Uniform(100))) % FLAGS_num;
|
|
|
|
key.Set(k);
|
|
|
|
iter->Seek(key.slice());
|
|
|
|
if (iter->Valid() && iter->key() == key.slice()) found++;
|
|
|
|
thread->stats.FinishedSingleOp();
|
|
|
|
}
|
|
|
|
delete iter;
|
|
|
|
char msg[100];
|
2020-04-30 06:31:41 +08:00
|
|
|
std::snprintf(msg, sizeof(msg), "(%d of %d found)", found, num_);
|
2012-04-17 23:36:46 +08:00
|
|
|
thread->stats.AddMessage(msg);
|
|
|
|
}
|
|
|
|
|
|
|
|
void DoDelete(ThreadState* thread, bool seq) {
|
|
|
|
RandomGenerator gen;
|
|
|
|
WriteBatch batch;
|
|
|
|
Status s;
|
2021-01-11 23:32:34 +08:00
|
|
|
KeyBuffer key;
|
2012-04-17 23:36:46 +08:00
|
|
|
for (int i = 0; i < num_; i += entries_per_batch_) {
|
|
|
|
batch.Clear();
|
|
|
|
for (int j = 0; j < entries_per_batch_; j++) {
|
2021-01-11 23:32:34 +08:00
|
|
|
const int k = seq ? i + j : (thread->rand.Uniform(FLAGS_num));
|
|
|
|
key.Set(k);
|
|
|
|
batch.Delete(key.slice());
|
2012-04-17 23:36:46 +08:00
|
|
|
thread->stats.FinishedSingleOp();
|
|
|
|
}
|
|
|
|
s = db_->Write(write_options_, &batch);
|
|
|
|
if (!s.ok()) {
|
2020-04-30 06:31:41 +08:00
|
|
|
std::fprintf(stderr, "del error: %s\n", s.ToString().c_str());
|
|
|
|
std::exit(1);
|
2012-04-17 23:36:46 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-05-03 02:01:00 +08:00
|
|
|
  // Delete benchmark with sequential keys (see DoDelete).
  void DeleteSeq(ThreadState* thread) { DoDelete(thread, true); }
|
2012-04-17 23:36:46 +08:00
|
|
|
|
2019-05-03 02:01:00 +08:00
|
|
|
  // Delete benchmark with uniformly random keys (see DoDelete).
  void DeleteRandom(ThreadState* thread) { DoDelete(thread, false); }
|
2012-04-17 23:36:46 +08:00
|
|
|
|
2011-09-02 03:08:02 +08:00
|
|
|
void ReadWhileWriting(ThreadState* thread) {
|
|
|
|
if (thread->tid > 0) {
|
|
|
|
ReadRandom(thread);
|
|
|
|
} else {
|
|
|
|
// Special thread that keeps writing until other threads are done.
|
|
|
|
RandomGenerator gen;
|
2021-01-11 23:32:34 +08:00
|
|
|
KeyBuffer key;
|
2011-09-02 03:08:02 +08:00
|
|
|
while (true) {
|
|
|
|
{
|
|
|
|
MutexLock l(&thread->shared->mu);
|
|
|
|
if (thread->shared->num_done + 1 >= thread->shared->num_initialized) {
|
|
|
|
// Other threads have finished
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-11 23:32:34 +08:00
|
|
|
const int k = thread->rand.Uniform(FLAGS_num);
|
|
|
|
key.Set(k);
|
|
|
|
Status s =
|
|
|
|
db_->Put(write_options_, key.slice(), gen.Generate(value_size_));
|
2011-09-02 03:08:02 +08:00
|
|
|
if (!s.ok()) {
|
2020-04-30 06:31:41 +08:00
|
|
|
std::fprintf(stderr, "put error: %s\n", s.ToString().c_str());
|
|
|
|
std::exit(1);
|
2011-09-02 03:08:02 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Do not count any of the preceding work/delay in stats.
|
|
|
|
thread->stats.Start();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-05-03 02:01:00 +08:00
|
|
|
  // Manually compacts the entire key range.
  void Compact(ThreadState* thread) { db_->CompactRange(nullptr, nullptr); }
|
2011-03-19 06:37:00 +08:00
|
|
|
|
2012-04-17 23:36:46 +08:00
|
|
|
  // Fetches the DB property named by `key` (e.g. "leveldb.stats") and dumps
  // it to stdout; prints "(failed)" if the property is unknown.
  void PrintStats(const char* key) {
    std::string stats;
    if (!db_->GetProperty(key, &stats)) {
      stats = "(failed)";
    }
    std::fprintf(stdout, "\n%s\n", stats.c_str());
  }
|
|
|
|
|
2011-03-19 06:37:00 +08:00
|
|
|
  // Callback for port::GetHeapProfile: appends n bytes from buf to the
  // WritableFile passed through arg. Append errors are ignored (best effort).
  static void WriteToFile(void* arg, const char* buf, int n) {
    reinterpret_cast<WritableFile*>(arg)->Append(Slice(buf, n));
  }
|
|
|
|
|
|
|
|
void HeapProfile() {
|
|
|
|
char fname[100];
|
2020-04-30 06:31:41 +08:00
|
|
|
std::snprintf(fname, sizeof(fname), "%s/heap-%04d", FLAGS_db,
|
|
|
|
++heap_counter_);
|
2011-03-19 06:37:00 +08:00
|
|
|
WritableFile* file;
|
2016-09-27 19:50:38 +08:00
|
|
|
Status s = g_env->NewWritableFile(fname, &file);
|
2011-03-19 06:37:00 +08:00
|
|
|
if (!s.ok()) {
|
2020-04-30 06:31:41 +08:00
|
|
|
std::fprintf(stderr, "%s\n", s.ToString().c_str());
|
2011-03-19 06:37:00 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
bool ok = port::GetHeapProfile(WriteToFile, file);
|
|
|
|
delete file;
|
|
|
|
if (!ok) {
|
2020-04-30 06:31:41 +08:00
|
|
|
std::fprintf(stderr, "heap profiling not supported\n");
|
Add Env::Remove{File,Dir} which obsolete Env::Delete{File,Dir}.
The "DeleteFile" method name causes pain for Windows developers, because
<windows.h> #defines a DeleteFile macro to DeleteFileW or DeleteFileA.
Current code uses workarounds, like #undefining DeleteFile everywhere an
Env is declared, implemented, or used.
This CL removes the need for workarounds by renaming Env::DeleteFile to
Env::RemoveFile. For consistency, Env::DeleteDir is also renamed to
Env::RemoveDir. A few internal methods are also renamed for consistency.
Software that supports Windows is expected to migrate any Env
implementations and usage to Remove{File,Dir}, and never use the name
Env::Delete{File,Dir} in its code.
The renaming is done in a backwards-compatible way, at the risk of
making it slightly more difficult to build a new correct Env
implementation. The backwards compatibility is achieved using the
following hacks:
1) Env::Remove{File,Dir} methods are added, with a default
implementation that calls into Env::Delete{File,Dir}. This makes old
Env implementations compatible with code that calls into the updated
API.
2) The Env::Delete{File,Dir} methods are no longer pure virtuals.
Instead, they gain a default implementation that calls into
Env::Remove{File,Dir}. This makes updated Env implementations
compatible with code that calls into the old API.
The cost of this approach is that it's possible to write an Env without
overriding either Rename{File,Dir} or Delete{File,Dir}, without getting
a compiler warning. However, attempting to run the test suite will
immediately fail with an infinite call stack ending in
{Remove,Delete}{File,Dir}, making developers aware of the problem.
PiperOrigin-RevId: 288710907
2020-01-09 01:14:53 +08:00
|
|
|
g_env->RemoveFile(fname);
|
2011-03-19 06:37:00 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2011-11-01 01:22:06 +08:00
|
|
|
} // namespace leveldb
|
2011-03-19 06:37:00 +08:00
|
|
|
|
|
|
|
int main(int argc, char** argv) {
|
2011-04-13 03:38:58 +08:00
|
|
|
FLAGS_write_buffer_size = leveldb::Options().write_buffer_size;
|
2016-09-27 19:50:38 +08:00
|
|
|
FLAGS_max_file_size = leveldb::Options().max_file_size;
|
|
|
|
FLAGS_block_size = leveldb::Options().block_size;
|
2011-05-21 10:17:43 +08:00
|
|
|
FLAGS_open_files = leveldb::Options().max_open_files;
|
2012-05-31 00:45:46 +08:00
|
|
|
std::string default_db_path;
|
2011-05-21 10:17:43 +08:00
|
|
|
|
2011-03-19 06:37:00 +08:00
|
|
|
for (int i = 1; i < argc; i++) {
|
|
|
|
double d;
|
|
|
|
int n;
|
|
|
|
char junk;
|
|
|
|
if (leveldb::Slice(argv[i]).starts_with("--benchmarks=")) {
|
|
|
|
FLAGS_benchmarks = argv[i] + strlen("--benchmarks=");
|
|
|
|
} else if (sscanf(argv[i], "--compression_ratio=%lf%c", &d, &junk) == 1) {
|
|
|
|
FLAGS_compression_ratio = d;
|
|
|
|
} else if (sscanf(argv[i], "--histogram=%d%c", &n, &junk) == 1 &&
|
|
|
|
(n == 0 || n == 1)) {
|
|
|
|
FLAGS_histogram = n;
|
2021-01-11 23:32:34 +08:00
|
|
|
} else if (sscanf(argv[i], "--comparisons=%d%c", &n, &junk) == 1 &&
|
|
|
|
(n == 0 || n == 1)) {
|
|
|
|
FLAGS_comparisons = n;
|
2011-05-21 10:17:43 +08:00
|
|
|
} else if (sscanf(argv[i], "--use_existing_db=%d%c", &n, &junk) == 1 &&
|
|
|
|
(n == 0 || n == 1)) {
|
|
|
|
FLAGS_use_existing_db = n;
|
2014-12-12 00:13:18 +08:00
|
|
|
} else if (sscanf(argv[i], "--reuse_logs=%d%c", &n, &junk) == 1 &&
|
|
|
|
(n == 0 || n == 1)) {
|
|
|
|
FLAGS_reuse_logs = n;
|
2022-06-19 16:19:58 +08:00
|
|
|
} else if (sscanf(argv[i], "--compression=%d%c", &n, &junk) == 1 &&
|
|
|
|
(n == 0 || n == 1)) {
|
|
|
|
FLAGS_compression = n;
|
2011-03-19 06:37:00 +08:00
|
|
|
} else if (sscanf(argv[i], "--num=%d%c", &n, &junk) == 1) {
|
|
|
|
FLAGS_num = n;
|
2011-05-21 10:17:43 +08:00
|
|
|
} else if (sscanf(argv[i], "--reads=%d%c", &n, &junk) == 1) {
|
|
|
|
FLAGS_reads = n;
|
2011-08-23 05:08:51 +08:00
|
|
|
} else if (sscanf(argv[i], "--threads=%d%c", &n, &junk) == 1) {
|
|
|
|
FLAGS_threads = n;
|
2011-03-19 06:37:00 +08:00
|
|
|
} else if (sscanf(argv[i], "--value_size=%d%c", &n, &junk) == 1) {
|
|
|
|
FLAGS_value_size = n;
|
|
|
|
} else if (sscanf(argv[i], "--write_buffer_size=%d%c", &n, &junk) == 1) {
|
|
|
|
FLAGS_write_buffer_size = n;
|
2016-09-27 19:50:38 +08:00
|
|
|
} else if (sscanf(argv[i], "--max_file_size=%d%c", &n, &junk) == 1) {
|
|
|
|
FLAGS_max_file_size = n;
|
|
|
|
} else if (sscanf(argv[i], "--block_size=%d%c", &n, &junk) == 1) {
|
|
|
|
FLAGS_block_size = n;
|
2021-01-11 23:32:34 +08:00
|
|
|
} else if (sscanf(argv[i], "--key_prefix=%d%c", &n, &junk) == 1) {
|
|
|
|
FLAGS_key_prefix = n;
|
2011-04-13 03:38:58 +08:00
|
|
|
} else if (sscanf(argv[i], "--cache_size=%d%c", &n, &junk) == 1) {
|
|
|
|
FLAGS_cache_size = n;
|
2012-04-17 23:36:46 +08:00
|
|
|
} else if (sscanf(argv[i], "--bloom_bits=%d%c", &n, &junk) == 1) {
|
|
|
|
FLAGS_bloom_bits = n;
|
2011-05-21 10:17:43 +08:00
|
|
|
} else if (sscanf(argv[i], "--open_files=%d%c", &n, &junk) == 1) {
|
|
|
|
FLAGS_open_files = n;
|
2011-06-22 10:36:45 +08:00
|
|
|
} else if (strncmp(argv[i], "--db=", 5) == 0) {
|
|
|
|
FLAGS_db = argv[i] + 5;
|
2011-04-13 03:38:58 +08:00
|
|
|
} else {
|
2020-04-30 06:31:41 +08:00
|
|
|
std::fprintf(stderr, "Invalid flag '%s'\n", argv[i]);
|
|
|
|
std::exit(1);
|
2011-03-19 06:37:00 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-09-27 19:50:38 +08:00
|
|
|
leveldb::g_env = leveldb::Env::Default();
|
|
|
|
|
2012-05-31 00:45:46 +08:00
|
|
|
// Choose a location for the test database if none given with --db=<path>
|
2018-04-11 07:18:06 +08:00
|
|
|
if (FLAGS_db == nullptr) {
|
2019-05-03 02:01:00 +08:00
|
|
|
leveldb::g_env->GetTestDirectory(&default_db_path);
|
|
|
|
default_db_path += "/dbbench";
|
|
|
|
FLAGS_db = default_db_path.c_str();
|
2012-05-31 00:45:46 +08:00
|
|
|
}
|
|
|
|
|
2011-03-19 06:37:00 +08:00
|
|
|
leveldb::Benchmark benchmark;
|
|
|
|
benchmark.Run();
|
|
|
|
return 0;
|
|
|
|
}
|