// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "leveldb/db.h"

#include <atomic>
#include <cinttypes>
#include <string>

#include "gtest/gtest.h"
#include "benchmark/benchmark.h"
#include "db/db_impl.h"
#include "db/filename.h"
#include "db/version_set.h"
#include "db/write_batch_internal.h"
#include "leveldb/cache.h"
#include "leveldb/env.h"
#include "leveldb/filter_policy.h"
#include "leveldb/table.h"
#include "port/port.h"
#include "port/thread_annotations.h"
#include "util/hash.h"
#include "util/logging.h"
#include "util/mutexlock.h"
#include "util/testutil.h"

namespace leveldb {

static std::string RandomString(Random* rnd, int len) {
  std::string r;
  test::RandomString(rnd, len, &r);
  return r;
}

static std::string RandomKey(Random* rnd) {
  int len =
      (rnd->OneIn(3) ? 1  // Short sometimes to encourage collisions
                     : (rnd->OneIn(100) ? rnd->Skewed(10) : rnd->Uniform(10)));
  return test::RandomKey(rnd, len);
}

namespace {
class AtomicCounter {
 public:
  AtomicCounter() : count_(0) {}
  void Increment() { IncrementBy(1); }
  void IncrementBy(int count) LOCKS_EXCLUDED(mu_) {
    MutexLock l(&mu_);
    count_ += count;
  }
  int Read() LOCKS_EXCLUDED(mu_) {
    MutexLock l(&mu_);
    return count_;
  }
  void Reset() LOCKS_EXCLUDED(mu_) {
    MutexLock l(&mu_);
    count_ = 0;
  }

 private:
  port::Mutex mu_;
  int count_ GUARDED_BY(mu_);
};

void DelayMilliseconds(int millis) {
  Env::Default()->SleepForMicroseconds(millis * 1000);
}
}  // namespace

// Test Env to override default Env behavior for testing.
class TestEnv : public EnvWrapper {
 public:
  explicit TestEnv(Env* base) : EnvWrapper(base), ignore_dot_files_(false) {}

  void SetIgnoreDotFiles(bool ignored) { ignore_dot_files_ = ignored; }

  Status GetChildren(const std::string& dir,
                     std::vector<std::string>* result) override {
    Status s = target()->GetChildren(dir, result);
    if (!s.ok() || !ignore_dot_files_) {
      return s;
    }

    std::vector<std::string>::iterator it = result->begin();
    while (it != result->end()) {
      if ((*it == ".") || (*it == "..")) {
        it = result->erase(it);
      } else {
        ++it;
      }
    }

    return s;
  }

 private:
  bool ignore_dot_files_;
};

// Special Env used to delay background operations.
class SpecialEnv : public EnvWrapper {
 public:
  // sstable/log Sync() calls are blocked while this flag is set.
  std::atomic<bool> delay_data_sync_;

  // sstable/log Sync() calls return an error.
  std::atomic<bool> data_sync_error_;

  // Simulate no-space errors while this flag is set.
  std::atomic<bool> no_space_;

  // Simulate a non-writable file system while this flag is set.
  std::atomic<bool> non_writable_;

  // Force sync of manifest files to fail while this flag is set.
  std::atomic<bool> manifest_sync_error_;

  // Force write to manifest files to fail while this flag is set.
  std::atomic<bool> manifest_write_error_;

  bool count_random_reads_;
  AtomicCounter random_read_counter_;

  explicit SpecialEnv(Env* base)
      : EnvWrapper(base),
        delay_data_sync_(false),
        data_sync_error_(false),
        no_space_(false),
        non_writable_(false),
        manifest_sync_error_(false),
        manifest_write_error_(false),
        count_random_reads_(false) {}

  Status NewWritableFile(const std::string& f, WritableFile** r) {
    class DataFile : public WritableFile {
     private:
      SpecialEnv* const env_;
      WritableFile* const base_;

     public:
      DataFile(SpecialEnv* env, WritableFile* base) : env_(env), base_(base) {}
      ~DataFile() { delete base_; }
      Status Append(const Slice& data) {
        if (env_->no_space_.load(std::memory_order_acquire)) {
          // Drop writes on the floor
          return Status::OK();
        } else {
          return base_->Append(data);
        }
      }
      Status Close() { return base_->Close(); }
      Status Flush() { return base_->Flush(); }
      Status Sync() {
        if (env_->data_sync_error_.load(std::memory_order_acquire)) {
          return Status::IOError("simulated data sync error");
        }
        while (env_->delay_data_sync_.load(std::memory_order_acquire)) {
          DelayMilliseconds(100);
        }
        return base_->Sync();
      }
    };
    class ManifestFile : public WritableFile {
     private:
      SpecialEnv* env_;
      WritableFile* base_;

     public:
      ManifestFile(SpecialEnv* env, WritableFile* b) : env_(env), base_(b) {}
      ~ManifestFile() { delete base_; }
      Status Append(const Slice& data) {
        if (env_->manifest_write_error_.load(std::memory_order_acquire)) {
          return Status::IOError("simulated writer error");
        } else {
          return base_->Append(data);
        }
      }
      Status Close() { return base_->Close(); }
      Status Flush() { return base_->Flush(); }
      Status Sync() {
        if (env_->manifest_sync_error_.load(std::memory_order_acquire)) {
          return Status::IOError("simulated sync error");
        } else {
          return base_->Sync();
        }
      }
    };

    if (non_writable_.load(std::memory_order_acquire)) {
      return Status::IOError("simulated write error");
    }

    Status s = target()->NewWritableFile(f, r);
    if (s.ok()) {
      if (strstr(f.c_str(), ".ldb") != nullptr ||
          strstr(f.c_str(), ".log") != nullptr) {
        *r = new DataFile(this, *r);
      } else if (strstr(f.c_str(), "MANIFEST") != nullptr) {
        *r = new ManifestFile(this, *r);
      }
    }
    return s;
  }

  Status NewRandomAccessFile(const std::string& f, RandomAccessFile** r) {
    class CountingFile : public RandomAccessFile {
     private:
      RandomAccessFile* target_;
      AtomicCounter* counter_;

     public:
      CountingFile(RandomAccessFile* target, AtomicCounter* counter)
          : target_(target), counter_(counter) {}
      ~CountingFile() override { delete target_; }
      Status Read(uint64_t offset, size_t n, Slice* result,
                  char* scratch) const override {
        counter_->Increment();
        return target_->Read(offset, n, result, scratch);
      }
    };

    Status s = target()->NewRandomAccessFile(f, r);
    if (s.ok() && count_random_reads_) {
      *r = new CountingFile(*r, &random_read_counter_);
    }
    return s;
  }
};
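
// The tests drive SpecialEnv by flipping these flags at runtime: the test
// thread performs a release store and the wrapped file objects observe it
// with an acquire load.  For example (see GetFromImmutableLayer below):
//
//   env_->delay_data_sync_.store(true, std::memory_order_release);
//   // ... writes that fill the memtable and trigger a compaction ...
//   env_->delay_data_sync_.store(false, std::memory_order_release);
//
// While delay_data_sync_ is set, DataFile::Sync() loops in 100 ms sleeps, so
// the background compaction stalls without reporting an error.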

class DBTest : public testing::Test {
 public:
  std::string dbname_;
  SpecialEnv* env_;
  DB* db_;

  Options last_options_;

  DBTest() : env_(new SpecialEnv(Env::Default())), option_config_(kDefault) {
    filter_policy_ = NewBloomFilterPolicy(10);
    dbname_ = testing::TempDir() + "db_test";
    DestroyDB(dbname_, Options());
    db_ = nullptr;
    Reopen();
  }

  ~DBTest() {
    delete db_;
    DestroyDB(dbname_, Options());
    delete env_;
    delete filter_policy_;
  }

  // Switch to a fresh database with the next option configuration to
  // test. Return false if there are no more configurations to test.
  bool ChangeOptions() {
    option_config_++;
    if (option_config_ >= kEnd) {
      return false;
    } else {
      DestroyAndReopen();
      return true;
    }
  }

  // Return the current option configuration.
  Options CurrentOptions() {
    Options options;
    options.reuse_logs = false;
    switch (option_config_) {
      case kReuse:
        options.reuse_logs = true;
        break;
      case kFilter:
        options.filter_policy = filter_policy_;
        break;
      case kUncompressed:
        options.compression = kNoCompression;
        break;
      default:
        break;
    }
    return options;
  }

  DBImpl* dbfull() { return reinterpret_cast<DBImpl*>(db_); }

  void Reopen(Options* options = nullptr) {
    ASSERT_LEVELDB_OK(TryReopen(options));
  }

  void Close() {
    delete db_;
    db_ = nullptr;
  }

  void DestroyAndReopen(Options* options = nullptr) {
    delete db_;
    db_ = nullptr;
    DestroyDB(dbname_, Options());
    ASSERT_LEVELDB_OK(TryReopen(options));
  }

  Status TryReopen(Options* options) {
    delete db_;
    db_ = nullptr;
    Options opts;
    if (options != nullptr) {
      opts = *options;
    } else {
      opts = CurrentOptions();
      opts.create_if_missing = true;
    }
    last_options_ = opts;

    return DB::Open(opts, dbname_, &db_);
  }

  Status Put(const std::string& k, const std::string& v) {
    return db_->Put(WriteOptions(), k, v);
  }

  Status Delete(const std::string& k) { return db_->Delete(WriteOptions(), k); }

  std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) {
    ReadOptions options;
    options.snapshot = snapshot;
    std::string result;
    Status s = db_->Get(options, k, &result);
    if (s.IsNotFound()) {
      result = "NOT_FOUND";
    } else if (!s.ok()) {
      result = s.ToString();
    }
    return result;
  }

  // Return a string that contains all key,value pairs in order,
  // formatted like "(k1->v1)(k2->v2)".
  std::string Contents() {
    std::vector<std::string> forward;
    std::string result;
    Iterator* iter = db_->NewIterator(ReadOptions());
    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
      std::string s = IterStatus(iter);
      result.push_back('(');
      result.append(s);
      result.push_back(')');
      forward.push_back(s);
    }

    // Check reverse iteration results are the reverse of forward results
    size_t matched = 0;
    for (iter->SeekToLast(); iter->Valid(); iter->Prev()) {
      EXPECT_LT(matched, forward.size());
      EXPECT_EQ(IterStatus(iter), forward[forward.size() - matched - 1]);
      matched++;
    }
    EXPECT_EQ(matched, forward.size());

    delete iter;
    return result;
  }

  std::string AllEntriesFor(const Slice& user_key) {
    Iterator* iter = dbfull()->TEST_NewInternalIterator();
    InternalKey target(user_key, kMaxSequenceNumber, kTypeValue);
    iter->Seek(target.Encode());
    std::string result;
    if (!iter->status().ok()) {
      result = iter->status().ToString();
    } else {
      result = "[ ";
      bool first = true;
      while (iter->Valid()) {
        ParsedInternalKey ikey;
        if (!ParseInternalKey(iter->key(), &ikey)) {
          result += "CORRUPTED";
        } else {
          if (last_options_.comparator->Compare(ikey.user_key, user_key) != 0) {
            break;
          }
          if (!first) {
            result += ", ";
          }
          first = false;
          switch (ikey.type) {
            case kTypeValue:
              result += iter->value().ToString();
              break;
            case kTypeDeletion:
              result += "DEL";
              break;
          }
        }
        iter->Next();
      }
      if (!first) {
        result += " ";
      }
      result += "]";
    }
    delete iter;
    return result;
  }

  int NumTableFilesAtLevel(int level) {
    std::string property;
    EXPECT_TRUE(db_->GetProperty(
        "leveldb.num-files-at-level" + NumberToString(level), &property));
    return std::stoi(property);
  }

  int TotalTableFiles() {
    int result = 0;
    for (int level = 0; level < config::kNumLevels; level++) {
      result += NumTableFilesAtLevel(level);
    }
    return result;
  }

  // Return spread of files per level
  std::string FilesPerLevel() {
    std::string result;
    int last_non_zero_offset = 0;
    for (int level = 0; level < config::kNumLevels; level++) {
      int f = NumTableFilesAtLevel(level);
      char buf[100];
      std::snprintf(buf, sizeof(buf), "%s%d", (level ? "," : ""), f);
      result += buf;
      if (f > 0) {
        last_non_zero_offset = result.size();
      }
    }
    result.resize(last_non_zero_offset);
    return result;
  }

  int CountFiles() {
    std::vector<std::string> files;
    env_->GetChildren(dbname_, &files);
    return static_cast<int>(files.size());
  }

  uint64_t Size(const Slice& start, const Slice& limit) {
    Range r(start, limit);
    uint64_t size;
    db_->GetApproximateSizes(&r, 1, &size);
    return size;
  }

  void Compact(const Slice& start, const Slice& limit) {
    db_->CompactRange(&start, &limit);
  }

  // Do n memtable compactions, each of which produces an sstable
  // covering the range [small_key,large_key].
  void MakeTables(int n, const std::string& small_key,
                  const std::string& large_key) {
    for (int i = 0; i < n; i++) {
      Put(small_key, "begin");
      Put(large_key, "end");
      dbfull()->TEST_CompactMemTable();
    }
  }

  // Prevent pushing of new sstables into deeper levels by adding
  // tables that cover a specified range to all levels.
  void FillLevels(const std::string& smallest, const std::string& largest) {
    MakeTables(config::kNumLevels, smallest, largest);
  }

  void DumpFileCounts(const char* label) {
    std::fprintf(stderr, "---\n%s:\n", label);
    std::fprintf(
        stderr, "maxoverlap: %lld\n",
        static_cast<long long>(dbfull()->TEST_MaxNextLevelOverlappingBytes()));
    for (int level = 0; level < config::kNumLevels; level++) {
      int num = NumTableFilesAtLevel(level);
      if (num > 0) {
        std::fprintf(stderr, "  level %3d : %d files\n", level, num);
      }
    }
  }

  std::string DumpSSTableList() {
    std::string property;
    db_->GetProperty("leveldb.sstables", &property);
    return property;
  }

  std::string IterStatus(Iterator* iter) {
    std::string result;
    if (iter->Valid()) {
      result = iter->key().ToString() + "->" + iter->value().ToString();
    } else {
      result = "(invalid)";
    }
    return result;
  }

  bool DeleteAnSSTFile() {
    std::vector<std::string> filenames;
    EXPECT_LEVELDB_OK(env_->GetChildren(dbname_, &filenames));
    uint64_t number;
    FileType type;
    for (size_t i = 0; i < filenames.size(); i++) {
      if (ParseFileName(filenames[i], &number, &type) && type == kTableFile) {
        EXPECT_LEVELDB_OK(env_->RemoveFile(TableFileName(dbname_, number)));
        return true;
      }
    }
    return false;
  }

  // Returns number of files renamed.
  int RenameLDBToSST() {
    std::vector<std::string> filenames;
    EXPECT_LEVELDB_OK(env_->GetChildren(dbname_, &filenames));
    uint64_t number;
    FileType type;
    int files_renamed = 0;
    for (size_t i = 0; i < filenames.size(); i++) {
      if (ParseFileName(filenames[i], &number, &type) && type == kTableFile) {
        const std::string from = TableFileName(dbname_, number);
        const std::string to = SSTTableFileName(dbname_, number);
        EXPECT_LEVELDB_OK(env_->RenameFile(from, to));
        files_renamed++;
      }
    }
    return files_renamed;
  }

 private:
  // Sequence of option configurations to try
  enum OptionConfig { kDefault, kReuse, kFilter, kUncompressed, kEnd };

  const FilterPolicy* filter_policy_;
  int option_config_;
};
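
// Most of the tests below wrap their body in
//
//   do {
//     ... assertions ...
//   } while (ChangeOptions());
//
// so the same assertions run once per option configuration (kDefault, kReuse,
// kFilter, kUncompressed), with the database destroyed and reopened between
// configurations by ChangeOptions().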

TEST_F(DBTest, Empty) {
  do {
    ASSERT_TRUE(db_ != nullptr);
    ASSERT_EQ("NOT_FOUND", Get("foo"));
  } while (ChangeOptions());
}

TEST_F(DBTest, EmptyKey) {
  do {
    ASSERT_LEVELDB_OK(Put("", "v1"));
    ASSERT_EQ("v1", Get(""));
    ASSERT_LEVELDB_OK(Put("", "v2"));
    ASSERT_EQ("v2", Get(""));
  } while (ChangeOptions());
}

TEST_F(DBTest, EmptyValue) {
  do {
    ASSERT_LEVELDB_OK(Put("key", "v1"));
    ASSERT_EQ("v1", Get("key"));
    ASSERT_LEVELDB_OK(Put("key", ""));
    ASSERT_EQ("", Get("key"));
    ASSERT_LEVELDB_OK(Put("key", "v2"));
    ASSERT_EQ("v2", Get("key"));
  } while (ChangeOptions());
}

TEST_F(DBTest, ReadWrite) {
  do {
    ASSERT_LEVELDB_OK(Put("foo", "v1"));
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_LEVELDB_OK(Put("bar", "v2"));
    ASSERT_LEVELDB_OK(Put("foo", "v3"));
    ASSERT_EQ("v3", Get("foo"));
    ASSERT_EQ("v2", Get("bar"));
  } while (ChangeOptions());
}

TEST_F(DBTest, PutDeleteGet) {
  do {
    ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v1"));
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v2"));
    ASSERT_EQ("v2", Get("foo"));
    ASSERT_LEVELDB_OK(db_->Delete(WriteOptions(), "foo"));
    ASSERT_EQ("NOT_FOUND", Get("foo"));
  } while (ChangeOptions());
}

TEST_F(DBTest, GetFromImmutableLayer) {
  do {
    Options options = CurrentOptions();
    options.env = env_;
    options.write_buffer_size = 100000;  // Small write buffer
    Reopen(&options);

    ASSERT_LEVELDB_OK(Put("foo", "v1"));
    ASSERT_EQ("v1", Get("foo"));

    // Block sync calls.
    env_->delay_data_sync_.store(true, std::memory_order_release);
    Put("k1", std::string(100000, 'x'));  // Fill memtable.
    Put("k2", std::string(100000, 'y'));  // Trigger compaction.
    ASSERT_EQ("v1", Get("foo"));
    // Release sync calls.
    env_->delay_data_sync_.store(false, std::memory_order_release);
  } while (ChangeOptions());
}

TEST_F(DBTest, GetFromVersions) {
  do {
    ASSERT_LEVELDB_OK(Put("foo", "v1"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_EQ("v1", Get("foo"));
  } while (ChangeOptions());
}

TEST_F(DBTest, GetMemUsage) {
  do {
    ASSERT_LEVELDB_OK(Put("foo", "v1"));
    std::string val;
    ASSERT_TRUE(db_->GetProperty("leveldb.approximate-memory-usage", &val));
    int mem_usage = std::stoi(val);
    ASSERT_GT(mem_usage, 0);
    ASSERT_LT(mem_usage, 5 * 1024 * 1024);
  } while (ChangeOptions());
}

TEST_F(DBTest, GetSnapshot) {
  do {
    // Try with both a short key and a long key
    for (int i = 0; i < 2; i++) {
      std::string key = (i == 0) ? std::string("foo") : std::string(200, 'x');
      ASSERT_LEVELDB_OK(Put(key, "v1"));
      const Snapshot* s1 = db_->GetSnapshot();
      ASSERT_LEVELDB_OK(Put(key, "v2"));
      ASSERT_EQ("v2", Get(key));
      ASSERT_EQ("v1", Get(key, s1));
      dbfull()->TEST_CompactMemTable();
      ASSERT_EQ("v2", Get(key));
      ASSERT_EQ("v1", Get(key, s1));
      db_->ReleaseSnapshot(s1);
    }
  } while (ChangeOptions());
}

TEST_F(DBTest, GetIdenticalSnapshots) {
  do {
    // Try with both a short key and a long key
    for (int i = 0; i < 2; i++) {
      std::string key = (i == 0) ? std::string("foo") : std::string(200, 'x');
      ASSERT_LEVELDB_OK(Put(key, "v1"));
      const Snapshot* s1 = db_->GetSnapshot();
      const Snapshot* s2 = db_->GetSnapshot();
      const Snapshot* s3 = db_->GetSnapshot();
      ASSERT_LEVELDB_OK(Put(key, "v2"));
      ASSERT_EQ("v2", Get(key));
      ASSERT_EQ("v1", Get(key, s1));
      ASSERT_EQ("v1", Get(key, s2));
      ASSERT_EQ("v1", Get(key, s3));
      db_->ReleaseSnapshot(s1);
      dbfull()->TEST_CompactMemTable();
      ASSERT_EQ("v2", Get(key));
      ASSERT_EQ("v1", Get(key, s2));
      db_->ReleaseSnapshot(s2);
      ASSERT_EQ("v1", Get(key, s3));
      db_->ReleaseSnapshot(s3);
    }
  } while (ChangeOptions());
}

TEST_F(DBTest, IterateOverEmptySnapshot) {
  do {
    const Snapshot* snapshot = db_->GetSnapshot();
    ReadOptions read_options;
    read_options.snapshot = snapshot;
    ASSERT_LEVELDB_OK(Put("foo", "v1"));
    ASSERT_LEVELDB_OK(Put("foo", "v2"));

    Iterator* iterator1 = db_->NewIterator(read_options);
    iterator1->SeekToFirst();
    ASSERT_TRUE(!iterator1->Valid());
    delete iterator1;

    dbfull()->TEST_CompactMemTable();

    Iterator* iterator2 = db_->NewIterator(read_options);
    iterator2->SeekToFirst();
    ASSERT_TRUE(!iterator2->Valid());
    delete iterator2;

    db_->ReleaseSnapshot(snapshot);
  } while (ChangeOptions());
}

TEST_F(DBTest, GetLevel0Ordering) {
  do {
    // Check that we process level-0 files in correct order. The code
    // below generates two level-0 files where the earlier one comes
    // before the later one in the level-0 file list since the earlier
    // one has a smaller "smallest" key.
    ASSERT_LEVELDB_OK(Put("bar", "b"));
    ASSERT_LEVELDB_OK(Put("foo", "v1"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_LEVELDB_OK(Put("foo", "v2"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_EQ("v2", Get("foo"));
  } while (ChangeOptions());
}

TEST_F(DBTest, GetOrderedByLevels) {
  do {
    ASSERT_LEVELDB_OK(Put("foo", "v1"));
    Compact("a", "z");
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_LEVELDB_OK(Put("foo", "v2"));
    ASSERT_EQ("v2", Get("foo"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_EQ("v2", Get("foo"));
  } while (ChangeOptions());
}

TEST_F(DBTest, GetPicksCorrectFile) {
  do {
    // Arrange to have multiple files in a non-level-0 level.
    ASSERT_LEVELDB_OK(Put("a", "va"));
    Compact("a", "b");
    ASSERT_LEVELDB_OK(Put("x", "vx"));
    Compact("x", "y");
    ASSERT_LEVELDB_OK(Put("f", "vf"));
    Compact("f", "g");
    ASSERT_EQ("va", Get("a"));
    ASSERT_EQ("vf", Get("f"));
    ASSERT_EQ("vx", Get("x"));
  } while (ChangeOptions());
}

TEST_F(DBTest, GetEncountersEmptyLevel) {
  do {
    // Arrange for the following to happen:
    //   * sstable A in level 0
    //   * nothing in level 1
    //   * sstable B in level 2
    // Then do enough Get() calls to arrange for an automatic compaction
    // of sstable A.  A bug would cause the compaction to be marked as
    // occurring at level 1 (instead of the correct level 0).

    // Step 1: First place sstables in levels 0 and 2
    int compaction_count = 0;
    while (NumTableFilesAtLevel(0) == 0 || NumTableFilesAtLevel(2) == 0) {
      ASSERT_LE(compaction_count, 100) << "could not fill levels 0 and 2";
      compaction_count++;
      Put("a", "begin");
      Put("z", "end");
      dbfull()->TEST_CompactMemTable();
    }

    // Step 2: clear level 1 if necessary.
    dbfull()->TEST_CompactRange(1, nullptr, nullptr);
    ASSERT_EQ(NumTableFilesAtLevel(0), 1);
    ASSERT_EQ(NumTableFilesAtLevel(1), 0);
    ASSERT_EQ(NumTableFilesAtLevel(2), 1);

    // Step 3: read a bunch of times
    for (int i = 0; i < 1000; i++) {
      ASSERT_EQ("NOT_FOUND", Get("missing"));
    }

    // Step 4: Wait for compaction to finish
    DelayMilliseconds(1000);

    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  } while (ChangeOptions());
}

TEST_F(DBTest, IterEmpty) {
  Iterator* iter = db_->NewIterator(ReadOptions());

  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->Seek("foo");
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  delete iter;
}

TEST_F(DBTest, IterSingle) {
  ASSERT_LEVELDB_OK(Put("a", "va"));
  Iterator* iter = db_->NewIterator(ReadOptions());

  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->Seek("");
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->Seek("a");
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->Seek("b");
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  delete iter;
}

TEST_F(DBTest, IterMulti) {
  ASSERT_LEVELDB_OK(Put("a", "va"));
  ASSERT_LEVELDB_OK(Put("b", "vb"));
  ASSERT_LEVELDB_OK(Put("c", "vc"));
  Iterator* iter = db_->NewIterator(ReadOptions());

  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->Seek("");
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Seek("a");
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Seek("ax");
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Seek("b");
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Seek("z");
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  // Switch from reverse to forward
  iter->SeekToLast();
  iter->Prev();
  iter->Prev();
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "b->vb");

  // Switch from forward to reverse
  iter->SeekToFirst();
  iter->Next();
  iter->Next();
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "b->vb");

  // Make sure iter stays at snapshot
  ASSERT_LEVELDB_OK(Put("a", "va2"));
  ASSERT_LEVELDB_OK(Put("a2", "va3"));
  ASSERT_LEVELDB_OK(Put("b", "vb2"));
  ASSERT_LEVELDB_OK(Put("c", "vc2"));
  ASSERT_LEVELDB_OK(Delete("b"));
  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  delete iter;
}

TEST_F(DBTest, IterSmallAndLargeMix) {
  ASSERT_LEVELDB_OK(Put("a", "va"));
  ASSERT_LEVELDB_OK(Put("b", std::string(100000, 'b')));
  ASSERT_LEVELDB_OK(Put("c", "vc"));
  ASSERT_LEVELDB_OK(Put("d", std::string(100000, 'd')));
  ASSERT_LEVELDB_OK(Put("e", std::string(100000, 'e')));

  Iterator* iter = db_->NewIterator(ReadOptions());

  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "b->" + std::string(100000, 'b'));
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "d->" + std::string(100000, 'd'));
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "e->" + std::string(100000, 'e'));
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "e->" + std::string(100000, 'e'));
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "d->" + std::string(100000, 'd'));
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "b->" + std::string(100000, 'b'));
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  delete iter;
}

TEST_F(DBTest, IterMultiWithDelete) {
  do {
    ASSERT_LEVELDB_OK(Put("a", "va"));
    ASSERT_LEVELDB_OK(Put("b", "vb"));
    ASSERT_LEVELDB_OK(Put("c", "vc"));
    ASSERT_LEVELDB_OK(Delete("b"));
    ASSERT_EQ("NOT_FOUND", Get("b"));

    Iterator* iter = db_->NewIterator(ReadOptions());
    iter->Seek("c");
    ASSERT_EQ(IterStatus(iter), "c->vc");
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "a->va");
    delete iter;
  } while (ChangeOptions());
}

TEST_F(DBTest, IterMultiWithDeleteAndCompaction) {
  do {
    ASSERT_LEVELDB_OK(Put("b", "vb"));
    ASSERT_LEVELDB_OK(Put("c", "vc"));
    ASSERT_LEVELDB_OK(Put("a", "va"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_LEVELDB_OK(Delete("b"));
    ASSERT_EQ("NOT_FOUND", Get("b"));

    Iterator* iter = db_->NewIterator(ReadOptions());
    iter->Seek("c");
    ASSERT_EQ(IterStatus(iter), "c->vc");
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "a->va");
    iter->Seek("b");
    ASSERT_EQ(IterStatus(iter), "c->vc");
    delete iter;
  } while (ChangeOptions());
}

TEST_F(DBTest, Recover) {
  do {
    ASSERT_LEVELDB_OK(Put("foo", "v1"));
    ASSERT_LEVELDB_OK(Put("baz", "v5"));

    Reopen();
    ASSERT_EQ("v1", Get("foo"));

    ASSERT_EQ("v1", Get("foo"));
    ASSERT_EQ("v5", Get("baz"));
    ASSERT_LEVELDB_OK(Put("bar", "v2"));
    ASSERT_LEVELDB_OK(Put("foo", "v3"));

    Reopen();
    ASSERT_EQ("v3", Get("foo"));
    ASSERT_LEVELDB_OK(Put("foo", "v4"));
    ASSERT_EQ("v4", Get("foo"));
    ASSERT_EQ("v2", Get("bar"));
    ASSERT_EQ("v5", Get("baz"));
  } while (ChangeOptions());
}

TEST_F(DBTest, RecoveryWithEmptyLog) {
  do {
    ASSERT_LEVELDB_OK(Put("foo", "v1"));
    ASSERT_LEVELDB_OK(Put("foo", "v2"));
    Reopen();
    Reopen();
    ASSERT_LEVELDB_OK(Put("foo", "v3"));
    Reopen();
    ASSERT_EQ("v3", Get("foo"));
  } while (ChangeOptions());
}

// Check that writes done during a memtable compaction are recovered
// if the database is shutdown during the memtable compaction.
TEST_F(DBTest, RecoverDuringMemtableCompaction) {
  do {
    Options options = CurrentOptions();
    options.env = env_;
    options.write_buffer_size = 1000000;
    Reopen(&options);

    // Trigger a long memtable compaction and reopen the database during it
    ASSERT_LEVELDB_OK(Put("foo", "v1"));  // Goes to 1st log file
    ASSERT_LEVELDB_OK(
        Put("big1", std::string(10000000, 'x')));  // Fills memtable
    ASSERT_LEVELDB_OK(
        Put("big2", std::string(1000, 'y')));  // Triggers compaction
    ASSERT_LEVELDB_OK(Put("bar", "v2"));       // Goes to new log file

    Reopen(&options);
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_EQ("v2", Get("bar"));
    ASSERT_EQ(std::string(10000000, 'x'), Get("big1"));
    ASSERT_EQ(std::string(1000, 'y'), Get("big2"));
  } while (ChangeOptions());
}

static std::string Key(int i) {
  char buf[100];
  std::snprintf(buf, sizeof(buf), "key%06d", i);
  return std::string(buf);
}
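
// Key() zero-pads the index ("key%06d"), so the generated keys sort in
// numeric order under the default bytewise comparator.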
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, MinorCompactionsHappen) {
|
2012-04-17 08:36:46 -07:00
|
|
|
Options options = CurrentOptions();
|
2011-03-18 22:37:00 +00:00
|
|
|
options.write_buffer_size = 10000;
|
|
|
|
Reopen(&options);
|
|
|
|
|
2011-04-12 19:38:58 +00:00
|
|
|
const int N = 500;
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2011-06-22 02:36:45 +00:00
|
|
|
int starting_num_tables = TotalTableFiles();
|
2011-03-18 22:37:00 +00:00
|
|
|
for (int i = 0; i < N; i++) {
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(Put(Key(i), Key(i) + std::string(1000, 'v')));
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
2011-06-22 02:36:45 +00:00
|
|
|
int ending_num_tables = TotalTableFiles();
|
2011-03-18 22:37:00 +00:00
|
|
|
ASSERT_GT(ending_num_tables, starting_num_tables);
|
|
|
|
|
|
|
|
for (int i = 0; i < N; i++) {
|
|
|
|
ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(Key(i)));
|
|
|
|
}
|
|
|
|
|
|
|
|
Reopen();
|
|
|
|
|
|
|
|
for (int i = 0; i < N; i++) {
|
|
|
|
ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(Key(i)));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, RecoverWithLargeLog) {
|
2011-03-18 22:37:00 +00:00
|
|
|
{
|
2012-04-17 08:36:46 -07:00
|
|
|
Options options = CurrentOptions();
|
2011-03-18 22:37:00 +00:00
|
|
|
Reopen(&options);
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(Put("big1", std::string(200000, '1')));
|
|
|
|
ASSERT_LEVELDB_OK(Put("big2", std::string(200000, '2')));
|
|
|
|
ASSERT_LEVELDB_OK(Put("small3", std::string(10, '3')));
|
|
|
|
ASSERT_LEVELDB_OK(Put("small4", std::string(10, '4')));
|
2011-03-18 22:37:00 +00:00
|
|
|
ASSERT_EQ(NumTableFilesAtLevel(0), 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Make sure that if we re-open with a small write buffer size that
|
|
|
|
// we flush table files in the middle of a large log file.
|
2012-04-17 08:36:46 -07:00
|
|
|
Options options = CurrentOptions();
|
2011-03-18 22:37:00 +00:00
|
|
|
options.write_buffer_size = 100000;
|
|
|
|
Reopen(&options);
|
|
|
|
ASSERT_EQ(NumTableFilesAtLevel(0), 3);
|
|
|
|
ASSERT_EQ(std::string(200000, '1'), Get("big1"));
|
|
|
|
ASSERT_EQ(std::string(200000, '2'), Get("big2"));
|
|
|
|
ASSERT_EQ(std::string(10, '3'), Get("small3"));
|
|
|
|
ASSERT_EQ(std::string(10, '4'), Get("small4"));
|
|
|
|
ASSERT_GT(NumTableFilesAtLevel(0), 1);
|
|
|
|
}
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, CompactionsGenerateMultipleFiles) {
|
2012-04-17 08:36:46 -07:00
|
|
|
Options options = CurrentOptions();
|
2019-05-02 11:01:00 -07:00
|
|
|
options.write_buffer_size = 100000000; // Large write buffer
|
2011-03-18 22:37:00 +00:00
|
|
|
Reopen(&options);
|
|
|
|
|
|
|
|
Random rnd(301);
|
|
|
|
|
|
|
|
// Write 8MB (80 values, each 100K)
|
|
|
|
ASSERT_EQ(NumTableFilesAtLevel(0), 0);
|
|
|
|
std::vector<std::string> values;
|
|
|
|
for (int i = 0; i < 80; i++) {
|
|
|
|
values.push_back(RandomString(&rnd, 100000));
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(Put(Key(i), values[i]));
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Reopening moves updates to level-0
|
|
|
|
Reopen(&options);
|
2018-04-10 16:18:06 -07:00
|
|
|
dbfull()->TEST_CompactRange(0, nullptr, nullptr);
|
2011-03-18 22:37:00 +00:00
|
|
|
|
|
|
|
ASSERT_EQ(NumTableFilesAtLevel(0), 0);
|
|
|
|
ASSERT_GT(NumTableFilesAtLevel(1), 1);
|
|
|
|
for (int i = 0; i < 80; i++) {
|
|
|
|
ASSERT_EQ(Get(Key(i)), values[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, RepeatedWritesToSameKey) {
|
2012-04-17 08:36:46 -07:00
|
|
|
Options options = CurrentOptions();
|
2011-07-15 00:20:57 +00:00
|
|
|
options.env = env_;
|
|
|
|
options.write_buffer_size = 100000; // Small write buffer
|
|
|
|
Reopen(&options);
|
|
|
|
|
|
|
|
// We must have at most one file per level except for level-0,
|
|
|
|
// which may have up to kL0_StopWritesTrigger files.
|
|
|
|
const int kMaxFiles = config::kNumLevels + config::kL0_StopWritesTrigger;
|
|
|
|
|
|
|
|
Random rnd(301);
|
|
|
|
std::string value = RandomString(&rnd, 2 * options.write_buffer_size);
|
|
|
|
for (int i = 0; i < 5 * kMaxFiles; i++) {
|
|
|
|
Put("key", value);
|
|
|
|
ASSERT_LE(TotalTableFiles(), kMaxFiles);
|
2020-04-29 22:31:41 +00:00
|
|
|
std::fprintf(stderr, "after %d: %d files\n", i + 1, TotalTableFiles());
|
2011-07-15 00:20:57 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, SparseMerge) {
|
2012-04-17 08:36:46 -07:00
|
|
|
Options options = CurrentOptions();
|
2011-03-22 18:32:49 +00:00
|
|
|
options.compression = kNoCompression;
|
|
|
|
Reopen(&options);
|
|
|
|
|
2011-06-22 02:36:45 +00:00
|
|
|
FillLevels("A", "Z");
|
|
|
|
|
2011-03-22 18:32:49 +00:00
|
|
|
// Suppose there is:
|
|
|
|
// small amount of data with prefix A
|
|
|
|
// large amount of data with prefix B
|
|
|
|
// small amount of data with prefix C
|
|
|
|
// and that recent updates have made small changes to all three prefixes.
|
|
|
|
// Check that we do not do a compaction that merges all of B in one shot.
|
|
|
|
const std::string value(1000, 'x');
|
|
|
|
Put("A", "va");
|
|
|
|
// Write approximately 100MB of "B" values
|
|
|
|
for (int i = 0; i < 100000; i++) {
|
|
|
|
char key[100];
|
2020-04-29 22:31:41 +00:00
|
|
|
std::snprintf(key, sizeof(key), "B%010d", i);
|
2011-03-22 18:32:49 +00:00
|
|
|
Put(key, value);
|
|
|
|
}
|
|
|
|
Put("C", "vc");
|
2011-06-22 02:36:45 +00:00
|
|
|
dbfull()->TEST_CompactMemTable();
|
2018-04-10 16:18:06 -07:00
|
|
|
dbfull()->TEST_CompactRange(0, nullptr, nullptr);
|
2011-03-22 18:32:49 +00:00
|
|
|
|
|
|
|
// Make sparse update
|
2019-05-02 11:01:00 -07:00
|
|
|
Put("A", "va2");
|
2011-03-22 18:32:49 +00:00
|
|
|
Put("B100", "bvalue2");
|
2019-05-02 11:01:00 -07:00
|
|
|
Put("C", "vc2");
|
2011-03-22 18:32:49 +00:00
|
|
|
dbfull()->TEST_CompactMemTable();
|
|
|
|
|
|
|
|
// Compactions should not cause us to create a situation where
|
|
|
|
// a file overlaps too much data at the next level.
|
2019-05-02 11:01:00 -07:00
|
|
|
ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576);
|
2018-04-10 16:18:06 -07:00
|
|
|
dbfull()->TEST_CompactRange(0, nullptr, nullptr);
|
2019-05-02 11:01:00 -07:00
|
|
|
ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576);
|
2018-04-10 16:18:06 -07:00
|
|
|
dbfull()->TEST_CompactRange(1, nullptr, nullptr);
|
2019-05-02 11:01:00 -07:00
|
|
|
ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576);
|
2011-03-22 18:32:49 +00:00
|
|
|
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
static bool Between(uint64_t val, uint64_t low, uint64_t high) {
|
|
|
|
bool result = (val >= low) && (val <= high);
|
|
|
|
if (!result) {
|
2020-04-29 22:31:41 +00:00
|
|
|
std::fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
|
|
|
|
(unsigned long long)(val), (unsigned long long)(low),
|
|
|
|
(unsigned long long)(high));
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, ApproximateSizes) {
|
2012-04-17 08:36:46 -07:00
|
|
|
do {
|
|
|
|
Options options = CurrentOptions();
|
2019-05-02 11:01:00 -07:00
|
|
|
options.write_buffer_size = 100000000; // Large write buffer
|
2012-04-17 08:36:46 -07:00
|
|
|
options.compression = kNoCompression;
|
|
|
|
DestroyAndReopen();
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2012-04-17 08:36:46 -07:00
|
|
|
ASSERT_TRUE(Between(Size("", "xyz"), 0, 0));
|
|
|
|
Reopen(&options);
|
|
|
|
ASSERT_TRUE(Between(Size("", "xyz"), 0, 0));
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2012-04-17 08:36:46 -07:00
|
|
|
// Write 8MB (80 values, each 100K)
|
|
|
|
ASSERT_EQ(NumTableFilesAtLevel(0), 0);
|
|
|
|
const int N = 80;
|
|
|
|
static const int S1 = 100000;
|
|
|
|
static const int S2 = 105000; // Allow some expansion from metadata
|
|
|
|
Random rnd(301);
|
|
|
|
for (int i = 0; i < N; i++) {
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(Put(Key(i), RandomString(&rnd, S1)));
|
2012-04-17 08:36:46 -07:00
|
|
|
}
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2012-04-17 08:36:46 -07:00
|
|
|
// 0 because GetApproximateSizes() does not account for memtable space
|
|
|
|
ASSERT_TRUE(Between(Size("", Key(50)), 0, 0));
|
2011-04-19 23:01:25 +00:00
|
|
|
|
2014-12-11 08:13:18 -08:00
|
|
|
if (options.reuse_logs) {
|
|
|
|
// Recovery will reuse memtable, and GetApproximateSizes() does not
|
|
|
|
// account for memtable usage;
|
|
|
|
Reopen(&options);
|
|
|
|
ASSERT_TRUE(Between(Size("", Key(50)), 0, 0));
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2012-04-17 08:36:46 -07:00
|
|
|
// Check sizes across recovery by reopening a few times
|
|
|
|
for (int run = 0; run < 3; run++) {
|
|
|
|
Reopen(&options);
|
2011-04-20 22:48:11 +00:00
|
|
|
|
2012-04-17 08:36:46 -07:00
|
|
|
for (int compact_start = 0; compact_start < N; compact_start += 10) {
|
|
|
|
for (int i = 0; i < N; i += 10) {
|
2019-05-02 11:01:00 -07:00
|
|
|
ASSERT_TRUE(Between(Size("", Key(i)), S1 * i, S2 * i));
|
|
|
|
ASSERT_TRUE(Between(Size("", Key(i) + ".suffix"), S1 * (i + 1),
|
|
|
|
S2 * (i + 1)));
|
|
|
|
ASSERT_TRUE(Between(Size(Key(i), Key(i + 10)), S1 * 10, S2 * 10));
|
2012-04-17 08:36:46 -07:00
|
|
|
}
|
2019-05-02 11:01:00 -07:00
|
|
|
ASSERT_TRUE(Between(Size("", Key(50)), S1 * 50, S2 * 50));
|
|
|
|
ASSERT_TRUE(Between(Size("", Key(50) + ".suffix"), S1 * 50, S2 * 50));
|
2012-04-17 08:36:46 -07:00
|
|
|
|
|
|
|
std::string cstart_str = Key(compact_start);
|
|
|
|
std::string cend_str = Key(compact_start + 9);
|
|
|
|
Slice cstart = cstart_str;
|
|
|
|
Slice cend = cend_str;
|
|
|
|
dbfull()->TEST_CompactRange(0, &cstart, &cend);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
2011-04-20 22:48:11 +00:00
|
|
|
|
2012-04-17 08:36:46 -07:00
|
|
|
ASSERT_EQ(NumTableFilesAtLevel(0), 0);
|
|
|
|
ASSERT_GT(NumTableFilesAtLevel(1), 0);
|
|
|
|
}
|
|
|
|
} while (ChangeOptions());
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
|
2012-04-17 08:36:46 -07:00
|
|
|
do {
|
|
|
|
Options options = CurrentOptions();
|
|
|
|
options.compression = kNoCompression;
|
|
|
|
Reopen();
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2012-04-17 08:36:46 -07:00
|
|
|
Random rnd(301);
|
|
|
|
std::string big1 = RandomString(&rnd, 100000);
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(Put(Key(0), RandomString(&rnd, 10000)));
|
|
|
|
ASSERT_LEVELDB_OK(Put(Key(1), RandomString(&rnd, 10000)));
|
|
|
|
ASSERT_LEVELDB_OK(Put(Key(2), big1));
|
|
|
|
ASSERT_LEVELDB_OK(Put(Key(3), RandomString(&rnd, 10000)));
|
|
|
|
ASSERT_LEVELDB_OK(Put(Key(4), big1));
|
|
|
|
ASSERT_LEVELDB_OK(Put(Key(5), RandomString(&rnd, 10000)));
|
|
|
|
ASSERT_LEVELDB_OK(Put(Key(6), RandomString(&rnd, 300000)));
|
|
|
|
ASSERT_LEVELDB_OK(Put(Key(7), RandomString(&rnd, 10000)));
|
2012-04-17 08:36:46 -07:00
|
|
|
|
2014-12-11 08:13:18 -08:00
|
|
|
if (options.reuse_logs) {
|
|
|
|
// Need to force a memtable compaction since recovery does not do so.
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable());
|
2014-12-11 08:13:18 -08:00
|
|
|
}
|
|
|
|
|
2012-04-17 08:36:46 -07:00
|
|
|
// Check sizes across recovery by reopening a few times
|
|
|
|
for (int run = 0; run < 3; run++) {
|
|
|
|
Reopen(&options);
|
|
|
|
|
|
|
|
ASSERT_TRUE(Between(Size("", Key(0)), 0, 0));
|
|
|
|
ASSERT_TRUE(Between(Size("", Key(1)), 10000, 11000));
|
|
|
|
ASSERT_TRUE(Between(Size("", Key(2)), 20000, 21000));
|
|
|
|
ASSERT_TRUE(Between(Size("", Key(3)), 120000, 121000));
|
|
|
|
ASSERT_TRUE(Between(Size("", Key(4)), 130000, 131000));
|
|
|
|
ASSERT_TRUE(Between(Size("", Key(5)), 230000, 231000));
|
|
|
|
ASSERT_TRUE(Between(Size("", Key(6)), 240000, 241000));
|
|
|
|
ASSERT_TRUE(Between(Size("", Key(7)), 540000, 541000));
|
|
|
|
ASSERT_TRUE(Between(Size("", Key(8)), 550000, 560000));
|
|
|
|
|
|
|
|
ASSERT_TRUE(Between(Size(Key(3), Key(5)), 110000, 111000));
|
|
|
|
|
2018-04-10 16:18:06 -07:00
|
|
|
dbfull()->TEST_CompactRange(0, nullptr, nullptr);
|
2012-04-17 08:36:46 -07:00
|
|
|
}
|
|
|
|
} while (ChangeOptions());
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, IteratorPinsRef) {
|
2011-03-18 22:37:00 +00:00
|
|
|
Put("foo", "hello");
|
|
|
|
|
|
|
|
// Get iterator that will yield the current contents of the DB.
|
|
|
|
Iterator* iter = db_->NewIterator(ReadOptions());
|
|
|
|
|
|
|
|
// Write to force compactions
|
|
|
|
Put("foo", "newvalue1");
|
|
|
|
for (int i = 0; i < 100; i++) {
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(
|
|
|
|
Put(Key(i), Key(i) + std::string(100000, 'v'))); // 100K values
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
Put("foo", "newvalue2");
|
|
|
|
|
|
|
|
iter->SeekToFirst();
|
|
|
|
ASSERT_TRUE(iter->Valid());
|
|
|
|
ASSERT_EQ("foo", iter->key().ToString());
|
|
|
|
ASSERT_EQ("hello", iter->value().ToString());
|
|
|
|
iter->Next();
|
|
|
|
ASSERT_TRUE(!iter->Valid());
|
|
|
|
delete iter;
|
|
|
|
}
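
// Illustrative sketch, not part of the original test suite: the forward-scan
// pattern over the iterator API exercised by IteratorPinsRef, assuming `db`
// is an already-open DB*.
void ForwardScanSketch(DB* db) {
  Iterator* it = db->NewIterator(ReadOptions());
  for (it->SeekToFirst(); it->Valid(); it->Next()) {
    std::fprintf(stderr, "%s => %s\n", it->key().ToString().c_str(),
                 it->value().ToString().c_str());
  }
  if (!it->status().ok()) {
    std::fprintf(stderr, "scan failed: %s\n", it->status().ToString().c_str());
  }
  // Delete the iterator promptly; while it is live it pins the DB state it
  // was created over, as the test above demonstrates.
  delete it;
}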
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, Snapshot) {
|
2012-04-17 08:36:46 -07:00
|
|
|
do {
|
|
|
|
Put("foo", "v1");
|
|
|
|
const Snapshot* s1 = db_->GetSnapshot();
|
|
|
|
Put("foo", "v2");
|
|
|
|
const Snapshot* s2 = db_->GetSnapshot();
|
|
|
|
Put("foo", "v3");
|
|
|
|
const Snapshot* s3 = db_->GetSnapshot();
|
|
|
|
|
|
|
|
Put("foo", "v4");
|
|
|
|
ASSERT_EQ("v1", Get("foo", s1));
|
|
|
|
ASSERT_EQ("v2", Get("foo", s2));
|
|
|
|
ASSERT_EQ("v3", Get("foo", s3));
|
|
|
|
ASSERT_EQ("v4", Get("foo"));
|
|
|
|
|
|
|
|
db_->ReleaseSnapshot(s3);
|
|
|
|
ASSERT_EQ("v1", Get("foo", s1));
|
|
|
|
ASSERT_EQ("v2", Get("foo", s2));
|
|
|
|
ASSERT_EQ("v4", Get("foo"));
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2012-04-17 08:36:46 -07:00
|
|
|
db_->ReleaseSnapshot(s1);
|
|
|
|
ASSERT_EQ("v2", Get("foo", s2));
|
|
|
|
ASSERT_EQ("v4", Get("foo"));
|
2011-06-22 02:36:45 +00:00
|
|
|
|
2012-04-17 08:36:46 -07:00
|
|
|
db_->ReleaseSnapshot(s2);
|
|
|
|
ASSERT_EQ("v4", Get("foo"));
|
|
|
|
} while (ChangeOptions());
|
|
|
|
}
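
// Illustrative sketch, not part of the original test suite: reading at a fixed
// point in time with a snapshot, the pattern the Snapshot test exercises.
// `db` is assumed to be an already-open DB*; the key and values are made up.
void SnapshotReadSketch(DB* db) {
  db->Put(WriteOptions(), "k", "v1");
  const Snapshot* snap = db->GetSnapshot();
  db->Put(WriteOptions(), "k", "v2");  // Not visible through `snap`.

  ReadOptions read_options;
  read_options.snapshot = snap;
  std::string value;
  Status s = db->Get(read_options, "k", &value);
  if (s.ok()) {
    // `value` is "v1": the snapshot pins the state at GetSnapshot() time.
    std::fprintf(stderr, "snapshot read: %s\n", value.c_str());
  }
  db->ReleaseSnapshot(snap);  // Snapshots must be released, never deleted.
}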
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, HiddenValuesAreRemoved) {
|
2012-04-17 08:36:46 -07:00
|
|
|
do {
|
|
|
|
Random rnd(301);
|
|
|
|
FillLevels("a", "z");
|
|
|
|
|
|
|
|
std::string big = RandomString(&rnd, 50000);
|
|
|
|
Put("foo", big);
|
|
|
|
Put("pastfoo", "v");
|
|
|
|
const Snapshot* snapshot = db_->GetSnapshot();
|
|
|
|
Put("foo", "tiny");
|
2019-05-02 11:01:00 -07:00
|
|
|
Put("pastfoo2", "v2"); // Advance sequence number one more
|
2012-04-17 08:36:46 -07:00
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable());
|
2012-04-17 08:36:46 -07:00
|
|
|
ASSERT_GT(NumTableFilesAtLevel(0), 0);
|
|
|
|
|
|
|
|
ASSERT_EQ(big, Get("foo", snapshot));
|
|
|
|
ASSERT_TRUE(Between(Size("", "pastfoo"), 50000, 60000));
|
|
|
|
db_->ReleaseSnapshot(snapshot);
|
|
|
|
ASSERT_EQ(AllEntriesFor("foo"), "[ tiny, " + big + " ]");
|
|
|
|
Slice x("x");
|
2018-04-10 16:18:06 -07:00
|
|
|
dbfull()->TEST_CompactRange(0, nullptr, &x);
|
2012-04-17 08:36:46 -07:00
|
|
|
ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]");
|
|
|
|
ASSERT_EQ(NumTableFilesAtLevel(0), 0);
|
|
|
|
ASSERT_GE(NumTableFilesAtLevel(1), 1);
|
2018-04-10 16:18:06 -07:00
|
|
|
dbfull()->TEST_CompactRange(1, nullptr, &x);
|
2012-04-17 08:36:46 -07:00
|
|
|
ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]");
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2012-04-17 08:36:46 -07:00
|
|
|
ASSERT_TRUE(Between(Size("", "pastfoo"), 0, 1000));
|
|
|
|
} while (ChangeOptions());
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, DeletionMarkers1) {
|
2011-03-18 22:37:00 +00:00
|
|
|
Put("foo", "v1");
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable());
|
2011-07-15 00:20:57 +00:00
|
|
|
const int last = config::kMaxMemCompactLevel;
|
2019-05-02 11:01:00 -07:00
|
|
|
ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo => v1 is now in last level
|
2011-06-22 02:36:45 +00:00
|
|
|
|
|
|
|
// Place a table at level last-1 to prevent merging with preceding mutation
|
|
|
|
Put("a", "begin");
|
|
|
|
Put("z", "end");
|
|
|
|
dbfull()->TEST_CompactMemTable();
|
|
|
|
ASSERT_EQ(NumTableFilesAtLevel(last), 1);
|
2019-05-02 11:01:00 -07:00
|
|
|
ASSERT_EQ(NumTableFilesAtLevel(last - 1), 1);
|
2011-06-22 02:36:45 +00:00
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
Delete("foo");
|
|
|
|
Put("foo", "v2");
|
|
|
|
ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2
|
2011-03-18 22:37:00 +00:00
|
|
|
ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
|
2011-10-05 16:30:28 -07:00
|
|
|
Slice z("z");
|
2019-05-02 11:01:00 -07:00
|
|
|
dbfull()->TEST_CompactRange(last - 2, nullptr, &z);
|
2011-03-18 22:37:00 +00:00
|
|
|
// DEL eliminated, but v1 remains because we aren't compacting that level
|
|
|
|
// (DEL can be eliminated because v2 hides v1).
|
|
|
|
ASSERT_EQ(AllEntriesFor("foo"), "[ v2, v1 ]");
|
2019-05-02 11:01:00 -07:00
|
|
|
dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr);
|
2011-06-22 02:36:45 +00:00
|
|
|
// Merging last-1 w/ last, so we are the base level for "foo", so
|
|
|
|
// DEL is removed. (as is v1).
|
2011-03-18 22:37:00 +00:00
|
|
|
ASSERT_EQ(AllEntriesFor("foo"), "[ v2 ]");
|
|
|
|
}
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, DeletionMarkers2) {
|
2011-03-18 22:37:00 +00:00
|
|
|
Put("foo", "v1");
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable());
|
2011-07-15 00:20:57 +00:00
|
|
|
const int last = config::kMaxMemCompactLevel;
|
2019-05-02 11:01:00 -07:00
|
|
|
ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo => v1 is now in last level
|
2011-06-22 02:36:45 +00:00
|
|
|
|
|
|
|
// Place a table at level last-1 to prevent merging with preceding mutation
|
|
|
|
Put("a", "begin");
|
|
|
|
Put("z", "end");
|
|
|
|
dbfull()->TEST_CompactMemTable();
|
|
|
|
ASSERT_EQ(NumTableFilesAtLevel(last), 1);
|
2019-05-02 11:01:00 -07:00
|
|
|
ASSERT_EQ(NumTableFilesAtLevel(last - 1), 1);
|
2011-06-22 02:36:45 +00:00
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
Delete("foo");
|
|
|
|
ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2
|
2011-03-18 22:37:00 +00:00
|
|
|
ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
|
2019-05-02 11:01:00 -07:00
|
|
|
dbfull()->TEST_CompactRange(last - 2, nullptr, nullptr);
|
2011-06-22 02:36:45 +00:00
|
|
|
// DEL kept: "last" file overlaps
|
2011-03-18 22:37:00 +00:00
|
|
|
ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
|
2019-05-02 11:01:00 -07:00
|
|
|
dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr);
|
2011-06-22 02:36:45 +00:00
|
|
|
// Merging last-1 w/ last, so we are the base level for "foo", so
|
|
|
|
// DEL is removed. (as is v1).
|
2011-03-18 22:37:00 +00:00
|
|
|
ASSERT_EQ(AllEntriesFor("foo"), "[ ]");
|
|
|
|
}
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, OverlapInLevel0) {
|
2012-04-17 08:36:46 -07:00
|
|
|
do {
|
|
|
|
ASSERT_EQ(config::kMaxMemCompactLevel, 2) << "Fix test to match config";
|
2011-10-05 16:30:28 -07:00
|
|
|
|
2019-05-02 11:01:00 -07:00
|
|
|
// Fill levels 1 and 2 to disable the pushing of new memtables to levels >
|
|
|
|
// 0.
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(Put("100", "v100"));
|
|
|
|
ASSERT_LEVELDB_OK(Put("999", "v999"));
|
2012-04-17 08:36:46 -07:00
|
|
|
dbfull()->TEST_CompactMemTable();
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(Delete("100"));
|
|
|
|
ASSERT_LEVELDB_OK(Delete("999"));
|
2012-04-17 08:36:46 -07:00
|
|
|
dbfull()->TEST_CompactMemTable();
|
|
|
|
ASSERT_EQ("0,1,1", FilesPerLevel());
|
|
|
|
|
|
|
|
// Make files spanning the following ranges in level-0:
|
|
|
|
// files[0] 200 .. 900
|
|
|
|
// files[1] 300 .. 500
|
|
|
|
// Note that files are sorted by smallest key.
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(Put("300", "v300"));
|
|
|
|
ASSERT_LEVELDB_OK(Put("500", "v500"));
|
2012-04-17 08:36:46 -07:00
|
|
|
dbfull()->TEST_CompactMemTable();
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(Put("200", "v200"));
|
|
|
|
ASSERT_LEVELDB_OK(Put("600", "v600"));
|
|
|
|
ASSERT_LEVELDB_OK(Put("900", "v900"));
|
2012-04-17 08:36:46 -07:00
|
|
|
dbfull()->TEST_CompactMemTable();
|
|
|
|
ASSERT_EQ("2,1,1", FilesPerLevel());
|
2011-10-05 16:30:28 -07:00
|
|
|
|
2012-04-17 08:36:46 -07:00
|
|
|
// Compact away the placeholder files we created initially
|
2018-04-10 16:18:06 -07:00
|
|
|
dbfull()->TEST_CompactRange(1, nullptr, nullptr);
|
|
|
|
dbfull()->TEST_CompactRange(2, nullptr, nullptr);
|
2012-04-17 08:36:46 -07:00
|
|
|
ASSERT_EQ("2", FilesPerLevel());
|
2011-10-05 16:30:28 -07:00
|
|
|
|
2012-04-17 08:36:46 -07:00
|
|
|
// Do a memtable compaction. Before bug-fix, the compaction would
|
|
|
|
// not detect the overlap with level-0 files and would incorrectly place
|
|
|
|
// the deletion in a deeper level.
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(Delete("600"));
|
2012-04-17 08:36:46 -07:00
|
|
|
dbfull()->TEST_CompactMemTable();
|
|
|
|
ASSERT_EQ("3", FilesPerLevel());
|
|
|
|
ASSERT_EQ("NOT_FOUND", Get("600"));
|
|
|
|
} while (ChangeOptions());
|
2011-10-05 16:30:28 -07:00
|
|
|
}
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, L0_CompactionBug_Issue44_a) {
|
2012-04-17 08:36:46 -07:00
|
|
|
Reopen();
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(Put("b", "v"));
|
2012-04-17 08:36:46 -07:00
|
|
|
Reopen();
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(Delete("b"));
|
|
|
|
ASSERT_LEVELDB_OK(Delete("a"));
|
2012-04-17 08:36:46 -07:00
|
|
|
Reopen();
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(Delete("a"));
|
2012-04-17 08:36:46 -07:00
|
|
|
Reopen();
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(Put("a", "v"));
|
2012-04-17 08:36:46 -07:00
|
|
|
Reopen();
|
|
|
|
Reopen();
|
|
|
|
ASSERT_EQ("(a->v)", Contents());
|
2013-06-13 16:14:06 -07:00
|
|
|
DelayMilliseconds(1000); // Wait for compaction to finish
|
2012-04-17 08:36:46 -07:00
|
|
|
ASSERT_EQ("(a->v)", Contents());
|
2011-10-31 17:22:06 +00:00
|
|
|
}
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, L0_CompactionBug_Issue44_b) {
|
2012-04-17 08:36:46 -07:00
|
|
|
Reopen();
|
2019-03-11 13:04:53 -07:00
|
|
|
Put("", "");
|
2012-04-17 08:36:46 -07:00
|
|
|
Reopen();
|
|
|
|
Delete("e");
|
2019-03-11 13:04:53 -07:00
|
|
|
Put("", "");
|
2012-04-17 08:36:46 -07:00
|
|
|
Reopen();
|
|
|
|
Put("c", "cv");
|
|
|
|
Reopen();
|
2019-03-11 13:04:53 -07:00
|
|
|
Put("", "");
|
2012-04-17 08:36:46 -07:00
|
|
|
Reopen();
|
2019-03-11 13:04:53 -07:00
|
|
|
Put("", "");
|
2013-06-13 16:14:06 -07:00
|
|
|
DelayMilliseconds(1000); // Wait for compaction to finish
|
2012-04-17 08:36:46 -07:00
|
|
|
Reopen();
|
2019-03-11 13:04:53 -07:00
|
|
|
Put("d", "dv");
|
2012-04-17 08:36:46 -07:00
|
|
|
Reopen();
|
2019-03-11 13:04:53 -07:00
|
|
|
Put("", "");
|
2012-04-17 08:36:46 -07:00
|
|
|
Reopen();
|
|
|
|
Delete("d");
|
|
|
|
Delete("b");
|
|
|
|
Reopen();
|
|
|
|
ASSERT_EQ("(->)(c->cv)", Contents());
|
2013-06-13 16:14:06 -07:00
|
|
|
DelayMilliseconds(1000); // Wait for compaction to finish
|
2012-04-17 08:36:46 -07:00
|
|
|
ASSERT_EQ("(->)(c->cv)", Contents());
|
2011-10-31 17:22:06 +00:00
|
|
|
}
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, Fflush_Issue474) {
|
2017-10-02 12:37:45 -07:00
|
|
|
static const int kNum = 100000;
|
|
|
|
Random rnd(test::RandomSeed());
|
|
|
|
for (int i = 0; i < kNum; i++) {
|
2020-04-29 22:31:41 +00:00
|
|
|
std::fflush(nullptr);
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(Put(RandomKey(&rnd), RandomString(&rnd, 100)));
|
2017-10-02 12:37:45 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, ComparatorCheck) {
|
2011-03-18 22:37:00 +00:00
|
|
|
class NewComparator : public Comparator {
|
|
|
|
public:
|
2019-05-09 14:00:07 -07:00
|
|
|
const char* Name() const override { return "leveldb.NewComparator"; }
|
|
|
|
int Compare(const Slice& a, const Slice& b) const override {
|
2011-03-18 22:37:00 +00:00
|
|
|
return BytewiseComparator()->Compare(a, b);
|
|
|
|
}
|
2019-05-09 14:00:07 -07:00
|
|
|
void FindShortestSeparator(std::string* s, const Slice& l) const override {
|
2011-03-18 22:37:00 +00:00
|
|
|
BytewiseComparator()->FindShortestSeparator(s, l);
|
|
|
|
}
|
2019-05-09 14:00:07 -07:00
|
|
|
void FindShortSuccessor(std::string* key) const override {
|
2011-03-18 22:37:00 +00:00
|
|
|
BytewiseComparator()->FindShortSuccessor(key);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
NewComparator cmp;
|
2012-04-17 08:36:46 -07:00
|
|
|
Options new_options = CurrentOptions();
|
2011-03-18 22:37:00 +00:00
|
|
|
new_options.comparator = &cmp;
|
|
|
|
Status s = TryReopen(&new_options);
|
|
|
|
ASSERT_TRUE(!s.ok());
|
|
|
|
ASSERT_TRUE(s.ToString().find("comparator") != std::string::npos)
|
|
|
|
<< s.ToString();
|
|
|
|
}
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, CustomComparator) {
|
2011-10-31 17:22:06 +00:00
|
|
|
class NumberComparator : public Comparator {
|
|
|
|
public:
|
2019-05-09 14:00:07 -07:00
|
|
|
const char* Name() const override { return "test.NumberComparator"; }
|
|
|
|
int Compare(const Slice& a, const Slice& b) const override {
|
2011-11-14 17:06:16 +00:00
|
|
|
return ToNumber(a) - ToNumber(b);
|
|
|
|
}
|
2019-05-09 14:00:07 -07:00
|
|
|
void FindShortestSeparator(std::string* s, const Slice& l) const override {
|
2019-05-02 11:01:00 -07:00
|
|
|
ToNumber(*s); // Check format
|
|
|
|
ToNumber(l); // Check format
|
2011-11-14 17:06:16 +00:00
|
|
|
}
|
2019-05-09 14:00:07 -07:00
|
|
|
void FindShortSuccessor(std::string* key) const override {
|
2019-05-02 11:01:00 -07:00
|
|
|
ToNumber(*key); // Check format
|
2011-11-14 17:06:16 +00:00
|
|
|
}
|
2019-05-02 11:01:00 -07:00
|
|
|
|
2011-11-14 17:06:16 +00:00
|
|
|
private:
|
|
|
|
static int ToNumber(const Slice& x) {
|
|
|
|
// Check that there are no extra characters.
|
2019-11-21 13:09:53 -08:00
|
|
|
EXPECT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size() - 1] == ']')
|
2011-11-14 17:06:16 +00:00
|
|
|
<< EscapeString(x);
|
|
|
|
int val;
|
|
|
|
char ignored;
|
2019-11-21 13:09:53 -08:00
|
|
|
EXPECT_TRUE(sscanf(x.ToString().c_str(), "[%i]%c", &val, &ignored) == 1)
|
2011-11-14 17:06:16 +00:00
|
|
|
<< EscapeString(x);
|
|
|
|
return val;
|
2011-10-31 17:22:06 +00:00
|
|
|
}
|
|
|
|
};
|
|
|
|
NumberComparator cmp;
|
2012-04-17 08:36:46 -07:00
|
|
|
Options new_options = CurrentOptions();
|
2011-10-31 17:22:06 +00:00
|
|
|
new_options.create_if_missing = true;
|
|
|
|
new_options.comparator = &cmp;
|
2019-05-02 11:01:00 -07:00
|
|
|
new_options.filter_policy = nullptr; // Cannot use bloom filters
|
2011-11-14 17:06:16 +00:00
|
|
|
new_options.write_buffer_size = 1000; // Compact more often
|
2011-10-31 17:22:06 +00:00
|
|
|
DestroyAndReopen(&new_options);
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(Put("[10]", "ten"));
|
|
|
|
ASSERT_LEVELDB_OK(Put("[0x14]", "twenty"));
|
2011-10-31 17:22:06 +00:00
|
|
|
for (int i = 0; i < 2; i++) {
|
2011-11-14 17:06:16 +00:00
|
|
|
ASSERT_EQ("ten", Get("[10]"));
|
|
|
|
ASSERT_EQ("ten", Get("[0xa]"));
|
|
|
|
ASSERT_EQ("twenty", Get("[20]"));
|
|
|
|
ASSERT_EQ("twenty", Get("[0x14]"));
|
2012-04-17 08:36:46 -07:00
|
|
|
ASSERT_EQ("NOT_FOUND", Get("[15]"));
|
|
|
|
ASSERT_EQ("NOT_FOUND", Get("[0xf]"));
|
2011-11-14 17:06:16 +00:00
|
|
|
Compact("[0]", "[9999]");
|
|
|
|
}
|
|
|
|
|
|
|
|
for (int run = 0; run < 2; run++) {
|
|
|
|
for (int i = 0; i < 1000; i++) {
|
|
|
|
char buf[100];
|
2020-04-29 22:31:41 +00:00
|
|
|
std::snprintf(buf, sizeof(buf), "[%d]", i * 10);
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(Put(buf, buf));
|
2011-11-14 17:06:16 +00:00
|
|
|
}
|
|
|
|
Compact("[0]", "[1000000]");
|
2011-10-31 17:22:06 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, ManualCompaction) {
|
2011-10-05 16:30:28 -07:00
|
|
|
ASSERT_EQ(config::kMaxMemCompactLevel, 2)
|
|
|
|
<< "Need to update this test to match kMaxMemCompactLevel";
|
|
|
|
|
|
|
|
MakeTables(3, "p", "q");
|
|
|
|
ASSERT_EQ("1,1,1", FilesPerLevel());
|
|
|
|
|
|
|
|
// Compaction range falls before files
|
|
|
|
Compact("", "c");
|
|
|
|
ASSERT_EQ("1,1,1", FilesPerLevel());
|
|
|
|
|
|
|
|
// Compaction range falls after files
|
|
|
|
Compact("r", "z");
|
|
|
|
ASSERT_EQ("1,1,1", FilesPerLevel());
|
|
|
|
|
|
|
|
// Compaction range overlaps files
|
|
|
|
Compact("p1", "p9");
|
|
|
|
ASSERT_EQ("0,0,1", FilesPerLevel());
|
|
|
|
|
|
|
|
// Populate a different range
|
|
|
|
MakeTables(3, "c", "e");
|
|
|
|
ASSERT_EQ("1,1,2", FilesPerLevel());
|
|
|
|
|
|
|
|
// Compact just the new range
|
|
|
|
Compact("b", "f");
|
|
|
|
ASSERT_EQ("0,0,2", FilesPerLevel());
|
|
|
|
|
|
|
|
// Compact all
|
|
|
|
MakeTables(1, "a", "z");
|
|
|
|
ASSERT_EQ("0,1,2", FilesPerLevel());
|
2018-04-10 16:18:06 -07:00
|
|
|
db_->CompactRange(nullptr, nullptr);
|
2011-10-05 16:30:28 -07:00
|
|
|
ASSERT_EQ("0,0,1", FilesPerLevel());
|
|
|
|
}
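
// Illustrative sketch, not part of the original test suite: the public manual
// compaction entry point driven above. A nullptr bound means "unbounded" on
// that side. `db` is assumed to be an already-open DB*; keys are hypothetical.
void ManualCompactionSketch(DB* db) {
  Slice begin("p1");
  Slice end("p9");
  db->CompactRange(&begin, &end);      // Compact only keys in ["p1", "p9"].
  db->CompactRange(nullptr, nullptr);  // Compact the entire key space.
}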
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, DBOpen_Options) {
|
|
|
|
std::string dbname = testing::TempDir() + "db_options_test";
|
2011-03-18 22:37:00 +00:00
|
|
|
DestroyDB(dbname, Options());
|
|
|
|
|
|
|
|
// Does not exist, and create_if_missing == false: error
|
2018-04-10 16:18:06 -07:00
|
|
|
DB* db = nullptr;
|
2011-03-18 22:37:00 +00:00
|
|
|
Options opts;
|
|
|
|
opts.create_if_missing = false;
|
|
|
|
Status s = DB::Open(opts, dbname, &db);
|
2018-04-10 16:18:06 -07:00
|
|
|
ASSERT_TRUE(strstr(s.ToString().c_str(), "does not exist") != nullptr);
|
|
|
|
ASSERT_TRUE(db == nullptr);
|
2011-03-18 22:37:00 +00:00
|
|
|
|
|
|
|
// Does not exist, and create_if_missing == true: OK
|
|
|
|
opts.create_if_missing = true;
|
|
|
|
s = DB::Open(opts, dbname, &db);
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(s);
|
2018-04-10 16:18:06 -07:00
|
|
|
ASSERT_TRUE(db != nullptr);
|
2011-03-18 22:37:00 +00:00
|
|
|
|
|
|
|
delete db;
|
2018-04-10 16:18:06 -07:00
|
|
|
db = nullptr;
|
2011-03-18 22:37:00 +00:00
|
|
|
|
|
|
|
// Does exist, and error_if_exists == true: error
|
|
|
|
opts.create_if_missing = false;
|
|
|
|
opts.error_if_exists = true;
|
|
|
|
s = DB::Open(opts, dbname, &db);
|
2018-04-10 16:18:06 -07:00
|
|
|
ASSERT_TRUE(strstr(s.ToString().c_str(), "exists") != nullptr);
|
|
|
|
ASSERT_TRUE(db == nullptr);
|
2011-03-18 22:37:00 +00:00
|
|
|
|
|
|
|
// Does exist, and error_if_exists == false: OK
|
|
|
|
opts.create_if_missing = true;
|
|
|
|
opts.error_if_exists = false;
|
|
|
|
s = DB::Open(opts, dbname, &db);
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(s);
|
2018-04-10 16:18:06 -07:00
|
|
|
ASSERT_TRUE(db != nullptr);
|
2011-03-18 22:37:00 +00:00
|
|
|
|
|
|
|
delete db;
|
2018-04-10 16:18:06 -07:00
|
|
|
db = nullptr;
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, DestroyEmptyDir) {
|
|
|
|
std::string dbname = testing::TempDir() + "db_empty_dir";
|
2017-10-17 13:05:47 -07:00
|
|
|
TestEnv env(Env::Default());
|
2020-01-08 09:14:53 -08:00
|
|
|
env.RemoveDir(dbname);
|
2017-10-17 13:05:47 -07:00
|
|
|
ASSERT_TRUE(!env.FileExists(dbname));
|
|
|
|
|
|
|
|
Options opts;
|
|
|
|
opts.env = &env;
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(env.CreateDir(dbname));
|
2017-10-17 13:05:47 -07:00
|
|
|
ASSERT_TRUE(env.FileExists(dbname));
|
|
|
|
std::vector<std::string> children;
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(env.GetChildren(dbname, &children));
|
2019-03-01 13:12:01 -08:00
|
|
|
// The stock Env's do not filter out '.' and '..' special files.
|
2017-10-17 13:05:47 -07:00
|
|
|
ASSERT_EQ(2, children.size());
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(DestroyDB(dbname, opts));
|
2017-10-17 13:05:47 -07:00
|
|
|
ASSERT_TRUE(!env.FileExists(dbname));
|
|
|
|
|
|
|
|
// Should also be destroyed if Env is filtering out dot files.
|
|
|
|
env.SetIgnoreDotFiles(true);
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(env.CreateDir(dbname));
|
2017-10-17 13:05:47 -07:00
|
|
|
ASSERT_TRUE(env.FileExists(dbname));
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(env.GetChildren(dbname, &children));
|
2017-10-17 13:05:47 -07:00
|
|
|
ASSERT_EQ(0, children.size());
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(DestroyDB(dbname, opts));
|
2017-10-17 13:05:47 -07:00
|
|
|
ASSERT_TRUE(!env.FileExists(dbname));
|
|
|
|
}
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, DestroyOpenDB) {
|
|
|
|
std::string dbname = testing::TempDir() + "open_db_dir";
|
2020-01-08 09:14:53 -08:00
|
|
|
env_->RemoveDir(dbname);
|
2017-10-17 13:05:47 -07:00
|
|
|
ASSERT_TRUE(!env_->FileExists(dbname));
|
|
|
|
|
|
|
|
Options opts;
|
|
|
|
opts.create_if_missing = true;
|
2018-04-10 16:18:06 -07:00
|
|
|
DB* db = nullptr;
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(DB::Open(opts, dbname, &db));
|
2018-04-10 16:18:06 -07:00
|
|
|
ASSERT_TRUE(db != nullptr);
|
2017-10-17 13:05:47 -07:00
|
|
|
|
|
|
|
// Must fail to destroy an open db.
|
|
|
|
ASSERT_TRUE(env_->FileExists(dbname));
|
|
|
|
ASSERT_TRUE(!DestroyDB(dbname, Options()).ok());
|
|
|
|
ASSERT_TRUE(env_->FileExists(dbname));
|
|
|
|
|
|
|
|
delete db;
|
2018-04-10 16:18:06 -07:00
|
|
|
db = nullptr;
|
2017-10-17 13:05:47 -07:00
|
|
|
|
|
|
|
// Should succeed destroying a closed db.
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(DestroyDB(dbname, Options()));
|
2017-10-17 13:05:47 -07:00
|
|
|
ASSERT_TRUE(!env_->FileExists(dbname));
|
|
|
|
}
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, Locking) {
|
2018-04-10 16:18:06 -07:00
|
|
|
DB* db2 = nullptr;
|
2012-10-12 11:53:12 -07:00
|
|
|
Status s = DB::Open(CurrentOptions(), dbname_, &db2);
|
|
|
|
ASSERT_TRUE(!s.ok()) << "Locking did not prevent re-opening db";
|
|
|
|
}
|
|
|
|
|
2012-01-25 14:56:52 -08:00
|
|
|
// Check that number of files does not grow when we are out of space
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, NoSpace) {
|
2012-04-17 08:36:46 -07:00
|
|
|
Options options = CurrentOptions();
|
2012-01-25 14:56:52 -08:00
|
|
|
options.env = env_;
|
|
|
|
Reopen(&options);
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(Put("foo", "v1"));
|
2012-01-25 14:56:52 -08:00
|
|
|
ASSERT_EQ("v1", Get("foo"));
|
|
|
|
Compact("a", "z");
|
|
|
|
const int num_files = CountFiles();
|
2019-03-11 13:04:53 -07:00
|
|
|
// Force out-of-space errors.
|
|
|
|
env_->no_space_.store(true, std::memory_order_release);
|
2013-12-10 10:36:31 -08:00
|
|
|
for (int i = 0; i < 10; i++) {
|
2019-05-02 11:01:00 -07:00
|
|
|
for (int level = 0; level < config::kNumLevels - 1; level++) {
|
2018-04-10 16:18:06 -07:00
|
|
|
dbfull()->TEST_CompactRange(level, nullptr, nullptr);
|
2012-01-25 14:56:52 -08:00
|
|
|
}
|
|
|
|
}
|
2019-03-11 13:04:53 -07:00
|
|
|
env_->no_space_.store(false, std::memory_order_release);
|
2012-05-30 09:45:46 -07:00
|
|
|
ASSERT_LT(CountFiles(), num_files + 3);
|
2013-06-13 16:14:06 -07:00
|
|
|
}
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, NonWritableFileSystem) {
|
2012-05-30 09:45:46 -07:00
|
|
|
Options options = CurrentOptions();
|
|
|
|
options.write_buffer_size = 1000;
|
|
|
|
options.env = env_;
|
|
|
|
Reopen(&options);
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(Put("foo", "v1"));
|
2019-03-11 13:04:53 -07:00
|
|
|
// Force errors for new files.
|
|
|
|
env_->non_writable_.store(true, std::memory_order_release);
|
2012-05-30 09:45:46 -07:00
|
|
|
std::string big(100000, 'x');
|
|
|
|
int errors = 0;
|
|
|
|
for (int i = 0; i < 20; i++) {
|
2020-04-29 22:31:41 +00:00
|
|
|
std::fprintf(stderr, "iter %d; errors %d\n", i, errors);
|
2012-05-30 09:45:46 -07:00
|
|
|
if (!Put("foo", big).ok()) {
|
|
|
|
errors++;
|
2013-06-13 16:14:06 -07:00
|
|
|
DelayMilliseconds(100);
|
2012-05-30 09:45:46 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
ASSERT_GT(errors, 0);
|
2019-03-11 13:04:53 -07:00
|
|
|
env_->non_writable_.store(false, std::memory_order_release);
|
2012-01-25 14:56:52 -08:00
|
|
|
}
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, WriteSyncError) {
|
2013-12-10 10:36:31 -08:00
|
|
|
// Check that log sync errors cause the DB to disallow future writes.
|
|
|
|
|
|
|
|
// (a) Cause log sync calls to fail
|
|
|
|
Options options = CurrentOptions();
|
|
|
|
options.env = env_;
|
|
|
|
Reopen(&options);
|
2019-03-11 13:04:53 -07:00
|
|
|
env_->data_sync_error_.store(true, std::memory_order_release);
|
2013-12-10 10:36:31 -08:00
|
|
|
|
|
|
|
// (b) Normal write should succeed
|
|
|
|
WriteOptions w;
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(db_->Put(w, "k1", "v1"));
|
2013-12-10 10:36:31 -08:00
|
|
|
ASSERT_EQ("v1", Get("k1"));
|
|
|
|
|
|
|
|
// (c) Do a sync write; should fail
|
|
|
|
w.sync = true;
|
|
|
|
ASSERT_TRUE(!db_->Put(w, "k2", "v2").ok());
|
|
|
|
ASSERT_EQ("v1", Get("k1"));
|
|
|
|
ASSERT_EQ("NOT_FOUND", Get("k2"));
|
|
|
|
|
|
|
|
// (d) make sync behave normally
|
2019-03-11 13:04:53 -07:00
|
|
|
env_->data_sync_error_.store(false, std::memory_order_release);
|
2013-12-10 10:36:31 -08:00
|
|
|
|
|
|
|
// (e) Do a non-sync write; should fail
|
|
|
|
w.sync = false;
|
|
|
|
ASSERT_TRUE(!db_->Put(w, "k3", "v3").ok());
|
|
|
|
ASSERT_EQ("v1", Get("k1"));
|
|
|
|
ASSERT_EQ("NOT_FOUND", Get("k2"));
|
|
|
|
ASSERT_EQ("NOT_FOUND", Get("k3"));
|
|
|
|
}
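
// Illustrative sketch, not part of the original test suite: requesting a
// durable write via WriteOptions::sync, the knob whose failure path
// WriteSyncError exercises. `db` is assumed to be an already-open DB*.
void SyncWriteSketch(DB* db) {
  WriteOptions write_options;
  write_options.sync = true;  // Sync the log file before acknowledging.
  Status s = db->Put(write_options, "important-key", "important-value");
  if (!s.ok()) {
    // As steps (c)-(e) above show, once a sync write fails the DB refuses
    // later writes as well.
    std::fprintf(stderr, "sync write failed: %s\n", s.ToString().c_str());
  }
}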
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, ManifestWriteError) {
|
2013-01-07 13:17:43 -08:00
|
|
|
// Test for the following problem:
|
|
|
|
// (a) Compaction produces file F
|
|
|
|
// (b) Log record containing F is written to MANIFEST file, but Sync() fails
|
|
|
|
// (c) GC deletes F
|
|
|
|
// (d) After reopening DB, reads fail since deleted F is named in log record
|
|
|
|
|
|
|
|
// We iterate twice. In the second iteration, everything is the
|
|
|
|
// same except the log record never makes it to the MANIFEST file.
|
|
|
|
for (int iter = 0; iter < 2; iter++) {
|
2019-05-02 11:01:00 -07:00
|
|
|
std::atomic<bool>* error_type = (iter == 0) ? &env_->manifest_sync_error_
|
|
|
|
: &env_->manifest_write_error_;
|
2013-01-07 13:17:43 -08:00
|
|
|
|
|
|
|
// Insert foo=>bar mapping
|
|
|
|
Options options = CurrentOptions();
|
|
|
|
options.env = env_;
|
|
|
|
options.create_if_missing = true;
|
|
|
|
options.error_if_exists = false;
|
|
|
|
DestroyAndReopen(&options);
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(Put("foo", "bar"));
|
2013-01-07 13:17:43 -08:00
|
|
|
ASSERT_EQ("bar", Get("foo"));
|
|
|
|
|
|
|
|
// Memtable compaction (will succeed)
|
|
|
|
dbfull()->TEST_CompactMemTable();
|
|
|
|
ASSERT_EQ("bar", Get("foo"));
|
|
|
|
const int last = config::kMaxMemCompactLevel;
|
2019-05-02 11:01:00 -07:00
|
|
|
ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo=>bar is now in last level
|
2013-01-07 13:17:43 -08:00
|
|
|
|
|
|
|
// Merging compaction (will fail)
|
2019-03-11 13:04:53 -07:00
|
|
|
error_type->store(true, std::memory_order_release);
|
2018-04-10 16:18:06 -07:00
|
|
|
dbfull()->TEST_CompactRange(last, nullptr, nullptr); // Should fail
|
2013-01-07 13:17:43 -08:00
|
|
|
ASSERT_EQ("bar", Get("foo"));
|
|
|
|
|
|
|
|
// Recovery: should not lose data
|
2019-03-11 13:04:53 -07:00
|
|
|
error_type->store(false, std::memory_order_release);
|
2013-01-07 13:17:43 -08:00
|
|
|
Reopen(&options);
|
|
|
|
ASSERT_EQ("bar", Get("foo"));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, MissingSSTFile) {
|
|
|
|
ASSERT_LEVELDB_OK(Put("foo", "bar"));
|
2013-02-06 18:03:32 -08:00
|
|
|
ASSERT_EQ("bar", Get("foo"));
|
|
|
|
|
|
|
|
// Dump the memtable to disk.
|
|
|
|
dbfull()->TEST_CompactMemTable();
|
|
|
|
ASSERT_EQ("bar", Get("foo"));
|
|
|
|
|
2013-06-13 16:14:06 -07:00
|
|
|
Close();
|
2013-02-06 18:03:32 -08:00
|
|
|
ASSERT_TRUE(DeleteAnSSTFile());
|
|
|
|
Options options = CurrentOptions();
|
|
|
|
options.paranoid_checks = true;
|
|
|
|
Status s = TryReopen(&options);
|
|
|
|
ASSERT_TRUE(!s.ok());
|
2019-05-02 11:01:00 -07:00
|
|
|
ASSERT_TRUE(s.ToString().find("issing") != std::string::npos) << s.ToString();
|
2013-02-06 18:03:32 -08:00
|
|
|
}
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, StillReadSST) {
|
|
|
|
ASSERT_LEVELDB_OK(Put("foo", "bar"));
|
2013-09-19 13:42:22 -07:00
|
|
|
ASSERT_EQ("bar", Get("foo"));
|
|
|
|
|
|
|
|
// Dump the memtable to disk.
|
|
|
|
dbfull()->TEST_CompactMemTable();
|
|
|
|
ASSERT_EQ("bar", Get("foo"));
|
|
|
|
Close();
|
|
|
|
ASSERT_GT(RenameLDBToSST(), 0);
|
|
|
|
Options options = CurrentOptions();
|
|
|
|
options.paranoid_checks = true;
|
|
|
|
Status s = TryReopen(&options);
|
|
|
|
ASSERT_TRUE(s.ok());
|
|
|
|
ASSERT_EQ("bar", Get("foo"));
|
|
|
|
}
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, FilesDeletedAfterCompaction) {
|
|
|
|
ASSERT_LEVELDB_OK(Put("foo", "v2"));
|
2012-01-25 14:56:52 -08:00
|
|
|
Compact("a", "z");
|
|
|
|
const int num_files = CountFiles();
|
|
|
|
for (int i = 0; i < 10; i++) {
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(Put("foo", "v2"));
|
2012-01-25 14:56:52 -08:00
|
|
|
Compact("a", "z");
|
|
|
|
}
|
|
|
|
ASSERT_EQ(CountFiles(), num_files);
|
|
|
|
}
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, BloomFilter) {
|
2012-04-17 08:36:46 -07:00
|
|
|
env_->count_random_reads_ = true;
|
|
|
|
Options options = CurrentOptions();
|
|
|
|
options.env = env_;
|
|
|
|
options.block_cache = NewLRUCache(0); // Prevent cache hits
|
|
|
|
options.filter_policy = NewBloomFilterPolicy(10);
|
|
|
|
Reopen(&options);
|
|
|
|
|
|
|
|
// Populate multiple layers
|
|
|
|
const int N = 10000;
|
|
|
|
for (int i = 0; i < N; i++) {
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(Put(Key(i), Key(i)));
|
2012-04-17 08:36:46 -07:00
|
|
|
}
|
|
|
|
Compact("a", "z");
|
|
|
|
for (int i = 0; i < N; i += 100) {
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(Put(Key(i), Key(i)));
|
2012-04-17 08:36:46 -07:00
|
|
|
}
|
|
|
|
dbfull()->TEST_CompactMemTable();
|
|
|
|
|
|
|
|
// Prevent auto compactions triggered by seeks
|
2019-03-11 13:04:53 -07:00
|
|
|
env_->delay_data_sync_.store(true, std::memory_order_release);
|
2012-04-17 08:36:46 -07:00
|
|
|
|
|
|
|
// Lookup present keys. Should rarely read from small sstable.
|
|
|
|
env_->random_read_counter_.Reset();
|
|
|
|
for (int i = 0; i < N; i++) {
|
|
|
|
ASSERT_EQ(Key(i), Get(Key(i)));
|
|
|
|
}
|
|
|
|
int reads = env_->random_read_counter_.Read();
|
2020-04-29 22:31:41 +00:00
|
|
|
std::fprintf(stderr, "%d present => %d reads\n", N, reads);
|
2012-04-17 08:36:46 -07:00
|
|
|
ASSERT_GE(reads, N);
|
2019-05-02 11:01:00 -07:00
|
|
|
ASSERT_LE(reads, N + 2 * N / 100);
|
2012-04-17 08:36:46 -07:00
|
|
|
|
|
|
|
  // Lookup missing keys. Should rarely read from either sstable.
|
|
|
|
env_->random_read_counter_.Reset();
|
|
|
|
for (int i = 0; i < N; i++) {
|
|
|
|
ASSERT_EQ("NOT_FOUND", Get(Key(i) + ".missing"));
|
|
|
|
}
|
|
|
|
reads = env_->random_read_counter_.Read();
|
2020-04-29 22:31:41 +00:00
|
|
|
std::fprintf(stderr, "%d missing => %d reads\n", N, reads);
|
2019-05-02 11:01:00 -07:00
|
|
|
ASSERT_LE(reads, 3 * N / 100);
|
2012-04-17 08:36:46 -07:00
|
|
|
|
2019-03-11 13:04:53 -07:00
|
|
|
env_->delay_data_sync_.store(false, std::memory_order_release);
|
2012-04-17 08:36:46 -07:00
|
|
|
Close();
|
|
|
|
delete options.block_cache;
|
|
|
|
delete options.filter_policy;
|
|
|
|
}
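
// Illustrative sketch, not part of the original test suite: wiring a bloom
// filter policy and a block cache into Options, as the BloomFilter test does.
// The caller owns both objects and deletes them only after closing the DB.
// The database name passed in is hypothetical.
void BloomFilterOptionsSketch(const std::string& name) {
  Options options;
  options.create_if_missing = true;
  options.filter_policy = NewBloomFilterPolicy(10);  // ~10 bits per key.
  options.block_cache = NewLRUCache(8 << 20);        // 8 MB block cache.

  DB* db = nullptr;
  Status s = DB::Open(options, name, &db);
  if (s.ok()) {
    delete db;  // Close the DB before releasing the cache and filter policy.
  }
  delete options.block_cache;
  delete options.filter_policy;
}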
|
|
|
|
|
2011-05-28 00:53:58 +00:00
|
|
|
// Multi-threaded test:
|
|
|
|
namespace {
|
|
|
|
|
|
|
|
static const int kNumThreads = 4;
|
|
|
|
static const int kTestSeconds = 10;
|
|
|
|
static const int kNumKeys = 1000;
|
|
|
|
|
|
|
|
struct MTState {
|
|
|
|
DBTest* test;
|
2019-03-11 13:04:53 -07:00
|
|
|
std::atomic<bool> stop;
|
|
|
|
std::atomic<int> counter[kNumThreads];
|
|
|
|
std::atomic<bool> thread_done[kNumThreads];
|
2011-05-28 00:53:58 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
struct MTThread {
|
|
|
|
MTState* state;
|
|
|
|
int id;
|
|
|
|
};
|
|
|
|
|
|
|
|
static void MTThreadBody(void* arg) {
|
|
|
|
MTThread* t = reinterpret_cast<MTThread*>(arg);
|
2012-01-25 14:56:52 -08:00
|
|
|
int id = t->id;
|
2011-05-28 00:53:58 +00:00
|
|
|
DB* db = t->state->test->db_;
|
2019-03-11 13:04:53 -07:00
|
|
|
int counter = 0;
|
2020-04-29 22:31:41 +00:00
|
|
|
std::fprintf(stderr, "... starting thread %d\n", id);
|
2012-01-25 14:56:52 -08:00
|
|
|
Random rnd(1000 + id);
|
2011-05-28 00:53:58 +00:00
|
|
|
std::string value;
|
|
|
|
char valbuf[1500];
|
2019-03-11 13:04:53 -07:00
|
|
|
while (!t->state->stop.load(std::memory_order_acquire)) {
|
|
|
|
t->state->counter[id].store(counter, std::memory_order_release);
|
2011-05-28 00:53:58 +00:00
|
|
|
|
|
|
|
int key = rnd.Uniform(kNumKeys);
|
|
|
|
char keybuf[20];
|
2020-04-29 22:31:41 +00:00
|
|
|
std::snprintf(keybuf, sizeof(keybuf), "%016d", key);
|
2011-05-28 00:53:58 +00:00
|
|
|
|
|
|
|
if (rnd.OneIn(2)) {
|
|
|
|
// Write values of the form <key, my id, counter>.
|
|
|
|
      // We add some padding to force compactions.
|
2020-04-29 22:31:41 +00:00
|
|
|
std::snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d", key, id,
|
|
|
|
static_cast<int>(counter));
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(db->Put(WriteOptions(), Slice(keybuf), Slice(valbuf)));
|
2011-05-28 00:53:58 +00:00
|
|
|
} else {
|
|
|
|
// Read a value and verify that it matches the pattern written above.
|
|
|
|
Status s = db->Get(ReadOptions(), Slice(keybuf), &value);
|
|
|
|
if (s.IsNotFound()) {
|
|
|
|
// Key has not yet been written
|
|
|
|
} else {
|
|
|
|
// Check that the writer thread counter is >= the counter in the value
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(s);
|
2011-05-28 00:53:58 +00:00
|
|
|
int k, w, c;
|
|
|
|
ASSERT_EQ(3, sscanf(value.c_str(), "%d.%d.%d", &k, &w, &c)) << value;
|
|
|
|
ASSERT_EQ(k, key);
|
|
|
|
ASSERT_GE(w, 0);
|
|
|
|
ASSERT_LT(w, kNumThreads);
|
2019-03-11 13:04:53 -07:00
|
|
|
ASSERT_LE(c, t->state->counter[w].load(std::memory_order_acquire));
|
2011-05-28 00:53:58 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
counter++;
|
|
|
|
}
|
2019-03-11 13:04:53 -07:00
|
|
|
t->state->thread_done[id].store(true, std::memory_order_release);
|
2020-04-29 22:31:41 +00:00
|
|
|
std::fprintf(stderr, "... stopping thread %d after %d ops\n", id, counter);
|
2011-05-28 00:53:58 +00:00
|
|
|
}
|
|
|
|
|
2011-10-31 17:22:06 +00:00
|
|
|
} // namespace
|
2011-05-28 00:53:58 +00:00
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, MultiThreaded) {
|
2012-04-17 08:36:46 -07:00
|
|
|
do {
|
|
|
|
// Initialize state
|
|
|
|
MTState mt;
|
|
|
|
mt.test = this;
|
2019-03-11 13:04:53 -07:00
|
|
|
mt.stop.store(false, std::memory_order_release);
|
2012-04-17 08:36:46 -07:00
|
|
|
for (int id = 0; id < kNumThreads; id++) {
|
2019-03-11 13:04:53 -07:00
|
|
|
      mt.counter[id].store(0, std::memory_order_release);
|
|
|
|
mt.thread_done[id].store(false, std::memory_order_release);
|
2012-04-17 08:36:46 -07:00
|
|
|
}
|
2011-05-28 00:53:58 +00:00
|
|
|
|
2012-04-17 08:36:46 -07:00
|
|
|
// Start threads
|
|
|
|
MTThread thread[kNumThreads];
|
|
|
|
for (int id = 0; id < kNumThreads; id++) {
|
|
|
|
thread[id].state = &mt;
|
|
|
|
thread[id].id = id;
|
|
|
|
env_->StartThread(MTThreadBody, &thread[id]);
|
|
|
|
}
|
2011-05-28 00:53:58 +00:00
|
|
|
|
2012-04-17 08:36:46 -07:00
|
|
|
// Let them run for a while
|
2013-06-13 16:14:06 -07:00
|
|
|
DelayMilliseconds(kTestSeconds * 1000);
|
2011-05-28 00:53:58 +00:00
|
|
|
|
2012-04-17 08:36:46 -07:00
|
|
|
// Stop the threads and wait for them to finish
|
2019-03-11 13:04:53 -07:00
|
|
|
mt.stop.store(true, std::memory_order_release);
|
2012-04-17 08:36:46 -07:00
|
|
|
for (int id = 0; id < kNumThreads; id++) {
|
2019-03-11 13:04:53 -07:00
|
|
|
while (!mt.thread_done[id].load(std::memory_order_acquire)) {
|
2013-06-13 16:14:06 -07:00
|
|
|
DelayMilliseconds(100);
|
2012-04-17 08:36:46 -07:00
|
|
|
}
|
2011-05-28 00:53:58 +00:00
|
|
|
}
|
2012-04-17 08:36:46 -07:00
|
|
|
} while (ChangeOptions());
|
2011-05-28 00:53:58 +00:00
|
|
|
}
|
|
|
|
|
2011-05-21 02:17:43 +00:00
|
|
|
namespace {
|
|
|
|
typedef std::map<std::string, std::string> KVMap;
|
|
|
|
}  // namespace
|
|
|
|
|
2019-05-02 11:01:00 -07:00
|
|
|
class ModelDB : public DB {
|
2011-03-18 22:37:00 +00:00
|
|
|
public:
|
2011-05-21 02:17:43 +00:00
|
|
|
class ModelSnapshot : public Snapshot {
|
|
|
|
public:
|
|
|
|
KVMap map_;
|
|
|
|
};
|
|
|
|
|
2019-05-02 11:01:00 -07:00
|
|
|
explicit ModelDB(const Options& options) : options_(options) {}
|
2019-05-04 17:40:21 -07:00
|
|
|
~ModelDB() override = default;
|
2019-05-04 18:05:13 -07:00
|
|
|
Status Put(const WriteOptions& o, const Slice& k, const Slice& v) override {
|
2011-03-18 22:37:00 +00:00
|
|
|
return DB::Put(o, k, v);
|
|
|
|
}
|
2019-05-04 17:40:21 -07:00
|
|
|
Status Delete(const WriteOptions& o, const Slice& key) override {
|
2011-03-18 22:37:00 +00:00
|
|
|
return DB::Delete(o, key);
|
|
|
|
}
|
2019-05-04 17:40:21 -07:00
|
|
|
Status Get(const ReadOptions& options, const Slice& key,
|
|
|
|
std::string* value) override {
|
2019-05-02 11:01:00 -07:00
|
|
|
assert(false); // Not implemented
|
2011-03-18 22:37:00 +00:00
|
|
|
return Status::NotFound(key);
|
|
|
|
}
|
2019-05-04 17:40:21 -07:00
|
|
|
Iterator* NewIterator(const ReadOptions& options) override {
|
2018-04-10 16:18:06 -07:00
|
|
|
if (options.snapshot == nullptr) {
|
2011-03-18 22:37:00 +00:00
|
|
|
KVMap* saved = new KVMap;
|
|
|
|
*saved = map_;
|
|
|
|
return new ModelIter(saved, true);
|
|
|
|
} else {
|
|
|
|
const KVMap* snapshot_state =
|
2011-05-21 02:17:43 +00:00
|
|
|
&(reinterpret_cast<const ModelSnapshot*>(options.snapshot)->map_);
|
2011-03-18 22:37:00 +00:00
|
|
|
return new ModelIter(snapshot_state, false);
|
|
|
|
}
|
|
|
|
}
|
2019-05-04 17:40:21 -07:00
|
|
|
const Snapshot* GetSnapshot() override {
|
2011-05-21 02:17:43 +00:00
|
|
|
ModelSnapshot* snapshot = new ModelSnapshot;
|
|
|
|
snapshot->map_ = map_;
|
|
|
|
return snapshot;
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2019-05-04 17:40:21 -07:00
|
|
|
void ReleaseSnapshot(const Snapshot* snapshot) override {
|
2011-05-21 02:17:43 +00:00
|
|
|
delete reinterpret_cast<const ModelSnapshot*>(snapshot);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
2019-05-04 17:40:21 -07:00
|
|
|
Status Write(const WriteOptions& options, WriteBatch* batch) override {
|
2011-05-21 02:17:43 +00:00
|
|
|
class Handler : public WriteBatch::Handler {
|
|
|
|
public:
|
|
|
|
KVMap* map_;
|
2019-05-09 14:00:07 -07:00
|
|
|
void Put(const Slice& key, const Slice& value) override {
|
2011-05-21 02:17:43 +00:00
|
|
|
(*map_)[key.ToString()] = value.ToString();
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
2019-05-09 14:00:07 -07:00
|
|
|
void Delete(const Slice& key) override { map_->erase(key.ToString()); }
|
2011-05-21 02:17:43 +00:00
|
|
|
};
|
|
|
|
Handler handler;
|
|
|
|
handler.map_ = &map_;
|
|
|
|
return batch->Iterate(&handler);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2019-05-04 17:40:21 -07:00
|
|
|
bool GetProperty(const Slice& property, std::string* value) override {
|
2011-03-18 22:37:00 +00:00
|
|
|
return false;
|
|
|
|
}
|
2019-05-04 17:40:21 -07:00
|
|
|
void GetApproximateSizes(const Range* r, int n, uint64_t* sizes) override {
|
2011-03-18 22:37:00 +00:00
|
|
|
for (int i = 0; i < n; i++) {
|
|
|
|
sizes[i] = 0;
|
|
|
|
}
|
|
|
|
}
|
2019-05-04 17:40:21 -07:00
|
|
|
void CompactRange(const Slice* start, const Slice* end) override {}
|
2011-10-05 16:30:28 -07:00
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
private:
|
2019-05-02 11:01:00 -07:00
|
|
|
class ModelIter : public Iterator {
|
2011-03-18 22:37:00 +00:00
|
|
|
public:
|
|
|
|
ModelIter(const KVMap* map, bool owned)
|
2019-05-02 11:01:00 -07:00
|
|
|
: map_(map), owned_(owned), iter_(map_->end()) {}
|
2019-05-09 14:00:07 -07:00
|
|
|
~ModelIter() override {
|
2011-03-18 22:37:00 +00:00
|
|
|
if (owned_) delete map_;
|
|
|
|
}
|
2019-05-09 14:00:07 -07:00
|
|
|
bool Valid() const override { return iter_ != map_->end(); }
|
|
|
|
void SeekToFirst() override { iter_ = map_->begin(); }
|
|
|
|
void SeekToLast() override {
|
2011-03-18 22:37:00 +00:00
|
|
|
if (map_->empty()) {
|
|
|
|
iter_ = map_->end();
|
|
|
|
} else {
|
|
|
|
iter_ = map_->find(map_->rbegin()->first);
|
|
|
|
}
|
|
|
|
}
|
2019-05-09 14:00:07 -07:00
|
|
|
void Seek(const Slice& k) override {
|
2011-03-18 22:37:00 +00:00
|
|
|
iter_ = map_->lower_bound(k.ToString());
|
|
|
|
}
|
2019-05-09 14:00:07 -07:00
|
|
|
void Next() override { ++iter_; }
|
|
|
|
void Prev() override { --iter_; }
|
|
|
|
Slice key() const override { return iter_->first; }
|
|
|
|
Slice value() const override { return iter_->second; }
|
|
|
|
Status status() const override { return Status::OK(); }
|
2019-03-11 13:04:53 -07:00
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
private:
|
|
|
|
const KVMap* const map_;
|
|
|
|
const bool owned_; // Do we own map_
|
|
|
|
KVMap::const_iterator iter_;
|
|
|
|
};
|
|
|
|
const Options options_;
|
|
|
|
KVMap map_;
|
|
|
|
};
|
|
|
|
|
2019-05-02 11:01:00 -07:00
|
|
|
static bool CompareIterators(int step, DB* model, DB* db,
|
2011-03-18 22:37:00 +00:00
|
|
|
const Snapshot* model_snap,
|
|
|
|
const Snapshot* db_snap) {
|
|
|
|
ReadOptions options;
|
|
|
|
options.snapshot = model_snap;
|
|
|
|
Iterator* miter = model->NewIterator(options);
|
|
|
|
options.snapshot = db_snap;
|
|
|
|
Iterator* dbiter = db->NewIterator(options);
|
|
|
|
bool ok = true;
|
|
|
|
int count = 0;
|
2021-01-11 15:32:34 +00:00
|
|
|
std::vector<std::string> seek_keys;
|
|
|
|
// Compare equality of all elements using Next(). Save some of the keys for
|
|
|
|
// comparing Seek equality.
|
2011-03-18 22:37:00 +00:00
|
|
|
for (miter->SeekToFirst(), dbiter->SeekToFirst();
|
2019-05-02 11:01:00 -07:00
|
|
|
ok && miter->Valid() && dbiter->Valid(); miter->Next(), dbiter->Next()) {
|
2011-03-18 22:37:00 +00:00
|
|
|
count++;
|
|
|
|
if (miter->key().compare(dbiter->key()) != 0) {
|
2020-04-29 22:31:41 +00:00
|
|
|
std::fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n", step,
|
|
|
|
EscapeString(miter->key()).c_str(),
|
|
|
|
EscapeString(dbiter->key()).c_str());
|
2011-03-18 22:37:00 +00:00
|
|
|
ok = false;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (miter->value().compare(dbiter->value()) != 0) {
|
2020-04-29 22:31:41 +00:00
|
|
|
std::fprintf(stderr,
|
|
|
|
"step %d: Value mismatch for key '%s': '%s' vs. '%s'\n",
|
|
|
|
step, EscapeString(miter->key()).c_str(),
|
|
|
|
EscapeString(miter->value()).c_str(),
|
|
|
|
                   EscapeString(dbiter->value()).c_str());
|
2011-03-18 22:37:00 +00:00
|
|
|
ok = false;
|
2021-01-11 15:32:34 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (count % 10 == 0) {
|
|
|
|
seek_keys.push_back(miter->key().ToString());
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ok) {
|
|
|
|
if (miter->Valid() != dbiter->Valid()) {
|
2020-04-29 22:31:41 +00:00
|
|
|
std::fprintf(stderr, "step %d: Mismatch at end of iterators: %d vs. %d\n",
|
|
|
|
step, miter->Valid(), dbiter->Valid());
|
2011-03-18 22:37:00 +00:00
|
|
|
ok = false;
|
|
|
|
}
|
|
|
|
}
|
2021-01-11 15:32:34 +00:00
|
|
|
|
|
|
|
if (ok) {
|
|
|
|
// Validate iterator equality when performing seeks.
|
|
|
|
for (auto kiter = seek_keys.begin(); ok && kiter != seek_keys.end();
|
|
|
|
++kiter) {
|
|
|
|
miter->Seek(*kiter);
|
|
|
|
dbiter->Seek(*kiter);
|
|
|
|
if (!miter->Valid() || !dbiter->Valid()) {
|
|
|
|
std::fprintf(stderr, "step %d: Seek iterators invalid: %d vs. %d\n",
|
|
|
|
step, miter->Valid(), dbiter->Valid());
|
|
|
|
ok = false;
|
|
|
|
}
|
|
|
|
if (miter->key().compare(dbiter->key()) != 0) {
|
|
|
|
std::fprintf(stderr, "step %d: Seek key mismatch: '%s' vs. '%s'\n",
|
|
|
|
step, EscapeString(miter->key()).c_str(),
|
|
|
|
EscapeString(dbiter->key()).c_str());
|
|
|
|
ok = false;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (miter->value().compare(dbiter->value()) != 0) {
|
|
|
|
std::fprintf(
|
|
|
|
stderr,
|
|
|
|
"step %d: Seek value mismatch for key '%s': '%s' vs. '%s'\n", step,
|
|
|
|
EscapeString(miter->key()).c_str(),
|
|
|
|
EscapeString(miter->value()).c_str(),
|
|
|
|
            EscapeString(dbiter->value()).c_str());
|
|
|
|
ok = false;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-29 22:31:41 +00:00
|
|
|
std::fprintf(stderr, "%d entries compared: ok=%d\n", count, ok);
|
2011-03-18 22:37:00 +00:00
|
|
|
delete miter;
|
|
|
|
delete dbiter;
|
|
|
|
return ok;
|
|
|
|
}
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
TEST_F(DBTest, Randomized) {
|
2011-03-18 22:37:00 +00:00
|
|
|
Random rnd(test::RandomSeed());
|
2012-04-17 08:36:46 -07:00
|
|
|
do {
|
|
|
|
ModelDB model(CurrentOptions());
|
|
|
|
const int N = 10000;
|
2018-04-10 16:18:06 -07:00
|
|
|
const Snapshot* model_snap = nullptr;
|
|
|
|
const Snapshot* db_snap = nullptr;
|
2012-04-17 08:36:46 -07:00
|
|
|
std::string k, v;
|
|
|
|
for (int step = 0; step < N; step++) {
|
|
|
|
if (step % 100 == 0) {
|
2020-04-29 22:31:41 +00:00
|
|
|
std::fprintf(stderr, "Step %d of %d\n", step, N);
|
2012-04-17 08:36:46 -07:00
|
|
|
}
|
|
|
|
// TODO(sanjay): Test Get() works
|
|
|
|
int p = rnd.Uniform(100);
|
2019-05-02 11:01:00 -07:00
|
|
|
if (p < 45) { // Put
|
2012-04-17 08:36:46 -07:00
|
|
|
k = RandomKey(&rnd);
|
2019-05-02 11:01:00 -07:00
|
|
|
v = RandomString(
|
|
|
|
&rnd, rnd.OneIn(20) ? 100 + rnd.Uniform(100) : rnd.Uniform(8));
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(model.Put(WriteOptions(), k, v));
|
|
|
|
ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), k, v));
|
2012-04-17 08:36:46 -07:00
|
|
|
|
2019-05-02 11:01:00 -07:00
|
|
|
} else if (p < 90) { // Delete
|
2012-04-17 08:36:46 -07:00
|
|
|
k = RandomKey(&rnd);
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(model.Delete(WriteOptions(), k));
|
|
|
|
ASSERT_LEVELDB_OK(db_->Delete(WriteOptions(), k));
|
2012-04-17 08:36:46 -07:00
|
|
|
|
2019-05-02 11:01:00 -07:00
|
|
|
} else { // Multi-element batch
|
2012-04-17 08:36:46 -07:00
|
|
|
WriteBatch b;
|
|
|
|
const int num = rnd.Uniform(8);
|
|
|
|
for (int i = 0; i < num; i++) {
|
|
|
|
if (i == 0 || !rnd.OneIn(10)) {
|
|
|
|
k = RandomKey(&rnd);
|
|
|
|
} else {
|
|
|
|
// Periodically re-use the same key from the previous iter, so
|
|
|
|
// we have multiple entries in the write batch for the same key
|
|
|
|
}
|
|
|
|
if (rnd.OneIn(2)) {
|
|
|
|
v = RandomString(&rnd, rnd.Uniform(10));
|
|
|
|
b.Put(k, v);
|
|
|
|
} else {
|
|
|
|
b.Delete(k);
|
|
|
|
}
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(model.Write(WriteOptions(), &b));
|
|
|
|
ASSERT_LEVELDB_OK(db_->Write(WriteOptions(), &b));
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2012-04-17 08:36:46 -07:00
|
|
|
if ((step % 100) == 0) {
|
2018-04-10 16:18:06 -07:00
|
|
|
ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));
|
2012-04-17 08:36:46 -07:00
|
|
|
ASSERT_TRUE(CompareIterators(step, &model, db_, model_snap, db_snap));
|
|
|
|
// Save a snapshot from each DB this time that we'll use next
|
|
|
|
// time we compare things, to make sure the current state is
|
|
|
|
// preserved with the snapshot
|
2018-04-10 16:18:06 -07:00
|
|
|
if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
|
|
|
|
if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2012-04-17 08:36:46 -07:00
|
|
|
Reopen();
|
2018-04-10 16:18:06 -07:00
|
|
|
ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2012-04-17 08:36:46 -07:00
|
|
|
model_snap = model.GetSnapshot();
|
|
|
|
db_snap = db_->GetSnapshot();
|
|
|
|
}
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
2018-04-10 16:18:06 -07:00
|
|
|
if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
|
|
|
|
if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);
|
2012-04-17 08:36:46 -07:00
|
|
|
} while (ChangeOptions());
|
2011-03-18 22:37:00 +00:00
|
|
|
}
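
// Illustrative sketch, not part of the original test suite: the atomic
// multi-update pattern the Randomized test drives through WriteBatch,
// assuming `db` is an already-open DB*; the keys are hypothetical.
void WriteBatchSketch(DB* db) {
  WriteBatch batch;
  batch.Put("k1", "v1");
  batch.Delete("k2");
  batch.Put("k3", "v3");
  Status s = db->Write(WriteOptions(), &batch);  // Applied atomically.
  if (!s.ok()) {
    std::fprintf(stderr, "batch write failed: %s\n", s.ToString().c_str());
  }
}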
|
|
|
|
|
2011-05-21 02:17:43 +00:00
|
|
|
std::string MakeKey(unsigned int num) {
|
|
|
|
char buf[30];
|
2020-04-29 22:31:41 +00:00
|
|
|
std::snprintf(buf, sizeof(buf), "%016u", num);
|
2011-05-21 02:17:43 +00:00
|
|
|
return std::string(buf);
|
|
|
|
}
|
|
|
|
|
2020-10-27 11:09:49 -07:00
|
|
|
static void BM_LogAndApply(benchmark::State& state) {
|
|
|
|
const int num_base_files = state.range(0);
|
|
|
|
|
2019-11-21 13:09:53 -08:00
|
|
|
std::string dbname = testing::TempDir() + "leveldb_test_benchmark";
|
2011-05-21 02:17:43 +00:00
|
|
|
DestroyDB(dbname, Options());
|
|
|
|
|
2018-04-10 16:18:06 -07:00
|
|
|
DB* db = nullptr;
|
2011-05-21 02:17:43 +00:00
|
|
|
Options opts;
|
|
|
|
opts.create_if_missing = true;
|
|
|
|
Status s = DB::Open(opts, dbname, &db);
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(s);
|
2018-04-10 16:18:06 -07:00
|
|
|
ASSERT_TRUE(db != nullptr);
|
2011-05-21 02:17:43 +00:00
|
|
|
|
|
|
|
delete db;
|
2018-04-10 16:18:06 -07:00
|
|
|
db = nullptr;
|
2011-05-21 02:17:43 +00:00
|
|
|
|
|
|
|
Env* env = Env::Default();
|
|
|
|
|
2011-09-01 19:08:02 +00:00
|
|
|
port::Mutex mu;
|
|
|
|
MutexLock l(&mu);
|
|
|
|
|
2011-05-21 02:17:43 +00:00
|
|
|
InternalKeyComparator cmp(BytewiseComparator());
|
|
|
|
Options options;
|
2018-04-10 16:18:06 -07:00
|
|
|
VersionSet vset(dbname, &options, nullptr, &cmp);
|
2014-12-11 08:13:18 -08:00
|
|
|
bool save_manifest;
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(vset.Recover(&save_manifest));
|
2011-05-21 02:17:43 +00:00
|
|
|
VersionEdit vbase;
|
|
|
|
uint64_t fnum = 1;
|
|
|
|
for (int i = 0; i < num_base_files; i++) {
|
2019-05-02 11:01:00 -07:00
|
|
|
InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
|
|
|
|
InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
|
2011-05-21 02:17:43 +00:00
|
|
|
vbase.AddFile(2, fnum++, 1 /* file size */, start, limit);
|
|
|
|
}
|
2019-11-21 13:09:53 -08:00
|
|
|
ASSERT_LEVELDB_OK(vset.LogAndApply(&vbase, &mu));
|
2011-05-21 02:17:43 +00:00
|
|
|
|
|
|
|
uint64_t start_micros = env->NowMicros();
|
|
|
|
|
2020-10-27 11:09:49 -07:00
|
|
|
for (auto st : state) {
|
2011-05-21 02:17:43 +00:00
|
|
|
VersionEdit vedit;
|
2020-01-08 09:14:53 -08:00
|
|
|
vedit.RemoveFile(2, fnum);
|
2019-05-02 11:01:00 -07:00
|
|
|
InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
|
|
|
|
InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
|
2011-05-21 02:17:43 +00:00
|
|
|
vedit.AddFile(2, fnum++, 1 /* file size */, start, limit);
|
2011-09-01 19:08:02 +00:00
|
|
|
vset.LogAndApply(&vedit, &mu);
|
2011-05-21 02:17:43 +00:00
|
|
|
}
|
|
|
|
uint64_t stop_micros = env->NowMicros();
|
|
|
|
unsigned int us = stop_micros - start_micros;
|
|
|
|
char buf[16];
|
2020-04-29 22:31:41 +00:00
|
|
|
std::snprintf(buf, sizeof(buf), "%d", num_base_files);
|
|
|
|
std::fprintf(stderr,
|
2020-11-30 10:43:24 -08:00
|
|
|
"BM_LogAndApply/%-6s %8" PRIu64
|
|
|
|
" iters : %9u us (%7.0f us / iter)\n",
|
2020-10-27 11:09:49 -07:00
|
|
|
buf, state.iterations(), us, ((float)us) / state.iterations());
|
2011-05-21 02:17:43 +00:00
|
|
|
}
|
|
|
|
|
2020-10-27 11:09:49 -07:00
|
|
|
BENCHMARK(BM_LogAndApply)->Arg(1)->Arg(100)->Arg(10000)->Arg(100000);
|
2011-10-31 17:22:06 +00:00
|
|
|
} // namespace leveldb
|
2011-03-18 22:37:00 +00:00
|
|
|
|
|
|
|
int main(int argc, char** argv) {
|
2019-11-21 13:09:53 -08:00
|
|
|
testing::InitGoogleTest(&argc, argv);
|
2020-10-27 12:59:41 -07:00
|
|
|
benchmark::RunSpecifiedBenchmarks();
|
2019-11-21 13:09:53 -08:00
|
|
|
return RUN_ALL_TESTS();
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|