Fix formatting of recent snapshot compaction fix.

Fix variable names, line lengths, namespace use, and a few other
minor issues to conform to Google C++ style guide.

PiperOrigin-RevId: 243187729
Chris Mumford 2019-04-11 19:10:37 -07:00
parent 7711e76766
commit 65e86f75ea
3 changed files with 158 additions and 155 deletions

db/version_set.cc

@@ -1347,78 +1347,81 @@ Compaction* VersionSet::PickCompaction() {
  return c;
}

// Finds the largest key in a vector of files. Returns true if files is not
// empty.
bool FindLargestKey(const InternalKeyComparator& icmp,
                    const std::vector<FileMetaData*>& files,
                    InternalKey* largest_key) {
  if (files.empty()) {
    return false;
  }
  *largest_key = files[0]->largest;
  for (size_t i = 1; i < files.size(); ++i) {
    FileMetaData* f = files[i];
    if (icmp.Compare(f->largest, *largest_key) > 0) {
      *largest_key = f->largest;
    }
  }
  return true;
}

// Finds minimum file b2=(l2, u2) in level_files for which l2 > u1 and
// user_key(l2) = user_key(u1).
FileMetaData* FindSmallestBoundaryFile(
    const InternalKeyComparator& icmp,
    const std::vector<FileMetaData*>& level_files,
    const InternalKey& largest_key) {
  const Comparator* user_cmp = icmp.user_comparator();
  FileMetaData* smallest_boundary_file = nullptr;
  for (size_t i = 0; i < level_files.size(); ++i) {
    FileMetaData* f = level_files[i];
    if (icmp.Compare(f->smallest, largest_key) > 0 &&
        user_cmp->Compare(f->smallest.user_key(), largest_key.user_key()) ==
            0) {
      if (smallest_boundary_file == nullptr ||
          icmp.Compare(f->smallest, smallest_boundary_file->smallest) < 0) {
        smallest_boundary_file = f;
      }
    }
  }
  return smallest_boundary_file;
}

// Extracts the largest file b1 from |compaction_files| and then searches for a
// b2 in |level_files| for which user_key(u1) = user_key(l2). If it finds such a
// file b2 (known as a boundary file) it adds it to |compaction_files| and then
// searches again using this new upper bound.
//
// If there are two blocks, b1=(l1, u1) and b2=(l2, u2) and
// user_key(u1) = user_key(l2), and if we compact b1 but not b2 then a
// subsequent get operation will yield an incorrect result because it will
// return the record from b2 in level i rather than from b1 because it searches
// level by level for records matching the supplied user key.
//
// parameters:
//   in     level_files:      List of files to search for boundary files.
//   in/out compaction_files: List of files to extend by adding boundary files.
void AddBoundaryInputs(const InternalKeyComparator& icmp,
                       const std::vector<FileMetaData*>& level_files,
                       std::vector<FileMetaData*>* compaction_files) {
  InternalKey largest_key;

  // Quick return if compaction_files is empty.
  if (!FindLargestKey(icmp, *compaction_files, &largest_key)) {
    return;
  }

  bool continue_searching = true;
  while (continue_searching) {
    FileMetaData* smallest_boundary_file =
        FindSmallestBoundaryFile(icmp, level_files, largest_key);

    // If a boundary file was found advance largest_key, otherwise we're done.
    if (smallest_boundary_file != NULL) {
      compaction_files->push_back(smallest_boundary_file);
      largest_key = smallest_boundary_file->largest;
    } else {
      continue_searching = false;
    }
  }
}
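Neither function in this hunk shows where AddBoundaryInputs is invoked. For orientation, the underlying boundary-file fix calls it while the compaction inputs for the chosen level are assembled in VersionSet::SetupOtherInputs, roughly as sketched below. That call site is not part of this diff, so the member names (icmp_, current_, inputs_) and the GetRange call should be read as assumptions drawn from the surrounding LevelDB tree, not as code changed by this commit.

// Sketch only; the call site is not touched by this commit.
void VersionSet::SetupOtherInputs(Compaction* c) {
  const int level = c->level();
  InternalKey smallest, largest;

  // Pull in any boundary files at this level before computing the key range,
  // so that all tables sharing the boundary user key are compacted together.
  AddBoundaryInputs(icmp_, current_->files_[level], &c->inputs_[0]);
  GetRange(c->inputs_[0], &smallest, &largest);

  // ... the rest of SetupOtherInputs is unchanged ...
}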

db/version_set_test.cc

@@ -172,154 +172,160 @@ TEST(FindFileTest, OverlappingFiles) {
  ASSERT_TRUE(Overlaps("600", "700"));
}

void AddBoundaryInputs(const InternalKeyComparator& icmp,
                       const std::vector<FileMetaData*>& level_files,
                       std::vector<FileMetaData*>* compaction_files);

class AddBoundaryInputsTest {
 public:
  std::vector<FileMetaData*> level_files_;
  std::vector<FileMetaData*> compaction_files_;
  std::vector<FileMetaData*> all_files_;
  InternalKeyComparator icmp_;

  AddBoundaryInputsTest() : icmp_(BytewiseComparator()){};

  ~AddBoundaryInputsTest() {
    for (size_t i = 0; i < all_files_.size(); ++i) {
      delete all_files_[i];
    }
    all_files_.clear();
  };

  FileMetaData* CreateFileMetaData(uint64_t number, InternalKey smallest,
                                   InternalKey largest) {
    FileMetaData* f = new FileMetaData();
    f->number = number;
    f->smallest = smallest;
    f->largest = largest;
    all_files_.push_back(f);
    return f;
  }
};

TEST(AddBoundaryInputsTest, TestEmptyFileSets) {
  AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
  ASSERT_TRUE(compaction_files_.empty());
  ASSERT_TRUE(level_files_.empty());
}

TEST(AddBoundaryInputsTest, TestEmptyLevelFiles) {
  FileMetaData* f1 =
      CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
                         InternalKey(InternalKey("100", 1, kTypeValue)));
  compaction_files_.push_back(f1);

  AddBoundaryInputs(icmp_, level_files_, &compaction_files_);

  ASSERT_EQ(1, compaction_files_.size());
  ASSERT_EQ(f1, compaction_files_[0]);
  ASSERT_TRUE(level_files_.empty());
}

TEST(AddBoundaryInputsTest, TestEmptyCompactionFiles) {
  FileMetaData* f1 =
      CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
                         InternalKey(InternalKey("100", 1, kTypeValue)));
  level_files_.push_back(f1);

  AddBoundaryInputs(icmp_, level_files_, &compaction_files_);

  ASSERT_TRUE(compaction_files_.empty());
  ASSERT_EQ(1, level_files_.size());
  ASSERT_EQ(f1, level_files_[0]);
}

TEST(AddBoundaryInputsTest, TestNoBoundaryFiles) {
  FileMetaData* f1 =
      CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
                         InternalKey(InternalKey("100", 1, kTypeValue)));
  FileMetaData* f2 =
      CreateFileMetaData(1, InternalKey("200", 2, kTypeValue),
                         InternalKey(InternalKey("200", 1, kTypeValue)));
  FileMetaData* f3 =
      CreateFileMetaData(1, InternalKey("300", 2, kTypeValue),
                         InternalKey(InternalKey("300", 1, kTypeValue)));

  level_files_.push_back(f3);
  level_files_.push_back(f2);
  level_files_.push_back(f1);
  compaction_files_.push_back(f2);
  compaction_files_.push_back(f3);

  AddBoundaryInputs(icmp_, level_files_, &compaction_files_);

  ASSERT_EQ(2, compaction_files_.size());
}

TEST(AddBoundaryInputsTest, TestOneBoundaryFiles) {
  FileMetaData* f1 =
      CreateFileMetaData(1, InternalKey("100", 3, kTypeValue),
                         InternalKey(InternalKey("100", 2, kTypeValue)));
  FileMetaData* f2 =
      CreateFileMetaData(1, InternalKey("100", 1, kTypeValue),
                         InternalKey(InternalKey("200", 3, kTypeValue)));
  FileMetaData* f3 =
      CreateFileMetaData(1, InternalKey("300", 2, kTypeValue),
                         InternalKey(InternalKey("300", 1, kTypeValue)));

  level_files_.push_back(f3);
  level_files_.push_back(f2);
  level_files_.push_back(f1);
  compaction_files_.push_back(f1);

  AddBoundaryInputs(icmp_, level_files_, &compaction_files_);

  ASSERT_EQ(2, compaction_files_.size());
  ASSERT_EQ(f1, compaction_files_[0]);
  ASSERT_EQ(f2, compaction_files_[1]);
}

TEST(AddBoundaryInputsTest, TestTwoBoundaryFiles) {
  FileMetaData* f1 =
      CreateFileMetaData(1, InternalKey("100", 6, kTypeValue),
                         InternalKey(InternalKey("100", 5, kTypeValue)));
  FileMetaData* f2 =
      CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
                         InternalKey(InternalKey("300", 1, kTypeValue)));
  FileMetaData* f3 =
      CreateFileMetaData(1, InternalKey("100", 4, kTypeValue),
                         InternalKey(InternalKey("100", 3, kTypeValue)));

  level_files_.push_back(f2);
  level_files_.push_back(f3);
  level_files_.push_back(f1);
  compaction_files_.push_back(f1);

  AddBoundaryInputs(icmp_, level_files_, &compaction_files_);

  ASSERT_EQ(3, compaction_files_.size());
  ASSERT_EQ(f1, compaction_files_[0]);
  ASSERT_EQ(f3, compaction_files_[1]);
  ASSERT_EQ(f2, compaction_files_[2]);
}

TEST(AddBoundaryInputsTest, TestDisjoinFilePointers) {
  FileMetaData* f1 =
      CreateFileMetaData(1, InternalKey("100", 6, kTypeValue),
                         InternalKey(InternalKey("100", 5, kTypeValue)));
  FileMetaData* f2 =
      CreateFileMetaData(1, InternalKey("100", 6, kTypeValue),
                         InternalKey(InternalKey("100", 5, kTypeValue)));
  FileMetaData* f3 =
      CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
                         InternalKey(InternalKey("300", 1, kTypeValue)));
  FileMetaData* f4 =
      CreateFileMetaData(1, InternalKey("100", 4, kTypeValue),
                         InternalKey(InternalKey("100", 3, kTypeValue)));

  level_files_.push_back(f2);
  level_files_.push_back(f3);
  level_files_.push_back(f4);

  compaction_files_.push_back(f1);

  AddBoundaryInputs(icmp_, level_files_, &compaction_files_);

  ASSERT_EQ(3, compaction_files_.size());
  ASSERT_EQ(f1, compaction_files_[0]);
  ASSERT_EQ(f4, compaction_files_[1]);
  ASSERT_EQ(f3, compaction_files_[2]);
}

}  // namespace leveldb

int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
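As a worked example of what these tests assert, the sketch below spells out the TestOneBoundaryFiles case. It assumes the declarations used above (FileMetaData, InternalKey, InternalKeyComparator, AddBoundaryInputs) are in scope, and the wrapper function BoundaryFileSketch exists only for illustration. Internal keys with the same user key sort by decreasing sequence number, so ("100", 1) lies past ("100", 2), the upper bound of the file already picked for compaction, and the second file must be pulled in.

// Minimal sketch of the boundary case; not part of the test file itself.
void BoundaryFileSketch() {
  InternalKeyComparator icmp(BytewiseComparator());
  std::vector<FileMetaData*> level_files;
  std::vector<FileMetaData*> compaction_files;

  FileMetaData b1;  // picked for compaction; covers ("100",3) .. ("100",2)
  b1.number = 1;
  b1.smallest = InternalKey("100", 3, kTypeValue);
  b1.largest = InternalKey("100", 2, kTypeValue);

  FileMetaData b2;  // user key "100" continues here; covers ("100",1) .. ("200",3)
  b2.number = 2;
  b2.smallest = InternalKey("100", 1, kTypeValue);
  b2.largest = InternalKey("200", 3, kTypeValue);

  level_files.push_back(&b2);
  compaction_files.push_back(&b1);

  // b2.smallest sorts after b1.largest but shares its user key, so b2 is the
  // smallest boundary file and gets appended to the compaction inputs.
  AddBoundaryInputs(icmp, level_files, &compaction_files);
  // compaction_files now holds {&b1, &b2}, matching TestOneBoundaryFiles.
}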

issues/issue320_test.cc

@@ -1,47 +1,43 @@
// Copyright (c) 2019 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

#include "leveldb/db.h"
#include "leveldb/write_batch.h"
#include "util/testharness.h"

namespace leveldb {

namespace {

// Creates a random number in the range of [0, max).
int GenerateRandomNumber(int max) { return std::rand() % max; }

std::string CreateRandomString(int32_t index) {
  static const size_t len = 1024;
  char bytes[len];
  size_t i = 0;
  while (i < 8) {
    bytes[i] = 'a' + ((index >> (4 * i)) & 0xf);
    ++i;
  }
  while (i < sizeof(bytes)) {
    bytes[i] = 'a' + GenerateRandomNumber(26);
    ++i;
  }
  return std::string(bytes, sizeof(bytes));
}

}  // namespace

class Issue320 {};

TEST(Issue320, Test) {
  std::srand(0);

@@ -49,8 +45,8 @@ TEST(Issue320, Test) {
  bool delete_before_put = false;
  bool keep_snapshots = true;

  std::vector<std::pair<std::string, std::string>*> test_map(10000, nullptr);
  std::vector<Snapshot const*> snapshots(100, nullptr);

  DB* db;
  Options options;

@@ -59,11 +55,11 @@ TEST(Issue320, Test) {
  std::string dbpath = test::TmpDir() + "/leveldb_issue320_test";
  ASSERT_OK(DB::Open(options, dbpath, &db));

  uint32_t target_size = 10000;
  uint32_t num_items = 0;
  uint32_t count = 0;
  std::string key;
  std::string value, old_value;

  WriteOptions writeOptions;
  ReadOptions readOptions;

@@ -72,13 +68,13 @@ TEST(Issue320, Test) {
      std::cout << "count: " << count << std::endl;
    }

    int index = GenerateRandomNumber(test_map.size());
    WriteBatch batch;

    if (test_map[index] == nullptr) {
      num_items++;
      test_map[index] = new std::pair<std::string, std::string>(
          CreateRandomString(index), CreateRandomString(index));
      batch.Put(test_map[index]->first, test_map[index]->second);
    } else {
      ASSERT_OK(db->Get(readOptions, test_map[index]->first, &old_value));

@@ -92,13 +88,13 @@ TEST(Issue320, Test) {
        ASSERT_EQ(old_value, test_map[index]->second);
      }

      if (num_items >= target_size && GenerateRandomNumber(100) > 30) {
        batch.Delete(test_map[index]->first);
        delete test_map[index];
        test_map[index] = nullptr;
        --num_items;
      } else {
        test_map[index]->second = CreateRandomString(index);
        if (delete_before_put) batch.Delete(test_map[index]->first);
        batch.Put(test_map[index]->first, test_map[index]->second);
      }

@@ -106,8 +102,8 @@ TEST(Issue320, Test) {
    ASSERT_OK(db->Write(writeOptions, &batch));

    if (keep_snapshots && GenerateRandomNumber(10) == 0) {
      int i = GenerateRandomNumber(snapshots.size());
      if (snapshots[i] != nullptr) {
        db->ReleaseSnapshot(snapshots[i]);
      }

@@ -134,6 +130,4 @@ TEST(Issue320, Test) {
}  // namespace leveldb

int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
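The parts of the loop elided between the hunks above maintain a pool of live snapshots while the test writes, deletes, and triggers compactions; issue 320 was reported as Get returning wrong values under exactly that mix. The snapshot pattern the test relies on is the ordinary LevelDB API, roughly as below, assuming an open leveldb::DB* db as in the test.

// Sketch of the standard snapshot read pattern; not code from this commit.
const leveldb::Snapshot* snapshot = db->GetSnapshot();

leveldb::ReadOptions read_options;
read_options.snapshot = snapshot;  // reads observe the DB as of GetSnapshot()

std::string value;
leveldb::Status status = db->Get(read_options, "some-key", &value);

// A live snapshot must keep its versions readable even after later writes and
// compactions; release it when finished so old data can be reclaimed.
db->ReleaseSnapshot(snapshot);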