Correct class/structure declaration order.

1. Correct the class/struct declaration order to be in accordance with
   the Google C++ style guide[1] (a short illustrative sketch follows
   the commit metadata below).
2. For non-copyable classes, switch from unimplemented private methods
   to explicitly deleted[2] methods.
3. Minor const and member initialization fixes.

[1] https://google.github.io/styleguide/cppguide.html#Declaration_Order
[2] http://eel.is/c++draft/dcl.fct.def.delete

PiperOrigin-RevId: 246521844
Chris Mumford 2019-05-03 09:31:18 -07:00
parent c784d63b93
commit 9bd23c7676
44 changed files with 414 additions and 405 deletions
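For reference, here is a minimal sketch of the three conventions this change applies. The class name and members below are hypothetical and not taken from the patch: per the style guide, types and constants come first within an access section, then constructors and assignment operators, then methods, and data members last; copy operations of non-copyable classes are explicitly deleted; and members are initialized in the constructor's initializer list rather than in its body.

#include <cstddef>
#include <string>
#include <utility>
#include <vector>

class SampleTable {
 public:
  // Types and constants first.
  enum { kMaxEntries = 128 };

  // Constructors and assignment operators next; members are set in the
  // initializer list instead of the constructor body.
  explicit SampleTable(std::string name) : name_(std::move(name)), size_(0) {}

  // Non-copyable: the copy operations are explicitly deleted rather than
  // declared private and left unimplemented.
  SampleTable(const SampleTable&) = delete;
  SampleTable& operator=(const SampleTable&) = delete;

  ~SampleTable() = default;

  // Methods follow.
  std::size_t size() const { return size_; }

 private:
  // Data members go last.
  const std::string name_;
  std::size_t size_;
  std::vector<int> entries_;
};

With explicitly deleted copy operations, an accidental copy is rejected at compile time with a clear diagnostic; the old private-and-unimplemented idiom surfaced only as a link error when the copy was attempted from inside the class.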


@@ -12,11 +12,6 @@ namespace leveldb {
 class AutoCompactTest {
  public:
-  std::string dbname_;
-  Cache* tiny_cache_;
-  Options options_;
-  DB* db_;
   AutoCompactTest() {
     dbname_ = test::TmpDir() + "/autocompact_test";
     tiny_cache_ = NewLRUCache(100);
@@ -47,6 +42,12 @@ class AutoCompactTest {
   }
   void DoReads(int n);
+ private:
+  std::string dbname_;
+  Cache* tiny_cache_;
+  Options options_;
+  DB* db_;
 };
 static const int kValueSize = 200 * 1024;

db/c.cc

@@ -84,12 +84,6 @@ struct leveldb_filelock_t {
 };
 struct leveldb_comparator_t : public Comparator {
-  void* state_;
-  void (*destructor_)(void*);
-  int (*compare_)(void*, const char* a, size_t alen, const char* b,
-                  size_t blen);
-  const char* (*name_)(void*);
   virtual ~leveldb_comparator_t() { (*destructor_)(state_); }
   virtual int Compare(const Slice& a, const Slice& b) const {
@@ -101,18 +95,15 @@ struct leveldb_comparator_t : public Comparator {
   // No-ops since the C binding does not support key shortening methods.
   virtual void FindShortestSeparator(std::string*, const Slice&) const {}
   virtual void FindShortSuccessor(std::string* key) const {}
+  void* state_;
+  void (*destructor_)(void*);
+  int (*compare_)(void*, const char* a, size_t alen, const char* b,
+                  size_t blen);
+  const char* (*name_)(void*);
 };
 struct leveldb_filterpolicy_t : public FilterPolicy {
-  void* state_;
-  void (*destructor_)(void*);
-  const char* (*name_)(void*);
-  char* (*create_)(void*, const char* const* key_array,
-                   const size_t* key_length_array, int num_keys,
-                   size_t* filter_length);
-  unsigned char (*key_match_)(void*, const char* key, size_t length,
-                              const char* filter, size_t filter_length);
   virtual ~leveldb_filterpolicy_t() { (*destructor_)(state_); }
   virtual const char* Name() const { return (*name_)(state_); }
@@ -134,6 +125,15 @@ struct leveldb_filterpolicy_t : public FilterPolicy {
     return (*key_match_)(state_, key.data(), key.size(), filter.data(),
                          filter.size());
   }
+  void* state_;
+  void (*destructor_)(void*);
+  const char* (*name_)(void*);
+  char* (*create_)(void*, const char* const* key_array,
+                   const size_t* key_length_array, int num_keys,
+                   size_t* filter_length);
+  unsigned char (*key_match_)(void*, const char* key, size_t length,
+                              const char* filter, size_t filter_length);
 };
 struct leveldb_env_t {
@@ -470,7 +470,8 @@ leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(int bits_per_key) {
   // they delegate to a NewBloomFilterPolicy() instead of user
   // supplied C functions.
   struct Wrapper : public leveldb_filterpolicy_t {
-    const FilterPolicy* rep_;
+    static void DoNothing(void*) {}
     ~Wrapper() { delete rep_; }
     const char* Name() const { return rep_->Name(); }
     void CreateFilter(const Slice* keys, int n, std::string* dst) const {
@@ -479,7 +480,8 @@ leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(int bits_per_key) {
     bool KeyMayMatch(const Slice& key, const Slice& filter) const {
       return rep_->KeyMayMatch(key, filter);
     }
-    static void DoNothing(void*) {}
+    const FilterPolicy* rep_;
   };
   Wrapper* wrapper = new Wrapper;
   wrapper->rep_ = NewBloomFilterPolicy(bits_per_key);


@@ -22,20 +22,14 @@ static const int kValueSize = 1000;
 class CorruptionTest {
  public:
-  test::ErrorEnv env_;
-  std::string dbname_;
-  Cache* tiny_cache_;
-  Options options_;
-  DB* db_;
-  CorruptionTest() {
-    tiny_cache_ = NewLRUCache(100);
+  CorruptionTest()
+      : db_(nullptr),
+        dbname_("/memenv/corruption_test"),
+        tiny_cache_(NewLRUCache(100)) {
     options_.env = &env_;
     options_.block_cache = tiny_cache_;
-    dbname_ = "/memenv/corruption_test";
     DestroyDB(dbname_, options_);
-    db_ = nullptr;
     options_.create_if_missing = true;
     Reopen();
     options_.create_if_missing = false;
@@ -185,6 +179,14 @@ class CorruptionTest {
     Random r(k);
     return test::RandomString(&r, kValueSize, storage);
   }
+  test::ErrorEnv env_;
+  Options options_;
+  DB* db_;
+ private:
+  std::string dbname_;
+  Cache* tiny_cache_;
 };
 TEST(CorruptionTest, Recovery) {


@@ -42,38 +42,23 @@ const int kNumNonTableCacheFiles = 10;
 // Information kept for every waiting writer
 struct DBImpl::Writer {
+  explicit Writer(port::Mutex* mu)
+      : batch(nullptr), sync(false), done(false), cv(mu) {}
   Status status;
   WriteBatch* batch;
   bool sync;
   bool done;
   port::CondVar cv;
-  explicit Writer(port::Mutex* mu)
-      : batch(nullptr), sync(false), done(false), cv(mu) {}
 };
 struct DBImpl::CompactionState {
-  Compaction* const compaction;
-  // Sequence numbers < smallest_snapshot are not significant since we
-  // will never have to service a snapshot below smallest_snapshot.
-  // Therefore if we have seen a sequence number S <= smallest_snapshot,
-  // we can drop all entries for the same key with sequence numbers < S.
-  SequenceNumber smallest_snapshot;
   // Files produced by compaction
   struct Output {
     uint64_t number;
     uint64_t file_size;
     InternalKey smallest, largest;
   };
-  std::vector<Output> outputs;
-  // State kept for output being generated
-  WritableFile* outfile;
-  TableBuilder* builder;
-  uint64_t total_bytes;
   Output* current_output() { return &outputs[outputs.size() - 1]; }
@@ -83,6 +68,22 @@ struct DBImpl::CompactionState {
         outfile(nullptr),
         builder(nullptr),
         total_bytes(0) {}
+  Compaction* const compaction;
+  // Sequence numbers < smallest_snapshot are not significant since we
+  // will never have to service a snapshot below smallest_snapshot.
+  // Therefore if we have seen a sequence number S <= smallest_snapshot,
+  // we can drop all entries for the same key with sequence numbers < S.
+  SequenceNumber smallest_snapshot;
+  std::vector<Output> outputs;
+  // State kept for output being generated
+  WritableFile* outfile;
+  TableBuilder* builder;
+  uint64_t total_bytes;
 };
 // Fix user-supplied options to be reasonable


@@ -29,6 +29,10 @@ class VersionSet;
 class DBImpl : public DB {
  public:
   DBImpl(const Options& options, const std::string& dbname);
+  DBImpl(const DBImpl&) = delete;
+  DBImpl& operator=(const DBImpl&) = delete;
   virtual ~DBImpl();
   // Implementations of the DB interface
@@ -71,6 +75,31 @@ class DBImpl : public DB {
   struct CompactionState;
   struct Writer;
+  // Information for a manual compaction
+  struct ManualCompaction {
+    int level;
+    bool done;
+    const InternalKey* begin;  // null means beginning of key range
+    const InternalKey* end;    // null means end of key range
+    InternalKey tmp_storage;   // Used to keep track of compaction progress
+  };
+  // Per level compaction stats. stats_[level] stores the stats for
+  // compactions that produced data for the specified "level".
+  struct CompactionStats {
+    CompactionStats() : micros(0), bytes_read(0), bytes_written(0) {}
+    void Add(const CompactionStats& c) {
+      this->micros += c.micros;
+      this->bytes_read += c.bytes_read;
+      this->bytes_written += c.bytes_written;
+    }
+    int64_t micros;
+    int64_t bytes_read;
+    int64_t bytes_written;
+  };
   Iterator* NewInternalIterator(const ReadOptions&,
                                 SequenceNumber* latest_snapshot,
                                 uint32_t* seed);
@@ -121,6 +150,10 @@ class DBImpl : public DB {
   Status InstallCompactionResults(CompactionState* compact)
       EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+  const Comparator* user_comparator() const {
+    return internal_comparator_.user_comparator();
+  }
   // Constant after construction
   Env* const env_;
   const InternalKeyComparator internal_comparator_;
@@ -161,14 +194,6 @@ class DBImpl : public DB {
   // Has a background compaction been scheduled or is running?
   bool background_compaction_scheduled_ GUARDED_BY(mutex_);
-  // Information for a manual compaction
-  struct ManualCompaction {
-    int level;
-    bool done;
-    const InternalKey* begin;  // null means beginning of key range
-    const InternalKey* end;    // null means end of key range
-    InternalKey tmp_storage;   // Used to keep track of compaction progress
-  };
   ManualCompaction* manual_compaction_ GUARDED_BY(mutex_);
   VersionSet* const versions_;
@@ -176,30 +201,7 @@ class DBImpl : public DB {
   // Have we encountered a background error in paranoid mode?
   Status bg_error_ GUARDED_BY(mutex_);
-  // Per level compaction stats. stats_[level] stores the stats for
-  // compactions that produced data for the specified "level".
-  struct CompactionStats {
-    int64_t micros;
-    int64_t bytes_read;
-    int64_t bytes_written;
-    CompactionStats() : micros(0), bytes_read(0), bytes_written(0) {}
-    void Add(const CompactionStats& c) {
-      this->micros += c.micros;
-      this->bytes_read += c.bytes_read;
-      this->bytes_written += c.bytes_written;
-    }
-  };
   CompactionStats stats_[config::kNumLevels] GUARDED_BY(mutex_);
-  // No copying allowed
-  DBImpl(const DBImpl&);
-  void operator=(const DBImpl&);
-  const Comparator* user_comparator() const {
-    return internal_comparator_.user_comparator();
-  }
 };
 // Sanitize db options. The caller should delete result.info_log if


@@ -55,6 +55,10 @@ class DBIter : public Iterator {
         valid_(false),
         rnd_(seed),
         bytes_until_read_sampling_(RandomCompactionPeriod()) {}
+  DBIter(const DBIter&) = delete;
+  DBIter& operator=(const DBIter&) = delete;
   virtual ~DBIter() { delete iter_; }
   virtual bool Valid() const { return valid_; }
   virtual Slice key() const {
@@ -106,19 +110,13 @@ class DBIter : public Iterator {
   const Comparator* const user_comparator_;
   Iterator* const iter_;
   SequenceNumber const sequence_;
   Status status_;
   std::string saved_key_;    // == current key when direction_==kReverse
   std::string saved_value_;  // == current raw value when direction_==kReverse
   Direction direction_;
   bool valid_;
   Random rnd_;
   size_t bytes_until_read_sampling_;
-  // No copying allowed
-  DBIter(const DBIter&);
-  void operator=(const DBIter&);
 };
 inline bool DBIter::ParseKey(ParsedInternalKey* ikey) {


@@ -40,10 +40,6 @@ static std::string RandomKey(Random* rnd) {
 namespace {
 class AtomicCounter {
- private:
-  port::Mutex mu_;
-  int count_ GUARDED_BY(mu_);
  public:
   AtomicCounter() : count_(0) {}
   void Increment() { IncrementBy(1); }
@@ -59,6 +55,10 @@ class AtomicCounter {
     MutexLock l(&mu_);
     count_ = 0;
   }
+ private:
+  port::Mutex mu_;
+  int count_ GUARDED_BY(mu_);
 };
 void DelayMilliseconds(int millis) {
@@ -227,13 +227,6 @@ class SpecialEnv : public EnvWrapper {
 };
 class DBTest {
- private:
-  const FilterPolicy* filter_policy_;
-  // Sequence of option configurations to try
-  enum OptionConfig { kDefault, kReuse, kFilter, kUncompressed, kEnd };
-  int option_config_;
  public:
   std::string dbname_;
   SpecialEnv* env_;
@@ -241,7 +234,7 @@ class DBTest {
   Options last_options_;
-  DBTest() : option_config_(kDefault), env_(new SpecialEnv(Env::Default())) {
+  DBTest() : env_(new SpecialEnv(Env::Default())), option_config_(kDefault) {
     filter_policy_ = NewBloomFilterPolicy(10);
     dbname_ = test::TmpDir() + "/db_test";
     DestroyDB(dbname_, Options());
@@ -533,6 +526,13 @@ class DBTest {
     }
     return files_renamed;
   }
+ private:
+  // Sequence of option configurations to try
+  enum OptionConfig { kDefault, kReuse, kFilter, kUncompressed, kEnd };
+  const FilterPolicy* filter_policy_;
+  int option_config_;
 };
 TEST(DBTest, Empty) {


@@ -181,6 +181,9 @@ class LookupKey {
   // the specified sequence number.
   LookupKey(const Slice& user_key, SequenceNumber sequence);
+  LookupKey(const LookupKey&) = delete;
+  LookupKey& operator=(const LookupKey&) = delete;
   ~LookupKey();
   // Return a key suitable for lookup in a MemTable.
@@ -204,10 +207,6 @@ class LookupKey {
   const char* kstart_;
   const char* end_;
   char space_[200];  // Avoid allocation for short keys
-  // No copying allowed
-  LookupKey(const LookupKey&);
-  void operator=(const LookupKey&);
 };
 inline LookupKey::~LookupKey() {


@@ -38,7 +38,6 @@ bool GuessType(const std::string& fname, FileType* type) {
 // Notified when log reader encounters corruption.
 class CorruptionReporter : public log::Reader::Reporter {
  public:
-  WritableFile* dst_;
   virtual void Corruption(size_t bytes, const Status& status) {
     std::string r = "corruption: ";
     AppendNumberTo(&r, bytes);
@@ -47,6 +46,8 @@ class CorruptionReporter : public log::Reader::Reporter {
     r.push_back('\n');
     dst_->Append(r);
   }
+  WritableFile* dst_;
 };
 // Print contents of a log file. (*func)() is called on every record.
@@ -73,7 +74,6 @@ Status PrintLogContents(Env* env, const std::string& fname,
 // Called on every item found in a WriteBatch.
 class WriteBatchItemPrinter : public WriteBatch::Handler {
  public:
-  WritableFile* dst_;
   virtual void Put(const Slice& key, const Slice& value) {
     std::string r = "  put '";
     AppendEscapedStringTo(&r, key);
@@ -88,6 +88,8 @@ class WriteBatchItemPrinter : public WriteBatch::Handler {
     r += "'\n";
     dst_->Append(r);
   }
+  WritableFile* dst_;
 };
 // Called on every log record (each one of which is a WriteBatch)


@@ -43,6 +43,9 @@ class Reader {
   Reader(SequentialFile* file, Reporter* reporter, bool checksum,
          uint64_t initial_offset);
+  Reader(const Reader&) = delete;
+  Reader& operator=(const Reader&) = delete;
   ~Reader();
   // Read the next record into *record.  Returns true if read
@@ -58,26 +61,6 @@ class Reader {
   uint64_t LastRecordOffset();
  private:
-  SequentialFile* const file_;
-  Reporter* const reporter_;
-  bool const checksum_;
-  char* const backing_store_;
-  Slice buffer_;
-  bool eof_;  // Last Read() indicated EOF by returning < kBlockSize
-  // Offset of the last record returned by ReadRecord.
-  uint64_t last_record_offset_;
-  // Offset of the first location past the end of buffer_.
-  uint64_t end_of_buffer_offset_;
-  // Offset at which to start looking for the first record to return
-  uint64_t const initial_offset_;
-  // True if we are resynchronizing after a seek (initial_offset_ > 0). In
-  // particular, a run of kMiddleType and kLastType records can be silently
-  // skipped in this mode
-  bool resyncing_;
   // Extend record types with the following special values
   enum {
     kEof = kMaxRecordType + 1,
@@ -102,9 +85,25 @@ class Reader {
   void ReportCorruption(uint64_t bytes, const char* reason);
   void ReportDrop(uint64_t bytes, const Status& reason);
-  // No copying allowed
-  Reader(const Reader&);
-  void operator=(const Reader&);
+  SequentialFile* const file_;
+  Reporter* const reporter_;
+  bool const checksum_;
+  char* const backing_store_;
+  Slice buffer_;
+  bool eof_;  // Last Read() indicated EOF by returning < kBlockSize
+  // Offset of the last record returned by ReadRecord.
+  uint64_t last_record_offset_;
+  // Offset of the first location past the end of buffer_.
+  uint64_t end_of_buffer_offset_;
+  // Offset at which to start looking for the first record to return
+  uint64_t const initial_offset_;
+  // True if we are resynchronizing after a seek (initial_offset_ > 0). In
+  // particular, a run of kMiddleType and kLastType records can be silently
+  // skipped in this mode
+  bool resyncing_;
 };
 }  // namespace log


@@ -37,81 +37,6 @@ static std::string RandomSkewedString(int i, Random* rnd) {
 }
 class LogTest {
- private:
-  class StringDest : public WritableFile {
-   public:
-    std::string contents_;
-    virtual Status Close() { return Status::OK(); }
-    virtual Status Flush() { return Status::OK(); }
-    virtual Status Sync() { return Status::OK(); }
-    virtual Status Append(const Slice& slice) {
-      contents_.append(slice.data(), slice.size());
-      return Status::OK();
-    }
-  };
-  class StringSource : public SequentialFile {
-   public:
-    Slice contents_;
-    bool force_error_;
-    bool returned_partial_;
-    StringSource() : force_error_(false), returned_partial_(false) {}
-    virtual Status Read(size_t n, Slice* result, char* scratch) {
-      ASSERT_TRUE(!returned_partial_) << "must not Read() after eof/error";
-      if (force_error_) {
-        force_error_ = false;
-        returned_partial_ = true;
-        return Status::Corruption("read error");
-      }
-      if (contents_.size() < n) {
-        n = contents_.size();
-        returned_partial_ = true;
-      }
-      *result = Slice(contents_.data(), n);
-      contents_.remove_prefix(n);
-      return Status::OK();
-    }
-    virtual Status Skip(uint64_t n) {
-      if (n > contents_.size()) {
-        contents_.clear();
-        return Status::NotFound("in-memory file skipped past end");
-      }
-      contents_.remove_prefix(n);
-      return Status::OK();
-    }
-  };
-  class ReportCollector : public Reader::Reporter {
-   public:
-    size_t dropped_bytes_;
-    std::string message_;
-    ReportCollector() : dropped_bytes_(0) {}
-    virtual void Corruption(size_t bytes, const Status& status) {
-      dropped_bytes_ += bytes;
-      message_.append(status.ToString());
-    }
-  };
-  StringDest dest_;
-  StringSource source_;
-  ReportCollector report_;
-  bool reading_;
-  Writer* writer_;
-  Reader* reader_;
-  // Record metadata for testing initial offset functionality
-  static size_t initial_offset_record_sizes_[];
-  static uint64_t initial_offset_last_record_offsets_[];
-  static int num_initial_offset_records_;
  public:
   LogTest()
       : reading_(false),
@@ -232,6 +157,82 @@ class LogTest {
     }
     delete offset_reader;
   }
+ private:
+  class StringDest : public WritableFile {
+   public:
+    virtual Status Close() { return Status::OK(); }
+    virtual Status Flush() { return Status::OK(); }
+    virtual Status Sync() { return Status::OK(); }
+    virtual Status Append(const Slice& slice) {
+      contents_.append(slice.data(), slice.size());
+      return Status::OK();
+    }
+    std::string contents_;
+  };
+  class StringSource : public SequentialFile {
+   public:
+    StringSource() : force_error_(false), returned_partial_(false) {}
+    virtual Status Read(size_t n, Slice* result, char* scratch) {
+      ASSERT_TRUE(!returned_partial_) << "must not Read() after eof/error";
+      if (force_error_) {
+        force_error_ = false;
+        returned_partial_ = true;
+        return Status::Corruption("read error");
+      }
+      if (contents_.size() < n) {
+        n = contents_.size();
+        returned_partial_ = true;
+      }
+      *result = Slice(contents_.data(), n);
+      contents_.remove_prefix(n);
+      return Status::OK();
+    }
+    virtual Status Skip(uint64_t n) {
+      if (n > contents_.size()) {
+        contents_.clear();
+        return Status::NotFound("in-memory file skipped past end");
+      }
+      contents_.remove_prefix(n);
+      return Status::OK();
+    }
+    Slice contents_;
+    bool force_error_;
+    bool returned_partial_;
+  };
+  class ReportCollector : public Reader::Reporter {
+   public:
+    ReportCollector() : dropped_bytes_(0) {}
+    virtual void Corruption(size_t bytes, const Status& status) {
+      dropped_bytes_ += bytes;
+      message_.append(status.ToString());
+    }
+    size_t dropped_bytes_;
+    std::string message_;
+  };
+  // Record metadata for testing initial offset functionality
+  static size_t initial_offset_record_sizes_[];
+  static uint64_t initial_offset_last_record_offsets_[];
+  static int num_initial_offset_records_;
+  StringDest dest_;
+  StringSource source_;
+  ReportCollector report_;
+  bool reading_;
+  Writer* writer_;
+  Reader* reader_;
 };
 size_t LogTest::initial_offset_record_sizes_[] = {


@@ -29,11 +29,16 @@ class Writer {
   // "*dest" must remain live while this Writer is in use.
   Writer(WritableFile* dest, uint64_t dest_length);
+  Writer(const Writer&) = delete;
+  Writer& operator=(const Writer&) = delete;
   ~Writer();
   Status AddRecord(const Slice& slice);
  private:
+  Status EmitPhysicalRecord(RecordType type, const char* ptr, size_t length);
   WritableFile* dest_;
   int block_offset_;  // Current offset in block
@@ -41,12 +46,6 @@ class Writer {
   // pre-computed to reduce the overhead of computing the crc of the
   // record type stored in the header.
   uint32_t type_crc_[kMaxRecordType + 1];
-  Status EmitPhysicalRecord(RecordType type, const char* ptr, size_t length);
-  // No copying allowed
-  Writer(const Writer&);
-  void operator=(const Writer&);
 };
 }  // namespace log


@@ -23,6 +23,9 @@ class MemTable {
   // is zero and the caller must call Ref() at least once.
   explicit MemTable(const InternalKeyComparator& comparator);
+  MemTable(const MemTable&) = delete;
+  MemTable& operator=(const MemTable&) = delete;
   // Increase reference count.
   void Ref() { ++refs_; }
@@ -60,26 +63,23 @@ class MemTable {
   bool Get(const LookupKey& key, std::string* value, Status* s);
  private:
-  ~MemTable();  // Private since only Unref() should be used to delete it
+  friend class MemTableIterator;
+  friend class MemTableBackwardIterator;
   struct KeyComparator {
     const InternalKeyComparator comparator;
     explicit KeyComparator(const InternalKeyComparator& c) : comparator(c) {}
     int operator()(const char* a, const char* b) const;
   };
-  friend class MemTableIterator;
-  friend class MemTableBackwardIterator;
   typedef SkipList<const char*, KeyComparator> Table;
+  ~MemTable();  // Private since only Unref() should be used to delete it
   KeyComparator comparator_;
   int refs_;
   Arena arena_;
   Table table_;
-  // No copying allowed
-  MemTable(const MemTable&);
-  void operator=(const MemTable&);
 };
 }  // namespace leveldb


@@ -95,22 +95,6 @@ class Repairer {
     SequenceNumber max_sequence;
   };
-  std::string const dbname_;
-  Env* const env_;
-  InternalKeyComparator const icmp_;
-  InternalFilterPolicy const ipolicy_;
-  Options const options_;
-  bool owns_info_log_;
-  bool owns_cache_;
-  TableCache* table_cache_;
-  VersionEdit edit_;
-  std::vector<std::string> manifests_;
-  std::vector<uint64_t> table_numbers_;
-  std::vector<uint64_t> logs_;
-  std::vector<TableInfo> tables_;
-  uint64_t next_file_number_;
   Status FindFiles() {
     std::vector<std::string> filenames;
     Status status = env_->GetChildren(dbname_, &filenames);
@@ -439,6 +423,22 @@ class Repairer {
       Log(options_.info_log, "Archiving %s: %s\n", fname.c_str(),
           s.ToString().c_str());
   }
+  const std::string dbname_;
+  Env* const env_;
+  InternalKeyComparator const icmp_;
+  InternalFilterPolicy const ipolicy_;
+  const Options options_;
+  bool owns_info_log_;
+  bool owns_cache_;
+  TableCache* table_cache_;
+  VersionEdit edit_;
+  std::vector<std::string> manifests_;
+  std::vector<uint64_t> table_numbers_;
+  std::vector<uint64_t> logs_;
+  std::vector<TableInfo> tables_;
+  uint64_t next_file_number_;
 };
 }  // namespace


@@ -49,6 +49,9 @@ class SkipList {
   // must remain allocated for the lifetime of the skiplist object.
   explicit SkipList(Comparator cmp, Arena* arena);
+  SkipList(const SkipList&) = delete;
+  SkipList& operator=(const SkipList&) = delete;
   // Insert key into the list.
   // REQUIRES: nothing that compares equal to key is currently in the list.
   void Insert(const Key& key);
@@ -98,23 +101,10 @@ class SkipList {
  private:
   enum { kMaxHeight = 12 };
-  // Immutable after construction
-  Comparator const compare_;
-  Arena* const arena_;  // Arena used for allocations of nodes
-  Node* const head_;
-  // Modified only by Insert(). Read racily by readers, but stale
-  // values are ok.
-  std::atomic<int> max_height_;  // Height of the entire list
   inline int GetMaxHeight() const {
     return max_height_.load(std::memory_order_relaxed);
   }
-  // Read/written only by Insert().
-  Random rnd_;
   Node* NewNode(const Key& key, int height);
   int RandomHeight();
   bool Equal(const Key& a, const Key& b) const { return (compare_(a, b) == 0); }
@@ -137,9 +127,18 @@ class SkipList {
   // Return head_ if list is empty.
   Node* FindLast() const;
-  // No copying allowed
-  SkipList(const SkipList&);
-  void operator=(const SkipList&);
+  // Immutable after construction
+  Comparator const compare_;
+  Arena* const arena_;  // Arena used for allocations of nodes
+  Node* const head_;
+  // Modified only by Insert(). Read racily by readers, but stale
+  // values are ok.
+  std::atomic<int> max_height_;  // Height of the entire list
+  // Read/written only by Insert().
+  Random rnd_;
 };
 // Implementation details follow


@@ -45,12 +45,12 @@ class TableCache {
   void Evict(uint64_t file_number);
  private:
+  Status FindTable(uint64_t file_number, uint64_t file_size, Cache::Handle**);
   Env* const env_;
   const std::string dbname_;
   const Options& options_;
   Cache* cache_;
-  Status FindTable(uint64_t file_number, uint64_t file_size, Cache::Handle**);
 };
 }  // namespace leveldb


@@ -16,14 +16,14 @@ namespace leveldb {
 class VersionSet;
 struct FileMetaData {
+  FileMetaData() : refs(0), allowed_seeks(1 << 30), file_size(0) {}
   int refs;
   int allowed_seeks;  // Seeks allowed until compaction
   uint64_t number;
   uint64_t file_size;    // File size in bytes
   InternalKey smallest;  // Smallest internal key served by table
   InternalKey largest;   // Largest internal key served by table
-  FileMetaData() : refs(0), allowed_seeks(1 << 30), file_size(0) {}
 };
 class VersionEdit {


@@ -59,11 +59,6 @@ bool SomeFileOverlapsRange(const InternalKeyComparator& icmp,
 class Version {
  public:
-  // Append to *iters a sequence of iterators that will
-  // yield the contents of this Version when merged together.
-  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
-  void AddIterators(const ReadOptions&, std::vector<Iterator*>* iters);
   // Lookup the value for key. If found, store it in *val and
   // return OK. Else return a non-OK status. Fills *stats.
   // REQUIRES: lock is not held
@@ -71,6 +66,12 @@ class Version {
     FileMetaData* seek_file;
     int seek_file_level;
   };
+  // Append to *iters a sequence of iterators that will
+  // yield the contents of this Version when merged together.
+  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
+  void AddIterators(const ReadOptions&, std::vector<Iterator*>* iters);
   Status Get(const ReadOptions&, const LookupKey& key, std::string* val,
              GetStats* stats);
@@ -118,6 +119,22 @@ class Version {
   friend class VersionSet;
   class LevelFileNumIterator;
+  explicit Version(VersionSet* vset)
+      : vset_(vset),
+        next_(this),
+        prev_(this),
+        refs_(0),
+        file_to_compact_(nullptr),
+        file_to_compact_level_(-1),
+        compaction_score_(-1),
+        compaction_level_(-1) {}
+  Version(const Version&) = delete;
+  Version& operator=(const Version&) = delete;
+  ~Version();
   Iterator* NewConcatenatingIterator(const ReadOptions&, int level) const;
   // Call func(arg, level, f) for every file that overlaps user_key in
@@ -145,28 +162,15 @@ class Version {
   // are initialized by Finalize().
   double compaction_score_;
   int compaction_level_;
-  explicit Version(VersionSet* vset)
-      : vset_(vset),
-        next_(this),
-        prev_(this),
-        refs_(0),
-        file_to_compact_(nullptr),
-        file_to_compact_level_(-1),
-        compaction_score_(-1),
-        compaction_level_(-1) {}
-  ~Version();
-  // No copying allowed
-  Version(const Version&);
-  void operator=(const Version&);
 };
 class VersionSet {
  public:
   VersionSet(const std::string& dbname, const Options* options,
              TableCache* table_cache, const InternalKeyComparator*);
+  VersionSet(const VersionSet&) = delete;
+  VersionSet& operator=(const VersionSet&) = delete;
   ~VersionSet();
   // Apply *edit to the current version to form a new descriptor that
@@ -309,10 +313,6 @@ class VersionSet {
   // Per-level key at which the next compaction at that level should start.
   // Either an empty string, or a valid InternalKey.
   std::string compact_pointer_[config::kNumLevels];
-  // No copying allowed
-  VersionSet(const VersionSet&);
-  void operator=(const VersionSet&);
 };
 // A Compaction encapsulates information about a compaction.


@@ -11,9 +11,6 @@ namespace leveldb {
 class FindFileTest {
  public:
-  std::vector<FileMetaData*> files_;
-  bool disjoint_sorted_files_;
   FindFileTest() : disjoint_sorted_files_(true) {}
   ~FindFileTest() {
@@ -46,6 +43,11 @@ class FindFileTest {
                            (smallest != nullptr ? &s : nullptr),
                            (largest != nullptr ? &l : nullptr));
   }
+  bool disjoint_sorted_files_;
+ private:
+  std::vector<FileMetaData*> files_;
 };
 TEST(FindFileTest, Empty) {


@@ -27,6 +27,10 @@ class FileState {
   // and the caller must call Ref() at least once.
   FileState() : refs_(0), size_(0) {}
+  // No copying allowed.
+  FileState(const FileState&) = delete;
+  FileState& operator=(const FileState&) = delete;
   // Increase the reference count.
   void Ref() {
     MutexLock lock(&refs_mutex_);
@@ -133,21 +137,17 @@ class FileState {
   }
  private:
+  enum { kBlockSize = 8 * 1024 };
   // Private since only Unref() should be used to delete it.
   ~FileState() { Truncate(); }
-  // No copying allowed.
-  FileState(const FileState&);
-  void operator=(const FileState&);
   port::Mutex refs_mutex_;
   int refs_ GUARDED_BY(refs_mutex_);
   mutable port::Mutex blocks_mutex_;
   std::vector<char*> blocks_ GUARDED_BY(blocks_mutex_);
   uint64_t size_ GUARDED_BY(blocks_mutex_);
-  enum { kBlockSize = 8 * 1024 };
 };
 class SequentialFileImpl : public SequentialFile {
@@ -380,6 +380,7 @@ class InMemoryEnv : public EnvWrapper {
  private:
   // Map from filenames to FileState objects, representing a simple file system.
   typedef std::map<std::string, FileState*> FileSystem;
   port::Mutex mutex_;
   FileSystem file_map_ GUARDED_BY(mutex_);
 };


@@ -16,10 +16,10 @@ namespace leveldb {
 class MemEnvTest {
  public:
-  Env* env_;
   MemEnvTest() : env_(NewMemEnv(Env::Default())) {}
   ~MemEnvTest() { delete env_; }
+  Env* env_;
 };
 TEST(MemEnvTest, Basics) {


@@ -33,11 +33,11 @@ class LEVELDB_EXPORT Snapshot {
 // A range of keys
 struct LEVELDB_EXPORT Range {
-  Slice start;  // Included in the range
-  Slice limit;  // Not included in the range
   Range() {}
   Range(const Slice& s, const Slice& l) : start(s), limit(l) {}
+  Slice start;  // Included in the range
+  Slice limit;  // Not included in the range
 };
 // A DB is a persistent ordered map from keys to values.


@@ -84,12 +84,6 @@ class LEVELDB_EXPORT Iterator {
   // Cleanup functions are stored in a single-linked list.
   // The list's head node is inlined in the iterator.
   struct CleanupNode {
-    // The head node is used if the function pointer is not null.
-    CleanupFunction function;
-    void* arg1;
-    void* arg2;
-    CleanupNode* next;
     // True if the node is not used. Only head nodes might be unused.
     bool IsEmpty() const { return function == nullptr; }
     // Invokes the cleanup function.
@@ -97,6 +91,12 @@ class LEVELDB_EXPORT Iterator {
       assert(function != nullptr);
      (*function)(arg1, arg2);
    }
+    // The head node is used if the function pointer is not null.
+    CleanupFunction function;
+    void* arg1;
+    void* arg2;
+    CleanupNode* next;
   };
   CleanupNode cleanup_head_;
 };


@@ -31,6 +31,9 @@ enum CompressionType {
 // Options to control the behavior of a database (passed to DB::Open)
 struct LEVELDB_EXPORT Options {
+  // Create an Options object with default values for all fields.
+  Options();
   // -------------------
   // Parameters that affect behavior
@@ -137,13 +140,12 @@ struct LEVELDB_EXPORT Options {
   // Many applications will benefit from passing the result of
   // NewBloomFilterPolicy() here.
   const FilterPolicy* filter_policy = nullptr;
-  // Create an Options object with default values for all fields.
-  Options();
 };
 // Options that control read operations
 struct LEVELDB_EXPORT ReadOptions {
+  ReadOptions() = default;
   // If true, all data read from underlying storage will be
   // verified against corresponding checksums.
   bool verify_checksums = false;
@@ -157,12 +159,12 @@ struct LEVELDB_EXPORT ReadOptions {
   // not have been released).  If "snapshot" is null, use an implicit
   // snapshot of the state at the beginning of this read operation.
   const Snapshot* snapshot = nullptr;
-  ReadOptions() = default;
 };
 // Options that control write operations
 struct LEVELDB_EXPORT WriteOptions {
+  WriteOptions() = default;
   // If true, the write will be flushed from the operating system
   // buffer cache (by calling WritableFile::Sync()) before the write
   // is considered complete.  If this flag is true, writes will be
@@ -178,8 +180,6 @@ struct LEVELDB_EXPORT WriteOptions {
   // with sync==true has similar crash semantics to a "write()"
   // system call followed by "fsync()".
   bool sync = false;
-  WriteOptions() = default;
 };
 }  // namespace leveldb


@@ -76,13 +76,6 @@ class LEVELDB_EXPORT Status {
   std::string ToString() const;
  private:
-  // OK status has a null state_.  Otherwise, state_ is a new[] array
-  // of the following form:
-  //    state_[0..3] == length of message
-  //    state_[4]    == code
-  //    state_[5..]  == message
-  const char* state_;
   enum Code {
     kOk = 0,
     kNotFound = 1,
@@ -98,6 +91,13 @@ class LEVELDB_EXPORT Status {
   Status(Code code, const Slice& msg, const Slice& msg2);
   static const char* CopyState(const char* s);
+  // OK status has a null state_.  Otherwise, state_ is a new[] array
+  // of the following form:
+  //    state_[0..3] == length of message
+  //    state_[4]    == code
+  //    state_[5..]  == message
+  const char* state_;
 };
 inline Status::Status(const Status& rhs) {


@@ -41,7 +41,7 @@ class LEVELDB_EXPORT Table {
                      uint64_t file_size, Table** table);
   Table(const Table&) = delete;
-  void operator=(const Table&) = delete;
+  Table& operator=(const Table&) = delete;
   ~Table();
@@ -59,22 +59,24 @@ class LEVELDB_EXPORT Table {
   uint64_t ApproximateOffsetOf(const Slice& key) const;
  private:
+  friend class TableCache;
   struct Rep;
-  Rep* rep_;
-  explicit Table(Rep* rep) { rep_ = rep; }
   static Iterator* BlockReader(void*, const ReadOptions&, const Slice&);
+  explicit Table(Rep* rep) : rep_(rep) {}
   // Calls (*handle_result)(arg, ...) with the entry found after a call
   // to Seek(key).  May not make such a call if filter policy says
   // that key is not present.
-  friend class TableCache;
   Status InternalGet(const ReadOptions&, const Slice& key, void* arg,
                      void (*handle_result)(void* arg, const Slice& k,
                                            const Slice& v));
   void ReadMeta(const Footer& footer);
   void ReadFilter(const Slice& filter_handle_value);
+  Rep* const rep_;
 };
 }  // namespace leveldb


@@ -33,7 +33,7 @@ class LEVELDB_EXPORT TableBuilder {
   TableBuilder(const Options& options, WritableFile* file);
   TableBuilder(const TableBuilder&) = delete;
-  void operator=(const TableBuilder&) = delete;
+  TableBuilder& operator=(const TableBuilder&) = delete;
   // REQUIRES: Either Finish() or Abandon() has been called.
   ~TableBuilder();


@@ -32,6 +32,13 @@ class Slice;
 class LEVELDB_EXPORT WriteBatch {
  public:
+  class LEVELDB_EXPORT Handler {
+   public:
+    virtual ~Handler();
+    virtual void Put(const Slice& key, const Slice& value) = 0;
+    virtual void Delete(const Slice& key) = 0;
+  };
   WriteBatch();
   // Intentionally copyable.
@@ -63,12 +70,6 @@ class LEVELDB_EXPORT WriteBatch {
   void Append(const WriteBatch& source);
   // Support for iterating over the contents of a batch.
-  class LEVELDB_EXPORT Handler {
-   public:
-    virtual ~Handler();
-    virtual void Put(const Slice& key, const Slice& value) = 0;
-    virtual void Delete(const Slice& key) = 0;
-  };
   Status Iterate(Handler* handler) const;
  private:


@@ -20,24 +20,23 @@ class Block {
   // Initialize the block with the specified contents.
   explicit Block(const BlockContents& contents);
+  Block(const Block&) = delete;
+  Block& operator=(const Block&) = delete;
   ~Block();
   size_t size() const { return size_; }
   Iterator* NewIterator(const Comparator* comparator);
  private:
+  class Iter;
   uint32_t NumRestarts() const;
   const char* data_;
   size_t size_;
   uint32_t restart_offset_;  // Offset in data_ of restart array
   bool owned_;               // Block owns data_[]
-  // No copying allowed
-  Block(const Block&);
-  void operator=(const Block&);
-  class Iter;
 };
 }  // namespace leveldb


@@ -19,6 +19,9 @@ class BlockBuilder {
  public:
   explicit BlockBuilder(const Options* options);
+  BlockBuilder(const BlockBuilder&) = delete;
+  BlockBuilder& operator=(const BlockBuilder&) = delete;
   // Reset the contents as if the BlockBuilder was just constructed.
   void Reset();
@@ -45,10 +48,6 @@ class BlockBuilder {
   int counter_;     // Number of entries emitted since restart
   bool finished_;   // Has Finish() been called?
   std::string last_key_;
-  // No copying allowed
-  BlockBuilder(const BlockBuilder&);
-  void operator=(const BlockBuilder&);
 };
 }  // namespace leveldb


@@ -32,6 +32,9 @@ class FilterBlockBuilder {
  public:
   explicit FilterBlockBuilder(const FilterPolicy*);
+  FilterBlockBuilder(const FilterBlockBuilder&) = delete;
+  FilterBlockBuilder& operator=(const FilterBlockBuilder&) = delete;
   void StartBlock(uint64_t block_offset);
   void AddKey(const Slice& key);
   Slice Finish();
@@ -45,10 +48,6 @@ class FilterBlockBuilder {
   std::string result_;           // Filter data computed so far
   std::vector<Slice> tmp_keys_;  // policy_->CreateFilter() argument
   std::vector<uint32_t> filter_offsets_;
-  // No copying allowed
-  FilterBlockBuilder(const FilterBlockBuilder&);
-  void operator=(const FilterBlockBuilder&);
 };
 class FilterBlockReader {


@@ -23,6 +23,9 @@ struct ReadOptions;
 // block or a meta block.
 class BlockHandle {
  public:
+  // Maximum encoding length of a BlockHandle
+  enum { kMaxEncodedLength = 10 + 10 };
   BlockHandle();
   // The offset of the block in the file.
@@ -36,9 +39,6 @@ class BlockHandle {
   void EncodeTo(std::string* dst) const;
   Status DecodeFrom(Slice* input);
-  // Maximum encoding length of a BlockHandle
-  enum { kMaxEncodedLength = 10 + 10 };
  private:
   uint64_t offset_;
   uint64_t size_;
@@ -48,6 +48,11 @@
 // end of every table file.
 class Footer {
  public:
+  // Encoded length of a Footer. Note that the serialization of a
+  // Footer will always occupy exactly this many bytes. It consists
+  // of two block handles and a magic number.
+  enum { kEncodedLength = 2 * BlockHandle::kMaxEncodedLength + 8 };
   Footer() {}
   // The block handle for the metaindex block of the table
@@ -61,11 +66,6 @@ class Footer {
   void EncodeTo(std::string* dst) const;
   Status DecodeFrom(Slice* input);
-  // Encoded length of a Footer. Note that the serialization of a
-  // Footer will always occupy exactly this many bytes. It consists
-  // of two block handles and a magic number.
-  enum { kEncodedLength = 2 * BlockHandle::kMaxEncodedLength + 8 };
  private:
   BlockHandle metaindex_handle_;
   BlockHandle index_handle_;


@@ -129,6 +129,9 @@ class MergingIterator : public Iterator {
   }
  private:
+  // Which direction is the iterator moving?
+  enum Direction { kForward, kReverse };
   void FindSmallest();
   void FindLargest();
@@ -139,9 +142,6 @@ class MergingIterator : public Iterator {
   IteratorWrapper* children_;
   int n_;
   IteratorWrapper* current_;
-  // Which direction is the iterator moving?
-  enum Direction { kForward, kReverse };
   Direction direction_;
 };


@@ -19,6 +19,22 @@
 namespace leveldb {
 struct TableBuilder::Rep {
+  Rep(const Options& opt, WritableFile* f)
+      : options(opt),
+        index_block_options(opt),
+        file(f),
+        offset(0),
+        data_block(&options),
+        index_block(&index_block_options),
+        num_entries(0),
+        closed(false),
+        filter_block(opt.filter_policy == nullptr
+                         ? nullptr
+                         : new FilterBlockBuilder(opt.filter_policy)),
+        pending_index_entry(false) {
+    index_block_options.block_restart_interval = 1;
+  }
   Options options;
   Options index_block_options;
   WritableFile* file;
@@ -44,22 +60,6 @@ struct TableBuilder::Rep {
   BlockHandle pending_handle;  // Handle to add to index block
   std::string compressed_output;
-  Rep(const Options& opt, WritableFile* f)
-      : options(opt),
-        index_block_options(opt),
-        file(f),
-        offset(0),
-        data_block(&options),
-        index_block(&index_block_options),
-        num_entries(0),
-        closed(false),
-        filter_block(opt.filter_policy == nullptr
-                         ? nullptr
-                         : new FilterBlockBuilder(opt.filter_policy)),
-        pending_index_entry(false) {
-    index_block_options.block_restart_interval = 1;
-  }
 };
 TableBuilder::TableBuilder(const Options& options, WritableFile* file)


@@ -8,10 +8,8 @@ namespace leveldb {
 static const int kBlockSize = 4096;
-Arena::Arena() : memory_usage_(0) {
-  alloc_ptr_ = nullptr;  // First allocation will allocate a block
-  alloc_bytes_remaining_ = 0;
-}
+Arena::Arena()
+    : alloc_ptr_(nullptr), alloc_bytes_remaining_(0), memory_usage_(0) {}
 Arena::~Arena() {
   for (size_t i = 0; i < blocks_.size(); i++) {


@@ -16,6 +16,10 @@ namespace leveldb {
 class Arena {
  public:
   Arena();
+  Arena(const Arena&) = delete;
+  Arena& operator=(const Arena&) = delete;
   ~Arena();
   // Return a pointer to a newly allocated memory block of "bytes" bytes.
@@ -46,10 +50,6 @@ class Arena {
   // TODO(costan): This member is accessed via atomics, but the others are
   //               accessed without any locking. Is this OK?
   std::atomic<size_t> memory_usage_;
-  // No copying allowed
-  Arena(const Arena&);
-  void operator=(const Arena&);
 };
 inline char* Arena::Allocate(size_t bytes) {


@@ -15,10 +15,6 @@ static uint32_t BloomHash(const Slice& key) {
 }
 class BloomFilterPolicy : public FilterPolicy {
- private:
-  size_t bits_per_key_;
-  size_t k_;
  public:
   explicit BloomFilterPolicy(int bits_per_key) : bits_per_key_(bits_per_key) {
     // We intentionally round down to reduce probing cost a little bit
@@ -82,6 +78,10 @@ class BloomFilterPolicy : public FilterPolicy {
     }
     return true;
   }
+ private:
+  size_t bits_per_key_;
+  size_t k_;
 };
 }  // namespace


@@ -19,11 +19,6 @@ static Slice Key(int i, char* buffer) {
 }
 class BloomTest {
- private:
-  const FilterPolicy* policy_;
-  std::string filter_;
-  std::vector<std::string> keys_;
  public:
   BloomTest() : policy_(NewBloomFilterPolicy(10)) {}
@@ -78,6 +73,11 @@ class BloomTest {
     }
     return result / 10000.0;
   }
+ private:
+  const FilterPolicy* policy_;
+  std::string filter_;
+  std::vector<std::string> keys_;
 };
 TEST(BloomTest, EmptyFilter) {


@@ -25,8 +25,6 @@ static int DecodeValue(void* v) { return reinterpret_cast<uintptr_t>(v); }
 class CacheTest {
  public:
-  static CacheTest* current_;
   static void Deleter(const Slice& key, void* v) {
     current_->deleted_keys_.push_back(DecodeKey(key));
     current_->deleted_values_.push_back(DecodeValue(v));
@@ -61,6 +59,8 @@ class CacheTest {
   }
   void Erase(int key) { cache_->Erase(EncodeKey(key)); }
+  static CacheTest* current_;
 };
 CacheTest* CacheTest::current_;


@@ -14,13 +14,14 @@ static const int kMMapLimit = 4;
 class EnvPosixTest {
  public:
-  Env* env_;
-  EnvPosixTest() : env_(Env::Default()) {}
   static void SetFileLimits(int read_only_file_limit, int mmap_limit) {
     EnvPosixTestHelper::SetReadOnlyFDLimit(read_only_file_limit);
     EnvPosixTestHelper::SetReadOnlyMMapLimit(mmap_limit);
   }
+  EnvPosixTest() : env_(Env::Default()) {}
+  Env* env_;
 };
 TEST(EnvPosixTest, TestOpenOnRead) {


@@ -19,8 +19,9 @@ static const int kDelayMicros = 100000;
 class EnvTest {
  public:
-  Env* env_;
   EnvTest() : env_(Env::Default()) {}
+  Env* env_;
 };
 namespace {


@@ -626,21 +626,19 @@ class WindowsEnv : public Env {
   }
  private:
+  // Entry per Schedule() call
+  struct BGItem {
+    void* arg;
+    void (*function)(void*);
+  };
   // BGThread() is the body of the background thread
   void BGThread();
   std::mutex mu_;
   std::condition_variable bgsignal_;
   bool started_bgthread_;
-  // Entry per Schedule() call
-  struct BGItem {
-    void* arg;
-    void (*function)(void*);
-  };
-  typedef std::deque<BGItem> BGQueue;
-  BGQueue queue_;
+  std::deque<BGItem> queue_;
   Limiter mmap_limiter_;
 };


@@ -14,12 +14,13 @@ static const int kMMapLimit = 4;
 class EnvWindowsTest {
  public:
-  Env* env_;
-  EnvWindowsTest() : env_(Env::Default()) {}
   static void SetFileLimits(int mmap_limit) {
     EnvWindowsTestHelper::SetReadOnlyMMapLimit(mmap_limit);
   }
+  EnvWindowsTest() : env_(Env::Default()) {}
+  Env* env_;
 };
 TEST(EnvWindowsTest, TestOpenOnRead) {


@@ -21,20 +21,22 @@ class Histogram {
   std::string ToString() const;
  private:
+  enum { kNumBuckets = 154 };
+  double Median() const;
+  double Percentile(double p) const;
+  double Average() const;
+  double StandardDeviation() const;
+  static const double kBucketLimit[kNumBuckets];
   double min_;
   double max_;
   double num_;
   double sum_;
   double sum_squares_;
-  enum { kNumBuckets = 154 };
-  static const double kBucketLimit[kNumBuckets];
   double buckets_[kNumBuckets];
-  double Median() const;
-  double Percentile(double p) const;
-  double Average() const;
-  double StandardDeviation() const;
 };
 }  // namespace leveldb