Format all files in accordance with the Google C++ Style Guide.

Use clang-format to bring the sources into agreement with the
[Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html);
doing this simplifies the process of accepting changes. Also fix a few
warnings flagged by clang-tidy.

PiperOrigin-RevId: 246350737
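Almost every hunk below is a mechanical rewrite. As a rough illustration of the two changes that dominate the diff, short function bodies are collapsed onto one line and binary operators gain surrounding spaces; the snippet is a constructed sketch, not code from this commit, and the class and member names are made up:

```
#include <vector>

// Hypothetical example (not from this commit) of clang-format output under
// BasedOnStyle: Google.  A body this short is packed onto a single line and
// "i*2" becomes "i * 2", matching the pattern of the hunks below.
class Sample {
 public:
  int DoubleAt(int i) const { return values_[i * 2]; }

 private:
  std::vector<int> values_;
};
```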
This commit is contained in:

parent 3724030179
commit 297e66afc1
.clang-format | 18 (new file)
@@ -0,0 +1,18 @@
+# Run manually to reformat a file:
+# clang-format -i --style=file <file>
+# find . -iname '*.cc' -o -iname '*.h' -o -iname '*.h.in' | xargs clang-format -i --style=file
+BasedOnStyle: Google
+DerivePointerAlignment: false
+
+# Public headers are in a different location in the internal Google repository.
+# Order them so that when imported to the authoritative repository they will be
+# in correct alphabetical order.
+IncludeCategories:
+  - Regex: '^(<|"(db|helpers)/)'
+    Priority: 1
+  - Regex: '^"(leveldb)/'
+    Priority: 2
+  - Regex: '^(<|"(issues|port|table|third_party|util)/)'
+    Priority: 3
+  - Regex: '.*'
+    Priority: 4
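The IncludeCategories priorities above are what drive the include-only hunks later in the commit: clang-format sorts each include block by (Priority, include name), so "db/" headers and angle-bracket system headers come first, "leveldb/" public headers second, and "port/", "table/", "util/" and everything else after that. As a worked example, the first hunk of db/autocompact_test.cc below ends up ordered like this (the priority comments are added here purely for illustration):

```
#include "db/db_impl.h"        // Priority 1: "db/..." (also "helpers/..." and <...>)
#include "leveldb/cache.h"     // Priority 2: "leveldb/..." public headers
#include "leveldb/db.h"        // Priority 2
#include "util/testharness.h"  // Priority 3: "port/", "table/", "util/", ...
#include "util/testutil.h"     // Priority 3
```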
CONTRIBUTING.md

@@ -86,6 +86,14 @@ Contribution requirements:
 3. **Tests**: All changes must be accompanied by a new (or changed) test, or
    a sufficient explanation as to why a new (or changed) test is not required.
 
+4. **Consistent Style**: This project conforms to the
+   [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html).
+   To ensure your changes are properly formatted please run:
+
+   ```
+   clang-format -i --style=file <file>
+   ```
+
 ## Submitting a Pull Request
 
 Before any pull request will be accepted the author must first sign a
db/autocompact_test.cc

@@ -2,9 +2,9 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#include "leveldb/db.h"
 #include "db/db_impl.h"
 #include "leveldb/cache.h"
+#include "leveldb/db.h"
 #include "util/testharness.h"
 #include "util/testutil.h"
 
@@ -81,17 +81,16 @@ void AutoCompactTest::DoReads(int n) {
     ASSERT_LT(read, 100) << "Taking too long to compact";
     Iterator* iter = db_->NewIterator(ReadOptions());
     for (iter->SeekToFirst();
-         iter->Valid() && iter->key().ToString() < limit_key;
-         iter->Next()) {
+         iter->Valid() && iter->key().ToString() < limit_key; iter->Next()) {
       // Drop data
     }
     delete iter;
     // Wait a little bit to allow any triggered compactions to complete.
     Env::Default()->SleepForMicroseconds(1000000);
     uint64_t size = Size(Key(0), Key(n));
-    fprintf(stderr, "iter %3d => %7.3f MB [other %7.3f MB]\n",
-            read+1, size/1048576.0, Size(Key(n), Key(kCount))/1048576.0);
-    if (size <= initial_size/10) {
+    fprintf(stderr, "iter %3d => %7.3f MB [other %7.3f MB]\n", read + 1,
+            size / 1048576.0, Size(Key(n), Key(kCount)) / 1048576.0);
+    if (size <= initial_size / 10) {
       break;
     }
   }
@@ -100,19 +99,13 @@ void AutoCompactTest::DoReads(int n) {
   // is pretty much unchanged.
   const int64_t final_other_size = Size(Key(n), Key(kCount));
   ASSERT_LE(final_other_size, initial_other_size + 1048576);
-  ASSERT_GE(final_other_size, initial_other_size/5 - 1048576);
+  ASSERT_GE(final_other_size, initial_other_size / 5 - 1048576);
 }
 
-TEST(AutoCompactTest, ReadAll) {
-  DoReads(kCount);
-}
+TEST(AutoCompactTest, ReadAll) { DoReads(kCount); }
 
-TEST(AutoCompactTest, ReadHalf) {
-  DoReads(kCount/2);
-}
+TEST(AutoCompactTest, ReadHalf) { DoReads(kCount / 2); }
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
db/builder.cc

@@ -4,8 +4,8 @@
 
 #include "db/builder.h"
 
-#include "db/filename.h"
 #include "db/dbformat.h"
+#include "db/filename.h"
 #include "db/table_cache.h"
 #include "db/version_edit.h"
 #include "leveldb/db.h"
@@ -14,12 +14,8 @@
 
 namespace leveldb {
 
-Status BuildTable(const std::string& dbname,
-                  Env* env,
-                  const Options& options,
-                  TableCache* table_cache,
-                  Iterator* iter,
-                  FileMetaData* meta) {
+Status BuildTable(const std::string& dbname, Env* env, const Options& options,
+                  TableCache* table_cache, Iterator* iter, FileMetaData* meta) {
   Status s;
   meta->file_size = 0;
   iter->SeekToFirst();
@@ -60,8 +56,7 @@ Status BuildTable(const std::string& dbname,
 
   if (s.ok()) {
     // Verify that the table is usable
-    Iterator* it = table_cache->NewIterator(ReadOptions(),
-                                            meta->number,
+    Iterator* it = table_cache->NewIterator(ReadOptions(), meta->number,
                                             meta->file_size);
     s = it->status();
     delete it;
db/builder.h

@@ -22,12 +22,8 @@ class VersionEdit;
 // *meta will be filled with metadata about the generated table.
 // If no data is present in *iter, meta->file_size will be set to
 // zero, and no Table file will be produced.
-Status BuildTable(const std::string& dbname,
-                  Env* env,
-                  const Options& options,
-                  TableCache* table_cache,
-                  Iterator* iter,
-                  FileMetaData* meta);
+Status BuildTable(const std::string& dbname, Env* env, const Options& options,
+                  TableCache* table_cache, Iterator* iter, FileMetaData* meta);
 
 }  // namespace leveldb
 
db/c.cc | 323
@@ -5,6 +5,7 @@
 #include "leveldb/c.h"
 
 #include <stdlib.h>
+
 #include "leveldb/cache.h"
 #include "leveldb/comparator.h"
 #include "leveldb/db.h"
@@ -42,67 +43,79 @@ using leveldb::WriteOptions;
 
 extern "C" {
 
-struct leveldb_t { DB* rep; };
-struct leveldb_iterator_t { Iterator* rep; };
-struct leveldb_writebatch_t { WriteBatch rep; };
-struct leveldb_snapshot_t { const Snapshot* rep; };
-struct leveldb_readoptions_t { ReadOptions rep; };
-struct leveldb_writeoptions_t { WriteOptions rep; };
-struct leveldb_options_t { Options rep; };
-struct leveldb_cache_t { Cache* rep; };
-struct leveldb_seqfile_t { SequentialFile* rep; };
-struct leveldb_randomfile_t { RandomAccessFile* rep; };
-struct leveldb_writablefile_t { WritableFile* rep; };
-struct leveldb_logger_t { Logger* rep; };
-struct leveldb_filelock_t { FileLock* rep; };
+struct leveldb_t {
+  DB* rep;
+};
+struct leveldb_iterator_t {
+  Iterator* rep;
+};
+struct leveldb_writebatch_t {
+  WriteBatch rep;
+};
+struct leveldb_snapshot_t {
+  const Snapshot* rep;
+};
+struct leveldb_readoptions_t {
+  ReadOptions rep;
+};
+struct leveldb_writeoptions_t {
+  WriteOptions rep;
+};
+struct leveldb_options_t {
+  Options rep;
+};
+struct leveldb_cache_t {
+  Cache* rep;
+};
+struct leveldb_seqfile_t {
+  SequentialFile* rep;
+};
+struct leveldb_randomfile_t {
+  RandomAccessFile* rep;
+};
+struct leveldb_writablefile_t {
+  WritableFile* rep;
+};
+struct leveldb_logger_t {
+  Logger* rep;
+};
+struct leveldb_filelock_t {
+  FileLock* rep;
+};
 
 struct leveldb_comparator_t : public Comparator {
   void* state_;
   void (*destructor_)(void*);
-  int (*compare_)(
-      void*,
-      const char* a, size_t alen,
-      const char* b, size_t blen);
+  int (*compare_)(void*, const char* a, size_t alen, const char* b,
+                  size_t blen);
   const char* (*name_)(void*);
 
-  virtual ~leveldb_comparator_t() {
-    (*destructor_)(state_);
-  }
+  virtual ~leveldb_comparator_t() { (*destructor_)(state_); }
 
   virtual int Compare(const Slice& a, const Slice& b) const {
     return (*compare_)(state_, a.data(), a.size(), b.data(), b.size());
   }
 
-  virtual const char* Name() const {
-    return (*name_)(state_);
-  }
+  virtual const char* Name() const { return (*name_)(state_); }
 
   // No-ops since the C binding does not support key shortening methods.
-  virtual void FindShortestSeparator(std::string*, const Slice&) const { }
-  virtual void FindShortSuccessor(std::string* key) const { }
+  virtual void FindShortestSeparator(std::string*, const Slice&) const {}
+  virtual void FindShortSuccessor(std::string* key) const {}
 };
 
 struct leveldb_filterpolicy_t : public FilterPolicy {
   void* state_;
   void (*destructor_)(void*);
   const char* (*name_)(void*);
-  char* (*create_)(
-      void*,
-      const char* const* key_array, const size_t* key_length_array,
-      int num_keys,
-      size_t* filter_length);
-  unsigned char (*key_match_)(
-      void*,
-      const char* key, size_t length,
-      const char* filter, size_t filter_length);
+  char* (*create_)(void*, const char* const* key_array,
+                   const size_t* key_length_array, int num_keys,
+                   size_t* filter_length);
+  unsigned char (*key_match_)(void*, const char* key, size_t length,
+                              const char* filter, size_t filter_length);
 
-  virtual ~leveldb_filterpolicy_t() {
-    (*destructor_)(state_);
-  }
+  virtual ~leveldb_filterpolicy_t() { (*destructor_)(state_); }
 
-  virtual const char* Name() const {
-    return (*name_)(state_);
-  }
+  virtual const char* Name() const { return (*name_)(state_); }
 
   virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const {
     std::vector<const char*> key_pointers(n);
@@ -118,8 +131,8 @@ struct leveldb_filterpolicy_t : public FilterPolicy {
   }
 
   virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const {
-    return (*key_match_)(state_, key.data(), key.size(),
-                         filter.data(), filter.size());
+    return (*key_match_)(state_, key.data(), key.size(), filter.data(),
+                         filter.size());
   }
 };
 
@@ -148,10 +161,8 @@ static char* CopyString(const std::string& str) {
   return result;
 }
 
-leveldb_t* leveldb_open(
-    const leveldb_options_t* options,
-    const char* name,
-    char** errptr) {
+leveldb_t* leveldb_open(const leveldb_options_t* options, const char* name,
+                        char** errptr) {
   DB* db;
   if (SaveError(errptr, DB::Open(options->rep, std::string(name), &db))) {
     return nullptr;
@@ -166,39 +177,26 @@ void leveldb_close(leveldb_t* db) {
   delete db;
 }
 
-void leveldb_put(
-    leveldb_t* db,
-    const leveldb_writeoptions_t* options,
-    const char* key, size_t keylen,
-    const char* val, size_t vallen,
-    char** errptr) {
+void leveldb_put(leveldb_t* db, const leveldb_writeoptions_t* options,
+                 const char* key, size_t keylen, const char* val, size_t vallen,
+                 char** errptr) {
   SaveError(errptr,
             db->rep->Put(options->rep, Slice(key, keylen), Slice(val, vallen)));
 }
 
-void leveldb_delete(
-    leveldb_t* db,
-    const leveldb_writeoptions_t* options,
-    const char* key, size_t keylen,
-    char** errptr) {
+void leveldb_delete(leveldb_t* db, const leveldb_writeoptions_t* options,
+                    const char* key, size_t keylen, char** errptr) {
   SaveError(errptr, db->rep->Delete(options->rep, Slice(key, keylen)));
 }
 
-
-void leveldb_write(
-    leveldb_t* db,
-    const leveldb_writeoptions_t* options,
-    leveldb_writebatch_t* batch,
-    char** errptr) {
+void leveldb_write(leveldb_t* db, const leveldb_writeoptions_t* options,
+                   leveldb_writebatch_t* batch, char** errptr) {
   SaveError(errptr, db->rep->Write(options->rep, &batch->rep));
 }
 
-char* leveldb_get(
-    leveldb_t* db,
-    const leveldb_readoptions_t* options,
-    const char* key, size_t keylen,
-    size_t* vallen,
-    char** errptr) {
+char* leveldb_get(leveldb_t* db, const leveldb_readoptions_t* options,
+                  const char* key, size_t keylen, size_t* vallen,
+                  char** errptr) {
   char* result = nullptr;
   std::string tmp;
   Status s = db->rep->Get(options->rep, Slice(key, keylen), &tmp);
@@ -215,30 +213,25 @@ char* leveldb_get(
   }
 }
 
 leveldb_iterator_t* leveldb_create_iterator(
-    leveldb_t* db,
-    const leveldb_readoptions_t* options) {
+    leveldb_t* db, const leveldb_readoptions_t* options) {
   leveldb_iterator_t* result = new leveldb_iterator_t;
   result->rep = db->rep->NewIterator(options->rep);
   return result;
 }
 
-const leveldb_snapshot_t* leveldb_create_snapshot(
-    leveldb_t* db) {
+const leveldb_snapshot_t* leveldb_create_snapshot(leveldb_t* db) {
   leveldb_snapshot_t* result = new leveldb_snapshot_t;
   result->rep = db->rep->GetSnapshot();
   return result;
 }
 
-void leveldb_release_snapshot(
-    leveldb_t* db,
-    const leveldb_snapshot_t* snapshot) {
+void leveldb_release_snapshot(leveldb_t* db,
+                              const leveldb_snapshot_t* snapshot) {
   db->rep->ReleaseSnapshot(snapshot->rep);
   delete snapshot;
 }
 
-char* leveldb_property_value(
-    leveldb_t* db,
-    const char* propname) {
+char* leveldb_property_value(leveldb_t* db, const char* propname) {
   std::string tmp;
   if (db->rep->GetProperty(Slice(propname), &tmp)) {
     // We use strdup() since we expect human readable output.
@@ -248,12 +241,12 @@ char* leveldb_property_value(
   }
 }
 
-void leveldb_approximate_sizes(
-    leveldb_t* db,
-    int num_ranges,
-    const char* const* range_start_key, const size_t* range_start_key_len,
-    const char* const* range_limit_key, const size_t* range_limit_key_len,
-    uint64_t* sizes) {
+void leveldb_approximate_sizes(leveldb_t* db, int num_ranges,
+                               const char* const* range_start_key,
+                               const size_t* range_start_key_len,
+                               const char* const* range_limit_key,
+                               const size_t* range_limit_key_len,
+                               uint64_t* sizes) {
   Range* ranges = new Range[num_ranges];
   for (int i = 0; i < num_ranges; i++) {
     ranges[i].start = Slice(range_start_key[i], range_start_key_len[i]);
@@ -263,10 +256,9 @@ void leveldb_approximate_sizes(
   delete[] ranges;
 }
 
-void leveldb_compact_range(
-    leveldb_t* db,
-    const char* start_key, size_t start_key_len,
-    const char* limit_key, size_t limit_key_len) {
+void leveldb_compact_range(leveldb_t* db, const char* start_key,
+                           size_t start_key_len, const char* limit_key,
+                           size_t limit_key_len) {
   Slice a, b;
   db->rep->CompactRange(
       // Pass null Slice if corresponding "const char*" is null
@@ -274,17 +266,13 @@ void leveldb_compact_range(
       (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr));
 }
 
-void leveldb_destroy_db(
-    const leveldb_options_t* options,
-    const char* name,
-    char** errptr) {
+void leveldb_destroy_db(const leveldb_options_t* options, const char* name,
+                        char** errptr) {
   SaveError(errptr, DestroyDB(name, options->rep));
 }
 
-void leveldb_repair_db(
-    const leveldb_options_t* options,
-    const char* name,
-    char** errptr) {
+void leveldb_repair_db(const leveldb_options_t* options, const char* name,
+                       char** errptr) {
   SaveError(errptr, RepairDB(name, options->rep));
 }
 
@@ -309,13 +297,9 @@ void leveldb_iter_seek(leveldb_iterator_t* iter, const char* k, size_t klen) {
   iter->rep->Seek(Slice(k, klen));
 }
 
-void leveldb_iter_next(leveldb_iterator_t* iter) {
-  iter->rep->Next();
-}
+void leveldb_iter_next(leveldb_iterator_t* iter) { iter->rep->Next(); }
 
-void leveldb_iter_prev(leveldb_iterator_t* iter) {
-  iter->rep->Prev();
-}
+void leveldb_iter_prev(leveldb_iterator_t* iter) { iter->rep->Prev(); }
 
 const char* leveldb_iter_key(const leveldb_iterator_t* iter, size_t* klen) {
   Slice s = iter->rep->key();
@@ -337,32 +321,25 @@ leveldb_writebatch_t* leveldb_writebatch_create() {
   return new leveldb_writebatch_t;
 }
 
-void leveldb_writebatch_destroy(leveldb_writebatch_t* b) {
-  delete b;
-}
+void leveldb_writebatch_destroy(leveldb_writebatch_t* b) { delete b; }
 
-void leveldb_writebatch_clear(leveldb_writebatch_t* b) {
-  b->rep.Clear();
-}
+void leveldb_writebatch_clear(leveldb_writebatch_t* b) { b->rep.Clear(); }
 
-void leveldb_writebatch_put(
-    leveldb_writebatch_t* b,
-    const char* key, size_t klen,
-    const char* val, size_t vlen) {
+void leveldb_writebatch_put(leveldb_writebatch_t* b, const char* key,
+                            size_t klen, const char* val, size_t vlen) {
   b->rep.Put(Slice(key, klen), Slice(val, vlen));
 }
 
-void leveldb_writebatch_delete(
-    leveldb_writebatch_t* b,
-    const char* key, size_t klen) {
+void leveldb_writebatch_delete(leveldb_writebatch_t* b, const char* key,
+                               size_t klen) {
   b->rep.Delete(Slice(key, klen));
 }
 
-void leveldb_writebatch_iterate(
-    const leveldb_writebatch_t* b,
-    void* state,
-    void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen),
-    void (*deleted)(void*, const char* k, size_t klen)) {
+void leveldb_writebatch_iterate(const leveldb_writebatch_t* b, void* state,
+                                void (*put)(void*, const char* k, size_t klen,
+                                            const char* v, size_t vlen),
+                                void (*deleted)(void*, const char* k,
+                                                size_t klen)) {
   class H : public WriteBatch::Handler {
    public:
    void* state_;
@@ -382,43 +359,37 @@ void leveldb_writebatch_iterate(
   b->rep.Iterate(&handler);
 }
 
-void leveldb_writebatch_append(leveldb_writebatch_t *destination,
-                               const leveldb_writebatch_t *source) {
+void leveldb_writebatch_append(leveldb_writebatch_t* destination,
+                               const leveldb_writebatch_t* source) {
   destination->rep.Append(source->rep);
 }
 
-leveldb_options_t* leveldb_options_create() {
-  return new leveldb_options_t;
-}
+leveldb_options_t* leveldb_options_create() { return new leveldb_options_t; }
 
-void leveldb_options_destroy(leveldb_options_t* options) {
-  delete options;
-}
+void leveldb_options_destroy(leveldb_options_t* options) { delete options; }
 
-void leveldb_options_set_comparator(
-    leveldb_options_t* opt,
-    leveldb_comparator_t* cmp) {
+void leveldb_options_set_comparator(leveldb_options_t* opt,
+                                    leveldb_comparator_t* cmp) {
   opt->rep.comparator = cmp;
 }
 
-void leveldb_options_set_filter_policy(
-    leveldb_options_t* opt,
-    leveldb_filterpolicy_t* policy) {
+void leveldb_options_set_filter_policy(leveldb_options_t* opt,
+                                       leveldb_filterpolicy_t* policy) {
   opt->rep.filter_policy = policy;
 }
 
-void leveldb_options_set_create_if_missing(
-    leveldb_options_t* opt, unsigned char v) {
+void leveldb_options_set_create_if_missing(leveldb_options_t* opt,
+                                           unsigned char v) {
   opt->rep.create_if_missing = v;
 }
 
-void leveldb_options_set_error_if_exists(
-    leveldb_options_t* opt, unsigned char v) {
+void leveldb_options_set_error_if_exists(leveldb_options_t* opt,
+                                         unsigned char v) {
   opt->rep.error_if_exists = v;
 }
 
-void leveldb_options_set_paranoid_checks(
-    leveldb_options_t* opt, unsigned char v) {
+void leveldb_options_set_paranoid_checks(leveldb_options_t* opt,
+                                         unsigned char v) {
   opt->rep.paranoid_checks = v;
 }
 
@@ -459,12 +430,9 @@ void leveldb_options_set_compression(leveldb_options_t* opt, int t) {
 }
 
 leveldb_comparator_t* leveldb_comparator_create(
-    void* state,
-    void (*destructor)(void*),
-    int (*compare)(
-        void*,
-        const char* a, size_t alen,
-        const char* b, size_t blen),
+    void* state, void (*destructor)(void*),
+    int (*compare)(void*, const char* a, size_t alen, const char* b,
+                   size_t blen),
     const char* (*name)(void*)) {
   leveldb_comparator_t* result = new leveldb_comparator_t;
   result->state_ = state;
@@ -474,22 +442,15 @@ leveldb_comparator_t* leveldb_comparator_create(
   return result;
 }
 
-void leveldb_comparator_destroy(leveldb_comparator_t* cmp) {
-  delete cmp;
-}
+void leveldb_comparator_destroy(leveldb_comparator_t* cmp) { delete cmp; }
 
 leveldb_filterpolicy_t* leveldb_filterpolicy_create(
-    void* state,
-    void (*destructor)(void*),
-    char* (*create_filter)(
-        void*,
-        const char* const* key_array, const size_t* key_length_array,
-        int num_keys,
-        size_t* filter_length),
-    unsigned char (*key_may_match)(
-        void*,
-        const char* key, size_t length,
-        const char* filter, size_t filter_length),
+    void* state, void (*destructor)(void*),
+    char* (*create_filter)(void*, const char* const* key_array,
+                           const size_t* key_length_array, int num_keys,
+                           size_t* filter_length),
+    unsigned char (*key_may_match)(void*, const char* key, size_t length,
+                                   const char* filter, size_t filter_length),
     const char* (*name)(void*)) {
   leveldb_filterpolicy_t* result = new leveldb_filterpolicy_t;
   result->state_ = state;
@@ -518,7 +479,7 @@ leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(int bits_per_key) {
     bool KeyMayMatch(const Slice& key, const Slice& filter) const {
       return rep_->KeyMayMatch(key, filter);
     }
-    static void DoNothing(void*) { }
+    static void DoNothing(void*) {}
   };
   Wrapper* wrapper = new Wrapper;
   wrapper->rep_ = NewBloomFilterPolicy(bits_per_key);
@@ -531,24 +492,20 @@ leveldb_readoptions_t* leveldb_readoptions_create() {
   return new leveldb_readoptions_t;
 }
 
-void leveldb_readoptions_destroy(leveldb_readoptions_t* opt) {
-  delete opt;
-}
+void leveldb_readoptions_destroy(leveldb_readoptions_t* opt) { delete opt; }
 
-void leveldb_readoptions_set_verify_checksums(
-    leveldb_readoptions_t* opt,
-    unsigned char v) {
+void leveldb_readoptions_set_verify_checksums(leveldb_readoptions_t* opt,
+                                              unsigned char v) {
   opt->rep.verify_checksums = v;
 }
 
-void leveldb_readoptions_set_fill_cache(
-    leveldb_readoptions_t* opt, unsigned char v) {
+void leveldb_readoptions_set_fill_cache(leveldb_readoptions_t* opt,
+                                        unsigned char v) {
   opt->rep.fill_cache = v;
 }
 
-void leveldb_readoptions_set_snapshot(
-    leveldb_readoptions_t* opt,
-    const leveldb_snapshot_t* snap) {
+void leveldb_readoptions_set_snapshot(leveldb_readoptions_t* opt,
+                                      const leveldb_snapshot_t* snap) {
   opt->rep.snapshot = (snap ? snap->rep : nullptr);
 }
 
@@ -556,12 +513,10 @@ leveldb_writeoptions_t* leveldb_writeoptions_create() {
   return new leveldb_writeoptions_t;
 }
 
-void leveldb_writeoptions_destroy(leveldb_writeoptions_t* opt) {
-  delete opt;
-}
+void leveldb_writeoptions_destroy(leveldb_writeoptions_t* opt) { delete opt; }
 
-void leveldb_writeoptions_set_sync(
-    leveldb_writeoptions_t* opt, unsigned char v) {
+void leveldb_writeoptions_set_sync(leveldb_writeoptions_t* opt,
+                                   unsigned char v) {
   opt->rep.sync = v;
 }
 
@@ -600,16 +555,10 @@ char* leveldb_env_get_test_directory(leveldb_env_t* env) {
   return buffer;
 }
 
-void leveldb_free(void* ptr) {
-  free(ptr);
-}
+void leveldb_free(void* ptr) { free(ptr); }
 
-int leveldb_major_version() {
-  return kMajorVersion;
-}
+int leveldb_major_version() { return kMajorVersion; }
 
-int leveldb_minor_version() {
-  return kMinorVersion;
-}
+int leveldb_minor_version() { return kMinorVersion; }
 
 }  // end extern "C"
db/corruption_test.cc

@@ -2,16 +2,16 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#include "leveldb/db.h"
-
 #include <sys/types.h>
-#include "leveldb/cache.h"
-#include "leveldb/table.h"
-#include "leveldb/write_batch.h"
+
 #include "db/db_impl.h"
 #include "db/filename.h"
 #include "db/log_format.h"
 #include "db/version_set.h"
+#include "leveldb/cache.h"
+#include "leveldb/db.h"
+#include "leveldb/table.h"
+#include "leveldb/write_batch.h"
 #include "util/logging.h"
 #include "util/testharness.h"
 #include "util/testutil.h"
@@ -42,8 +42,8 @@ class CorruptionTest {
   }
 
   ~CorruptionTest() {
     delete db_;
     delete tiny_cache_;
   }
 
   Status TryReopen() {
@@ -52,9 +52,7 @@ class CorruptionTest {
     return DB::Open(options_, dbname_, &db_);
   }
 
-  void Reopen() {
-    ASSERT_OK(TryReopen());
-  }
+  void Reopen() { ASSERT_OK(TryReopen()); }
 
   void RepairDB() {
     delete db_;
@@ -66,7 +64,7 @@ class CorruptionTest {
     std::string key_space, value_space;
     WriteBatch batch;
    for (int i = 0; i < n; i++) {
-      //if ((i % 100) == 0) fprintf(stderr, "@ %d of %d\n", i, n);
+      // if ((i % 100) == 0) fprintf(stderr, "@ %d of %d\n", i, n);
      Slice key = Key(i, &key_space);
       batch.Clear();
       batch.Put(key, Value(i, &value_space));
@@ -95,8 +93,7 @@ class CorruptionTest {
         // Ignore boundary keys.
         continue;
       }
-      if (!ConsumeDecimalNumber(&in, &key) ||
-          !in.empty() ||
+      if (!ConsumeDecimalNumber(&in, &key) || !in.empty() ||
           key < next_expected) {
         bad_keys++;
         continue;
@@ -127,8 +124,7 @@ class CorruptionTest {
     std::string fname;
     int picked_number = -1;
     for (size_t i = 0; i < filenames.size(); i++) {
-      if (ParseFileName(filenames[i], &number, &type) &&
-          type == filetype &&
+      if (ParseFileName(filenames[i], &number, &type) && type == filetype &&
           int(number) > picked_number) {  // Pick latest file
         fname = dbname_ + "/" + filenames[i];
         picked_number = number;
@@ -194,7 +190,7 @@ class CorruptionTest {
 TEST(CorruptionTest, Recovery) {
   Build(100);
   Check(100, 100);
   Corrupt(kLogFile, 19, 1);  // WriteBatch tag for first record
   Corrupt(kLogFile, log::kBlockSize + 1000, 1);  // Somewhere in second block
   Reopen();
 
@@ -361,6 +357,4 @@ TEST(CorruptionTest, UnrelatedKeys) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
db/db_bench.cc | 124
@@ -2,9 +2,10 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#include <sys/types.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include <sys/types.h>
+
 #include "leveldb/cache.h"
 #include "leveldb/db.h"
 #include "leveldb/env.h"
@@ -55,8 +56,7 @@ static const char* FLAGS_benchmarks =
     "fill100K,"
     "crc32c,"
     "snappycomp,"
-    "snappyuncomp,"
-    ;
+    "snappyuncomp,";
 
 // Number of key/values to place in database
 static int FLAGS_num = 1000000;
@@ -155,7 +155,7 @@ static Slice TrimSpace(Slice s) {
     start++;
   }
   size_t limit = s.size();
-  while (limit > start && isspace(s[limit-1])) {
+  while (limit > start && isspace(s[limit - 1])) {
     limit--;
   }
   return Slice(s.data() + start, limit - start);
@@ -214,9 +214,7 @@ class Stats {
     seconds_ = (finish_ - start_) * 1e-6;
   }
 
-  void AddMessage(Slice msg) {
-    AppendWithSpace(&message_, msg);
-  }
+  void AddMessage(Slice msg) { AppendWithSpace(&message_, msg); }
 
   void FinishedSingleOp() {
     if (FLAGS_histogram) {
@@ -232,21 +230,26 @@ class Stats {
 
     done_++;
     if (done_ >= next_report_) {
-      if (next_report_ < 1000) next_report_ += 100;
-      else if (next_report_ < 5000) next_report_ += 500;
-      else if (next_report_ < 10000) next_report_ += 1000;
-      else if (next_report_ < 50000) next_report_ += 5000;
-      else if (next_report_ < 100000) next_report_ += 10000;
-      else if (next_report_ < 500000) next_report_ += 50000;
-      else next_report_ += 100000;
+      if (next_report_ < 1000)
+        next_report_ += 100;
+      else if (next_report_ < 5000)
+        next_report_ += 500;
+      else if (next_report_ < 10000)
+        next_report_ += 1000;
+      else if (next_report_ < 50000)
+        next_report_ += 5000;
+      else if (next_report_ < 100000)
+        next_report_ += 10000;
+      else if (next_report_ < 500000)
+        next_report_ += 50000;
+      else
+        next_report_ += 100000;
       fprintf(stderr, "... finished %d ops%30s\r", done_, "");
       fflush(stderr);
     }
   }
 
-  void AddBytes(int64_t n) {
-    bytes_ += n;
-  }
+  void AddBytes(int64_t n) { bytes_ += n; }
 
   void Report(const Slice& name) {
     // Pretend at least one op was done in case we are running a benchmark
@@ -265,11 +268,8 @@ class Stats {
     }
     AppendWithSpace(&extra, message_);
 
-    fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
-            name.ToString().c_str(),
-            seconds_ * 1e6 / done_,
-            (extra.empty() ? "" : " "),
-            extra.c_str());
+    fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(),
+            seconds_ * 1e6 / done_, (extra.empty() ? "" : " "), extra.c_str());
     if (FLAGS_histogram) {
       fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
     }
@@ -294,13 +294,13 @@ struct SharedState {
   bool start GUARDED_BY(mu);
 
   SharedState(int total)
-      : cv(&mu), total(total), num_initialized(0), num_done(0), start(false) { }
+      : cv(&mu), total(total), num_initialized(0), num_done(0), start(false) {}
 };
 
 // Per-thread state for concurrent executions of the same benchmark.
 struct ThreadState {
   int tid;      // 0..n-1 when running in n threads
   Random rand;  // Has different seeds for different threads
   Stats stats;
   SharedState* shared;
 
@@ -330,20 +330,20 @@ class Benchmark {
             static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
     fprintf(stdout, "Entries:    %d\n", num_);
     fprintf(stdout, "RawSize:    %.1f MB (estimated)\n",
-            ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_)
-             / 1048576.0));
+            ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
+             1048576.0));
     fprintf(stdout, "FileSize:   %.1f MB (estimated)\n",
-            (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_)
-             / 1048576.0));
+            (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) /
+             1048576.0));
     PrintWarnings();
     fprintf(stdout, "------------------------------------------------\n");
   }
 
   void PrintWarnings() {
 #if defined(__GNUC__) && !defined(__OPTIMIZE__)
-    fprintf(stdout,
-            "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"
-            );
+    fprintf(
+        stdout,
+        "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
 #endif
 #ifndef NDEBUG
     fprintf(stdout,
@@ -361,8 +361,8 @@ class Benchmark {
   }
 
   void PrintEnvironment() {
-    fprintf(stderr, "LevelDB:    version %d.%d\n",
-            kMajorVersion, kMinorVersion);
+    fprintf(stderr, "LevelDB:    version %d.%d\n", kMajorVersion,
+            kMinorVersion);
 
 #if defined(__linux)
     time_t now = time(nullptr);
@@ -397,16 +397,16 @@ class Benchmark {
 
  public:
   Benchmark()
      : cache_(FLAGS_cache_size >= 0 ? NewLRUCache(FLAGS_cache_size) : nullptr),
        filter_policy_(FLAGS_bloom_bits >= 0
                           ? NewBloomFilterPolicy(FLAGS_bloom_bits)
                           : nullptr),
        db_(nullptr),
        num_(FLAGS_num),
        value_size_(FLAGS_value_size),
        entries_per_batch_(1),
        reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
        heap_counter_(0) {
    std::vector<std::string> files;
    g_env->GetChildren(FLAGS_db, &files);
    for (size_t i = 0; i < files.size(); i++) {
@@ -516,7 +516,7 @@ class Benchmark {
       } else if (name == Slice("sstables")) {
         PrintStats("leveldb.sstables");
       } else {
-        if (name != Slice()) {  // No error message for empty name
+        if (!name.empty()) {  // No error message for empty name
           fprintf(stderr, "unknown benchmark '%s'\n", name.ToString().c_str());
         }
       }
@@ -665,8 +665,8 @@ class Benchmark {
     int64_t bytes = 0;
     char* uncompressed = new char[input.size()];
     while (ok && bytes < 1024 * 1048576) {  // Compress 1G
      ok = port::Snappy_Uncompress(compressed.data(), compressed.size(),
                                   uncompressed);
       bytes += input.size();
       thread->stats.FinishedSingleOp();
     }
@@ -706,13 +706,9 @@ class Benchmark {
     }
   }
 
-  void WriteSeq(ThreadState* thread) {
-    DoWrite(thread, true);
-  }
+  void WriteSeq(ThreadState* thread) { DoWrite(thread, true); }
 
-  void WriteRandom(ThreadState* thread) {
-    DoWrite(thread, false);
-  }
+  void WriteRandom(ThreadState* thread) { DoWrite(thread, false); }
 
   void DoWrite(ThreadState* thread, bool seq) {
     if (num_ != FLAGS_num) {
@@ -728,7 +724,7 @@ class Benchmark {
     for (int i = 0; i < num_; i += entries_per_batch_) {
       batch.Clear();
       for (int j = 0; j < entries_per_batch_; j++) {
-        const int k = seq ? i+j : (thread->rand.Next() % FLAGS_num);
+        const int k = seq ? i + j : (thread->rand.Next() % FLAGS_num);
         char key[100];
         snprintf(key, sizeof(key), "%016d", k);
         batch.Put(key, gen.Generate(value_size_));
@@ -838,7 +834,7 @@ class Benchmark {
     for (int i = 0; i < num_; i += entries_per_batch_) {
       batch.Clear();
       for (int j = 0; j < entries_per_batch_; j++) {
-        const int k = seq ? i+j : (thread->rand.Next() % FLAGS_num);
+        const int k = seq ? i + j : (thread->rand.Next() % FLAGS_num);
         char key[100];
         snprintf(key, sizeof(key), "%016d", k);
         batch.Delete(key);
@@ -852,13 +848,9 @@ class Benchmark {
     }
   }
 
-  void DeleteSeq(ThreadState* thread) {
-    DoDelete(thread, true);
-  }
+  void DeleteSeq(ThreadState* thread) { DoDelete(thread, true); }
 
-  void DeleteRandom(ThreadState* thread) {
-    DoDelete(thread, false);
-  }
+  void DeleteRandom(ThreadState* thread) { DoDelete(thread, false); }
 
   void ReadWhileWriting(ThreadState* thread) {
     if (thread->tid > 0) {
@@ -890,9 +882,7 @@ class Benchmark {
       }
     }
 
-  void Compact(ThreadState* thread) {
-    db_->CompactRange(nullptr, nullptr);
-  }
+  void Compact(ThreadState* thread) { db_->CompactRange(nullptr, nullptr); }
 
   void PrintStats(const char* key) {
     std::string stats;
@@ -982,9 +972,9 @@ int main(int argc, char** argv) {
 
   // Choose a location for the test database if none given with --db=<path>
   if (FLAGS_db == nullptr) {
    leveldb::g_env->GetTestDirectory(&default_db_path);
    default_db_path += "/dbbench";
    FLAGS_db = default_db_path.c_str();
   }
 
   leveldb::Benchmark benchmark;
db/db_impl.cc | 179
@@ -75,7 +75,7 @@ struct DBImpl::CompactionState {
 
   uint64_t total_bytes;
 
-  Output* current_output() { return &outputs[outputs.size()-1]; }
+  Output* current_output() { return &outputs[outputs.size() - 1]; }
 
   explicit CompactionState(Compaction* c)
       : compaction(c),
@@ -98,10 +98,10 @@ Options SanitizeOptions(const std::string& dbname,
   Options result = src;
   result.comparator = icmp;
   result.filter_policy = (src.filter_policy != nullptr) ? ipolicy : nullptr;
   ClipToRange(&result.max_open_files, 64 + kNumNonTableCacheFiles, 50000);
-  ClipToRange(&result.write_buffer_size, 64<<10, 1<<30);
-  ClipToRange(&result.max_file_size, 1<<20, 1<<30);
-  ClipToRange(&result.block_size, 1<<10, 4<<20);
+  ClipToRange(&result.write_buffer_size, 64 << 10, 1 << 30);
+  ClipToRange(&result.max_file_size, 1 << 20, 1 << 30);
+  ClipToRange(&result.block_size, 1 << 10, 4 << 20);
   if (result.info_log == nullptr) {
     // Open a log file in the same directory as the db
     src.env->CreateDir(dbname);  // In case it does not exist
@@ -268,8 +268,7 @@ void DBImpl::DeleteObsoleteFiles() {
       if (type == kTableFile) {
         table_cache_->Evict(number);
       }
-      Log(options_.info_log, "Delete type=%d #%lld\n",
-          static_cast<int>(type),
+      Log(options_.info_log, "Delete type=%d #%lld\n", static_cast<int>(type),
          static_cast<unsigned long long>(number));
       env_->DeleteFile(dbname_ + "/" + filenames[i]);
     }
@@ -277,7 +276,7 @@ void DBImpl::DeleteObsoleteFiles() {
   }
 }
 
-Status DBImpl::Recover(VersionEdit* edit, bool *save_manifest) {
+Status DBImpl::Recover(VersionEdit* edit, bool* save_manifest) {
   mutex_.AssertHeld();
 
   // Ignore error from CreateDir since the creation of the DB is
@@ -302,8 +301,8 @@ Status DBImpl::Recover(VersionEdit* edit, bool *save_manifest) {
     }
   } else {
     if (options_.error_if_exists) {
-      return Status::InvalidArgument(
-          dbname_, "exists (error_if_exists is true)");
+      return Status::InvalidArgument(dbname_,
+                                     "exists (error_if_exists is true)");
     }
   }
 
@@ -378,8 +377,8 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log,
     Status* status;  // null if options_.paranoid_checks==false
     virtual void Corruption(size_t bytes, const Status& s) {
       Log(info_log, "%s%s: dropping %d bytes; %s",
-          (this->status == nullptr ? "(ignoring error) " : ""),
-          fname, static_cast<int>(bytes), s.ToString().c_str());
+          (this->status == nullptr ? "(ignoring error) " : ""), fname,
+          static_cast<int>(bytes), s.ToString().c_str());
       if (this->status != nullptr && this->status->ok()) *this->status = s;
     }
   };
@@ -405,10 +404,9 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log,
   // paranoid_checks==false so that corruptions cause entire commits
   // to be skipped instead of propagating bad information (like overly
   // large sequence numbers).
-  log::Reader reader(file, &reporter, true/*checksum*/,
-                     0/*initial_offset*/);
+  log::Reader reader(file, &reporter, true /*checksum*/, 0 /*initial_offset*/);
   Log(options_.info_log, "Recovering log #%llu",
-      (unsigned long long) log_number);
+      (unsigned long long)log_number);
 
   // Read all the records and add to a memtable
   std::string scratch;
@@ -416,11 +414,10 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log,
   WriteBatch batch;
   int compactions = 0;
   MemTable* mem = nullptr;
-  while (reader.ReadRecord(&record, &scratch) &&
-         status.ok()) {
+  while (reader.ReadRecord(&record, &scratch) && status.ok()) {
     if (record.size() < 12) {
-      reporter.Corruption(
-          record.size(), Status::Corruption("log record too small"));
+      reporter.Corruption(record.size(),
+                          Status::Corruption("log record too small"));
       continue;
     }
     WriteBatchInternal::SetContents(&batch, record);
@@ -434,9 +431,8 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log,
       if (!status.ok()) {
         break;
       }
-      const SequenceNumber last_seq =
-          WriteBatchInternal::Sequence(&batch) +
-          WriteBatchInternal::Count(&batch) - 1;
+      const SequenceNumber last_seq = WriteBatchInternal::Sequence(&batch) +
+                                      WriteBatchInternal::Count(&batch) - 1;
       if (last_seq > *max_sequence) {
         *max_sequence = last_seq;
       }
@@ -500,7 +496,7 @@ Status DBImpl::WriteLevel0Table(MemTable* mem, VersionEdit* edit,
   pending_outputs_.insert(meta.number);
   Iterator* iter = mem->NewIterator();
   Log(options_.info_log, "Level-0 table #%llu: started",
-      (unsigned long long) meta.number);
+      (unsigned long long)meta.number);
 
   Status s;
   {
@@ -510,13 +506,11 @@ Status DBImpl::WriteLevel0Table(MemTable* mem, VersionEdit* edit,
   }
 
   Log(options_.info_log, "Level-0 table #%llu: %lld bytes %s",
-      (unsigned long long) meta.number,
-      (unsigned long long) meta.file_size,
+      (unsigned long long)meta.number, (unsigned long long)meta.file_size,
       s.ToString().c_str());
   delete iter;
   pending_outputs_.erase(meta.number);
 
-
   // Note that if file_size is zero, the file has been deleted and
   // should not be added to the manifest.
   int level = 0;
@@ -526,8 +520,8 @@ Status DBImpl::WriteLevel0Table(MemTable* mem, VersionEdit* edit,
   if (base != nullptr) {
     level = base->PickLevelForMemTableOutput(min_user_key, max_user_key);
   }
-    edit->AddFile(level, meta.number, meta.file_size,
-                  meta.smallest, meta.largest);
+    edit->AddFile(level, meta.number, meta.file_size, meta.smallest,
+                  meta.largest);
   }
 
   CompactionStats stats;
@ -658,8 +652,7 @@ void DBImpl::MaybeScheduleCompaction() {
|
|||||||
// DB is being deleted; no more background compactions
|
// DB is being deleted; no more background compactions
|
||||||
} else if (!bg_error_.ok()) {
|
} else if (!bg_error_.ok()) {
|
||||||
// Already got an error; no more changes
|
// Already got an error; no more changes
|
||||||
} else if (imm_ == nullptr &&
|
} else if (imm_ == nullptr && manual_compaction_ == nullptr &&
|
||||||
manual_compaction_ == nullptr &&
|
|
||||||
!versions_->NeedsCompaction()) {
|
!versions_->NeedsCompaction()) {
|
||||||
// No work to be done
|
// No work to be done
|
||||||
} else {
|
} else {
|
||||||
@ -711,8 +704,7 @@ void DBImpl::BackgroundCompaction() {
|
|||||||
}
|
}
|
||||||
Log(options_.info_log,
|
Log(options_.info_log,
|
||||||
"Manual compaction at level-%d from %s .. %s; will stop at %s\n",
|
"Manual compaction at level-%d from %s .. %s; will stop at %s\n",
|
||||||
m->level,
|
m->level, (m->begin ? m->begin->DebugString().c_str() : "(begin)"),
|
||||||
(m->begin ? m->begin->DebugString().c_str() : "(begin)"),
|
|
||||||
(m->end ? m->end->DebugString().c_str() : "(end)"),
|
(m->end ? m->end->DebugString().c_str() : "(end)"),
|
||||||
(m->done ? "(end)" : manual_end.DebugString().c_str()));
|
(m->done ? "(end)" : manual_end.DebugString().c_str()));
|
||||||
} else {
|
} else {
|
||||||
@ -727,19 +719,17 @@ void DBImpl::BackgroundCompaction() {
|
|||||||
assert(c->num_input_files(0) == 1);
|
assert(c->num_input_files(0) == 1);
|
||||||
FileMetaData* f = c->input(0, 0);
|
FileMetaData* f = c->input(0, 0);
|
||||||
c->edit()->DeleteFile(c->level(), f->number);
|
c->edit()->DeleteFile(c->level(), f->number);
|
||||||
c->edit()->AddFile(c->level() + 1, f->number, f->file_size,
|
c->edit()->AddFile(c->level() + 1, f->number, f->file_size, f->smallest,
|
||||||
f->smallest, f->largest);
|
f->largest);
|
||||||
status = versions_->LogAndApply(c->edit(), &mutex_);
|
status = versions_->LogAndApply(c->edit(), &mutex_);
|
||||||
if (!status.ok()) {
|
if (!status.ok()) {
|
||||||
RecordBackgroundError(status);
|
RecordBackgroundError(status);
|
||||||
}
|
}
|
||||||
VersionSet::LevelSummaryStorage tmp;
|
VersionSet::LevelSummaryStorage tmp;
|
||||||
Log(options_.info_log, "Moved #%lld to level-%d %lld bytes %s: %s\n",
|
Log(options_.info_log, "Moved #%lld to level-%d %lld bytes %s: %s\n",
|
||||||
static_cast<unsigned long long>(f->number),
|
static_cast<unsigned long long>(f->number), c->level() + 1,
|
||||||
c->level() + 1,
|
|
||||||
static_cast<unsigned long long>(f->file_size),
|
static_cast<unsigned long long>(f->file_size),
|
||||||
status.ToString().c_str(),
|
status.ToString().c_str(), versions_->LevelSummary(&tmp));
|
||||||
versions_->LevelSummary(&tmp));
|
|
||||||
} else {
|
} else {
|
||||||
CompactionState* compact = new CompactionState(c);
|
CompactionState* compact = new CompactionState(c);
|
||||||
status = DoCompactionWork(compact);
|
status = DoCompactionWork(compact);
|
||||||
@ -757,8 +747,7 @@ void DBImpl::BackgroundCompaction() {
|
|||||||
} else if (shutting_down_.load(std::memory_order_acquire)) {
|
} else if (shutting_down_.load(std::memory_order_acquire)) {
|
||||||
// Ignore compaction errors found during shutting down
|
// Ignore compaction errors found during shutting down
|
||||||
} else {
|
} else {
|
||||||
Log(options_.info_log,
|
Log(options_.info_log, "Compaction error: %s", status.ToString().c_str());
|
||||||
"Compaction error: %s", status.ToString().c_str());
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (is_manual) {
|
if (is_manual) {
|
||||||
@ -853,31 +842,25 @@ Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
|
|||||||
|
|
||||||
if (s.ok() && current_entries > 0) {
|
if (s.ok() && current_entries > 0) {
|
||||||
// Verify that the table is usable
|
// Verify that the table is usable
|
||||||
Iterator* iter = table_cache_->NewIterator(ReadOptions(),
|
Iterator* iter =
|
||||||
output_number,
|
table_cache_->NewIterator(ReadOptions(), output_number, current_bytes);
|
||||||
current_bytes);
|
|
||||||
s = iter->status();
|
s = iter->status();
|
||||||
delete iter;
|
delete iter;
|
||||||
if (s.ok()) {
|
if (s.ok()) {
|
||||||
Log(options_.info_log,
|
Log(options_.info_log, "Generated table #%llu@%d: %lld keys, %lld bytes",
|
||||||
"Generated table #%llu@%d: %lld keys, %lld bytes",
|
(unsigned long long)output_number, compact->compaction->level(),
|
||||||
(unsigned long long) output_number,
|
(unsigned long long)current_entries,
|
||||||
compact->compaction->level(),
|
(unsigned long long)current_bytes);
|
||||||
(unsigned long long) current_entries,
|
|
||||||
(unsigned long long) current_bytes);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return s;
|
return s;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
Status DBImpl::InstallCompactionResults(CompactionState* compact) {
|
Status DBImpl::InstallCompactionResults(CompactionState* compact) {
|
||||||
mutex_.AssertHeld();
|
mutex_.AssertHeld();
|
||||||
Log(options_.info_log, "Compacted %d@%d + %d@%d files => %lld bytes",
|
Log(options_.info_log, "Compacted %d@%d + %d@%d files => %lld bytes",
|
||||||
compact->compaction->num_input_files(0),
|
compact->compaction->num_input_files(0), compact->compaction->level(),
|
||||||
compact->compaction->level(),
|
compact->compaction->num_input_files(1), compact->compaction->level() + 1,
|
||||||
compact->compaction->num_input_files(1),
|
|
||||||
compact->compaction->level() + 1,
|
|
||||||
static_cast<long long>(compact->total_bytes));
|
static_cast<long long>(compact->total_bytes));
|
||||||
|
|
||||||
// Add compaction outputs
|
// Add compaction outputs
|
||||||
@ -885,9 +868,8 @@ Status DBImpl::InstallCompactionResults(CompactionState* compact) {
|
|||||||
const int level = compact->compaction->level();
|
const int level = compact->compaction->level();
|
||||||
for (size_t i = 0; i < compact->outputs.size(); i++) {
|
for (size_t i = 0; i < compact->outputs.size(); i++) {
|
||||||
const CompactionState::Output& out = compact->outputs[i];
|
const CompactionState::Output& out = compact->outputs[i];
|
||||||
compact->compaction->edit()->AddFile(
|
compact->compaction->edit()->AddFile(level + 1, out.number, out.file_size,
|
||||||
level + 1,
|
out.smallest, out.largest);
|
||||||
out.number, out.file_size, out.smallest, out.largest);
|
|
||||||
}
|
}
|
||||||
return versions_->LogAndApply(compact->compaction->edit(), &mutex_);
|
return versions_->LogAndApply(compact->compaction->edit(), &mutex_);
|
||||||
}
|
}
|
||||||
@@ -896,9 +878,8 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
 const uint64_t start_micros = env_->NowMicros();
 int64_t imm_micros = 0;  // Micros spent doing imm_ compactions

 Log(options_.info_log, "Compacting %d@%d + %d@%d files",
-compact->compaction->num_input_files(0),
-compact->compaction->level(),
+compact->compaction->num_input_files(0), compact->compaction->level(),
 compact->compaction->num_input_files(1),
 compact->compaction->level() + 1);

@@ -921,7 +902,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
 std::string current_user_key;
 bool has_current_user_key = false;
 SequenceNumber last_sequence_for_key = kMaxSequenceNumber;
-for (; input->Valid() && !shutting_down_.load(std::memory_order_acquire); ) {
+for (; input->Valid() && !shutting_down_.load(std::memory_order_acquire);) {
 // Prioritize immutable compaction work
 if (has_imm_.load(std::memory_order_relaxed)) {
 const uint64_t imm_start = env_->NowMicros();
@@ -953,8 +934,8 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
 last_sequence_for_key = kMaxSequenceNumber;
 } else {
 if (!has_current_user_key ||
-user_comparator()->Compare(ikey.user_key,
-Slice(current_user_key)) != 0) {
+user_comparator()->Compare(ikey.user_key, Slice(current_user_key)) !=
+0) {
 // First occurrence of this user key
 current_user_key.assign(ikey.user_key.data(), ikey.user_key.size());
 has_current_user_key = true;
@@ -963,7 +944,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {

 if (last_sequence_for_key <= compact->smallest_snapshot) {
 // Hidden by an newer entry for same user key
 drop = true;  // (A)
 } else if (ikey.type == kTypeDeletion &&
 ikey.sequence <= compact->smallest_snapshot &&
 compact->compaction->IsBaseLevelForKey(ikey.user_key)) {
@@ -1049,8 +1030,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
 RecordBackgroundError(status);
 }
 VersionSet::LevelSummaryStorage tmp;
-Log(options_.info_log,
-"compacted to: %s", versions_->LevelSummary(&tmp));
+Log(options_.info_log, "compacted to: %s", versions_->LevelSummary(&tmp));
 return status;
 }

@@ -1063,7 +1043,7 @@ struct IterState {
 MemTable* const imm GUARDED_BY(mu);

 IterState(port::Mutex* mutex, MemTable* mem, MemTable* imm, Version* version)
-: mu(mutex), version(version), mem(mem), imm(imm) { }
+: mu(mutex), version(version), mem(mem), imm(imm) {}
 };

 static void CleanupIteratorState(void* arg1, void* arg2) {
@@ -1116,8 +1096,7 @@ int64_t DBImpl::TEST_MaxNextLevelOverlappingBytes() {
 return versions_->MaxNextLevelOverlappingBytes();
 }

-Status DBImpl::Get(const ReadOptions& options,
-const Slice& key,
+Status DBImpl::Get(const ReadOptions& options, const Slice& key,
 std::string* value) {
 Status s;
 MutexLock l(&mutex_);
@@ -1168,12 +1147,12 @@ Iterator* DBImpl::NewIterator(const ReadOptions& options) {
 SequenceNumber latest_snapshot;
 uint32_t seed;
 Iterator* iter = NewInternalIterator(options, &latest_snapshot, &seed);
-return NewDBIterator(
-this, user_comparator(), iter,
-(options.snapshot != nullptr
-? static_cast<const SnapshotImpl*>(options.snapshot)->sequence_number()
+return NewDBIterator(this, user_comparator(), iter,
+(options.snapshot != nullptr
+? static_cast<const SnapshotImpl*>(options.snapshot)
+->sequence_number()
 : latest_snapshot),
 seed);
 }

 void DBImpl::RecordReadSample(Slice key) {
@@ -1202,9 +1181,9 @@ Status DBImpl::Delete(const WriteOptions& options, const Slice& key) {
 return DB::Delete(options, key);
 }

-Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
+Status DBImpl::Write(const WriteOptions& options, WriteBatch* updates) {
 Writer w(&mutex_);
-w.batch = my_batch;
+w.batch = updates;
 w.sync = options.sync;
 w.done = false;

@@ -1218,10 +1197,10 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
 }

 // May temporarily unlock and wait.
-Status status = MakeRoomForWrite(my_batch == nullptr);
+Status status = MakeRoomForWrite(updates == nullptr);
 uint64_t last_sequence = versions_->LastSequence();
 Writer* last_writer = &w;
-if (status.ok() && my_batch != nullptr) {  // nullptr batch is for compactions
+if (status.ok() && updates != nullptr) {  // nullptr batch is for compactions
 WriteBatch* updates = BuildBatchGroup(&last_writer);
 WriteBatchInternal::SetSequence(updates, last_sequence + 1);
 last_sequence += WriteBatchInternal::Count(updates);
@@ -1290,8 +1269,8 @@ WriteBatch* DBImpl::BuildBatchGroup(Writer** last_writer) {
 // original write is small, limit the growth so we do not slow
 // down the small write too much.
 size_t max_size = 1 << 20;
-if (size <= (128<<10)) {
-max_size = size + (128<<10);
+if (size <= (128 << 10)) {
+max_size = size + (128 << 10);
 }

 *last_writer = first;
@@ -1337,9 +1316,8 @@ Status DBImpl::MakeRoomForWrite(bool force) {
 // Yield previous error
 s = bg_error_;
 break;
-} else if (
-allow_delay &&
-versions_->NumLevelFiles(0) >= config::kL0_SlowdownWritesTrigger) {
+} else if (allow_delay && versions_->NumLevelFiles(0) >=
+config::kL0_SlowdownWritesTrigger) {
 // We are getting close to hitting a hard limit on the number of
 // L0 files. Rather than delaying a single write by several
 // seconds when we hit the hard limit, start delaying each
@@ -1383,7 +1361,7 @@ Status DBImpl::MakeRoomForWrite(bool force) {
 has_imm_.store(true, std::memory_order_release);
 mem_ = new MemTable(internal_comparator_);
 mem_->Ref();
 force = false;  // Do not force another compaction if have room
 MaybeScheduleCompaction();
 }
 }
@@ -1417,21 +1395,16 @@ bool DBImpl::GetProperty(const Slice& property, std::string* value) {
 snprintf(buf, sizeof(buf),
 " Compactions\n"
 "Level Files Size(MB) Time(sec) Read(MB) Write(MB)\n"
-"--------------------------------------------------\n"
-);
+"--------------------------------------------------\n");
 value->append(buf);
 for (int level = 0; level < config::kNumLevels; level++) {
 int files = versions_->NumLevelFiles(level);
 if (stats_[level].micros > 0 || files > 0) {
-snprintf(
-buf, sizeof(buf),
-"%3d %8d %8.0f %9.0f %8.0f %9.0f\n",
-level,
-files,
-versions_->NumLevelBytes(level) / 1048576.0,
-stats_[level].micros / 1e6,
-stats_[level].bytes_read / 1048576.0,
-stats_[level].bytes_written / 1048576.0);
+snprintf(buf, sizeof(buf), "%3d %8d %8.0f %9.0f %8.0f %9.0f\n", level,
+files, versions_->NumLevelBytes(level) / 1048576.0,
+stats_[level].micros / 1e6,
+stats_[level].bytes_read / 1048576.0,
+stats_[level].bytes_written / 1048576.0);
 value->append(buf);
 }
 }
@@ -1457,9 +1430,7 @@ bool DBImpl::GetProperty(const Slice& property, std::string* value) {
 return false;
 }

-void DBImpl::GetApproximateSizes(
-const Range* range, int n,
-uint64_t* sizes) {
+void DBImpl::GetApproximateSizes(const Range* range, int n, uint64_t* sizes) {
 // TODO(opt): better implementation
 Version* v;
 {
@@ -1497,10 +1468,9 @@ Status DB::Delete(const WriteOptions& opt, const Slice& key) {
 return Write(opt, &batch);
 }

-DB::~DB() { }
+DB::~DB() {}

-Status DB::Open(const Options& options, const std::string& dbname,
-DB** dbptr) {
+Status DB::Open(const Options& options, const std::string& dbname, DB** dbptr) {
 *dbptr = nullptr;

 DBImpl* impl = new DBImpl(options, dbname);
@@ -1543,8 +1513,7 @@ Status DB::Open(const Options& options, const std::string& dbname,
 return s;
 }

-Snapshot::~Snapshot() {
-}
+Snapshot::~Snapshot() {}

 Status DestroyDB(const std::string& dbname, const Options& options) {
 Env* env = options.env;
db/db_impl.h (11 changed lines)

@@ -35,8 +35,7 @@ class DBImpl : public DB {
 virtual Status Put(const WriteOptions&, const Slice& key, const Slice& value);
 virtual Status Delete(const WriteOptions&, const Slice& key);
 virtual Status Write(const WriteOptions& options, WriteBatch* updates);
-virtual Status Get(const ReadOptions& options,
-const Slice& key,
+virtual Status Get(const ReadOptions& options, const Slice& key,
 std::string* value);
 virtual Iterator* NewIterator(const ReadOptions&);
 virtual const Snapshot* GetSnapshot();
@@ -166,9 +165,9 @@ class DBImpl : public DB {
 struct ManualCompaction {
 int level;
 bool done;
 const InternalKey* begin;  // null means beginning of key range
 const InternalKey* end;  // null means end of key range
 InternalKey tmp_storage;  // Used to keep track of compaction progress
 };
 ManualCompaction* manual_compaction_ GUARDED_BY(mutex_);

@@ -184,7 +183,7 @@ class DBImpl : public DB {
 int64_t bytes_read;
 int64_t bytes_written;

-CompactionStats() : micros(0), bytes_read(0), bytes_written(0) { }
+CompactionStats() : micros(0), bytes_read(0), bytes_written(0) {}

 void Add(const CompactionStats& c) {
 this->micros += c.micros;
db/db_iter.cc

@@ -4,9 +4,9 @@

 #include "db/db_iter.h"

-#include "db/filename.h"
 #include "db/db_impl.h"
 #include "db/dbformat.h"
+#include "db/filename.h"
 #include "leveldb/env.h"
 #include "leveldb/iterator.h"
 #include "port/port.h"
@@ -36,17 +36,14 @@ namespace {
 // combines multiple entries for the same userkey found in the DB
 // representation into a single entry while accounting for sequence
 // numbers, deletion markers, overwrites, etc.
-class DBIter: public Iterator {
+class DBIter : public Iterator {
 public:
 // Which direction is the iterator currently moving?
 // (1) When moving forward, the internal iterator is positioned at
 // the exact entry that yields this->key(), this->value()
 // (2) When moving backwards, the internal iterator is positioned
 // just before all entries whose user key == this->key().
-enum Direction {
-kForward,
-kReverse
-};
+enum Direction { kForward, kReverse };

 DBIter(DBImpl* db, const Comparator* cmp, Iterator* iter, SequenceNumber s,
 uint32_t seed)
@@ -57,11 +54,8 @@ class DBIter: public Iterator {
 direction_(kForward),
 valid_(false),
 rnd_(seed),
-bytes_until_read_sampling_(RandomCompactionPeriod()) {
-}
-virtual ~DBIter() {
-delete iter_;
-}
+bytes_until_read_sampling_(RandomCompactionPeriod()) {}
+virtual ~DBIter() { delete iter_; }
 virtual bool Valid() const { return valid_; }
 virtual Slice key() const {
 assert(valid_);
@@ -105,7 +99,7 @@ class DBIter: public Iterator {

 // Picks the number of bytes that can be read until a compaction is scheduled.
 size_t RandomCompactionPeriod() {
-return rnd_.Uniform(2*config::kReadBytesPeriod);
+return rnd_.Uniform(2 * config::kReadBytesPeriod);
 }

 DBImpl* db_;
@@ -114,8 +108,8 @@ class DBIter: public Iterator {
 SequenceNumber const sequence_;

 Status status_;
 std::string saved_key_;  // == current key when direction_==kReverse
 std::string saved_value_;  // == current raw value when direction_==kReverse
 Direction direction_;
 bool valid_;

@@ -221,8 +215,8 @@ void DBIter::Prev() {
 ClearSavedValue();
 return;
 }
-if (user_comparator_->Compare(ExtractUserKey(iter_->key()),
-saved_key_) < 0) {
+if (user_comparator_->Compare(ExtractUserKey(iter_->key()), saved_key_) <
+0) {
 break;
 }
 }
@@ -278,8 +272,8 @@ void DBIter::Seek(const Slice& target) {
 direction_ = kForward;
 ClearSavedValue();
 saved_key_.clear();
-AppendInternalKey(
-&saved_key_, ParsedInternalKey(target, sequence_, kValueTypeForSeek));
+AppendInternalKey(&saved_key_,
+ParsedInternalKey(target, sequence_, kValueTypeForSeek));
 iter_->Seek(saved_key_);
 if (iter_->Valid()) {
 FindNextUserEntry(false, &saved_key_ /* temporary storage */);
@@ -308,12 +302,9 @@ void DBIter::SeekToLast() {

 }  // anonymous namespace

-Iterator* NewDBIterator(
-DBImpl* db,
-const Comparator* user_key_comparator,
-Iterator* internal_iter,
-SequenceNumber sequence,
-uint32_t seed) {
+Iterator* NewDBIterator(DBImpl* db, const Comparator* user_key_comparator,
+Iterator* internal_iter, SequenceNumber sequence,
+uint32_t seed) {
 return new DBIter(db, user_key_comparator, internal_iter, sequence, seed);
 }
db/db_iter.h

@@ -6,8 +6,9 @@
 #define STORAGE_LEVELDB_DB_DB_ITER_H_

 #include <stdint.h>
-#include "leveldb/db.h"
 #include "db/dbformat.h"
+#include "leveldb/db.h"

 namespace leveldb {

@@ -16,10 +17,8 @@ class DBImpl;
 // Return a new iterator that converts internal keys (yielded by
 // "*internal_iter") that were live at the specified "sequence" number
 // into appropriate user keys.
-Iterator* NewDBIterator(DBImpl* db,
-const Comparator* user_key_comparator,
-Iterator* internal_iter,
-SequenceNumber sequence,
+Iterator* NewDBIterator(DBImpl* db, const Comparator* user_key_comparator,
+Iterator* internal_iter, SequenceNumber sequence,
 uint32_t seed);

 }  // namespace leveldb
db/db_test.cc (241 changed lines)

@@ -2,17 +2,18 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.

+#include "leveldb/db.h"
+
 #include <atomic>
 #include <string>

-#include "leveldb/db.h"
-#include "leveldb/filter_policy.h"
 #include "db/db_impl.h"
 #include "db/filename.h"
 #include "db/version_set.h"
 #include "db/write_batch_internal.h"
 #include "leveldb/cache.h"
 #include "leveldb/env.h"
+#include "leveldb/filter_policy.h"
 #include "leveldb/table.h"
 #include "port/port.h"
 #include "port/thread_annotations.h"
@@ -31,9 +32,9 @@ static std::string RandomString(Random* rnd, int len) {
 }

 static std::string RandomKey(Random* rnd) {
-int len = (rnd->OneIn(3)
-? 1  // Short sometimes to encourage collisions
+int len =
+(rnd->OneIn(3) ? 1  // Short sometimes to encourage collisions
 : (rnd->OneIn(100) ? rnd->Skewed(10) : rnd->Uniform(10)));
 return test::RandomKey(rnd, len);
 }

@@ -42,11 +43,10 @@ class AtomicCounter {
 private:
 port::Mutex mu_;
 int count_ GUARDED_BY(mu_);

 public:
-AtomicCounter() : count_(0) { }
-void Increment() {
-IncrementBy(1);
-}
+AtomicCounter() : count_(0) {}
+void Increment() { IncrementBy(1); }
 void IncrementBy(int count) LOCKS_EXCLUDED(mu_) {
 MutexLock l(&mu_);
 count_ += count;
@@ -120,15 +120,15 @@ class SpecialEnv : public EnvWrapper {
 bool count_random_reads_;
 AtomicCounter random_read_counter_;

-explicit SpecialEnv(Env* base) : EnvWrapper(base),
-delay_data_sync_(false),
-data_sync_error_(false),
-no_space_(false),
-non_writable_(false),
-manifest_sync_error_(false),
-manifest_write_error_(false),
-count_random_reads_(false) {
-}
+explicit SpecialEnv(Env* base)
+: EnvWrapper(base),
+delay_data_sync_(false),
+data_sync_error_(false),
+no_space_(false),
+non_writable_(false),
+manifest_sync_error_(false),
+manifest_write_error_(false),
+count_random_reads_(false) {}

 Status NewWritableFile(const std::string& f, WritableFile** r) {
 class DataFile : public WritableFile {
@@ -137,10 +137,7 @@ class SpecialEnv : public EnvWrapper {
 WritableFile* const base_;

 public:
-DataFile(SpecialEnv* env, WritableFile* base)
-: env_(env),
-base_(base) {
-}
+DataFile(SpecialEnv* env, WritableFile* base) : env_(env), base_(base) {}
 ~DataFile() { delete base_; }
 Status Append(const Slice& data) {
 if (env_->no_space_.load(std::memory_order_acquire)) {
@@ -166,8 +163,9 @@ class SpecialEnv : public EnvWrapper {
 private:
 SpecialEnv* env_;
 WritableFile* base_;

 public:
-ManifestFile(SpecialEnv* env, WritableFile* b) : env_(env), base_(b) { }
+ManifestFile(SpecialEnv* env, WritableFile* b) : env_(env), base_(b) {}
 ~ManifestFile() { delete base_; }
 Status Append(const Slice& data) {
 if (env_->manifest_write_error_.load(std::memory_order_acquire)) {
@@ -208,10 +206,10 @@ class SpecialEnv : public EnvWrapper {
 private:
 RandomAccessFile* target_;
 AtomicCounter* counter_;

 public:
 CountingFile(RandomAccessFile* target, AtomicCounter* counter)
-: target_(target), counter_(counter) {
-}
+: target_(target), counter_(counter) {}
 virtual ~CountingFile() { delete target_; }
 virtual Status Read(uint64_t offset, size_t n, Slice* result,
 char* scratch) const {
@@ -233,13 +231,7 @@ class DBTest {
 const FilterPolicy* filter_policy_;

 // Sequence of option configurations to try
-enum OptionConfig {
-kDefault,
-kReuse,
-kFilter,
-kUncompressed,
-kEnd
-};
+enum OptionConfig { kDefault, kReuse, kFilter, kUncompressed, kEnd };
 int option_config_;

 public:
@@ -249,8 +241,7 @@ class DBTest {

 Options last_options_;

-DBTest() : option_config_(kDefault),
-env_(new SpecialEnv(Env::Default())) {
+DBTest() : option_config_(kDefault), env_(new SpecialEnv(Env::Default())) {
 filter_policy_ = NewBloomFilterPolicy(10);
 dbname_ = test::TmpDir() + "/db_test";
 DestroyDB(dbname_, Options());
@@ -297,13 +288,9 @@ class DBTest {
 return options;
 }

-DBImpl* dbfull() {
-return reinterpret_cast<DBImpl*>(db_);
-}
+DBImpl* dbfull() { return reinterpret_cast<DBImpl*>(db_); }

-void Reopen(Options* options = nullptr) {
-ASSERT_OK(TryReopen(options));
-}
+void Reopen(Options* options = nullptr) { ASSERT_OK(TryReopen(options)); }

 void Close() {
 delete db_;
@@ -336,9 +323,7 @@ class DBTest {
 return db_->Put(WriteOptions(), k, v);
 }

-Status Delete(const std::string& k) {
-return db_->Delete(WriteOptions(), k);
-}
+Status Delete(const std::string& k) { return db_->Delete(WriteOptions(), k); }

 std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) {
 ReadOptions options;
@@ -424,9 +409,8 @@ class DBTest {

 int NumTableFilesAtLevel(int level) {
 std::string property;
-ASSERT_TRUE(
-db_->GetProperty("leveldb.num-files-at-level" + NumberToString(level),
-&property));
+ASSERT_TRUE(db_->GetProperty(
+"leveldb.num-files-at-level" + NumberToString(level), &property));
 return std::stoi(property);
 }

@@ -491,9 +475,9 @@ class DBTest {

 void DumpFileCounts(const char* label) {
 fprintf(stderr, "---\n%s:\n", label);
-fprintf(stderr, "maxoverlap: %lld\n",
-static_cast<long long>(
-dbfull()->TEST_MaxNextLevelOverlappingBytes()));
+fprintf(
+stderr, "maxoverlap: %lld\n",
+static_cast<long long>(dbfull()->TEST_MaxNextLevelOverlappingBytes()));
 for (int level = 0; level < config::kNumLevels; level++) {
 int num = NumTableFilesAtLevel(level);
 if (num > 0) {
@@ -612,8 +596,8 @@ TEST(DBTest, GetFromImmutableLayer) {

 // Block sync calls.
 env_->delay_data_sync_.store(true, std::memory_order_release);
 Put("k1", std::string(100000, 'x'));  // Fill memtable.
 Put("k2", std::string(100000, 'y'));  // Trigger compaction.
 ASSERT_EQ("v1", Get("foo"));
 // Release sync calls.
 env_->delay_data_sync_.store(false, std::memory_order_release);
@@ -635,7 +619,7 @@ TEST(DBTest, GetMemUsage) {
 ASSERT_TRUE(db_->GetProperty("leveldb.approximate-memory-usage", &val));
 int mem_usage = std::stoi(val);
 ASSERT_GT(mem_usage, 0);
-ASSERT_LT(mem_usage, 5*1024*1024);
+ASSERT_LT(mem_usage, 5 * 1024 * 1024);
 } while (ChangeOptions());
 }

@@ -760,8 +744,7 @@ TEST(DBTest, GetEncountersEmptyLevel) {

 // Step 1: First place sstables in levels 0 and 2
 int compaction_count = 0;
-while (NumTableFilesAtLevel(0) == 0 ||
-NumTableFilesAtLevel(2) == 0) {
+while (NumTableFilesAtLevel(0) == 0 || NumTableFilesAtLevel(2) == 0) {
 ASSERT_LE(compaction_count, 100) << "could not fill levels 0 and 2";
 compaction_count++;
 Put("a", "begin");
@@ -898,10 +881,10 @@ TEST(DBTest, IterMulti) {
 ASSERT_EQ(IterStatus(iter), "b->vb");

 // Make sure iter stays at snapshot
 ASSERT_OK(Put("a", "va2"));
 ASSERT_OK(Put("a2", "va3"));
 ASSERT_OK(Put("b", "vb2"));
 ASSERT_OK(Put("c", "vc2"));
 ASSERT_OK(Delete("b"));
 iter->SeekToFirst();
 ASSERT_EQ(IterStatus(iter), "a->va");
@@ -1092,7 +1075,7 @@ TEST(DBTest, RecoverWithLargeLog) {

 TEST(DBTest, CompactionsGenerateMultipleFiles) {
 Options options = CurrentOptions();
 options.write_buffer_size = 100000000;  // Large write buffer
 Reopen(&options);

 Random rnd(301);
@@ -1161,26 +1144,25 @@ TEST(DBTest, SparseMerge) {
 dbfull()->TEST_CompactRange(0, nullptr, nullptr);

 // Make sparse update
 Put("A", "va2");
 Put("B100", "bvalue2");
 Put("C", "vc2");
 dbfull()->TEST_CompactMemTable();

 // Compactions should not cause us to create a situation where
 // a file overlaps too much data at the next level.
-ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
+ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576);
 dbfull()->TEST_CompactRange(0, nullptr, nullptr);
-ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
+ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576);
 dbfull()->TEST_CompactRange(1, nullptr, nullptr);
-ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
+ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576);
 }

 static bool Between(uint64_t val, uint64_t low, uint64_t high) {
 bool result = (val >= low) && (val <= high);
 if (!result) {
 fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
-(unsigned long long)(val),
-(unsigned long long)(low),
+(unsigned long long)(val), (unsigned long long)(low),
 (unsigned long long)(high));
 }
 return result;
@@ -1189,7 +1171,7 @@ static bool Between(uint64_t val, uint64_t low, uint64_t high) {
 TEST(DBTest, ApproximateSizes) {
 do {
 Options options = CurrentOptions();
 options.write_buffer_size = 100000000;  // Large write buffer
 options.compression = kNoCompression;
 DestroyAndReopen();

@@ -1224,12 +1206,13 @@ TEST(DBTest, ApproximateSizes) {

 for (int compact_start = 0; compact_start < N; compact_start += 10) {
 for (int i = 0; i < N; i += 10) {
-ASSERT_TRUE(Between(Size("", Key(i)), S1*i, S2*i));
-ASSERT_TRUE(Between(Size("", Key(i)+".suffix"), S1*(i+1), S2*(i+1)));
-ASSERT_TRUE(Between(Size(Key(i), Key(i+10)), S1*10, S2*10));
+ASSERT_TRUE(Between(Size("", Key(i)), S1 * i, S2 * i));
+ASSERT_TRUE(Between(Size("", Key(i) + ".suffix"), S1 * (i + 1),
+S2 * (i + 1)));
+ASSERT_TRUE(Between(Size(Key(i), Key(i + 10)), S1 * 10, S2 * 10));
 }
-ASSERT_TRUE(Between(Size("", Key(50)), S1*50, S2*50));
-ASSERT_TRUE(Between(Size("", Key(50)+".suffix"), S1*50, S2*50));
+ASSERT_TRUE(Between(Size("", Key(50)), S1 * 50, S2 * 50));
+ASSERT_TRUE(Between(Size("", Key(50) + ".suffix"), S1 * 50, S2 * 50));

 std::string cstart_str = Key(compact_start);
 std::string cend_str = Key(compact_start + 9);
@@ -1348,7 +1331,7 @@ TEST(DBTest, HiddenValuesAreRemoved) {
 Put("pastfoo", "v");
 const Snapshot* snapshot = db_->GetSnapshot();
 Put("foo", "tiny");
 Put("pastfoo2", "v2");  // Advance sequence number one more

 ASSERT_OK(dbfull()->TEST_CompactMemTable());
 ASSERT_GT(NumTableFilesAtLevel(0), 0);
@@ -1373,14 +1356,14 @@ TEST(DBTest, DeletionMarkers1) {
 Put("foo", "v1");
 ASSERT_OK(dbfull()->TEST_CompactMemTable());
 const int last = config::kMaxMemCompactLevel;
 ASSERT_EQ(NumTableFilesAtLevel(last), 1);  // foo => v1 is now in last level

 // Place a table at level last-1 to prevent merging with preceding mutation
 Put("a", "begin");
 Put("z", "end");
 dbfull()->TEST_CompactMemTable();
 ASSERT_EQ(NumTableFilesAtLevel(last), 1);
-ASSERT_EQ(NumTableFilesAtLevel(last-1), 1);
+ASSERT_EQ(NumTableFilesAtLevel(last - 1), 1);

 Delete("foo");
 Put("foo", "v2");
@@ -1388,11 +1371,11 @@ TEST(DBTest, DeletionMarkers1) {
 ASSERT_OK(dbfull()->TEST_CompactMemTable());  // Moves to level last-2
 ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
 Slice z("z");
-dbfull()->TEST_CompactRange(last-2, nullptr, &z);
+dbfull()->TEST_CompactRange(last - 2, nullptr, &z);
 // DEL eliminated, but v1 remains because we aren't compacting that level
 // (DEL can be eliminated because v2 hides v1).
 ASSERT_EQ(AllEntriesFor("foo"), "[ v2, v1 ]");
-dbfull()->TEST_CompactRange(last-1, nullptr, nullptr);
+dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr);
 // Merging last-1 w/ last, so we are the base level for "foo", so
 // DEL is removed. (as is v1).
 ASSERT_EQ(AllEntriesFor("foo"), "[ v2 ]");
@@ -1402,23 +1385,23 @@ TEST(DBTest, DeletionMarkers2) {
 Put("foo", "v1");
 ASSERT_OK(dbfull()->TEST_CompactMemTable());
 const int last = config::kMaxMemCompactLevel;
 ASSERT_EQ(NumTableFilesAtLevel(last), 1);  // foo => v1 is now in last level

 // Place a table at level last-1 to prevent merging with preceding mutation
 Put("a", "begin");
 Put("z", "end");
 dbfull()->TEST_CompactMemTable();
 ASSERT_EQ(NumTableFilesAtLevel(last), 1);
-ASSERT_EQ(NumTableFilesAtLevel(last-1), 1);
+ASSERT_EQ(NumTableFilesAtLevel(last - 1), 1);

 Delete("foo");
 ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
 ASSERT_OK(dbfull()->TEST_CompactMemTable());  // Moves to level last-2
 ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
-dbfull()->TEST_CompactRange(last-2, nullptr, nullptr);
+dbfull()->TEST_CompactRange(last - 2, nullptr, nullptr);
 // DEL kept: "last" file overlaps
 ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
-dbfull()->TEST_CompactRange(last-1, nullptr, nullptr);
+dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr);
 // Merging last-1 w/ last, so we are the base level for "foo", so
 // DEL is removed. (as is v1).
 ASSERT_EQ(AllEntriesFor("foo"), "[ ]");
@@ -1428,7 +1411,8 @@ TEST(DBTest, OverlapInLevel0) {
 do {
 ASSERT_EQ(config::kMaxMemCompactLevel, 2) << "Fix test to match config";

-// Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0.
+// Fill levels 1 and 2 to disable the pushing of new memtables to levels >
+// 0.
 ASSERT_OK(Put("100", "v100"));
 ASSERT_OK(Put("999", "v999"));
 dbfull()->TEST_CompactMemTable();
@@ -1548,16 +1532,17 @@ TEST(DBTest, CustomComparator) {
 return ToNumber(a) - ToNumber(b);
 }
 virtual void FindShortestSeparator(std::string* s, const Slice& l) const {
 ToNumber(*s);  // Check format
 ToNumber(l);  // Check format
 }
 virtual void FindShortSuccessor(std::string* key) const {
 ToNumber(*key);  // Check format
 }

 private:
 static int ToNumber(const Slice& x) {
 // Check that there are no extra characters.
-ASSERT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size()-1] == ']')
+ASSERT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size() - 1] == ']')
 << EscapeString(x);
 int val;
 char ignored;
@@ -1570,7 +1555,7 @@ TEST(DBTest, CustomComparator) {
 Options new_options = CurrentOptions();
 new_options.create_if_missing = true;
 new_options.comparator = &cmp;
 new_options.filter_policy = nullptr;  // Cannot use bloom filters
 new_options.write_buffer_size = 1000;  // Compact more often
 DestroyAndReopen(&new_options);
 ASSERT_OK(Put("[10]", "ten"));
@@ -1588,7 +1573,7 @@ TEST(DBTest, CustomComparator) {
 for (int run = 0; run < 2; run++) {
 for (int i = 0; i < 1000; i++) {
 char buf[100];
-snprintf(buf, sizeof(buf), "[%d]", i*10);
+snprintf(buf, sizeof(buf), "[%d]", i * 10);
 ASSERT_OK(Put(buf, buf));
 }
 Compact("[0]", "[1000000]");
@@ -1739,7 +1724,7 @@ TEST(DBTest, NoSpace) {
 // Force out-of-space errors.
 env_->no_space_.store(true, std::memory_order_release);
 for (int i = 0; i < 10; i++) {
-for (int level = 0; level < config::kNumLevels-1; level++) {
+for (int level = 0; level < config::kNumLevels - 1; level++) {
 dbfull()->TEST_CompactRange(level, nullptr, nullptr);
 }
 }
@@ -1809,9 +1794,8 @@ TEST(DBTest, ManifestWriteError) {
 // We iterate twice. In the second iteration, everything is the
 // same except the log record never makes it to the MANIFEST file.
 for (int iter = 0; iter < 2; iter++) {
-std::atomic<bool>* error_type = (iter == 0)
-? &env_->manifest_sync_error_
-: &env_->manifest_write_error_;
+std::atomic<bool>* error_type = (iter == 0) ? &env_->manifest_sync_error_
+: &env_->manifest_write_error_;

 // Insert foo=>bar mapping
 Options options = CurrentOptions();
@@ -1826,7 +1810,7 @@ TEST(DBTest, ManifestWriteError) {
 dbfull()->TEST_CompactMemTable();
 ASSERT_EQ("bar", Get("foo"));
 const int last = config::kMaxMemCompactLevel;
 ASSERT_EQ(NumTableFilesAtLevel(last), 1);  // foo=>bar is now in last level

 // Merging compaction (will fail)
 error_type->store(true, std::memory_order_release);
@@ -1854,8 +1838,7 @@ TEST(DBTest, MissingSSTFile) {
 options.paranoid_checks = true;
 Status s = TryReopen(&options);
 ASSERT_TRUE(!s.ok());
-ASSERT_TRUE(s.ToString().find("issing") != std::string::npos)
-<< s.ToString();
+ASSERT_TRUE(s.ToString().find("issing") != std::string::npos) << s.ToString();
 }

 TEST(DBTest, StillReadSST) {
@@ -1915,7 +1898,7 @@ TEST(DBTest, BloomFilter) {
 int reads = env_->random_read_counter_.Read();
 fprintf(stderr, "%d present => %d reads\n", N, reads);
 ASSERT_GE(reads, N);
-ASSERT_LE(reads, N + 2*N/100);
+ASSERT_LE(reads, N + 2 * N / 100);

 // Lookup present keys. Should rarely read from either sstable.
 env_->random_read_counter_.Reset();
@@ -1924,7 +1907,7 @@ TEST(DBTest, BloomFilter) {
 }
 reads = env_->random_read_counter_.Read();
 fprintf(stderr, "%d missing => %d reads\n", N, reads);
-ASSERT_LE(reads, 3*N/100);
+ASSERT_LE(reads, 3 * N / 100);

 env_->delay_data_sync_.store(false, std::memory_order_release);
 Close();
@@ -1970,8 +1953,8 @@ static void MTThreadBody(void* arg) {
 if (rnd.OneIn(2)) {
 // Write values of the form <key, my id, counter>.
 // We add some padding for force compactions.
-snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d",
-key, id, static_cast<int>(counter));
+snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d", key, id,
+static_cast<int>(counter));
 ASSERT_OK(db->Put(WriteOptions(), Slice(keybuf), Slice(valbuf)));
 } else {
 // Read a value and verify that it matches the pattern written above.
@@ -2033,24 +2016,24 @@ namespace {
 typedef std::map<std::string, std::string> KVMap;
 }

-class ModelDB: public DB {
+class ModelDB : public DB {
 public:
 class ModelSnapshot : public Snapshot {
 public:
 KVMap map_;
 };

-explicit ModelDB(const Options& options): options_(options) { }
-~ModelDB() { }
+explicit ModelDB(const Options& options) : options_(options) {}
+~ModelDB() {}
 virtual Status Put(const WriteOptions& o, const Slice& k, const Slice& v) {
 return DB::Put(o, k, v);
 }
 virtual Status Delete(const WriteOptions& o, const Slice& key) {
 return DB::Delete(o, key);
 }
-virtual Status Get(const ReadOptions& options,
-const Slice& key, std::string* value) {
+virtual Status Get(const ReadOptions& options, const Slice& key,
+std::string* value) {
 assert(false);  // Not implemented
 return Status::NotFound(key);
 }
 virtual Iterator* NewIterator(const ReadOptions& options) {
@@ -2080,9 +2063,7 @@ class ModelDB: public DB {
 virtual void Put(const Slice& key, const Slice& value) {
 (*map_)[key.ToString()] = value.ToString();
 }
-virtual void Delete(const Slice& key) {
-map_->erase(key.ToString());
-}
+virtual void Delete(const Slice& key) { map_->erase(key.ToString()); }
 };
 Handler handler;
 handler.map_ = &map_;
@@ -2097,15 +2078,13 @@ class ModelDB: public DB {
 sizes[i] = 0;
 }
 }
-virtual void CompactRange(const Slice* start, const Slice* end) {
-}
+virtual void CompactRange(const Slice* start, const Slice* end) {}

 private:
-class ModelIter: public Iterator {
+class ModelIter : public Iterator {
 public:
 ModelIter(const KVMap* map, bool owned)
-: map_(map), owned_(owned), iter_(map_->end()) {
-}
+: map_(map), owned_(owned), iter_(map_->end()) {}
 ~ModelIter() {
 if (owned_) delete map_;
 }
@@ -2136,9 +2115,7 @@ class ModelDB: public DB {
 KVMap map_;
 };

 static bool CompareIterators(int step,
|
static bool CompareIterators(int step, DB* model, DB* db,
|
||||||
DB* model,
|
|
||||||
DB* db,
|
|
||||||
const Snapshot* model_snap,
|
const Snapshot* model_snap,
|
||||||
const Snapshot* db_snap) {
|
const Snapshot* db_snap) {
|
||||||
ReadOptions options;
|
ReadOptions options;
|
||||||
@ -2149,12 +2126,10 @@ static bool CompareIterators(int step,
|
|||||||
bool ok = true;
|
bool ok = true;
|
||||||
int count = 0;
|
int count = 0;
|
||||||
for (miter->SeekToFirst(), dbiter->SeekToFirst();
|
for (miter->SeekToFirst(), dbiter->SeekToFirst();
|
||||||
ok && miter->Valid() && dbiter->Valid();
|
ok && miter->Valid() && dbiter->Valid(); miter->Next(), dbiter->Next()) {
|
||||||
miter->Next(), dbiter->Next()) {
|
|
||||||
count++;
|
count++;
|
||||||
if (miter->key().compare(dbiter->key()) != 0) {
|
if (miter->key().compare(dbiter->key()) != 0) {
|
||||||
fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n",
|
fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n", step,
|
||||||
step,
|
|
||||||
EscapeString(miter->key()).c_str(),
|
EscapeString(miter->key()).c_str(),
|
||||||
EscapeString(dbiter->key()).c_str());
|
EscapeString(dbiter->key()).c_str());
|
||||||
ok = false;
|
ok = false;
|
||||||
@ -2163,8 +2138,7 @@ static bool CompareIterators(int step,
|
|||||||
|
|
||||||
if (miter->value().compare(dbiter->value()) != 0) {
|
if (miter->value().compare(dbiter->value()) != 0) {
|
||||||
fprintf(stderr, "step %d: Value mismatch for key '%s': '%s' vs. '%s'\n",
|
fprintf(stderr, "step %d: Value mismatch for key '%s': '%s' vs. '%s'\n",
|
||||||
step,
|
step, EscapeString(miter->key()).c_str(),
|
||||||
EscapeString(miter->key()).c_str(),
|
|
||||||
EscapeString(miter->value()).c_str(),
|
EscapeString(miter->value()).c_str(),
|
||||||
EscapeString(miter->value()).c_str());
|
EscapeString(miter->value()).c_str());
|
||||||
ok = false;
|
ok = false;
|
||||||
@ -2198,22 +2172,19 @@ TEST(DBTest, Randomized) {
|
|||||||
}
|
}
|
||||||
// TODO(sanjay): Test Get() works
|
// TODO(sanjay): Test Get() works
|
||||||
int p = rnd.Uniform(100);
|
int p = rnd.Uniform(100);
|
||||||
if (p < 45) { // Put
|
if (p < 45) { // Put
|
||||||
k = RandomKey(&rnd);
|
k = RandomKey(&rnd);
|
||||||
v = RandomString(&rnd,
|
v = RandomString(
|
||||||
rnd.OneIn(20)
|
&rnd, rnd.OneIn(20) ? 100 + rnd.Uniform(100) : rnd.Uniform(8));
|
||||||
? 100 + rnd.Uniform(100)
|
|
||||||
: rnd.Uniform(8));
|
|
||||||
ASSERT_OK(model.Put(WriteOptions(), k, v));
|
ASSERT_OK(model.Put(WriteOptions(), k, v));
|
||||||
ASSERT_OK(db_->Put(WriteOptions(), k, v));
|
ASSERT_OK(db_->Put(WriteOptions(), k, v));
|
||||||
|
|
||||||
} else if (p < 90) { // Delete
|
} else if (p < 90) { // Delete
|
||||||
k = RandomKey(&rnd);
|
k = RandomKey(&rnd);
|
||||||
ASSERT_OK(model.Delete(WriteOptions(), k));
|
ASSERT_OK(model.Delete(WriteOptions(), k));
|
||||||
ASSERT_OK(db_->Delete(WriteOptions(), k));
|
ASSERT_OK(db_->Delete(WriteOptions(), k));
|
||||||
|
|
||||||
|
} else { // Multi-element batch
|
||||||
} else { // Multi-element batch
|
|
||||||
WriteBatch b;
|
WriteBatch b;
|
||||||
const int num = rnd.Uniform(8);
|
const int num = rnd.Uniform(8);
|
||||||
for (int i = 0; i < num; i++) {
|
for (int i = 0; i < num; i++) {
|
||||||
@ -2288,8 +2259,8 @@ void BM_LogAndApply(int iters, int num_base_files) {
|
|||||||
VersionEdit vbase;
|
VersionEdit vbase;
|
||||||
uint64_t fnum = 1;
|
uint64_t fnum = 1;
|
||||||
for (int i = 0; i < num_base_files; i++) {
|
for (int i = 0; i < num_base_files; i++) {
|
||||||
InternalKey start(MakeKey(2*fnum), 1, kTypeValue);
|
InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
|
||||||
InternalKey limit(MakeKey(2*fnum+1), 1, kTypeDeletion);
|
InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
|
||||||
vbase.AddFile(2, fnum++, 1 /* file size */, start, limit);
|
vbase.AddFile(2, fnum++, 1 /* file size */, start, limit);
|
||||||
}
|
}
|
||||||
ASSERT_OK(vset.LogAndApply(&vbase, &mu));
|
ASSERT_OK(vset.LogAndApply(&vbase, &mu));
|
||||||
@ -2299,8 +2270,8 @@ void BM_LogAndApply(int iters, int num_base_files) {
|
|||||||
for (int i = 0; i < iters; i++) {
|
for (int i = 0; i < iters; i++) {
|
||||||
VersionEdit vedit;
|
VersionEdit vedit;
|
||||||
vedit.DeleteFile(2, fnum);
|
vedit.DeleteFile(2, fnum);
|
||||||
InternalKey start(MakeKey(2*fnum), 1, kTypeValue);
|
InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
|
||||||
InternalKey limit(MakeKey(2*fnum+1), 1, kTypeDeletion);
|
InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
|
||||||
vedit.AddFile(2, fnum++, 1 /* file size */, start, limit);
|
vedit.AddFile(2, fnum++, 1 /* file size */, start, limit);
|
||||||
vset.LogAndApply(&vedit, &mu);
|
vset.LogAndApply(&vedit, &mu);
|
||||||
}
|
}
|
||||||
@ -2309,8 +2280,8 @@ void BM_LogAndApply(int iters, int num_base_files) {
|
|||||||
char buf[16];
|
char buf[16];
|
||||||
snprintf(buf, sizeof(buf), "%d", num_base_files);
|
snprintf(buf, sizeof(buf), "%d", num_base_files);
|
||||||
fprintf(stderr,
|
fprintf(stderr,
|
||||||
"BM_LogAndApply/%-6s %8d iters : %9u us (%7.0f us / iter)\n",
|
"BM_LogAndApply/%-6s %8d iters : %9u us (%7.0f us / iter)\n", buf,
|
||||||
buf, iters, us, ((float)us) / iters);
|
iters, us, ((float)us) / iters);
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace leveldb
|
} // namespace leveldb
|
||||||
db/dbformat.cc
@@ -2,8 +2,10 @@
-#include <stdio.h>
 #include "db/dbformat.h"
+
+#include <stdio.h>

@@ -22,8 +24,7 @@ void AppendInternalKey(std::string* result, const ParsedInternalKey& key) {
-snprintf(buf, sizeof(buf), "' @ %llu : %d",
-(unsigned long long) sequence,
+snprintf(buf, sizeof(buf), "' @ %llu : %d", (unsigned long long)sequence,
 int(type));

@@ -65,9 +66,8 @@ int InternalKeyComparator::Compare(const Slice& akey, const Slice& bkey) const {
-void InternalKeyComparator::FindShortestSeparator(
-std::string* start,
-const Slice& limit) const {
+void InternalKeyComparator::FindShortestSeparator(std::string* start,
+const Slice& limit) const {

@@ -77,7 +77,8 @@ void InternalKeyComparator::FindShortestSeparator(
-PutFixed64(&tmp, PackSequenceAndType(kMaxSequenceNumber,kValueTypeForSeek));
+PutFixed64(&tmp,
+PackSequenceAndType(kMaxSequenceNumber, kValueTypeForSeek));

@@ -92,15 +93,14 @@ void InternalKeyComparator::FindShortSuccessor(std::string* key) const {
-PutFixed64(&tmp, PackSequenceAndType(kMaxSequenceNumber,kValueTypeForSeek));
+PutFixed64(&tmp,
+PackSequenceAndType(kMaxSequenceNumber, kValueTypeForSeek));
-const char* InternalFilterPolicy::Name() const {
-return user_policy_->Name();
-}
+const char* InternalFilterPolicy::Name() const { return user_policy_->Name(); }
db/dbformat.h
@@ -6,6 +6,7 @@
 #include <stdio.h>
+
 #include "leveldb/comparator.h"

@@ -48,10 +49,7 @@ class InternalKey;
-enum ValueType {
-kTypeDeletion = 0x0,
-kTypeValue = 0x1
-};
+enum ValueType { kTypeDeletion = 0x0, kTypeValue = 0x1 };

@@ -64,17 +62,16 @@ typedef uint64_t SequenceNumber;
-static const SequenceNumber kMaxSequenceNumber =
-((0x1ull << 56) - 1);
+static const SequenceNumber kMaxSequenceNumber = ((0x1ull << 56) - 1);
-ParsedInternalKey() { }  // Intentionally left uninitialized (for speed)
+ParsedInternalKey() {}  // Intentionally left uninitialized (for speed)
-: user_key(u), sequence(seq), type(t) { }
+: user_key(u), sequence(seq), type(t) {}

@@ -103,13 +100,13 @@ inline Slice ExtractUserKey(const Slice& internal_key) {
-explicit InternalKeyComparator(const Comparator* c) : user_comparator_(c) { }
+explicit InternalKeyComparator(const Comparator* c) : user_comparator_(c) {}
-virtual void FindShortestSeparator(
-std::string* start,
-const Slice& limit) const;
+virtual void FindShortestSeparator(std::string* start,
+const Slice& limit) const;

@@ -121,8 +118,9 @@ class InternalKeyComparator : public Comparator {
-explicit InternalFilterPolicy(const FilterPolicy* p) : user_policy_(p) { }
+explicit InternalFilterPolicy(const FilterPolicy* p) : user_policy_(p) {}

@@ -134,8 +132,9 @@ class InternalFilterPolicy : public FilterPolicy {
-InternalKey() { }  // Leave rep_ as empty to indicate it is invalid
+InternalKey() {}  // Leave rep_ as empty to indicate it is invalid

@@ -158,8 +157,8 @@ class InternalKey {
-inline int InternalKeyComparator::Compare(
-const InternalKey& a, const InternalKey& b) const {
+inline int InternalKeyComparator::Compare(const InternalKey& a,
+const InternalKey& b) const {

@@ -204,7 +203,7 @@ class LookupKey {
 char space_[200];  // Avoid allocation for short keys
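The dbformat.h hunks above revolve around the comment that a type and sequence number are packed together into 64 bits, with `kMaxSequenceNumber` capped at `((0x1ull << 56) - 1)`. A minimal, hedged sketch of that packing follows; `PackSequenceAndType` appears in the diff, but the `Unpack*` helper names below are chosen only for illustration and the real helpers (which also validate the type) live outside the lines shown here.

```
// Sketch only: sequence number in the high 56 bits, ValueType in the low 8.
#include <cassert>
#include <cstdint>

enum ValueType { kTypeDeletion = 0x0, kTypeValue = 0x1 };
typedef uint64_t SequenceNumber;
static const SequenceNumber kMaxSequenceNumber = ((0x1ull << 56) - 1);

inline uint64_t PackSequenceAndType(uint64_t seq, ValueType t) {
  assert(seq <= kMaxSequenceNumber);  // 56 bits available for the sequence
  return (seq << 8) | t;
}

// Illustrative inverses of the packing above.
inline SequenceNumber UnpackSequence(uint64_t packed) { return packed >> 8; }
inline ValueType UnpackType(uint64_t packed) {
  return static_cast<ValueType>(packed & 0xff);
}
```

Because the sequence number sits in the high bits, the internal-key comparator can order entries with the same user key by comparing this single 64-bit tag, placing newer sequence numbers first.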
db/dbformat_test.cc
@@ -8,8 +8,7 @@
-static std::string IKey(const std::string& user_key,
-uint64_t seq,
+static std::string IKey(const std::string& user_key, uint64_t seq,
 ValueType vt) {

@@ -28,9 +27,7 @@ static std::string ShortSuccessor(const std::string& s) {
-static void TestKey(const std::string& key,
-uint64_t seq,
-ValueType vt) {
+static void TestKey(const std::string& key, uint64_t seq, ValueType vt) {

@@ -44,16 +41,22 @@ static void TestKey(const std::string& key,
-class FormatTest { };
+class FormatTest {};
-const char* keys[] = { "", "k", "hello", "longggggggggggggggggggggg" };
+const char* keys[] = {"", "k", "hello", "longggggggggggggggggggggg"};
-const uint64_t seq[] = {
-1, 2, 3,
-(1ull << 8) - 1, 1ull << 8, (1ull << 8) + 1,
-(1ull << 16) - 1, 1ull << 16, (1ull << 16) + 1,
-(1ull << 32) - 1, 1ull << 32, (1ull << 32) + 1
-};
+const uint64_t seq[] = {1,
+2,
+3,
+(1ull << 8) - 1,
+1ull << 8,
+(1ull << 8) + 1,
+(1ull << 16) - 1,
+1ull << 16,
+(1ull << 16) + 1,
+(1ull << 32) - 1,
+1ull << 32,
+(1ull << 32) + 1};

@@ -65,37 +68,35 @@ TEST(FormatTest, InternalKey_EncodeDecode) {
 ASSERT_EQ(IKey("foo", 100, kTypeValue),
-Shorten(IKey("foo", 100, kTypeValue),
-IKey("foo", 99, kTypeValue)));
-ASSERT_EQ(IKey("foo", 100, kTypeValue),
-Shorten(IKey("foo", 100, kTypeValue),
-IKey("foo", 101, kTypeValue)));
-ASSERT_EQ(IKey("foo", 100, kTypeValue),
-Shorten(IKey("foo", 100, kTypeValue),
-IKey("foo", 100, kTypeValue)));
-ASSERT_EQ(IKey("foo", 100, kTypeValue),
-Shorten(IKey("foo", 100, kTypeValue),
-IKey("foo", 100, kTypeDeletion)));
+Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 99, kTypeValue)));
+ASSERT_EQ(
+IKey("foo", 100, kTypeValue),
+Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 101, kTypeValue)));
+ASSERT_EQ(
+IKey("foo", 100, kTypeValue),
+Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 100, kTypeValue)));
+ASSERT_EQ(
+IKey("foo", 100, kTypeValue),
+Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 100, kTypeDeletion)));
 // When user keys are misordered
 ASSERT_EQ(IKey("foo", 100, kTypeValue),
-Shorten(IKey("foo", 100, kTypeValue),
-IKey("bar", 99, kTypeValue)));
+Shorten(IKey("foo", 100, kTypeValue), IKey("bar", 99, kTypeValue)));
 // When user keys are different, but correctly ordered
-ASSERT_EQ(IKey("g", kMaxSequenceNumber, kValueTypeForSeek),
-Shorten(IKey("foo", 100, kTypeValue),
-IKey("hello", 200, kTypeValue)));
+ASSERT_EQ(
+IKey("g", kMaxSequenceNumber, kValueTypeForSeek),
+Shorten(IKey("foo", 100, kTypeValue), IKey("hello", 200, kTypeValue)));
 // When start user key is prefix of limit user key
-ASSERT_EQ(IKey("foo", 100, kTypeValue),
-Shorten(IKey("foo", 100, kTypeValue),
-IKey("foobar", 200, kTypeValue)));
+ASSERT_EQ(
+IKey("foo", 100, kTypeValue),
+Shorten(IKey("foo", 100, kTypeValue), IKey("foobar", 200, kTypeValue)));
 // When limit user key is prefix of start user key
-ASSERT_EQ(IKey("foobar", 100, kTypeValue),
-Shorten(IKey("foobar", 100, kTypeValue),
-IKey("foo", 200, kTypeValue)));
+ASSERT_EQ(
+IKey("foobar", 100, kTypeValue),
+Shorten(IKey("foobar", 100, kTypeValue), IKey("foo", 200, kTypeValue)));

@@ -107,6 +108,4 @@ TEST(FormatTest, InternalKeyShortestSuccessor) {
-int main(int argc, char** argv) {
-return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
db/dumpfile.cc
@@ -90,7 +90,6 @@ class WriteBatchItemPrinter : public WriteBatch::Handler {
 };
-
 // Called on every log record (each one of which is a WriteBatch)

@@ -216,9 +215,12 @@ Status DumpFile(Env* env, const std::string& fname, WritableFile* dst) {
 switch (ftype) {
-case kLogFile: return DumpLog(env, fname, dst);
-case kDescriptorFile: return DumpDescriptor(env, fname, dst);
-case kTableFile: return DumpTable(env, fname, dst);
+case kLogFile:
+return DumpLog(env, fname, dst);
+case kDescriptorFile:
+return DumpDescriptor(env, fname, dst);
+case kTableFile:
+return DumpTable(env, fname, dst);
 default:
 break;
db/fault_injection_test.cc
@@ -9,12 +9,12 @@
-#include "leveldb/db.h"
 #include "db/db_impl.h"
 #include "db/filename.h"
 #include "db/log_format.h"
 #include "db/version_set.h"
 #include "leveldb/cache.h"
+#include "leveldb/db.h"
 #include "leveldb/env.h"

@@ -56,8 +56,7 @@ Status Truncate(const std::string& filename, uint64_t length) {
-if (!s.ok())
-return s;
+if (!s.ok()) return s;

@@ -93,7 +92,7 @@ struct FileState {
-pos_at_last_flush_(-1) { }
+pos_at_last_flush_(-1) {}

@@ -108,8 +107,7 @@ struct FileState {
-TestWritableFile(const FileState& state,
-WritableFile* f,
+TestWritableFile(const FileState& state, WritableFile* f,
 FaultInjectionTestEnv* env);

@@ -130,7 +128,7 @@ class FaultInjectionTestEnv : public EnvWrapper {
-virtual ~FaultInjectionTestEnv() { }
+virtual ~FaultInjectionTestEnv() {}

@@ -165,13 +163,9 @@ class FaultInjectionTestEnv : public EnvWrapper {
-TestWritableFile::TestWritableFile(const FileState& state,
-WritableFile* f,
+TestWritableFile::TestWritableFile(const FileState& state, WritableFile* f,
 FaultInjectionTestEnv* env)
-: state_(state),
-target_(f),
-writable_file_opened_(true),
-env_(env) {
+: state_(state), target_(f), writable_file_opened_(true), env_(env) {

@@ -395,9 +389,7 @@ class FaultInjectionTest {
-void ReuseLogs(bool reuse) {
-options_.reuse_logs = reuse;
-}
+void ReuseLogs(bool reuse) { options_.reuse_logs = reuse; }

@@ -497,18 +489,17 @@ class FaultInjectionTest {
 void PartialCompactTestReopenWithFault(ResetMethod reset_method,
-int num_pre_sync,
-int num_post_sync) {
+int num_pre_sync, int num_post_sync) {
-ASSERT_OK(Verify(num_pre_sync, num_post_sync, FaultInjectionTest::VAL_EXPECT_ERROR));
+ASSERT_OK(Verify(num_pre_sync, num_post_sync,
+FaultInjectionTest::VAL_EXPECT_ERROR));
-void NoWriteTestPreFault() {
-}
+void NoWriteTestPreFault() {}

@@ -524,8 +515,7 @@ class FaultInjectionTest {
-PartialCompactTestReopenWithFault(RESET_DROP_UNSYNCED_DATA,
-num_pre_sync,
+PartialCompactTestReopenWithFault(RESET_DROP_UNSYNCED_DATA, num_pre_sync,
 num_post_sync);

@@ -535,8 +525,7 @@ class FaultInjectionTest {
 PartialCompactTestReopenWithFault(RESET_DELETE_UNSYNCED_FILES,
-num_pre_sync + num_post_sync,
-0);
+num_pre_sync + num_post_sync, 0);

@@ -556,6 +545,4 @@ TEST(FaultInjectionTest, FaultTestWithLogReuse) {
-int main(int argc, char** argv) {
-return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
db/filename.cc
@@ -2,9 +2,11 @@
+#include "db/filename.h"
+
 #include <ctype.h>
 #include <stdio.h>
-#include "db/filename.h"
 #include "db/dbformat.h"

@@ -19,8 +21,7 @@ static std::string MakeFileName(const std::string& dbname, uint64_t number,
 snprintf(buf, sizeof(buf), "/%06llu.%s",
-static_cast<unsigned long long>(number),
-suffix);
+static_cast<unsigned long long>(number), suffix);

@@ -51,9 +52,7 @@ std::string CurrentFileName(const std::string& dbname) {
-std::string LockFileName(const std::string& dbname) {
-return dbname + "/LOCK";
-}
+std::string LockFileName(const std::string& dbname) { return dbname + "/LOCK"; }

@@ -69,7 +68,6 @@ std::string OldInfoLogFileName(const std::string& dbname) {
 }
-
 // Owned filenames have the form:

@@ -77,8 +75,7 @@ std::string OldInfoLogFileName(const std::string& dbname) {
-bool ParseFileName(const std::string& filename,
-uint64_t* number,
+bool ParseFileName(const std::string& filename, uint64_t* number,
 FileType* type) {
db/filename.h
@@ -8,7 +8,9 @@
 #include <stdint.h>
+
 #include <string>
+
 #include "leveldb/slice.h"

@@ -69,8 +71,7 @@ std::string OldInfoLogFileName(const std::string& dbname);
-bool ParseFileName(const std::string& filename,
-uint64_t* number,
+bool ParseFileName(const std::string& filename, uint64_t* number,
 FileType* type);
db/filename_test.cc
@@ -11,7 +11,7 @@
-class FileNameTest { };
+class FileNameTest {};

@@ -24,17 +24,17 @@ TEST(FileNameTest, Parse) {
 } cases[] = {
-{ "100.log", 100, kLogFile },
-{ "0.log", 0, kLogFile },
-{ "0.sst", 0, kTableFile },
-{ "0.ldb", 0, kTableFile },
-{ "CURRENT", 0, kCurrentFile },
-{ "LOCK", 0, kDBLockFile },
-{ "MANIFEST-2", 2, kDescriptorFile },
-{ "MANIFEST-7", 7, kDescriptorFile },
-{ "LOG", 0, kInfoLogFile },
-{ "LOG.old", 0, kInfoLogFile },
-{ "18446744073709551615.log", 18446744073709551615ull, kLogFile },
+{"100.log", 100, kLogFile},
+{"0.log", 0, kLogFile},
+{"0.sst", 0, kTableFile},
+{"0.ldb", 0, kTableFile},
+{"CURRENT", 0, kCurrentFile},
+{"LOCK", 0, kDBLockFile},
+{"MANIFEST-2", 2, kDescriptorFile},
+{"MANIFEST-7", 7, kDescriptorFile},
+{"LOG", 0, kInfoLogFile},
+{"LOG.old", 0, kInfoLogFile},
+{"18446744073709551615.log", 18446744073709551615ull, kLogFile},
 };

@@ -44,30 +44,28 @@ TEST(FileNameTest, Parse) {
-static const char* errors[] = {
-"",
+static const char* errors[] = {"",
 "foo",
 "foo-dx-100.log",
 ".log",
 "",
 "manifest",
 "CURREN",
 "CURRENTX",
 "MANIFES",
 "MANIFEST",
 "MANIFEST-",
 "XMANIFEST-3",
 "MANIFEST-3x",
 "LOC",
 "LOCKx",
 "LO",
 "LOGx",
 "18446744073709551616.log",
 "184467440737095516150.log",
 "100",
 "100.",
-"100.lop"
-};
+"100.lop"};

@@ -130,6 +128,4 @@ TEST(FileNameTest, Construction) {
-int main(int argc, char** argv) {
-return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
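ParseFileName, whose signature is reflowed in the hunks above, is the routine that turns raw directory entries into typed file numbers. A hedged usage sketch follows; it assumes only the declarations already visible in this diff (db/filename.h and leveldb/env.h), and the `PrintLogFiles` scan itself is illustrative rather than part of the commit.

```
#include <stdint.h>
#include <stdio.h>

#include <string>
#include <vector>

#include "db/filename.h"
#include "leveldb/env.h"

// Illustrative only: print the numbers of all log files in a db directory.
void PrintLogFiles(leveldb::Env* env, const std::string& dbname) {
  std::vector<std::string> children;
  if (!env->GetChildren(dbname, &children).ok()) return;
  for (size_t i = 0; i < children.size(); i++) {
    uint64_t number;
    leveldb::FileType type;
    if (leveldb::ParseFileName(children[i], &number, &type) &&
        type == leveldb::kLogFile) {
      fprintf(stderr, "%s -> log #%llu\n", children[i].c_str(),
              static_cast<unsigned long long>(number));
    }
  }
}
```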
db/leveldbutil.cc
@@ -3,6 +3,7 @@
 #include <stdio.h>
+
 #include "leveldb/dumpfile.h"

@@ -38,11 +39,9 @@ bool HandleDumpCommand(Env* env, char** files, int num) {
 static void Usage() {
-fprintf(
-stderr,
-"Usage: leveldbutil command...\n"
-" dump files... -- dump contents of specified files\n"
-);
+fprintf(stderr,
+"Usage: leveldbutil command...\n"
+" dump files... -- dump contents of specified files\n");
 }

@@ -54,7 +53,7 @@ int main(int argc, char** argv) {
-ok = leveldb::HandleDumpCommand(env, argv+2, argc-2);
+ok = leveldb::HandleDumpCommand(env, argv + 2, argc - 2);
db/log_reader.cc
@@ -5,6 +5,7 @@
 #include <stdio.h>
+
 #include "leveldb/env.h"

@@ -12,8 +13,7 @@
-Reader::Reporter::~Reporter() {
-}
+Reader::Reporter::~Reporter() {}

@@ -26,12 +26,9 @@ Reader::Reader(SequentialFile* file, Reporter* reporter, bool checksum,
-resyncing_(initial_offset > 0) {
-}
+resyncing_(initial_offset > 0) {}
-Reader::~Reader() {
-delete[] backing_store_;
-}
+Reader::~Reader() { delete[] backing_store_; }

@@ -176,9 +173,7 @@ bool Reader::ReadRecord(Slice* record, std::string* scratch) {
-uint64_t Reader::LastRecordOffset() {
-return last_record_offset_;
-}
+uint64_t Reader::LastRecordOffset() { return last_record_offset_; }
db/log_reader.h
@@ -63,7 +63,7 @@ class Reader {
 bool eof_;  // Last Read() indicated EOF by returning < kBlockSize
140
db/log_test.cc
140
db/log_test.cc
@ -56,7 +56,7 @@ class LogTest {
|
|||||||
Slice contents_;
|
Slice contents_;
|
||||||
bool force_error_;
|
bool force_error_;
|
||||||
bool returned_partial_;
|
bool returned_partial_;
|
||||||
StringSource() : force_error_(false), returned_partial_(false) { }
|
StringSource() : force_error_(false), returned_partial_(false) {}
|
||||||
|
|
||||||
virtual Status Read(size_t n, Slice* result, char* scratch) {
|
virtual Status Read(size_t n, Slice* result, char* scratch) {
|
||||||
ASSERT_TRUE(!returned_partial_) << "must not Read() after eof/error";
|
ASSERT_TRUE(!returned_partial_) << "must not Read() after eof/error";
|
||||||
@ -93,7 +93,7 @@ class LogTest {
|
|||||||
size_t dropped_bytes_;
|
size_t dropped_bytes_;
|
||||||
std::string message_;
|
std::string message_;
|
||||||
|
|
||||||
ReportCollector() : dropped_bytes_(0) { }
|
ReportCollector() : dropped_bytes_(0) {}
|
||||||
virtual void Corruption(size_t bytes, const Status& status) {
|
virtual void Corruption(size_t bytes, const Status& status) {
|
||||||
dropped_bytes_ += bytes;
|
dropped_bytes_ += bytes;
|
||||||
message_.append(status.ToString());
|
message_.append(status.ToString());
|
||||||
@ -113,11 +113,11 @@ class LogTest {
|
|||||||
static int num_initial_offset_records_;
|
static int num_initial_offset_records_;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
LogTest() : reading_(false),
|
LogTest()
|
||||||
writer_(new Writer(&dest_)),
|
: reading_(false),
|
||||||
reader_(new Reader(&source_, &report_, true/*checksum*/,
|
writer_(new Writer(&dest_)),
|
||||||
0/*initial_offset*/)) {
|
reader_(new Reader(&source_, &report_, true /*checksum*/,
|
||||||
}
|
0 /*initial_offset*/)) {}
|
||||||
|
|
||||||
~LogTest() {
|
~LogTest() {
|
||||||
delete writer_;
|
delete writer_;
|
||||||
@ -134,9 +134,7 @@ class LogTest {
|
|||||||
writer_->AddRecord(Slice(msg));
|
writer_->AddRecord(Slice(msg));
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t WrittenBytes() const {
|
size_t WrittenBytes() const { return dest_.contents_.size(); }
|
||||||
return dest_.contents_.size();
|
|
||||||
}
|
|
||||||
|
|
||||||
std::string Read() {
|
std::string Read() {
|
||||||
if (!reading_) {
|
if (!reading_) {
|
||||||
@ -166,22 +164,16 @@ class LogTest {
|
|||||||
|
|
||||||
void FixChecksum(int header_offset, int len) {
|
void FixChecksum(int header_offset, int len) {
|
||||||
// Compute crc of type/len/data
|
// Compute crc of type/len/data
|
||||||
uint32_t crc = crc32c::Value(&dest_.contents_[header_offset+6], 1 + len);
|
uint32_t crc = crc32c::Value(&dest_.contents_[header_offset + 6], 1 + len);
|
||||||
crc = crc32c::Mask(crc);
|
crc = crc32c::Mask(crc);
|
||||||
EncodeFixed32(&dest_.contents_[header_offset], crc);
|
EncodeFixed32(&dest_.contents_[header_offset], crc);
|
||||||
}
|
}
|
||||||
|
|
||||||
void ForceError() {
|
void ForceError() { source_.force_error_ = true; }
|
||||||
source_.force_error_ = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t DroppedBytes() const {
|
size_t DroppedBytes() const { return report_.dropped_bytes_; }
|
||||||
return report_.dropped_bytes_;
|
|
||||||
}
|
|
||||||
|
|
||||||
std::string ReportMessage() const {
|
std::string ReportMessage() const { return report_.message_; }
|
||||||
return report_.message_;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns OK iff recorded error message contains "msg"
|
// Returns OK iff recorded error message contains "msg"
|
||||||
std::string MatchError(const std::string& msg) const {
|
std::string MatchError(const std::string& msg) const {
|
||||||
@ -202,14 +194,14 @@ class LogTest {
|
|||||||
|
|
||||||
void StartReadingAt(uint64_t initial_offset) {
|
void StartReadingAt(uint64_t initial_offset) {
|
||||||
delete reader_;
|
delete reader_;
|
||||||
reader_ = new Reader(&source_, &report_, true/*checksum*/, initial_offset);
|
reader_ = new Reader(&source_, &report_, true /*checksum*/, initial_offset);
|
||||||
}
|
}
|
||||||
|
|
||||||
void CheckOffsetPastEndReturnsNoRecords(uint64_t offset_past_end) {
|
void CheckOffsetPastEndReturnsNoRecords(uint64_t offset_past_end) {
|
||||||
WriteInitialOffsetLog();
|
WriteInitialOffsetLog();
|
||||||
reading_ = true;
|
reading_ = true;
|
||||||
source_.contents_ = Slice(dest_.contents_);
|
source_.contents_ = Slice(dest_.contents_);
|
||||||
Reader* offset_reader = new Reader(&source_, &report_, true/*checksum*/,
|
Reader* offset_reader = new Reader(&source_, &report_, true /*checksum*/,
|
||||||
WrittenBytes() + offset_past_end);
|
WrittenBytes() + offset_past_end);
|
||||||
Slice record;
|
Slice record;
|
||||||
std::string scratch;
|
std::string scratch;
|
||||||
@ -222,8 +214,8 @@ class LogTest {
|
|||||||
WriteInitialOffsetLog();
|
WriteInitialOffsetLog();
|
||||||
reading_ = true;
|
reading_ = true;
|
||||||
source_.contents_ = Slice(dest_.contents_);
|
source_.contents_ = Slice(dest_.contents_);
|
||||||
Reader* offset_reader = new Reader(&source_, &report_, true/*checksum*/,
|
Reader* offset_reader =
|
||||||
initial_offset);
|
new Reader(&source_, &report_, true /*checksum*/, initial_offset);
|
||||||
|
|
||||||
// Read all records from expected_record_offset through the last one.
|
// Read all records from expected_record_offset through the last one.
|
||||||
ASSERT_LT(expected_record_offset, num_initial_offset_records_);
|
ASSERT_LT(expected_record_offset, num_initial_offset_records_);
|
||||||
@ -242,34 +234,30 @@ class LogTest {
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
size_t LogTest::initial_offset_record_sizes_[] =
|
size_t LogTest::initial_offset_record_sizes_[] = {
|
||||||
{10000, // Two sizable records in first block
|
10000, // Two sizable records in first block
|
||||||
10000,
|
10000,
|
||||||
2 * log::kBlockSize - 1000, // Span three blocks
|
2 * log::kBlockSize - 1000, // Span three blocks
|
||||||
1,
|
1,
|
||||||
13716, // Consume all but two bytes of block 3.
|
13716, // Consume all but two bytes of block 3.
|
||||||
log::kBlockSize - kHeaderSize, // Consume the entirety of block 4.
|
log::kBlockSize - kHeaderSize, // Consume the entirety of block 4.
|
||||||
};
|
};
|
||||||
|
|
||||||
uint64_t LogTest::initial_offset_last_record_offsets_[] =
|
uint64_t LogTest::initial_offset_last_record_offsets_[] = {
|
||||||
{0,
|
0,
|
||||||
kHeaderSize + 10000,
|
kHeaderSize + 10000,
|
||||||
2 * (kHeaderSize + 10000),
|
2 * (kHeaderSize + 10000),
|
||||||
2 * (kHeaderSize + 10000) +
|
2 * (kHeaderSize + 10000) + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize,
|
||||||
(2 * log::kBlockSize - 1000) + 3 * kHeaderSize,
|
2 * (kHeaderSize + 10000) + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize +
|
||||||
2 * (kHeaderSize + 10000) +
|
kHeaderSize + 1,
|
||||||
(2 * log::kBlockSize - 1000) + 3 * kHeaderSize
|
3 * log::kBlockSize,
|
||||||
+ kHeaderSize + 1,
|
};
|
||||||
3 * log::kBlockSize,
|
|
||||||
};
|
|
||||||
|
|
||||||
// LogTest::initial_offset_last_record_offsets_ must be defined before this.
|
// LogTest::initial_offset_last_record_offsets_ must be defined before this.
|
||||||
int LogTest::num_initial_offset_records_ =
|
int LogTest::num_initial_offset_records_ =
|
||||||
sizeof(LogTest::initial_offset_last_record_offsets_)/sizeof(uint64_t);
|
sizeof(LogTest::initial_offset_last_record_offsets_) / sizeof(uint64_t);
|
||||||
|
|
||||||
TEST(LogTest, Empty) {
|
TEST(LogTest, Empty) { ASSERT_EQ("EOF", Read()); }
|
||||||
ASSERT_EQ("EOF", Read());
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST(LogTest, ReadWrite) {
|
TEST(LogTest, ReadWrite) {
|
||||||
Write("foo");
|
Write("foo");
|
||||||
@ -306,7 +294,7 @@ TEST(LogTest, Fragmentation) {
|
|||||||
|
|
||||||
TEST(LogTest, MarginalTrailer) {
|
TEST(LogTest, MarginalTrailer) {
|
||||||
// Make a trailer that is exactly the same length as an empty record.
|
// Make a trailer that is exactly the same length as an empty record.
|
||||||
const int n = kBlockSize - 2*kHeaderSize;
|
const int n = kBlockSize - 2 * kHeaderSize;
|
||||||
Write(BigString("foo", n));
|
Write(BigString("foo", n));
|
||||||
ASSERT_EQ(kBlockSize - kHeaderSize, WrittenBytes());
|
ASSERT_EQ(kBlockSize - kHeaderSize, WrittenBytes());
|
||||||
Write("");
|
Write("");
|
||||||
@ -319,7 +307,7 @@ TEST(LogTest, MarginalTrailer) {
|
|||||||
|
|
||||||
TEST(LogTest, MarginalTrailer2) {
|
TEST(LogTest, MarginalTrailer2) {
|
||||||
// Make a trailer that is exactly the same length as an empty record.
|
// Make a trailer that is exactly the same length as an empty record.
|
||||||
const int n = kBlockSize - 2*kHeaderSize;
|
const int n = kBlockSize - 2 * kHeaderSize;
|
||||||
Write(BigString("foo", n));
|
Write(BigString("foo", n));
|
||||||
ASSERT_EQ(kBlockSize - kHeaderSize, WrittenBytes());
|
ASSERT_EQ(kBlockSize - kHeaderSize, WrittenBytes());
|
||||||
Write("bar");
|
Write("bar");
|
||||||
@ -331,7 +319,7 @@ TEST(LogTest, MarginalTrailer2) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
TEST(LogTest, ShortTrailer) {
|
TEST(LogTest, ShortTrailer) {
|
||||||
const int n = kBlockSize - 2*kHeaderSize + 4;
|
const int n = kBlockSize - 2 * kHeaderSize + 4;
|
||||||
Write(BigString("foo", n));
|
Write(BigString("foo", n));
|
||||||
ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes());
|
ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes());
|
||||||
Write("");
|
Write("");
|
||||||
@ -343,7 +331,7 @@ TEST(LogTest, ShortTrailer) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
TEST(LogTest, AlignedEof) {
|
TEST(LogTest, AlignedEof) {
|
||||||
const int n = kBlockSize - 2*kHeaderSize + 4;
|
const int n = kBlockSize - 2 * kHeaderSize + 4;
|
||||||
Write(BigString("foo", n));
|
Write(BigString("foo", n));
|
||||||
ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes());
|
ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes());
|
||||||
ASSERT_EQ(BigString("foo", n), Read());
|
ASSERT_EQ(BigString("foo", n), Read());
|
||||||
@ -394,7 +382,7 @@ TEST(LogTest, BadRecordType) {
|
|||||||
|
|
||||||
TEST(LogTest, TruncatedTrailingRecordIsIgnored) {
|
TEST(LogTest, TruncatedTrailingRecordIsIgnored) {
|
||||||
Write("foo");
|
Write("foo");
|
||||||
ShrinkSize(4); // Drop all payload as well as a header byte
|
ShrinkSize(4); // Drop all payload as well as a header byte
|
||||||
ASSERT_EQ("EOF", Read());
|
ASSERT_EQ("EOF", Read());
|
||||||
// Truncated last record is ignored, not treated as an error.
|
// Truncated last record is ignored, not treated as an error.
|
||||||
ASSERT_EQ(0, DroppedBytes());
|
ASSERT_EQ(0, DroppedBytes());
|
||||||
@ -492,7 +480,7 @@ TEST(LogTest, SkipIntoMultiRecord) {
|
|||||||
// If initial_offset points to a record after first(R1) but before first(R2)
|
// If initial_offset points to a record after first(R1) but before first(R2)
|
||||||
// incomplete fragment errors are not actual errors, and must be suppressed
|
// incomplete fragment errors are not actual errors, and must be suppressed
|
||||||
// until a new first or full record is encountered.
|
// until a new first or full record is encountered.
|
||||||
Write(BigString("foo", 3*kBlockSize));
|
Write(BigString("foo", 3 * kBlockSize));
|
||||||
Write("correct");
|
Write("correct");
|
||||||
StartReadingAt(kBlockSize);
|
StartReadingAt(kBlockSize);
|
||||||
|
|
||||||
@@ -514,44 +502,30 @@ TEST(LogTest, ErrorJoinsRecords) {
   Write("correct");
 
   // Wipe the middle block
-  for (int offset = kBlockSize; offset < 2*kBlockSize; offset++) {
+  for (int offset = kBlockSize; offset < 2 * kBlockSize; offset++) {
     SetByte(offset, 'x');
   }
 
   ASSERT_EQ("correct", Read());
   ASSERT_EQ("EOF", Read());
   const size_t dropped = DroppedBytes();
-  ASSERT_LE(dropped, 2*kBlockSize + 100);
-  ASSERT_GE(dropped, 2*kBlockSize);
+  ASSERT_LE(dropped, 2 * kBlockSize + 100);
+  ASSERT_GE(dropped, 2 * kBlockSize);
 }
 
-TEST(LogTest, ReadStart) {
-  CheckInitialOffsetRecord(0, 0);
-}
+TEST(LogTest, ReadStart) { CheckInitialOffsetRecord(0, 0); }
 
-TEST(LogTest, ReadSecondOneOff) {
-  CheckInitialOffsetRecord(1, 1);
-}
+TEST(LogTest, ReadSecondOneOff) { CheckInitialOffsetRecord(1, 1); }
 
-TEST(LogTest, ReadSecondTenThousand) {
-  CheckInitialOffsetRecord(10000, 1);
-}
+TEST(LogTest, ReadSecondTenThousand) { CheckInitialOffsetRecord(10000, 1); }
 
-TEST(LogTest, ReadSecondStart) {
-  CheckInitialOffsetRecord(10007, 1);
-}
+TEST(LogTest, ReadSecondStart) { CheckInitialOffsetRecord(10007, 1); }
 
-TEST(LogTest, ReadThirdOneOff) {
-  CheckInitialOffsetRecord(10008, 2);
-}
+TEST(LogTest, ReadThirdOneOff) { CheckInitialOffsetRecord(10008, 2); }
 
-TEST(LogTest, ReadThirdStart) {
-  CheckInitialOffsetRecord(20014, 2);
-}
+TEST(LogTest, ReadThirdStart) { CheckInitialOffsetRecord(20014, 2); }
 
-TEST(LogTest, ReadFourthOneOff) {
-  CheckInitialOffsetRecord(20015, 3);
-}
+TEST(LogTest, ReadFourthOneOff) { CheckInitialOffsetRecord(20015, 3); }
 
 TEST(LogTest, ReadFourthFirstBlockTrailer) {
   CheckInitialOffsetRecord(log::kBlockSize - 4, 3);
@@ -575,17 +549,11 @@ TEST(LogTest, ReadInitialOffsetIntoBlockPadding) {
   CheckInitialOffsetRecord(3 * log::kBlockSize - 3, 5);
 }
 
-TEST(LogTest, ReadEnd) {
-  CheckOffsetPastEndReturnsNoRecords(0);
-}
+TEST(LogTest, ReadEnd) { CheckOffsetPastEndReturnsNoRecords(0); }
 
-TEST(LogTest, ReadPastEnd) {
-  CheckOffsetPastEndReturnsNoRecords(5);
-}
+TEST(LogTest, ReadPastEnd) { CheckOffsetPastEndReturnsNoRecords(5); }
 
 }  // namespace log
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
|
@ -5,6 +5,7 @@
|
|||||||
#include "db/log_writer.h"
|
#include "db/log_writer.h"
|
||||||
|
|
||||||
#include <stdint.h>
|
#include <stdint.h>
|
||||||
|
|
||||||
#include "leveldb/env.h"
|
#include "leveldb/env.h"
|
||||||
#include "util/coding.h"
|
#include "util/coding.h"
|
||||||
#include "util/crc32c.h"
|
#include "util/crc32c.h"
|
||||||
@ -19,9 +20,7 @@ static void InitTypeCrc(uint32_t* type_crc) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Writer::Writer(WritableFile* dest)
|
Writer::Writer(WritableFile* dest) : dest_(dest), block_offset_(0) {
|
||||||
: dest_(dest),
|
|
||||||
block_offset_(0) {
|
|
||||||
InitTypeCrc(type_crc_);
|
InitTypeCrc(type_crc_);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -30,8 +29,7 @@ Writer::Writer(WritableFile* dest, uint64_t dest_length)
|
|||||||
InitTypeCrc(type_crc_);
|
InitTypeCrc(type_crc_);
|
||||||
}
|
}
|
||||||
|
|
||||||
Writer::~Writer() {
|
Writer::~Writer() {}
|
||||||
}
|
|
||||||
|
|
||||||
Status Writer::AddRecord(const Slice& slice) {
|
Status Writer::AddRecord(const Slice& slice) {
|
||||||
const char* ptr = slice.data();
|
const char* ptr = slice.data();
|
||||||
@ -49,7 +47,7 @@ Status Writer::AddRecord(const Slice& slice) {
|
|||||||
// Switch to a new block
|
// Switch to a new block
|
||||||
if (leftover > 0) {
|
if (leftover > 0) {
|
||||||
// Fill the trailer (literal below relies on kHeaderSize being 7)
|
// Fill the trailer (literal below relies on kHeaderSize being 7)
|
||||||
assert(kHeaderSize == 7);
|
static_assert(kHeaderSize == 7, "");
|
||||||
dest_->Append(Slice("\x00\x00\x00\x00\x00\x00", leftover));
|
dest_->Append(Slice("\x00\x00\x00\x00\x00\x00", leftover));
|
||||||
}
|
}
|
||||||
block_offset_ = 0;
|
block_offset_ = 0;
|
||||||
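
The hunk above also swaps a runtime `assert` for a `static_assert` where the condition involves only compile-time constants, one of the clang-tidy cleanups mentioned in the commit message. A minimal standalone sketch of the difference; the constant name here is illustrative, not taken from the tree:

```
#include <cassert>
#include <cstddef>

// Checked at compile time: a violation fails the build instead of aborting
// only in debug builds at runtime.
constexpr std::size_t kExampleHeaderSize = 7;
static_assert(kExampleHeaderSize == 7, "header layout assumed below");

int main() {
  // A runtime assert still makes sense for values that only exist at runtime.
  std::size_t payload = 12;
  assert(payload <= 0xffff);  // must fit in the two length bytes
  (void)payload;
  return 0;
}
```
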
@ -81,30 +79,31 @@ Status Writer::AddRecord(const Slice& slice) {
|
|||||||
return s;
|
return s;
|
||||||
}
|
}
|
||||||
|
|
||||||
Status Writer::EmitPhysicalRecord(RecordType t, const char* ptr, size_t n) {
|
Status Writer::EmitPhysicalRecord(RecordType t, const char* ptr,
|
||||||
assert(n <= 0xffff); // Must fit in two bytes
|
size_t length) {
|
||||||
assert(block_offset_ + kHeaderSize + n <= kBlockSize);
|
assert(length <= 0xffff); // Must fit in two bytes
|
||||||
|
assert(block_offset_ + kHeaderSize + length <= kBlockSize);
|
||||||
|
|
||||||
// Format the header
|
// Format the header
|
||||||
char buf[kHeaderSize];
|
char buf[kHeaderSize];
|
||||||
buf[4] = static_cast<char>(n & 0xff);
|
buf[4] = static_cast<char>(length & 0xff);
|
||||||
buf[5] = static_cast<char>(n >> 8);
|
buf[5] = static_cast<char>(length >> 8);
|
||||||
buf[6] = static_cast<char>(t);
|
buf[6] = static_cast<char>(t);
|
||||||
|
|
||||||
// Compute the crc of the record type and the payload.
|
// Compute the crc of the record type and the payload.
|
||||||
uint32_t crc = crc32c::Extend(type_crc_[t], ptr, n);
|
uint32_t crc = crc32c::Extend(type_crc_[t], ptr, length);
|
||||||
crc = crc32c::Mask(crc); // Adjust for storage
|
crc = crc32c::Mask(crc); // Adjust for storage
|
||||||
EncodeFixed32(buf, crc);
|
EncodeFixed32(buf, crc);
|
||||||
|
|
||||||
// Write the header and the payload
|
// Write the header and the payload
|
||||||
Status s = dest_->Append(Slice(buf, kHeaderSize));
|
Status s = dest_->Append(Slice(buf, kHeaderSize));
|
||||||
if (s.ok()) {
|
if (s.ok()) {
|
||||||
s = dest_->Append(Slice(ptr, n));
|
s = dest_->Append(Slice(ptr, length));
|
||||||
if (s.ok()) {
|
if (s.ok()) {
|
||||||
s = dest_->Flush();
|
s = dest_->Flush();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
block_offset_ += kHeaderSize + n;
|
block_offset_ += kHeaderSize + length;
|
||||||
return s;
|
return s;
|
||||||
}
|
}
|
||||||
|
|
||||||
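
For context on the `EmitPhysicalRecord` changes above: the seven header bytes it writes are a 4-byte CRC, a 2-byte little-endian payload length, and a 1-byte record type, which is why the surrounding code checks `kHeaderSize == 7` and that the length fits in two bytes. A rough standalone sketch of that packing; the helper below is a stand-in for the library's fixed-width encoding routines, not code from the tree:

```
#include <cstdint>

// Illustrative only: mirrors the 7-byte header layout (crc, length, type)
// visible in the diff above.
void PackHeader(char* buf, uint32_t masked_crc, uint16_t length, uint8_t type) {
  // 4-byte CRC, stored low byte first like EncodeFixed32 would.
  buf[0] = static_cast<char>(masked_crc & 0xff);
  buf[1] = static_cast<char>((masked_crc >> 8) & 0xff);
  buf[2] = static_cast<char>((masked_crc >> 16) & 0xff);
  buf[3] = static_cast<char>((masked_crc >> 24) & 0xff);
  // 2-byte payload length, low byte first.
  buf[4] = static_cast<char>(length & 0xff);
  buf[5] = static_cast<char>(length >> 8);
  // 1-byte record type.
  buf[6] = static_cast<char>(type);
}

int main() {
  char header[7];
  PackHeader(header, 0xdeadbeefu, 12, 1);  // 1 is used here as an example type
  return 0;
}
```
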
|
@ -6,6 +6,7 @@
|
|||||||
#define STORAGE_LEVELDB_DB_LOG_WRITER_H_
|
#define STORAGE_LEVELDB_DB_LOG_WRITER_H_
|
||||||
|
|
||||||
#include <stdint.h>
|
#include <stdint.h>
|
||||||
|
|
||||||
#include "db/log_format.h"
|
#include "db/log_format.h"
|
||||||
#include "leveldb/slice.h"
|
#include "leveldb/slice.h"
|
||||||
#include "leveldb/status.h"
|
#include "leveldb/status.h"
|
||||||
@ -34,7 +35,7 @@ class Writer {
|
|||||||
|
|
||||||
private:
|
private:
|
||||||
WritableFile* dest_;
|
WritableFile* dest_;
|
||||||
int block_offset_; // Current offset in block
|
int block_offset_; // Current offset in block
|
||||||
|
|
||||||
// crc32c values for all supported record types. These are
|
// crc32c values for all supported record types. These are
|
||||||
// pre-computed to reduce the overhead of computing the crc of the
|
// pre-computed to reduce the overhead of computing the crc of the
|
||||||
|
@ -18,20 +18,15 @@ static Slice GetLengthPrefixedSlice(const char* data) {
|
|||||||
return Slice(p, len);
|
return Slice(p, len);
|
||||||
}
|
}
|
||||||
|
|
||||||
MemTable::MemTable(const InternalKeyComparator& cmp)
|
MemTable::MemTable(const InternalKeyComparator& comparator)
|
||||||
: comparator_(cmp),
|
: comparator_(comparator), refs_(0), table_(comparator_, &arena_) {}
|
||||||
refs_(0),
|
|
||||||
table_(comparator_, &arena_) {
|
|
||||||
}
|
|
||||||
|
|
||||||
MemTable::~MemTable() {
|
MemTable::~MemTable() { assert(refs_ == 0); }
|
||||||
assert(refs_ == 0);
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t MemTable::ApproximateMemoryUsage() { return arena_.MemoryUsage(); }
|
size_t MemTable::ApproximateMemoryUsage() { return arena_.MemoryUsage(); }
|
||||||
|
|
||||||
int MemTable::KeyComparator::operator()(const char* aptr, const char* bptr)
|
int MemTable::KeyComparator::operator()(const char* aptr,
|
||||||
const {
|
const char* bptr) const {
|
||||||
// Internal keys are encoded as length-prefixed strings.
|
// Internal keys are encoded as length-prefixed strings.
|
||||||
Slice a = GetLengthPrefixedSlice(aptr);
|
Slice a = GetLengthPrefixedSlice(aptr);
|
||||||
Slice b = GetLengthPrefixedSlice(bptr);
|
Slice b = GetLengthPrefixedSlice(bptr);
|
||||||
@ -48,9 +43,9 @@ static const char* EncodeKey(std::string* scratch, const Slice& target) {
|
|||||||
return scratch->data();
|
return scratch->data();
|
||||||
}
|
}
|
||||||
|
|
||||||
class MemTableIterator: public Iterator {
|
class MemTableIterator : public Iterator {
|
||||||
public:
|
public:
|
||||||
explicit MemTableIterator(MemTable::Table* table) : iter_(table) { }
|
explicit MemTableIterator(MemTable::Table* table) : iter_(table) {}
|
||||||
|
|
||||||
virtual bool Valid() const { return iter_.Valid(); }
|
virtual bool Valid() const { return iter_.Valid(); }
|
||||||
virtual void Seek(const Slice& k) { iter_.Seek(EncodeKey(&tmp_, k)); }
|
virtual void Seek(const Slice& k) { iter_.Seek(EncodeKey(&tmp_, k)); }
|
||||||
@ -68,19 +63,16 @@ class MemTableIterator: public Iterator {
|
|||||||
|
|
||||||
private:
|
private:
|
||||||
MemTable::Table::Iterator iter_;
|
MemTable::Table::Iterator iter_;
|
||||||
std::string tmp_; // For passing to EncodeKey
|
std::string tmp_; // For passing to EncodeKey
|
||||||
|
|
||||||
// No copying allowed
|
// No copying allowed
|
||||||
MemTableIterator(const MemTableIterator&);
|
MemTableIterator(const MemTableIterator&);
|
||||||
void operator=(const MemTableIterator&);
|
void operator=(const MemTableIterator&);
|
||||||
};
|
};
|
||||||
|
|
||||||
Iterator* MemTable::NewIterator() {
|
Iterator* MemTable::NewIterator() { return new MemTableIterator(&table_); }
|
||||||
return new MemTableIterator(&table_);
|
|
||||||
}
|
|
||||||
|
|
||||||
void MemTable::Add(SequenceNumber s, ValueType type,
|
void MemTable::Add(SequenceNumber s, ValueType type, const Slice& key,
|
||||||
const Slice& key,
|
|
||||||
const Slice& value) {
|
const Slice& value) {
|
||||||
// Format of an entry is concatenation of:
|
// Format of an entry is concatenation of:
|
||||||
// key_size : varint32 of internal_key.size()
|
// key_size : varint32 of internal_key.size()
|
||||||
@ -90,9 +82,9 @@ void MemTable::Add(SequenceNumber s, ValueType type,
|
|||||||
size_t key_size = key.size();
|
size_t key_size = key.size();
|
||||||
size_t val_size = value.size();
|
size_t val_size = value.size();
|
||||||
size_t internal_key_size = key_size + 8;
|
size_t internal_key_size = key_size + 8;
|
||||||
const size_t encoded_len =
|
const size_t encoded_len = VarintLength(internal_key_size) +
|
||||||
VarintLength(internal_key_size) + internal_key_size +
|
internal_key_size + VarintLength(val_size) +
|
||||||
VarintLength(val_size) + val_size;
|
val_size;
|
||||||
char* buf = arena_.Allocate(encoded_len);
|
char* buf = arena_.Allocate(encoded_len);
|
||||||
char* p = EncodeVarint32(buf, internal_key_size);
|
char* p = EncodeVarint32(buf, internal_key_size);
|
||||||
memcpy(p, key.data(), key_size);
|
memcpy(p, key.data(), key_size);
|
||||||
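
The reflowed `encoded_len` expression above is just the arithmetic for the entry layout spelled out in the preceding comment: varint32 key size, the internal key bytes (user key plus an 8-byte tag), varint32 value size, then the value. A self-contained sketch of that size computation, with a local varint-length helper standing in for leveldb's `VarintLength`:

```
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <string>

// Bytes needed to store v as a varint (illustrative reimplementation).
size_t VarintLen(uint64_t v) {
  size_t len = 1;
  while (v >= 128) {
    v >>= 7;
    ++len;
  }
  return len;
}

int main() {
  std::string key = "user_key";
  std::string value = "value_bytes";
  const size_t internal_key_size = key.size() + 8;  // user key + 8-byte tag
  const size_t encoded_len = VarintLen(internal_key_size) + internal_key_size +
                             VarintLen(value.size()) + value.size();
  std::cout << "entry needs " << encoded_len << " bytes\n";
  return 0;
}
```
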
@ -121,10 +113,9 @@ bool MemTable::Get(const LookupKey& key, std::string* value, Status* s) {
|
|||||||
// all entries with overly large sequence numbers.
|
// all entries with overly large sequence numbers.
|
||||||
const char* entry = iter.key();
|
const char* entry = iter.key();
|
||||||
uint32_t key_length;
|
uint32_t key_length;
|
||||||
const char* key_ptr = GetVarint32Ptr(entry, entry+5, &key_length);
|
const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
|
||||||
if (comparator_.comparator.user_comparator()->Compare(
|
if (comparator_.comparator.user_comparator()->Compare(
|
||||||
Slice(key_ptr, key_length - 8),
|
Slice(key_ptr, key_length - 8), key.user_key()) == 0) {
|
||||||
key.user_key()) == 0) {
|
|
||||||
// Correct user key
|
// Correct user key
|
||||||
const uint64_t tag = DecodeFixed64(key_ptr + key_length - 8);
|
const uint64_t tag = DecodeFixed64(key_ptr + key_length - 8);
|
||||||
switch (static_cast<ValueType>(tag & 0xff)) {
|
switch (static_cast<ValueType>(tag & 0xff)) {
|
||||||
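
The `tag` decoded in the hunk above packs the sequence number and the value type into one 64-bit word: the low byte is the type (hence `tag & 0xff`) and the upper 56 bits are the sequence number. A small sketch of that packing and unpacking; the function name and the example type value are illustrative:

```
#include <cassert>
#include <cstdint>

uint64_t PackTag(uint64_t sequence, uint8_t type) {
  // Sequence occupies the upper 56 bits, the type the low byte.
  return (sequence << 8) | type;
}

int main() {
  const uint64_t tag = PackTag(12345, 1);
  const uint8_t type = static_cast<uint8_t>(tag & 0xff);
  const uint64_t sequence = tag >> 8;
  assert(type == 1);
  assert(sequence == 12345);
  return 0;
}
```
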
|
@ -6,9 +6,10 @@
|
|||||||
#define STORAGE_LEVELDB_DB_MEMTABLE_H_
|
#define STORAGE_LEVELDB_DB_MEMTABLE_H_
|
||||||
|
|
||||||
#include <string>
|
#include <string>
|
||||||
#include "leveldb/db.h"
|
|
||||||
#include "db/dbformat.h"
|
#include "db/dbformat.h"
|
||||||
#include "db/skiplist.h"
|
#include "db/skiplist.h"
|
||||||
|
#include "leveldb/db.h"
|
||||||
#include "util/arena.h"
|
#include "util/arena.h"
|
||||||
|
|
||||||
namespace leveldb {
|
namespace leveldb {
|
||||||
@ -49,8 +50,7 @@ class MemTable {
|
|||||||
// Add an entry into memtable that maps key to value at the
|
// Add an entry into memtable that maps key to value at the
|
||||||
// specified sequence number and with the specified type.
|
// specified sequence number and with the specified type.
|
||||||
// Typically value will be empty if type==kTypeDeletion.
|
// Typically value will be empty if type==kTypeDeletion.
|
||||||
void Add(SequenceNumber seq, ValueType type,
|
void Add(SequenceNumber seq, ValueType type, const Slice& key,
|
||||||
const Slice& key,
|
|
||||||
const Slice& value);
|
const Slice& value);
|
||||||
|
|
||||||
// If memtable contains a value for key, store it in *value and return true.
|
// If memtable contains a value for key, store it in *value and return true.
|
||||||
@ -64,7 +64,7 @@ class MemTable {
|
|||||||
|
|
||||||
struct KeyComparator {
|
struct KeyComparator {
|
||||||
const InternalKeyComparator comparator;
|
const InternalKeyComparator comparator;
|
||||||
explicit KeyComparator(const InternalKeyComparator& c) : comparator(c) { }
|
explicit KeyComparator(const InternalKeyComparator& c) : comparator(c) {}
|
||||||
int operator()(const char* a, const char* b) const;
|
int operator()(const char* a, const char* b) const;
|
||||||
};
|
};
|
||||||
friend class MemTableIterator;
|
friend class MemTableIterator;
|
||||||
|
@ -86,15 +86,13 @@ class RecoveryTest {
|
|||||||
std::string current;
|
std::string current;
|
||||||
ASSERT_OK(ReadFileToString(env_, CurrentFileName(dbname_), ¤t));
|
ASSERT_OK(ReadFileToString(env_, CurrentFileName(dbname_), ¤t));
|
||||||
size_t len = current.size();
|
size_t len = current.size();
|
||||||
if (len > 0 && current[len-1] == '\n') {
|
if (len > 0 && current[len - 1] == '\n') {
|
||||||
current.resize(len - 1);
|
current.resize(len - 1);
|
||||||
}
|
}
|
||||||
return dbname_ + "/" + current;
|
return dbname_ + "/" + current;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::string LogName(uint64_t number) {
|
std::string LogName(uint64_t number) { return LogFileName(dbname_, number); }
|
||||||
return LogFileName(dbname_, number);
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t DeleteLogFiles() {
|
size_t DeleteLogFiles() {
|
||||||
// Linux allows unlinking open files, but Windows does not.
|
// Linux allows unlinking open files, but Windows does not.
|
||||||
@ -107,13 +105,9 @@ class RecoveryTest {
|
|||||||
return logs.size();
|
return logs.size();
|
||||||
}
|
}
|
||||||
|
|
||||||
void DeleteManifestFile() {
|
void DeleteManifestFile() { ASSERT_OK(env_->DeleteFile(ManifestFileName())); }
|
||||||
ASSERT_OK(env_->DeleteFile(ManifestFileName()));
|
|
||||||
}
|
|
||||||
|
|
||||||
uint64_t FirstLogFile() {
|
uint64_t FirstLogFile() { return GetFiles(kLogFile)[0]; }
|
||||||
return GetFiles(kLogFile)[0];
|
|
||||||
}
|
|
||||||
|
|
||||||
std::vector<uint64_t> GetFiles(FileType t) {
|
std::vector<uint64_t> GetFiles(FileType t) {
|
||||||
std::vector<std::string> filenames;
|
std::vector<std::string> filenames;
|
||||||
@ -129,13 +123,9 @@ class RecoveryTest {
|
|||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
int NumLogs() {
|
int NumLogs() { return GetFiles(kLogFile).size(); }
|
||||||
return GetFiles(kLogFile).size();
|
|
||||||
}
|
|
||||||
|
|
||||||
int NumTables() {
|
int NumTables() { return GetFiles(kTableFile).size(); }
|
||||||
return GetFiles(kTableFile).size();
|
|
||||||
}
|
|
||||||
|
|
||||||
uint64_t FileSize(const std::string& fname) {
|
uint64_t FileSize(const std::string& fname) {
|
||||||
uint64_t result;
|
uint64_t result;
|
||||||
@ -143,9 +133,7 @@ class RecoveryTest {
|
|||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
void CompactMemTable() {
|
void CompactMemTable() { dbfull()->TEST_CompactMemTable(); }
|
||||||
dbfull()->TEST_CompactMemTable();
|
|
||||||
}
|
|
||||||
|
|
||||||
// Directly construct a log file that sets key to val.
|
// Directly construct a log file that sets key to val.
|
||||||
void MakeLogFile(uint64_t lognum, SequenceNumber seq, Slice key, Slice val) {
|
void MakeLogFile(uint64_t lognum, SequenceNumber seq, Slice key, Slice val) {
|
||||||
@ -197,7 +185,7 @@ TEST(RecoveryTest, LargeManifestCompacted) {
|
|||||||
uint64_t len = FileSize(old_manifest);
|
uint64_t len = FileSize(old_manifest);
|
||||||
WritableFile* file;
|
WritableFile* file;
|
||||||
ASSERT_OK(env()->NewAppendableFile(old_manifest, &file));
|
ASSERT_OK(env()->NewAppendableFile(old_manifest, &file));
|
||||||
std::string zeroes(3*1048576 - static_cast<size_t>(len), 0);
|
std::string zeroes(3 * 1048576 - static_cast<size_t>(len), 0);
|
||||||
ASSERT_OK(file->Append(zeroes));
|
ASSERT_OK(file->Append(zeroes));
|
||||||
ASSERT_OK(file->Flush());
|
ASSERT_OK(file->Flush());
|
||||||
delete file;
|
delete file;
|
||||||
@ -270,7 +258,7 @@ TEST(RecoveryTest, MultipleMemTables) {
|
|||||||
// Force creation of multiple memtables by reducing the write buffer size.
|
// Force creation of multiple memtables by reducing the write buffer size.
|
||||||
Options opt;
|
Options opt;
|
||||||
opt.reuse_logs = true;
|
opt.reuse_logs = true;
|
||||||
opt.write_buffer_size = (kNum*100) / 2;
|
opt.write_buffer_size = (kNum * 100) / 2;
|
||||||
Open(&opt);
|
Open(&opt);
|
||||||
ASSERT_LE(2, NumTables());
|
ASSERT_LE(2, NumTables());
|
||||||
ASSERT_EQ(1, NumLogs());
|
ASSERT_EQ(1, NumLogs());
|
||||||
@ -289,16 +277,16 @@ TEST(RecoveryTest, MultipleLogFiles) {
|
|||||||
|
|
||||||
// Make a bunch of uncompacted log files.
|
// Make a bunch of uncompacted log files.
|
||||||
uint64_t old_log = FirstLogFile();
|
uint64_t old_log = FirstLogFile();
|
||||||
MakeLogFile(old_log+1, 1000, "hello", "world");
|
MakeLogFile(old_log + 1, 1000, "hello", "world");
|
||||||
MakeLogFile(old_log+2, 1001, "hi", "there");
|
MakeLogFile(old_log + 2, 1001, "hi", "there");
|
||||||
MakeLogFile(old_log+3, 1002, "foo", "bar2");
|
MakeLogFile(old_log + 3, 1002, "foo", "bar2");
|
||||||
|
|
||||||
// Recover and check that all log files were processed.
|
// Recover and check that all log files were processed.
|
||||||
Open();
|
Open();
|
||||||
ASSERT_LE(1, NumTables());
|
ASSERT_LE(1, NumTables());
|
||||||
ASSERT_EQ(1, NumLogs());
|
ASSERT_EQ(1, NumLogs());
|
||||||
uint64_t new_log = FirstLogFile();
|
uint64_t new_log = FirstLogFile();
|
||||||
ASSERT_LE(old_log+3, new_log);
|
ASSERT_LE(old_log + 3, new_log);
|
||||||
ASSERT_EQ("bar2", Get("foo"));
|
ASSERT_EQ("bar2", Get("foo"));
|
||||||
ASSERT_EQ("world", Get("hello"));
|
ASSERT_EQ("world", Get("hello"));
|
||||||
ASSERT_EQ("there", Get("hi"));
|
ASSERT_EQ("there", Get("hi"));
|
||||||
@ -316,7 +304,7 @@ TEST(RecoveryTest, MultipleLogFiles) {
|
|||||||
|
|
||||||
// Check that introducing an older log file does not cause it to be re-read.
|
// Check that introducing an older log file does not cause it to be re-read.
|
||||||
Close();
|
Close();
|
||||||
MakeLogFile(old_log+1, 2000, "hello", "stale write");
|
MakeLogFile(old_log + 1, 2000, "hello", "stale write");
|
||||||
Open();
|
Open();
|
||||||
ASSERT_LE(1, NumTables());
|
ASSERT_LE(1, NumTables());
|
||||||
ASSERT_EQ(1, NumLogs());
|
ASSERT_EQ(1, NumLogs());
|
||||||
@ -339,6 +327,4 @@ TEST(RecoveryTest, ManifestMissing) {
|
|||||||
|
|
||||||
} // namespace leveldb
|
} // namespace leveldb
|
||||||
|
|
||||||
int main(int argc, char** argv) {
|
int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
|
||||||
return leveldb::test::RunAllTests();
|
|
||||||
}
|
|
||||||
|
47 db/repair.cc
@ -84,9 +84,7 @@ class Repairer {
|
|||||||
"recovered %d files; %llu bytes. "
|
"recovered %d files; %llu bytes. "
|
||||||
"Some data may have been lost. "
|
"Some data may have been lost. "
|
||||||
"****",
|
"****",
|
||||||
dbname_.c_str(),
|
dbname_.c_str(), static_cast<int>(tables_.size()), bytes);
|
||||||
static_cast<int>(tables_.size()),
|
|
||||||
bytes);
|
|
||||||
}
|
}
|
||||||
return status;
|
return status;
|
||||||
}
|
}
|
||||||
@ -152,8 +150,7 @@ class Repairer {
|
|||||||
Status status = ConvertLogToTable(logs_[i]);
|
Status status = ConvertLogToTable(logs_[i]);
|
||||||
if (!status.ok()) {
|
if (!status.ok()) {
|
||||||
Log(options_.info_log, "Log #%llu: ignoring conversion error: %s",
|
Log(options_.info_log, "Log #%llu: ignoring conversion error: %s",
|
||||||
(unsigned long long) logs_[i],
|
(unsigned long long)logs_[i], status.ToString().c_str());
|
||||||
status.ToString().c_str());
|
|
||||||
}
|
}
|
||||||
ArchiveFile(logname);
|
ArchiveFile(logname);
|
||||||
}
|
}
|
||||||
@ -167,8 +164,7 @@ class Repairer {
|
|||||||
virtual void Corruption(size_t bytes, const Status& s) {
|
virtual void Corruption(size_t bytes, const Status& s) {
|
||||||
// We print error messages for corruption, but continue repairing.
|
// We print error messages for corruption, but continue repairing.
|
||||||
Log(info_log, "Log #%llu: dropping %d bytes; %s",
|
Log(info_log, "Log #%llu: dropping %d bytes; %s",
|
||||||
(unsigned long long) lognum,
|
(unsigned long long)lognum, static_cast<int>(bytes),
|
||||||
static_cast<int>(bytes),
|
|
||||||
s.ToString().c_str());
|
s.ToString().c_str());
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
@ -190,8 +186,8 @@ class Repairer {
|
|||||||
// corruptions cause entire commits to be skipped instead of
|
// corruptions cause entire commits to be skipped instead of
|
||||||
// propagating bad information (like overly large sequence
|
// propagating bad information (like overly large sequence
|
||||||
// numbers).
|
// numbers).
|
||||||
log::Reader reader(lfile, &reporter, false/*do not checksum*/,
|
log::Reader reader(lfile, &reporter, false /*do not checksum*/,
|
||||||
0/*initial_offset*/);
|
0 /*initial_offset*/);
|
||||||
|
|
||||||
// Read all the records and add to a memtable
|
// Read all the records and add to a memtable
|
||||||
std::string scratch;
|
std::string scratch;
|
||||||
@ -202,8 +198,8 @@ class Repairer {
|
|||||||
int counter = 0;
|
int counter = 0;
|
||||||
while (reader.ReadRecord(&record, &scratch)) {
|
while (reader.ReadRecord(&record, &scratch)) {
|
||||||
if (record.size() < 12) {
|
if (record.size() < 12) {
|
||||||
reporter.Corruption(
|
reporter.Corruption(record.size(),
|
||||||
record.size(), Status::Corruption("log record too small"));
|
Status::Corruption("log record too small"));
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
WriteBatchInternal::SetContents(&batch, record);
|
WriteBatchInternal::SetContents(&batch, record);
|
||||||
@ -212,8 +208,7 @@ class Repairer {
|
|||||||
counter += WriteBatchInternal::Count(&batch);
|
counter += WriteBatchInternal::Count(&batch);
|
||||||
} else {
|
} else {
|
||||||
Log(options_.info_log, "Log #%llu: ignoring %s",
|
Log(options_.info_log, "Log #%llu: ignoring %s",
|
||||||
(unsigned long long) log,
|
(unsigned long long)log, status.ToString().c_str());
|
||||||
status.ToString().c_str());
|
|
||||||
status = Status::OK(); // Keep going with rest of file
|
status = Status::OK(); // Keep going with rest of file
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -234,9 +229,7 @@ class Repairer {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
Log(options_.info_log, "Log #%llu: %d ops saved to Table #%llu %s",
|
Log(options_.info_log, "Log #%llu: %d ops saved to Table #%llu %s",
|
||||||
(unsigned long long) log,
|
(unsigned long long)log, counter, (unsigned long long)meta.number,
|
||||||
counter,
|
|
||||||
(unsigned long long) meta.number,
|
|
||||||
status.ToString().c_str());
|
status.ToString().c_str());
|
||||||
return status;
|
return status;
|
||||||
}
|
}
|
||||||
@ -272,8 +265,7 @@ class Repairer {
|
|||||||
ArchiveFile(TableFileName(dbname_, number));
|
ArchiveFile(TableFileName(dbname_, number));
|
||||||
ArchiveFile(SSTTableFileName(dbname_, number));
|
ArchiveFile(SSTTableFileName(dbname_, number));
|
||||||
Log(options_.info_log, "Table #%llu: dropped: %s",
|
Log(options_.info_log, "Table #%llu: dropped: %s",
|
||||||
(unsigned long long) t.meta.number,
|
(unsigned long long)t.meta.number, status.ToString().c_str());
|
||||||
status.ToString().c_str());
|
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -287,8 +279,7 @@ class Repairer {
|
|||||||
Slice key = iter->key();
|
Slice key = iter->key();
|
||||||
if (!ParseInternalKey(key, &parsed)) {
|
if (!ParseInternalKey(key, &parsed)) {
|
||||||
Log(options_.info_log, "Table #%llu: unparsable key %s",
|
Log(options_.info_log, "Table #%llu: unparsable key %s",
|
||||||
(unsigned long long) t.meta.number,
|
(unsigned long long)t.meta.number, EscapeString(key).c_str());
|
||||||
EscapeString(key).c_str());
|
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -307,9 +298,7 @@ class Repairer {
|
|||||||
}
|
}
|
||||||
delete iter;
|
delete iter;
|
||||||
Log(options_.info_log, "Table #%llu: %d entries %s",
|
Log(options_.info_log, "Table #%llu: %d entries %s",
|
||||||
(unsigned long long) t.meta.number,
|
(unsigned long long)t.meta.number, counter, status.ToString().c_str());
|
||||||
counter,
|
|
||||||
status.ToString().c_str());
|
|
||||||
|
|
||||||
if (status.ok()) {
|
if (status.ok()) {
|
||||||
tables_.push_back(t);
|
tables_.push_back(t);
|
||||||
@ -363,7 +352,7 @@ class Repairer {
|
|||||||
s = env_->RenameFile(copy, orig);
|
s = env_->RenameFile(copy, orig);
|
||||||
if (s.ok()) {
|
if (s.ok()) {
|
||||||
Log(options_.info_log, "Table #%llu: %d entries repaired",
|
Log(options_.info_log, "Table #%llu: %d entries repaired",
|
||||||
(unsigned long long) t.meta.number, counter);
|
(unsigned long long)t.meta.number, counter);
|
||||||
tables_.push_back(t);
|
tables_.push_back(t);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -395,11 +384,11 @@ class Repairer {
|
|||||||
for (size_t i = 0; i < tables_.size(); i++) {
|
for (size_t i = 0; i < tables_.size(); i++) {
|
||||||
// TODO(opt): separate out into multiple levels
|
// TODO(opt): separate out into multiple levels
|
||||||
const TableInfo& t = tables_[i];
|
const TableInfo& t = tables_[i];
|
||||||
edit_.AddFile(0, t.meta.number, t.meta.file_size,
|
edit_.AddFile(0, t.meta.number, t.meta.file_size, t.meta.smallest,
|
||||||
t.meta.smallest, t.meta.largest);
|
t.meta.largest);
|
||||||
}
|
}
|
||||||
|
|
||||||
//fprintf(stderr, "NewDescriptor:\n%s\n", edit_.DebugString().c_str());
|
// fprintf(stderr, "NewDescriptor:\n%s\n", edit_.DebugString().c_str());
|
||||||
{
|
{
|
||||||
log::Writer log(file);
|
log::Writer log(file);
|
||||||
std::string record;
|
std::string record;
|
||||||
@ -447,8 +436,8 @@ class Repairer {
|
|||||||
new_file.append("/");
|
new_file.append("/");
|
||||||
new_file.append((slash == nullptr) ? fname.c_str() : slash + 1);
|
new_file.append((slash == nullptr) ? fname.c_str() : slash + 1);
|
||||||
Status s = env_->RenameFile(fname, new_file);
|
Status s = env_->RenameFile(fname, new_file);
|
||||||
Log(options_.info_log, "Archiving %s: %s\n",
|
Log(options_.info_log, "Archiving %s: %s\n", fname.c_str(),
|
||||||
fname.c_str(), s.ToString().c_str());
|
s.ToString().c_str());
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
} // namespace
|
} // namespace
|
||||||
|
@ -38,7 +38,7 @@ namespace leveldb {
|
|||||||
|
|
||||||
class Arena;
|
class Arena;
|
||||||
|
|
||||||
template<typename Key, class Comparator>
|
template <typename Key, class Comparator>
|
||||||
class SkipList {
|
class SkipList {
|
||||||
private:
|
private:
|
||||||
struct Node;
|
struct Node;
|
||||||
@ -100,13 +100,13 @@ class SkipList {
|
|||||||
|
|
||||||
// Immutable after construction
|
// Immutable after construction
|
||||||
Comparator const compare_;
|
Comparator const compare_;
|
||||||
Arena* const arena_; // Arena used for allocations of nodes
|
Arena* const arena_; // Arena used for allocations of nodes
|
||||||
|
|
||||||
Node* const head_;
|
Node* const head_;
|
||||||
|
|
||||||
// Modified only by Insert(). Read racily by readers, but stale
|
// Modified only by Insert(). Read racily by readers, but stale
|
||||||
// values are ok.
|
// values are ok.
|
||||||
std::atomic<int> max_height_; // Height of the entire list
|
std::atomic<int> max_height_; // Height of the entire list
|
||||||
|
|
||||||
inline int GetMaxHeight() const {
|
inline int GetMaxHeight() const {
|
||||||
return max_height_.load(std::memory_order_relaxed);
|
return max_height_.load(std::memory_order_relaxed);
|
||||||
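
As the comment in this hunk says, `max_height_` may be read racily because a stale value only makes a reader start its search at the wrong level, never produces a wrong answer, so a relaxed atomic load is enough. A minimal sketch of that pattern under the same assumption (one externally synchronized writer, readers that tolerate staleness):

```
#include <atomic>
#include <iostream>

int main() {
  std::atomic<int> max_height{1};

  // Writer side (externally synchronized in the real list): bump the height.
  max_height.store(4, std::memory_order_relaxed);

  // Reader side: a stale value is acceptable, so no acquire barrier is needed.
  int h = max_height.load(std::memory_order_relaxed);
  std::cout << "observed height " << h << "\n";
  return 0;
}
```
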
@ -143,9 +143,9 @@ class SkipList {
|
|||||||
};
|
};
|
||||||
|
|
||||||
// Implementation details follow
|
// Implementation details follow
|
||||||
template<typename Key, class Comparator>
|
template <typename Key, class Comparator>
|
||||||
struct SkipList<Key, Comparator>::Node {
|
struct SkipList<Key, Comparator>::Node {
|
||||||
explicit Node(const Key& k) : key(k) { }
|
explicit Node(const Key& k) : key(k) {}
|
||||||
|
|
||||||
Key const key;
|
Key const key;
|
||||||
|
|
||||||
@ -179,38 +179,38 @@ struct SkipList<Key, Comparator>::Node {
|
|||||||
std::atomic<Node*> next_[1];
|
std::atomic<Node*> next_[1];
|
||||||
};
|
};
|
||||||
|
|
||||||
template<typename Key, class Comparator>
|
template <typename Key, class Comparator>
|
||||||
typename SkipList<Key, Comparator>::Node*
|
typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::NewNode(
|
||||||
SkipList<Key, Comparator>::NewNode(const Key& key, int height) {
|
const Key& key, int height) {
|
||||||
char* const node_memory = arena_->AllocateAligned(
|
char* const node_memory = arena_->AllocateAligned(
|
||||||
sizeof(Node) + sizeof(std::atomic<Node*>) * (height - 1));
|
sizeof(Node) + sizeof(std::atomic<Node*>) * (height - 1));
|
||||||
return new (node_memory) Node(key);
|
return new (node_memory) Node(key);
|
||||||
}
|
}
|
||||||
|
|
||||||
template<typename Key, class Comparator>
|
template <typename Key, class Comparator>
|
||||||
inline SkipList<Key, Comparator>::Iterator::Iterator(const SkipList* list) {
|
inline SkipList<Key, Comparator>::Iterator::Iterator(const SkipList* list) {
|
||||||
list_ = list;
|
list_ = list;
|
||||||
node_ = nullptr;
|
node_ = nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
template<typename Key, class Comparator>
|
template <typename Key, class Comparator>
|
||||||
inline bool SkipList<Key, Comparator>::Iterator::Valid() const {
|
inline bool SkipList<Key, Comparator>::Iterator::Valid() const {
|
||||||
return node_ != nullptr;
|
return node_ != nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
template<typename Key, class Comparator>
|
template <typename Key, class Comparator>
|
||||||
inline const Key& SkipList<Key, Comparator>::Iterator::key() const {
|
inline const Key& SkipList<Key, Comparator>::Iterator::key() const {
|
||||||
assert(Valid());
|
assert(Valid());
|
||||||
return node_->key;
|
return node_->key;
|
||||||
}
|
}
|
||||||
|
|
||||||
template<typename Key, class Comparator>
|
template <typename Key, class Comparator>
|
||||||
inline void SkipList<Key, Comparator>::Iterator::Next() {
|
inline void SkipList<Key, Comparator>::Iterator::Next() {
|
||||||
assert(Valid());
|
assert(Valid());
|
||||||
node_ = node_->Next(0);
|
node_ = node_->Next(0);
|
||||||
}
|
}
|
||||||
|
|
||||||
template<typename Key, class Comparator>
|
template <typename Key, class Comparator>
|
||||||
inline void SkipList<Key, Comparator>::Iterator::Prev() {
|
inline void SkipList<Key, Comparator>::Iterator::Prev() {
|
||||||
// Instead of using explicit "prev" links, we just search for the
|
// Instead of using explicit "prev" links, we just search for the
|
||||||
// last node that falls before key.
|
// last node that falls before key.
|
||||||
@ -221,17 +221,17 @@ inline void SkipList<Key, Comparator>::Iterator::Prev() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
template<typename Key, class Comparator>
|
template <typename Key, class Comparator>
|
||||||
inline void SkipList<Key, Comparator>::Iterator::Seek(const Key& target) {
|
inline void SkipList<Key, Comparator>::Iterator::Seek(const Key& target) {
|
||||||
node_ = list_->FindGreaterOrEqual(target, nullptr);
|
node_ = list_->FindGreaterOrEqual(target, nullptr);
|
||||||
}
|
}
|
||||||
|
|
||||||
template<typename Key, class Comparator>
|
template <typename Key, class Comparator>
|
||||||
inline void SkipList<Key, Comparator>::Iterator::SeekToFirst() {
|
inline void SkipList<Key, Comparator>::Iterator::SeekToFirst() {
|
||||||
node_ = list_->head_->Next(0);
|
node_ = list_->head_->Next(0);
|
||||||
}
|
}
|
||||||
|
|
||||||
template<typename Key, class Comparator>
|
template <typename Key, class Comparator>
|
||||||
inline void SkipList<Key, Comparator>::Iterator::SeekToLast() {
|
inline void SkipList<Key, Comparator>::Iterator::SeekToLast() {
|
||||||
node_ = list_->FindLast();
|
node_ = list_->FindLast();
|
||||||
if (node_ == list_->head_) {
|
if (node_ == list_->head_) {
|
||||||
@ -239,7 +239,7 @@ inline void SkipList<Key, Comparator>::Iterator::SeekToLast() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
template<typename Key, class Comparator>
|
template <typename Key, class Comparator>
|
||||||
int SkipList<Key, Comparator>::RandomHeight() {
|
int SkipList<Key, Comparator>::RandomHeight() {
|
||||||
// Increase height with probability 1 in kBranching
|
// Increase height with probability 1 in kBranching
|
||||||
static const unsigned int kBranching = 4;
|
static const unsigned int kBranching = 4;
|
||||||
@ -252,13 +252,13 @@ int SkipList<Key, Comparator>::RandomHeight() {
|
|||||||
return height;
|
return height;
|
||||||
}
|
}
|
||||||
|
|
||||||
template<typename Key, class Comparator>
|
template <typename Key, class Comparator>
|
||||||
bool SkipList<Key, Comparator>::KeyIsAfterNode(const Key& key, Node* n) const {
|
bool SkipList<Key, Comparator>::KeyIsAfterNode(const Key& key, Node* n) const {
|
||||||
// null n is considered infinite
|
// null n is considered infinite
|
||||||
return (n != nullptr) && (compare_(n->key, key) < 0);
|
return (n != nullptr) && (compare_(n->key, key) < 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
template<typename Key, class Comparator>
|
template <typename Key, class Comparator>
|
||||||
typename SkipList<Key, Comparator>::Node*
|
typename SkipList<Key, Comparator>::Node*
|
||||||
SkipList<Key, Comparator>::FindGreaterOrEqual(const Key& key,
|
SkipList<Key, Comparator>::FindGreaterOrEqual(const Key& key,
|
||||||
Node** prev) const {
|
Node** prev) const {
|
||||||
@ -281,7 +281,7 @@ SkipList<Key, Comparator>::FindGreaterOrEqual(const Key& key,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
template<typename Key, class Comparator>
|
template <typename Key, class Comparator>
|
||||||
typename SkipList<Key, Comparator>::Node*
|
typename SkipList<Key, Comparator>::Node*
|
||||||
SkipList<Key, Comparator>::FindLessThan(const Key& key) const {
|
SkipList<Key, Comparator>::FindLessThan(const Key& key) const {
|
||||||
Node* x = head_;
|
Node* x = head_;
|
||||||
@ -302,7 +302,7 @@ SkipList<Key, Comparator>::FindLessThan(const Key& key) const {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
template<typename Key, class Comparator>
|
template <typename Key, class Comparator>
|
||||||
typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::FindLast()
|
typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::FindLast()
|
||||||
const {
|
const {
|
||||||
Node* x = head_;
|
Node* x = head_;
|
||||||
@ -322,7 +322,7 @@ typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::FindLast()
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
template<typename Key, class Comparator>
|
template <typename Key, class Comparator>
|
||||||
SkipList<Key, Comparator>::SkipList(Comparator cmp, Arena* arena)
|
SkipList<Key, Comparator>::SkipList(Comparator cmp, Arena* arena)
|
||||||
: compare_(cmp),
|
: compare_(cmp),
|
||||||
arena_(arena),
|
arena_(arena),
|
||||||
@ -334,7 +334,7 @@ SkipList<Key, Comparator>::SkipList(Comparator cmp, Arena* arena)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
template<typename Key, class Comparator>
|
template <typename Key, class Comparator>
|
||||||
void SkipList<Key, Comparator>::Insert(const Key& key) {
|
void SkipList<Key, Comparator>::Insert(const Key& key) {
|
||||||
// TODO(opt): We can use a barrier-free variant of FindGreaterOrEqual()
|
// TODO(opt): We can use a barrier-free variant of FindGreaterOrEqual()
|
||||||
// here since Insert() is externally synchronized.
|
// here since Insert() is externally synchronized.
|
||||||
@ -368,7 +368,7 @@ void SkipList<Key, Comparator>::Insert(const Key& key) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
template<typename Key, class Comparator>
|
template <typename Key, class Comparator>
|
||||||
bool SkipList<Key, Comparator>::Contains(const Key& key) const {
|
bool SkipList<Key, Comparator>::Contains(const Key& key) const {
|
||||||
Node* x = FindGreaterOrEqual(key, nullptr);
|
Node* x = FindGreaterOrEqual(key, nullptr);
|
||||||
if (x != nullptr && Equal(key, x->key)) {
|
if (x != nullptr && Equal(key, x->key)) {
|
||||||
|
@ -31,7 +31,7 @@ struct Comparator {
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
class SkipTest { };
|
class SkipTest {};
|
||||||
|
|
||||||
TEST(SkipTest, Empty) {
|
TEST(SkipTest, Empty) {
|
||||||
Arena arena;
|
Arena arena;
|
||||||
@ -117,8 +117,7 @@ TEST(SkipTest, InsertAndLookup) {
|
|||||||
|
|
||||||
// Compare against model iterator
|
// Compare against model iterator
|
||||||
for (std::set<Key>::reverse_iterator model_iter = keys.rbegin();
|
for (std::set<Key>::reverse_iterator model_iter = keys.rbegin();
|
||||||
model_iter != keys.rend();
|
model_iter != keys.rend(); ++model_iter) {
|
||||||
++model_iter) {
|
|
||||||
ASSERT_TRUE(iter.Valid());
|
ASSERT_TRUE(iter.Valid());
|
||||||
ASSERT_EQ(*model_iter, iter.key());
|
ASSERT_EQ(*model_iter, iter.key());
|
||||||
iter.Prev();
|
iter.Prev();
|
||||||
@ -160,12 +159,12 @@ class ConcurrentTest {
|
|||||||
static uint64_t hash(Key key) { return key & 0xff; }
|
static uint64_t hash(Key key) { return key & 0xff; }
|
||||||
|
|
||||||
static uint64_t HashNumbers(uint64_t k, uint64_t g) {
|
static uint64_t HashNumbers(uint64_t k, uint64_t g) {
|
||||||
uint64_t data[2] = { k, g };
|
uint64_t data[2] = {k, g};
|
||||||
return Hash(reinterpret_cast<char*>(data), sizeof(data), 0);
|
return Hash(reinterpret_cast<char*>(data), sizeof(data), 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
static Key MakeKey(uint64_t k, uint64_t g) {
|
static Key MakeKey(uint64_t k, uint64_t g) {
|
||||||
assert(sizeof(Key) == sizeof(uint64_t));
|
static_assert(sizeof(Key) == sizeof(uint64_t), "");
|
||||||
assert(k <= K); // We sometimes pass K to seek to the end of the skiplist
|
assert(k <= K); // We sometimes pass K to seek to the end of the skiplist
|
||||||
assert(g <= 0xffffffffu);
|
assert(g <= 0xffffffffu);
|
||||||
return ((k << 40) | (g << 8) | (HashNumbers(k, g) & 0xff));
|
return ((k << 40) | (g << 8) | (HashNumbers(k, g) & 0xff));
|
||||||
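
`MakeKey` above packs three fields into one 64-bit test key: the key index in the top 24 bits, a generation counter in the middle 32 bits, and a hash byte at the bottom, which is why it now insists via `static_assert` that `Key` is 64 bits wide. A sketch of the same packing with matching extractors; the names here are illustrative:

```
#include <cassert>
#include <cstdint>

using Key = uint64_t;

Key Make(uint64_t k, uint64_t gen, uint8_t hash_byte) {
  return (k << 40) | (gen << 8) | hash_byte;
}

int main() {
  static_assert(sizeof(Key) == sizeof(uint64_t), "packing assumes 64 bits");
  const Key key = Make(3, 1000, 0xab);
  assert((key >> 40) == 3);                    // key index
  assert(((key >> 8) & 0xffffffffu) == 1000);  // generation
  assert((key & 0xff) == 0xab);                // hash byte
  return 0;
}
```
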
@ -195,9 +194,7 @@ class ConcurrentTest {
|
|||||||
void Set(int k, int v) {
|
void Set(int k, int v) {
|
||||||
generation[k].store(v, std::memory_order_release);
|
generation[k].store(v, std::memory_order_release);
|
||||||
}
|
}
|
||||||
int Get(int k) {
|
int Get(int k) { return generation[k].load(std::memory_order_acquire); }
|
||||||
return generation[k].load(std::memory_order_acquire);
|
|
||||||
}
|
|
||||||
|
|
||||||
State() {
|
State() {
|
||||||
for (int k = 0; k < K; k++) {
|
for (int k = 0; k < K; k++) {
|
||||||
@ -216,7 +213,7 @@ class ConcurrentTest {
|
|||||||
SkipList<Key, Comparator> list_;
|
SkipList<Key, Comparator> list_;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
ConcurrentTest() : list_(Comparator(), &arena_) { }
|
ConcurrentTest() : list_(Comparator(), &arena_) {}
|
||||||
|
|
||||||
// REQUIRES: External synchronization
|
// REQUIRES: External synchronization
|
||||||
void WriteStep(Random* rnd) {
|
void WriteStep(Random* rnd) {
|
||||||
@ -255,11 +252,9 @@ class ConcurrentTest {
|
|||||||
// Note that generation 0 is never inserted, so it is ok if
|
// Note that generation 0 is never inserted, so it is ok if
|
||||||
// <*,0,*> is missing.
|
// <*,0,*> is missing.
|
||||||
ASSERT_TRUE((gen(pos) == 0) ||
|
ASSERT_TRUE((gen(pos) == 0) ||
|
||||||
(gen(pos) > static_cast<Key>(initial_state.Get(key(pos))))
|
(gen(pos) > static_cast<Key>(initial_state.Get(key(pos)))))
|
||||||
) << "key: " << key(pos)
|
<< "key: " << key(pos) << "; gen: " << gen(pos)
|
||||||
<< "; gen: " << gen(pos)
|
<< "; initgen: " << initial_state.Get(key(pos));
|
||||||
<< "; initgen: "
|
|
||||||
<< initial_state.Get(key(pos));
|
|
||||||
|
|
||||||
// Advance to next key in the valid key space
|
// Advance to next key in the valid key space
|
||||||
if (key(pos) < key(current)) {
|
if (key(pos) < key(current)) {
|
||||||
@ -305,17 +300,10 @@ class TestState {
|
|||||||
int seed_;
|
int seed_;
|
||||||
std::atomic<bool> quit_flag_;
|
std::atomic<bool> quit_flag_;
|
||||||
|
|
||||||
enum ReaderState {
|
enum ReaderState { STARTING, RUNNING, DONE };
|
||||||
STARTING,
|
|
||||||
RUNNING,
|
|
||||||
DONE
|
|
||||||
};
|
|
||||||
|
|
||||||
explicit TestState(int s)
|
explicit TestState(int s)
|
||||||
: seed_(s),
|
: seed_(s), quit_flag_(false), state_(STARTING), state_cv_(&mu_) {}
|
||||||
quit_flag_(false),
|
|
||||||
state_(STARTING),
|
|
||||||
state_cv_(&mu_) {}
|
|
||||||
|
|
||||||
void Wait(ReaderState s) LOCKS_EXCLUDED(mu_) {
|
void Wait(ReaderState s) LOCKS_EXCLUDED(mu_) {
|
||||||
mu_.Lock();
|
mu_.Lock();
|
||||||
@ -378,6 +366,4 @@ TEST(SkipTest, Concurrent5) { RunConcurrent(5); }
|
|||||||
|
|
||||||
} // namespace leveldb
|
} // namespace leveldb
|
||||||
|
|
||||||
int main(int argc, char** argv) {
|
int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
|
||||||
return leveldb::test::RunAllTests();
|
|
||||||
}
|
|
||||||
|
@@ -44,8 +44,14 @@ class SnapshotList {
   }
 
   bool empty() const { return head_.next_ == &head_; }
-  SnapshotImpl* oldest() const { assert(!empty()); return head_.next_; }
-  SnapshotImpl* newest() const { assert(!empty()); return head_.prev_; }
+  SnapshotImpl* oldest() const {
+    assert(!empty());
+    return head_.next_;
+  }
+  SnapshotImpl* newest() const {
+    assert(!empty());
+    return head_.prev_;
+  }
 
   // Creates a SnapshotImpl and appends it to the end of the list.
   SnapshotImpl* New(SequenceNumber sequence_number) {
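
For readers skimming the diff: `SnapshotList` is a circular doubly-linked list with a sentinel head node, so `empty()` is a single pointer comparison and `oldest()`/`newest()` are just the sentinel's two neighbours. A stripped-down sketch of that structure, not the class from the tree:

```
#include <cassert>

struct Node {
  Node* prev;
  Node* next;
  int value = 0;
};

int main() {
  // Sentinel head: an empty list points at itself.
  Node head;
  head.prev = &head;
  head.next = &head;
  assert(head.next == &head);  // empty()

  // Append one element at the tail (the "newest" end).
  Node n;
  n.value = 42;
  n.next = &head;
  n.prev = head.prev;
  n.prev->next = &n;
  head.prev = &n;

  assert(head.next == &n);  // oldest()
  assert(head.prev == &n);  // newest()
  return 0;
}
```
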
|
@ -29,18 +29,14 @@ static void UnrefEntry(void* arg1, void* arg2) {
|
|||||||
cache->Release(h);
|
cache->Release(h);
|
||||||
}
|
}
|
||||||
|
|
||||||
TableCache::TableCache(const std::string& dbname,
|
TableCache::TableCache(const std::string& dbname, const Options& options,
|
||||||
const Options& options,
|
|
||||||
int entries)
|
int entries)
|
||||||
: env_(options.env),
|
: env_(options.env),
|
||||||
dbname_(dbname),
|
dbname_(dbname),
|
||||||
options_(options),
|
options_(options),
|
||||||
cache_(NewLRUCache(entries)) {
|
cache_(NewLRUCache(entries)) {}
|
||||||
}
|
|
||||||
|
|
||||||
TableCache::~TableCache() {
|
TableCache::~TableCache() { delete cache_; }
|
||||||
delete cache_;
|
|
||||||
}
|
|
||||||
|
|
||||||
Status TableCache::FindTable(uint64_t file_number, uint64_t file_size,
|
Status TableCache::FindTable(uint64_t file_number, uint64_t file_size,
|
||||||
Cache::Handle** handle) {
|
Cache::Handle** handle) {
|
||||||
@ -80,8 +76,7 @@ Status TableCache::FindTable(uint64_t file_number, uint64_t file_size,
|
|||||||
}
|
}
|
||||||
|
|
||||||
Iterator* TableCache::NewIterator(const ReadOptions& options,
|
Iterator* TableCache::NewIterator(const ReadOptions& options,
|
||||||
uint64_t file_number,
|
uint64_t file_number, uint64_t file_size,
|
||||||
uint64_t file_size,
|
|
||||||
Table** tableptr) {
|
Table** tableptr) {
|
||||||
if (tableptr != nullptr) {
|
if (tableptr != nullptr) {
|
||||||
*tableptr = nullptr;
|
*tableptr = nullptr;
|
||||||
@ -102,17 +97,15 @@ Iterator* TableCache::NewIterator(const ReadOptions& options,
|
|||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
Status TableCache::Get(const ReadOptions& options,
|
Status TableCache::Get(const ReadOptions& options, uint64_t file_number,
|
||||||
uint64_t file_number,
|
uint64_t file_size, const Slice& k, void* arg,
|
||||||
uint64_t file_size,
|
void (*handle_result)(void*, const Slice&,
|
||||||
const Slice& k,
|
const Slice&)) {
|
||||||
void* arg,
|
|
||||||
void (*saver)(void*, const Slice&, const Slice&)) {
|
|
||||||
Cache::Handle* handle = nullptr;
|
Cache::Handle* handle = nullptr;
|
||||||
Status s = FindTable(file_number, file_size, &handle);
|
Status s = FindTable(file_number, file_size, &handle);
|
||||||
if (s.ok()) {
|
if (s.ok()) {
|
||||||
Table* t = reinterpret_cast<TableAndFile*>(cache_->Value(handle))->table;
|
Table* t = reinterpret_cast<TableAndFile*>(cache_->Value(handle))->table;
|
||||||
s = t->InternalGet(options, k, arg, saver);
|
s = t->InternalGet(options, k, arg, handle_result);
|
||||||
cache_->Release(handle);
|
cache_->Release(handle);
|
||||||
}
|
}
|
||||||
return s;
|
return s;
|
||||||
|
@ -7,8 +7,10 @@
|
|||||||
#ifndef STORAGE_LEVELDB_DB_TABLE_CACHE_H_
|
#ifndef STORAGE_LEVELDB_DB_TABLE_CACHE_H_
|
||||||
#define STORAGE_LEVELDB_DB_TABLE_CACHE_H_
|
#define STORAGE_LEVELDB_DB_TABLE_CACHE_H_
|
||||||
|
|
||||||
#include <string>
|
|
||||||
#include <stdint.h>
|
#include <stdint.h>
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
|
||||||
#include "db/dbformat.h"
|
#include "db/dbformat.h"
|
||||||
#include "leveldb/cache.h"
|
#include "leveldb/cache.h"
|
||||||
#include "leveldb/table.h"
|
#include "leveldb/table.h"
|
||||||
@ -30,18 +32,13 @@ class TableCache {
|
|||||||
// underlies the returned iterator. The returned "*tableptr" object is owned
|
// underlies the returned iterator. The returned "*tableptr" object is owned
|
||||||
// by the cache and should not be deleted, and is valid for as long as the
|
// by the cache and should not be deleted, and is valid for as long as the
|
||||||
// returned iterator is live.
|
// returned iterator is live.
|
||||||
Iterator* NewIterator(const ReadOptions& options,
|
Iterator* NewIterator(const ReadOptions& options, uint64_t file_number,
|
||||||
uint64_t file_number,
|
uint64_t file_size, Table** tableptr = nullptr);
|
||||||
uint64_t file_size,
|
|
||||||
Table** tableptr = nullptr);
|
|
||||||
|
|
||||||
// If a seek to internal key "k" in specified file finds an entry,
|
// If a seek to internal key "k" in specified file finds an entry,
|
||||||
// call (*handle_result)(arg, found_key, found_value).
|
// call (*handle_result)(arg, found_key, found_value).
|
||||||
Status Get(const ReadOptions& options,
|
Status Get(const ReadOptions& options, uint64_t file_number,
|
||||||
uint64_t file_number,
|
uint64_t file_size, const Slice& k, void* arg,
|
||||||
uint64_t file_size,
|
|
||||||
const Slice& k,
|
|
||||||
void* arg,
|
|
||||||
void (*handle_result)(void*, const Slice&, const Slice&));
|
void (*handle_result)(void*, const Slice&, const Slice&));
|
||||||
|
|
||||||
// Evict any entry for the specified file number
|
// Evict any entry for the specified file number
|
||||||
|
@ -12,15 +12,15 @@ namespace leveldb {
|
|||||||
// Tag numbers for serialized VersionEdit. These numbers are written to
|
// Tag numbers for serialized VersionEdit. These numbers are written to
|
||||||
// disk and should not be changed.
|
// disk and should not be changed.
|
||||||
enum Tag {
|
enum Tag {
|
||||||
kComparator = 1,
|
kComparator = 1,
|
||||||
kLogNumber = 2,
|
kLogNumber = 2,
|
||||||
kNextFileNumber = 3,
|
kNextFileNumber = 3,
|
||||||
kLastSequence = 4,
|
kLastSequence = 4,
|
||||||
kCompactPointer = 5,
|
kCompactPointer = 5,
|
||||||
kDeletedFile = 6,
|
kDeletedFile = 6,
|
||||||
kNewFile = 7,
|
kNewFile = 7,
|
||||||
// 8 was used for large value refs
|
// 8 was used for large value refs
|
||||||
kPrevLogNumber = 9
|
kPrevLogNumber = 9
|
||||||
};
|
};
|
||||||
|
|
||||||
void VersionEdit::Clear() {
|
void VersionEdit::Clear() {
|
||||||
@ -67,8 +67,7 @@ void VersionEdit::EncodeTo(std::string* dst) const {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for (DeletedFileSet::const_iterator iter = deleted_files_.begin();
|
for (DeletedFileSet::const_iterator iter = deleted_files_.begin();
|
||||||
iter != deleted_files_.end();
|
iter != deleted_files_.end(); ++iter) {
|
||||||
++iter) {
|
|
||||||
PutVarint32(dst, kDeletedFile);
|
PutVarint32(dst, kDeletedFile);
|
||||||
PutVarint32(dst, iter->first); // level
|
PutVarint32(dst, iter->first); // level
|
||||||
PutVarint64(dst, iter->second); // file number
|
PutVarint64(dst, iter->second); // file number
|
||||||
@ -97,8 +96,7 @@ static bool GetInternalKey(Slice* input, InternalKey* dst) {
|
|||||||
|
|
||||||
static bool GetLevel(Slice* input, int* level) {
|
static bool GetLevel(Slice* input, int* level) {
|
||||||
uint32_t v;
|
uint32_t v;
|
||||||
if (GetVarint32(input, &v) &&
|
if (GetVarint32(input, &v) && v < config::kNumLevels) {
|
||||||
v < config::kNumLevels) {
|
|
||||||
*level = v;
|
*level = v;
|
||||||
return true;
|
return true;
|
||||||
} else {
|
} else {
|
||||||
@ -163,8 +161,7 @@ Status VersionEdit::DecodeFrom(const Slice& src) {
|
|||||||
break;
|
break;
|
||||||
|
|
||||||
case kCompactPointer:
|
case kCompactPointer:
|
||||||
if (GetLevel(&input, &level) &&
|
if (GetLevel(&input, &level) && GetInternalKey(&input, &key)) {
|
||||||
GetInternalKey(&input, &key)) {
|
|
||||||
compact_pointers_.push_back(std::make_pair(level, key));
|
compact_pointers_.push_back(std::make_pair(level, key));
|
||||||
} else {
|
} else {
|
||||||
msg = "compaction pointer";
|
msg = "compaction pointer";
|
||||||
@ -172,8 +169,7 @@ Status VersionEdit::DecodeFrom(const Slice& src) {
|
|||||||
break;
|
break;
|
||||||
|
|
||||||
case kDeletedFile:
|
case kDeletedFile:
|
||||||
if (GetLevel(&input, &level) &&
|
if (GetLevel(&input, &level) && GetVarint64(&input, &number)) {
|
||||||
GetVarint64(&input, &number)) {
|
|
||||||
deleted_files_.insert(std::make_pair(level, number));
|
deleted_files_.insert(std::make_pair(level, number));
|
||||||
} else {
|
} else {
|
||||||
msg = "deleted file";
|
msg = "deleted file";
|
||||||
@ -181,8 +177,7 @@ Status VersionEdit::DecodeFrom(const Slice& src) {
|
|||||||
break;
|
break;
|
||||||
|
|
||||||
case kNewFile:
|
case kNewFile:
|
||||||
if (GetLevel(&input, &level) &&
|
if (GetLevel(&input, &level) && GetVarint64(&input, &f.number) &&
|
||||||
GetVarint64(&input, &f.number) &&
|
|
||||||
GetVarint64(&input, &f.file_size) &&
|
GetVarint64(&input, &f.file_size) &&
|
||||||
GetInternalKey(&input, &f.smallest) &&
|
GetInternalKey(&input, &f.smallest) &&
|
||||||
GetInternalKey(&input, &f.largest)) {
|
GetInternalKey(&input, &f.largest)) {
|
||||||
@ -239,8 +234,7 @@ std::string VersionEdit::DebugString() const {
|
|||||||
r.append(compact_pointers_[i].second.DebugString());
|
r.append(compact_pointers_[i].second.DebugString());
|
||||||
}
|
}
|
||||||
for (DeletedFileSet::const_iterator iter = deleted_files_.begin();
|
for (DeletedFileSet::const_iterator iter = deleted_files_.begin();
|
||||||
iter != deleted_files_.end();
|
iter != deleted_files_.end(); ++iter) {
|
||||||
++iter) {
|
|
||||||
r.append("\n DeleteFile: ");
|
r.append("\n DeleteFile: ");
|
||||||
AppendNumberTo(&r, iter->first);
|
AppendNumberTo(&r, iter->first);
|
||||||
r.append(" ");
|
r.append(" ");
|
||||||
|
@ -8,6 +8,7 @@
|
|||||||
#include <set>
|
#include <set>
|
||||||
#include <utility>
|
#include <utility>
|
||||||
#include <vector>
|
#include <vector>
|
||||||
|
|
||||||
#include "db/dbformat.h"
|
#include "db/dbformat.h"
|
||||||
|
|
||||||
namespace leveldb {
|
namespace leveldb {
|
||||||
@ -16,19 +17,19 @@ class VersionSet;
|
|||||||
|
|
||||||
struct FileMetaData {
|
struct FileMetaData {
|
||||||
int refs;
|
int refs;
|
||||||
int allowed_seeks; // Seeks allowed until compaction
|
int allowed_seeks; // Seeks allowed until compaction
|
||||||
uint64_t number;
|
uint64_t number;
|
||||||
uint64_t file_size; // File size in bytes
|
uint64_t file_size; // File size in bytes
|
||||||
InternalKey smallest; // Smallest internal key served by table
|
InternalKey smallest; // Smallest internal key served by table
|
||||||
InternalKey largest; // Largest internal key served by table
|
InternalKey largest; // Largest internal key served by table
|
||||||
|
|
||||||
FileMetaData() : refs(0), allowed_seeks(1 << 30), file_size(0) { }
|
FileMetaData() : refs(0), allowed_seeks(1 << 30), file_size(0) {}
|
||||||
};
|
};
|
||||||
|
|
||||||
class VersionEdit {
|
class VersionEdit {
|
||||||
public:
|
public:
|
||||||
VersionEdit() { Clear(); }
|
VersionEdit() { Clear(); }
|
||||||
~VersionEdit() { }
|
~VersionEdit() {}
|
||||||
|
|
||||||
void Clear();
|
void Clear();
|
||||||
|
|
||||||
@ -59,10 +60,8 @@ class VersionEdit {
|
|||||||
// Add the specified file at the specified number.
|
// Add the specified file at the specified number.
|
||||||
// REQUIRES: This version has not been saved (see VersionSet::SaveTo)
|
// REQUIRES: This version has not been saved (see VersionSet::SaveTo)
|
||||||
// REQUIRES: "smallest" and "largest" are smallest and largest keys in file
|
// REQUIRES: "smallest" and "largest" are smallest and largest keys in file
|
||||||
void AddFile(int level, uint64_t file,
|
void AddFile(int level, uint64_t file, uint64_t file_size,
|
||||||
uint64_t file_size,
|
const InternalKey& smallest, const InternalKey& largest) {
|
||||||
const InternalKey& smallest,
|
|
||||||
const InternalKey& largest) {
|
|
||||||
FileMetaData f;
|
FileMetaData f;
|
||||||
f.number = file;
|
f.number = file;
|
||||||
f.file_size = file_size;
|
f.file_size = file_size;
|
||||||
@ -84,7 +83,7 @@ class VersionEdit {
|
|||||||
private:
|
private:
|
||||||
friend class VersionSet;
|
friend class VersionSet;
|
||||||
|
|
||||||
typedef std::set< std::pair<int, uint64_t> > DeletedFileSet;
|
typedef std::set<std::pair<int, uint64_t> > DeletedFileSet;
|
||||||
|
|
||||||
std::string comparator_;
|
std::string comparator_;
|
||||||
uint64_t log_number_;
|
uint64_t log_number_;
|
||||||
@ -97,9 +96,9 @@ class VersionEdit {
|
|||||||
bool has_next_file_number_;
|
bool has_next_file_number_;
|
||||||
bool has_last_sequence_;
|
bool has_last_sequence_;
|
||||||
|
|
||||||
std::vector< std::pair<int, InternalKey> > compact_pointers_;
|
std::vector<std::pair<int, InternalKey> > compact_pointers_;
|
||||||
DeletedFileSet deleted_files_;
|
DeletedFileSet deleted_files_;
|
||||||
std::vector< std::pair<int, FileMetaData> > new_files_;
|
std::vector<std::pair<int, FileMetaData> > new_files_;
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace leveldb
|
} // namespace leveldb
|
||||||
|
@@ -17,7 +17,7 @@ static void TestEncodeDecode(const VersionEdit& edit) {
   ASSERT_EQ(encoded, encoded2);
 }
 
-class VersionEditTest { };
+class VersionEditTest {};
 
 TEST(VersionEditTest, EncodeDecode) {
   static const uint64_t kBig = 1ull << 50;
@@ -41,6 +41,4 @@ TEST(VersionEditTest, EncodeDecode) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
|
@@ -4,8 +4,10 @@
 
 #include "db/version_set.h"
 
-#include <algorithm>
 #include <stdio.h>
+
+#include <algorithm>
+
 #include "db/filename.h"
 #include "db/log_reader.h"
 #include "db/log_writer.h"
@ -84,8 +86,7 @@ Version::~Version() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
int FindFile(const InternalKeyComparator& icmp,
|
int FindFile(const InternalKeyComparator& icmp,
|
||||||
const std::vector<FileMetaData*>& files,
|
const std::vector<FileMetaData*>& files, const Slice& key) {
|
||||||
const Slice& key) {
|
|
||||||
uint32_t left = 0;
|
uint32_t left = 0;
|
||||||
uint32_t right = files.size();
|
uint32_t right = files.size();
|
||||||
while (left < right) {
|
while (left < right) {
|
||||||
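The hunk stops inside `FindFile`, so as a reader aid here is a sketch of the binary search it performs, paraphrased from the upstream function rather than quoted from this diff (the wrapper name `FindFileSketch` is ours): it returns the index of the first file whose largest key is not before `key`.

```cpp
// Sketch (assumption: paraphrase of the upstream body, which the hunk above
// truncates right after "while (left < right) {").
#include <cstdint>
#include <vector>

#include "db/dbformat.h"      // InternalKeyComparator, Slice
#include "db/version_edit.h"  // FileMetaData

int FindFileSketch(const leveldb::InternalKeyComparator& icmp,
                   const std::vector<leveldb::FileMetaData*>& files,
                   const leveldb::Slice& key) {
  uint32_t left = 0;
  uint32_t right = files.size();
  while (left < right) {
    const uint32_t mid = (left + right) / 2;
    const leveldb::FileMetaData* f = files[mid];
    if (icmp.Compare(f->largest.Encode(), key) < 0) {
      left = mid + 1;  // Every file at or before mid ends before key.
    } else {
      right = mid;  // files[mid] may still contain key.
    }
  }
  return right;  // Equals files.size() when no file can contain key.
}
```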
@@ -104,26 +105,25 @@ int FindFile(const InternalKeyComparator& icmp,
   return right;
 }

-static bool AfterFile(const Comparator* ucmp,
-                      const Slice* user_key, const FileMetaData* f) {
+static bool AfterFile(const Comparator* ucmp, const Slice* user_key,
+                      const FileMetaData* f) {
   // null user_key occurs before all keys and is therefore never after *f
   return (user_key != nullptr &&
           ucmp->Compare(*user_key, f->largest.user_key()) > 0);
 }

-static bool BeforeFile(const Comparator* ucmp,
-                       const Slice* user_key, const FileMetaData* f) {
+static bool BeforeFile(const Comparator* ucmp, const Slice* user_key,
+                       const FileMetaData* f) {
   // null user_key occurs after all keys and is therefore never before *f
   return (user_key != nullptr &&
           ucmp->Compare(*user_key, f->smallest.user_key()) < 0);
 }

-bool SomeFileOverlapsRange(
-    const InternalKeyComparator& icmp,
-    bool disjoint_sorted_files,
-    const std::vector<FileMetaData*>& files,
-    const Slice* smallest_user_key,
-    const Slice* largest_user_key) {
+bool SomeFileOverlapsRange(const InternalKeyComparator& icmp,
+                           bool disjoint_sorted_files,
+                           const std::vector<FileMetaData*>& files,
+                           const Slice* smallest_user_key,
+                           const Slice* largest_user_key) {
   const Comparator* ucmp = icmp.user_comparator();
   if (!disjoint_sorted_files) {
     // Need to check against all files
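Before the next hunk, a sketch of the brute-force branch opened above (paraphrased from the upstream body, which this hunk truncates): every file is tested against the range using the two helpers just reflowed.

```cpp
// Sketch of the !disjoint_sorted_files branch (assumption: paraphrase of the
// upstream code, not text carried in this patch).
for (size_t i = 0; i < files.size(); i++) {
  const FileMetaData* f = files[i];
  if (AfterFile(ucmp, smallest_user_key, f) ||
      BeforeFile(ucmp, largest_user_key, f)) {
    // No overlap with this file; keep scanning.
  } else {
    return true;  // Overlap found.
  }
}
return false;
```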
@@ -143,7 +143,8 @@ bool SomeFileOverlapsRange(
   uint32_t index = 0;
   if (smallest_user_key != nullptr) {
     // Find the earliest possible internal key for smallest_user_key
-    InternalKey small_key(*smallest_user_key, kMaxSequenceNumber,kValueTypeForSeek);
+    InternalKey small_key(*smallest_user_key, kMaxSequenceNumber,
+                          kValueTypeForSeek);
     index = FindFile(icmp, files, small_key.Encode());
   }

@@ -164,13 +165,9 @@ class Version::LevelFileNumIterator : public Iterator {
  public:
   LevelFileNumIterator(const InternalKeyComparator& icmp,
                        const std::vector<FileMetaData*>* flist)
-      : icmp_(icmp),
-        flist_(flist),
-        index_(flist->size()) {  // Marks as invalid
-  }
-  virtual bool Valid() const {
-    return index_ < flist_->size();
+      : icmp_(icmp), flist_(flist), index_(flist->size()) {  // Marks as invalid
   }
+  virtual bool Valid() const { return index_ < flist_->size(); }
   virtual void Seek(const Slice& target) {
     index_ = FindFile(icmp_, *flist_, target);
   }
@@ -197,10 +194,11 @@ class Version::LevelFileNumIterator : public Iterator {
   Slice value() const {
     assert(Valid());
     EncodeFixed64(value_buf_, (*flist_)[index_]->number);
-    EncodeFixed64(value_buf_+8, (*flist_)[index_]->file_size);
+    EncodeFixed64(value_buf_ + 8, (*flist_)[index_]->file_size);
     return Slice(value_buf_, sizeof(value_buf_));
   }
   virtual Status status() const { return Status::OK(); }

  private:
   const InternalKeyComparator icmp_;
   const std::vector<FileMetaData*>* const flist_;
@@ -210,16 +208,14 @@ class Version::LevelFileNumIterator : public Iterator {
   mutable char value_buf_[16];
 };

-static Iterator* GetFileIterator(void* arg,
-                                 const ReadOptions& options,
+static Iterator* GetFileIterator(void* arg, const ReadOptions& options,
                                  const Slice& file_value) {
   TableCache* cache = reinterpret_cast<TableCache*>(arg);
   if (file_value.size() != 16) {
     return NewErrorIterator(
         Status::Corruption("FileReader invoked with unexpected value"));
   } else {
-    return cache->NewIterator(options,
-                              DecodeFixed64(file_value.data()),
+    return cache->NewIterator(options, DecodeFixed64(file_value.data()),
                               DecodeFixed64(file_value.data() + 8));
   }
 }
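The 16-byte `file_value` decoded by `GetFileIterator` is exactly what `LevelFileNumIterator::value()` packed earlier in this diff: two little-endian 64-bit integers. A small self-contained round-trip sketch using the `util/coding.h` helpers named above (the `main` scaffolding and concrete numbers are ours):

```cpp
#include <cassert>
#include <cstdint>

#include "util/coding.h"  // EncodeFixed64 / DecodeFixed64, as used above

int main() {
  const uint64_t file_number = 42;
  const uint64_t file_size = 2 << 20;

  // Pack the pair the same way value() does: number in bytes [0,8),
  // size in bytes [8,16).
  char buf[16];
  leveldb::EncodeFixed64(buf, file_number);
  leveldb::EncodeFixed64(buf + 8, file_size);

  // Unpack the pair the same way GetFileIterator does.
  assert(leveldb::DecodeFixed64(buf) == file_number);
  assert(leveldb::DecodeFixed64(buf + 8) == file_size);
  return 0;
}
```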
@@ -227,17 +223,16 @@ static Iterator* GetFileIterator(void* arg,
 Iterator* Version::NewConcatenatingIterator(const ReadOptions& options,
                                             int level) const {
   return NewTwoLevelIterator(
-      new LevelFileNumIterator(vset_->icmp_, &files_[level]),
-      &GetFileIterator, vset_->table_cache_, options);
+      new LevelFileNumIterator(vset_->icmp_, &files_[level]), &GetFileIterator,
+      vset_->table_cache_, options);
 }

 void Version::AddIterators(const ReadOptions& options,
                            std::vector<Iterator*>* iters) {
   // Merge all level zero files together since they may overlap
   for (size_t i = 0; i < files_[0].size(); i++) {
-    iters->push_back(
-        vset_->table_cache_->NewIterator(
-            options, files_[0][i]->number, files_[0][i]->file_size));
+    iters->push_back(vset_->table_cache_->NewIterator(
+        options, files_[0][i]->number, files_[0][i]->file_size));
   }

   // For levels > 0, we can use a concatenating iterator that sequentially
@@ -264,7 +259,7 @@ struct Saver {
   Slice user_key;
   std::string* value;
 };
-}
+}  // namespace
 static void SaveValue(void* arg, const Slice& ikey, const Slice& v) {
   Saver* s = reinterpret_cast<Saver*>(arg);
   ParsedInternalKey parsed_key;
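The hunk cuts off right after `SaveValue` begins. A sketch of how the callback continues (paraphrased from upstream; not part of this patch): parse the internal key and, if the user key matches, record found/deleted state and copy the value out through the `Saver`.

```cpp
// Sketch of the rest of SaveValue (assumption: upstream paraphrase; the hunk
// above stops after "ParsedInternalKey parsed_key;").
if (!ParseInternalKey(ikey, &parsed_key)) {
  s->state = kCorrupt;
} else if (s->ucmp->Compare(parsed_key.user_key, s->user_key) == 0) {
  // The newest entry for this user key decides the outcome.
  s->state = (parsed_key.type == kTypeValue) ? kFound : kDeleted;
  if (s->state == kFound) {
    s->value->assign(v.data(), v.size());
  }
}
```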
@ -284,8 +279,7 @@ static bool NewestFirst(FileMetaData* a, FileMetaData* b) {
|
|||||||
return a->number > b->number;
|
return a->number > b->number;
|
||||||
}
|
}
|
||||||
|
|
||||||
void Version::ForEachOverlapping(Slice user_key, Slice internal_key,
|
void Version::ForEachOverlapping(Slice user_key, Slice internal_key, void* arg,
|
||||||
void* arg,
|
|
||||||
bool (*func)(void*, int, FileMetaData*)) {
|
bool (*func)(void*, int, FileMetaData*)) {
|
||||||
// TODO(sanjay): Change Version::Get() to use this function.
|
// TODO(sanjay): Change Version::Get() to use this function.
|
||||||
const Comparator* ucmp = vset_->icmp_.user_comparator();
|
const Comparator* ucmp = vset_->icmp_.user_comparator();
|
||||||
@ -329,10 +323,8 @@ void Version::ForEachOverlapping(Slice user_key, Slice internal_key,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Status Version::Get(const ReadOptions& options,
|
Status Version::Get(const ReadOptions& options, const LookupKey& k,
|
||||||
const LookupKey& k,
|
std::string* value, GetStats* stats) {
|
||||||
std::string* value,
|
|
||||||
GetStats* stats) {
|
|
||||||
Slice ikey = k.internal_key();
|
Slice ikey = k.internal_key();
|
||||||
Slice user_key = k.user_key();
|
Slice user_key = k.user_key();
|
||||||
const Comparator* ucmp = vset_->icmp_.user_comparator();
|
const Comparator* ucmp = vset_->icmp_.user_comparator();
|
||||||
@ -405,14 +397,14 @@ Status Version::Get(const ReadOptions& options,
|
|||||||
saver.ucmp = ucmp;
|
saver.ucmp = ucmp;
|
||||||
saver.user_key = user_key;
|
saver.user_key = user_key;
|
||||||
saver.value = value;
|
saver.value = value;
|
||||||
s = vset_->table_cache_->Get(options, f->number, f->file_size,
|
s = vset_->table_cache_->Get(options, f->number, f->file_size, ikey,
|
||||||
ikey, &saver, SaveValue);
|
&saver, SaveValue);
|
||||||
if (!s.ok()) {
|
if (!s.ok()) {
|
||||||
return s;
|
return s;
|
||||||
}
|
}
|
||||||
switch (saver.state) {
|
switch (saver.state) {
|
||||||
case kNotFound:
|
case kNotFound:
|
||||||
break; // Keep searching in other files
|
break; // Keep searching in other files
|
||||||
case kFound:
|
case kFound:
|
||||||
return s;
|
return s;
|
||||||
case kDeleted:
|
case kDeleted:
|
||||||
@ -479,9 +471,7 @@ bool Version::RecordReadSample(Slice internal_key) {
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
void Version::Ref() {
|
void Version::Ref() { ++refs_; }
|
||||||
++refs_;
|
|
||||||
}
|
|
||||||
|
|
||||||
void Version::Unref() {
|
void Version::Unref() {
|
||||||
assert(this != &vset_->dummy_versions_);
|
assert(this != &vset_->dummy_versions_);
|
||||||
@ -492,16 +482,14 @@ void Version::Unref() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
bool Version::OverlapInLevel(int level,
|
bool Version::OverlapInLevel(int level, const Slice* smallest_user_key,
|
||||||
const Slice* smallest_user_key,
|
|
||||||
const Slice* largest_user_key) {
|
const Slice* largest_user_key) {
|
||||||
return SomeFileOverlapsRange(vset_->icmp_, (level > 0), files_[level],
|
return SomeFileOverlapsRange(vset_->icmp_, (level > 0), files_[level],
|
||||||
smallest_user_key, largest_user_key);
|
smallest_user_key, largest_user_key);
|
||||||
}
|
}
|
||||||
|
|
||||||
int Version::PickLevelForMemTableOutput(
|
int Version::PickLevelForMemTableOutput(const Slice& smallest_user_key,
|
||||||
const Slice& smallest_user_key,
|
const Slice& largest_user_key) {
|
||||||
const Slice& largest_user_key) {
|
|
||||||
int level = 0;
|
int level = 0;
|
||||||
if (!OverlapInLevel(0, &smallest_user_key, &largest_user_key)) {
|
if (!OverlapInLevel(0, &smallest_user_key, &largest_user_key)) {
|
||||||
// Push to next level if there is no overlap in next level,
|
// Push to next level if there is no overlap in next level,
|
||||||
@ -528,11 +516,9 @@ int Version::PickLevelForMemTableOutput(
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Store in "*inputs" all files in "level" that overlap [begin,end]
|
// Store in "*inputs" all files in "level" that overlap [begin,end]
|
||||||
void Version::GetOverlappingInputs(
|
void Version::GetOverlappingInputs(int level, const InternalKey* begin,
|
||||||
int level,
|
const InternalKey* end,
|
||||||
const InternalKey* begin,
|
std::vector<FileMetaData*>* inputs) {
|
||||||
const InternalKey* end,
|
|
||||||
std::vector<FileMetaData*>* inputs) {
|
|
||||||
assert(level >= 0);
|
assert(level >= 0);
|
||||||
assert(level < config::kNumLevels);
|
assert(level < config::kNumLevels);
|
||||||
inputs->clear();
|
inputs->clear();
|
||||||
@ -544,7 +530,7 @@ void Version::GetOverlappingInputs(
|
|||||||
user_end = end->user_key();
|
user_end = end->user_key();
|
||||||
}
|
}
|
||||||
const Comparator* user_cmp = vset_->icmp_.user_comparator();
|
const Comparator* user_cmp = vset_->icmp_.user_comparator();
|
||||||
for (size_t i = 0; i < files_[level].size(); ) {
|
for (size_t i = 0; i < files_[level].size();) {
|
||||||
FileMetaData* f = files_[level][i++];
|
FileMetaData* f = files_[level][i++];
|
||||||
const Slice file_start = f->smallest.user_key();
|
const Slice file_start = f->smallest.user_key();
|
||||||
const Slice file_limit = f->largest.user_key();
|
const Slice file_limit = f->largest.user_key();
|
||||||
@ -561,8 +547,8 @@ void Version::GetOverlappingInputs(
|
|||||||
user_begin = file_start;
|
user_begin = file_start;
|
||||||
inputs->clear();
|
inputs->clear();
|
||||||
i = 0;
|
i = 0;
|
||||||
} else if (end != nullptr && user_cmp->Compare(file_limit,
|
} else if (end != nullptr &&
|
||||||
user_end) > 0) {
|
user_cmp->Compare(file_limit, user_end) > 0) {
|
||||||
user_end = file_limit;
|
user_end = file_limit;
|
||||||
inputs->clear();
|
inputs->clear();
|
||||||
i = 0;
|
i = 0;
|
||||||
@ -630,9 +616,7 @@ class VersionSet::Builder {
|
|||||||
|
|
||||||
public:
|
public:
|
||||||
// Initialize a builder with the files from *base and other info from *vset
|
// Initialize a builder with the files from *base and other info from *vset
|
||||||
Builder(VersionSet* vset, Version* base)
|
Builder(VersionSet* vset, Version* base) : vset_(vset), base_(base) {
|
||||||
: vset_(vset),
|
|
||||||
base_(base) {
|
|
||||||
base_->Ref();
|
base_->Ref();
|
||||||
BySmallestKey cmp;
|
BySmallestKey cmp;
|
||||||
cmp.internal_comparator = &vset_->icmp_;
|
cmp.internal_comparator = &vset_->icmp_;
|
||||||
@ -646,8 +630,8 @@ class VersionSet::Builder {
|
|||||||
const FileSet* added = levels_[level].added_files;
|
const FileSet* added = levels_[level].added_files;
|
||||||
std::vector<FileMetaData*> to_unref;
|
std::vector<FileMetaData*> to_unref;
|
||||||
to_unref.reserve(added->size());
|
to_unref.reserve(added->size());
|
||||||
for (FileSet::const_iterator it = added->begin();
|
for (FileSet::const_iterator it = added->begin(); it != added->end();
|
||||||
it != added->end(); ++it) {
|
++it) {
|
||||||
to_unref.push_back(*it);
|
to_unref.push_back(*it);
|
||||||
}
|
}
|
||||||
delete added;
|
delete added;
|
||||||
@ -674,8 +658,7 @@ class VersionSet::Builder {
|
|||||||
// Delete files
|
// Delete files
|
||||||
const VersionEdit::DeletedFileSet& del = edit->deleted_files_;
|
const VersionEdit::DeletedFileSet& del = edit->deleted_files_;
|
||||||
for (VersionEdit::DeletedFileSet::const_iterator iter = del.begin();
|
for (VersionEdit::DeletedFileSet::const_iterator iter = del.begin();
|
||||||
iter != del.end();
|
iter != del.end(); ++iter) {
|
||||||
++iter) {
|
|
||||||
const int level = iter->first;
|
const int level = iter->first;
|
||||||
const uint64_t number = iter->second;
|
const uint64_t number = iter->second;
|
||||||
levels_[level].deleted_files.insert(number);
|
levels_[level].deleted_files.insert(number);
|
||||||
@ -721,13 +704,11 @@ class VersionSet::Builder {
|
|||||||
const FileSet* added = levels_[level].added_files;
|
const FileSet* added = levels_[level].added_files;
|
||||||
v->files_[level].reserve(base_files.size() + added->size());
|
v->files_[level].reserve(base_files.size() + added->size());
|
||||||
for (FileSet::const_iterator added_iter = added->begin();
|
for (FileSet::const_iterator added_iter = added->begin();
|
||||||
added_iter != added->end();
|
added_iter != added->end(); ++added_iter) {
|
||||||
++added_iter) {
|
|
||||||
// Add all smaller files listed in base_
|
// Add all smaller files listed in base_
|
||||||
for (std::vector<FileMetaData*>::const_iterator bpos
|
for (std::vector<FileMetaData*>::const_iterator bpos =
|
||||||
= std::upper_bound(base_iter, base_end, *added_iter, cmp);
|
std::upper_bound(base_iter, base_end, *added_iter, cmp);
|
||||||
base_iter != bpos;
|
base_iter != bpos; ++base_iter) {
|
||||||
++base_iter) {
|
|
||||||
MaybeAddFile(v, level, *base_iter);
|
MaybeAddFile(v, level, *base_iter);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -743,7 +724,7 @@ class VersionSet::Builder {
|
|||||||
// Make sure there is no overlap in levels > 0
|
// Make sure there is no overlap in levels > 0
|
||||||
if (level > 0) {
|
if (level > 0) {
|
||||||
for (uint32_t i = 1; i < v->files_[level].size(); i++) {
|
for (uint32_t i = 1; i < v->files_[level].size(); i++) {
|
||||||
const InternalKey& prev_end = v->files_[level][i-1]->largest;
|
const InternalKey& prev_end = v->files_[level][i - 1]->largest;
|
||||||
const InternalKey& this_begin = v->files_[level][i]->smallest;
|
const InternalKey& this_begin = v->files_[level][i]->smallest;
|
||||||
if (vset_->icmp_.Compare(prev_end, this_begin) >= 0) {
|
if (vset_->icmp_.Compare(prev_end, this_begin) >= 0) {
|
||||||
fprintf(stderr, "overlapping ranges in same level %s vs. %s\n",
|
fprintf(stderr, "overlapping ranges in same level %s vs. %s\n",
|
||||||
@ -764,7 +745,7 @@ class VersionSet::Builder {
|
|||||||
std::vector<FileMetaData*>* files = &v->files_[level];
|
std::vector<FileMetaData*>* files = &v->files_[level];
|
||||||
if (level > 0 && !files->empty()) {
|
if (level > 0 && !files->empty()) {
|
||||||
// Must not overlap
|
// Must not overlap
|
||||||
assert(vset_->icmp_.Compare((*files)[files->size()-1]->largest,
|
assert(vset_->icmp_.Compare((*files)[files->size() - 1]->largest,
|
||||||
f->smallest) < 0);
|
f->smallest) < 0);
|
||||||
}
|
}
|
||||||
f->refs++;
|
f->refs++;
|
||||||
@ -773,8 +754,7 @@ class VersionSet::Builder {
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
VersionSet::VersionSet(const std::string& dbname,
|
VersionSet::VersionSet(const std::string& dbname, const Options* options,
|
||||||
const Options* options,
|
|
||||||
TableCache* table_cache,
|
TableCache* table_cache,
|
||||||
const InternalKeyComparator* cmp)
|
const InternalKeyComparator* cmp)
|
||||||
: env_(options->env),
|
: env_(options->env),
|
||||||
@ -903,7 +883,7 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) {
|
|||||||
return s;
|
return s;
|
||||||
}
|
}
|
||||||
|
|
||||||
Status VersionSet::Recover(bool *save_manifest) {
|
Status VersionSet::Recover(bool* save_manifest) {
|
||||||
struct LogReporter : public log::Reader::Reporter {
|
struct LogReporter : public log::Reader::Reporter {
|
||||||
Status* status;
|
Status* status;
|
||||||
virtual void Corruption(size_t bytes, const Status& s) {
|
virtual void Corruption(size_t bytes, const Status& s) {
|
||||||
@@ -917,7 +897,7 @@ Status VersionSet::Recover(bool *save_manifest) {
   if (!s.ok()) {
     return s;
   }
-  if (current.empty() || current[current.size()-1] != '\n') {
+  if (current.empty() || current[current.size() - 1] != '\n') {
     return Status::Corruption("CURRENT file does not end with newline");
   }
   current.resize(current.size() - 1);
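For context, `current` here holds the contents of the `CURRENT` file, which names the live MANIFEST. A sketch of the surrounding `Recover()` steps, paraphrased under the assumption that this formatting-only patch leaves the flow unchanged:

```cpp
// Sketch (assumption: paraphrase of the surrounding Recover() logic).
// Read "<dbname>/CURRENT", which stores the MANIFEST file name plus '\n'.
std::string current;
Status s = ReadFileToString(env_, CurrentFileName(dbname_), &current);
if (!s.ok()) return s;
if (current.empty() || current[current.size() - 1] != '\n') {
  return Status::Corruption("CURRENT file does not end with newline");
}
current.resize(current.size() - 1);  // Drop the trailing newline.

// Open the descriptor (MANIFEST) file that CURRENT points at; this is the
// NewSequentialFile call visible in the next hunk.
std::string dscname = dbname_ + "/" + current;
SequentialFile* file;
s = env_->NewSequentialFile(dscname, &file);
```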
@ -927,8 +907,8 @@ Status VersionSet::Recover(bool *save_manifest) {
|
|||||||
s = env_->NewSequentialFile(dscname, &file);
|
s = env_->NewSequentialFile(dscname, &file);
|
||||||
if (!s.ok()) {
|
if (!s.ok()) {
|
||||||
if (s.IsNotFound()) {
|
if (s.IsNotFound()) {
|
||||||
return Status::Corruption(
|
return Status::Corruption("CURRENT points to a non-existent file",
|
||||||
"CURRENT points to a non-existent file", s.ToString());
|
s.ToString());
|
||||||
}
|
}
|
||||||
return s;
|
return s;
|
||||||
}
|
}
|
||||||
@ -946,7 +926,8 @@ Status VersionSet::Recover(bool *save_manifest) {
|
|||||||
{
|
{
|
||||||
LogReporter reporter;
|
LogReporter reporter;
|
||||||
reporter.status = &s;
|
reporter.status = &s;
|
||||||
log::Reader reader(file, &reporter, true/*checksum*/, 0/*initial_offset*/);
|
log::Reader reader(file, &reporter, true /*checksum*/,
|
||||||
|
0 /*initial_offset*/);
|
||||||
Slice record;
|
Slice record;
|
||||||
std::string scratch;
|
std::string scratch;
|
||||||
while (reader.ReadRecord(&record, &scratch) && s.ok()) {
|
while (reader.ReadRecord(&record, &scratch) && s.ok()) {
|
||||||
@@ -1071,7 +1052,7 @@ void VersionSet::Finalize(Version* v) {
   int best_level = -1;
   double best_score = -1;

-  for (int level = 0; level < config::kNumLevels-1; level++) {
+  for (int level = 0; level < config::kNumLevels - 1; level++) {
     double score;
     if (level == 0) {
       // We treat level-0 specially by bounding the number of files
@@ -1086,7 +1067,7 @@ void VersionSet::Finalize(Version* v) {
       // setting, or very high compression ratios, or lots of
       // overwrites/deletions).
       score = v->files_[level].size() /
               static_cast<double>(config::kL0_CompactionTrigger);
     } else {
       // Compute the ratio of current size to size limit.
       const uint64_t level_bytes = TotalFileSize(v->files_[level]);
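The non-level-0 branch that this hunk cuts into scores a level by dividing its byte count by its size budget. A sketch of how the scoring loop finishes (paraphrased from upstream; the `MaxBytesForLevel` helper is assumed and not shown in this diff):

```cpp
// Sketch of the remainder of the Finalize() loop (assumption: paraphrase).
score = static_cast<double>(level_bytes) /
        MaxBytesForLevel(options_, level);
// ...
if (score > best_score) {
  best_level = level;  // Remember the level most in need of compaction.
  best_score = score;
}

// After the loop, the winner is recorded on the Version:
v->compaction_level_ = best_level;
v->compaction_score_ = best_score;
```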
@ -1142,16 +1123,12 @@ int VersionSet::NumLevelFiles(int level) const {
|
|||||||
|
|
||||||
const char* VersionSet::LevelSummary(LevelSummaryStorage* scratch) const {
|
const char* VersionSet::LevelSummary(LevelSummaryStorage* scratch) const {
|
||||||
// Update code if kNumLevels changes
|
// Update code if kNumLevels changes
|
||||||
assert(config::kNumLevels == 7);
|
static_assert(config::kNumLevels == 7, "");
|
||||||
snprintf(scratch->buffer, sizeof(scratch->buffer),
|
snprintf(scratch->buffer, sizeof(scratch->buffer),
|
||||||
"files[ %d %d %d %d %d %d %d ]",
|
"files[ %d %d %d %d %d %d %d ]", int(current_->files_[0].size()),
|
||||||
int(current_->files_[0].size()),
|
int(current_->files_[1].size()), int(current_->files_[2].size()),
|
||||||
int(current_->files_[1].size()),
|
int(current_->files_[3].size()), int(current_->files_[4].size()),
|
||||||
int(current_->files_[2].size()),
|
int(current_->files_[5].size()), int(current_->files_[6].size()));
|
||||||
int(current_->files_[3].size()),
|
|
||||||
int(current_->files_[4].size()),
|
|
||||||
int(current_->files_[5].size()),
|
|
||||||
int(current_->files_[6].size()));
|
|
||||||
return scratch->buffer;
|
return scratch->buffer;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1188,8 +1165,7 @@ uint64_t VersionSet::ApproximateOffsetOf(Version* v, const InternalKey& ikey) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
void VersionSet::AddLiveFiles(std::set<uint64_t>* live) {
|
void VersionSet::AddLiveFiles(std::set<uint64_t>* live) {
|
||||||
for (Version* v = dummy_versions_.next_;
|
for (Version* v = dummy_versions_.next_; v != &dummy_versions_;
|
||||||
v != &dummy_versions_;
|
|
||||||
v = v->next_) {
|
v = v->next_) {
|
||||||
for (int level = 0; level < config::kNumLevels; level++) {
|
for (int level = 0; level < config::kNumLevels; level++) {
|
||||||
const std::vector<FileMetaData*>& files = v->files_[level];
|
const std::vector<FileMetaData*>& files = v->files_[level];
|
||||||
@ -1212,7 +1188,7 @@ int64_t VersionSet::MaxNextLevelOverlappingBytes() {
|
|||||||
for (int level = 1; level < config::kNumLevels - 1; level++) {
|
for (int level = 1; level < config::kNumLevels - 1; level++) {
|
||||||
for (size_t i = 0; i < current_->files_[level].size(); i++) {
|
for (size_t i = 0; i < current_->files_[level].size(); i++) {
|
||||||
const FileMetaData* f = current_->files_[level][i];
|
const FileMetaData* f = current_->files_[level][i];
|
||||||
current_->GetOverlappingInputs(level+1, &f->smallest, &f->largest,
|
current_->GetOverlappingInputs(level + 1, &f->smallest, &f->largest,
|
||||||
&overlaps);
|
&overlaps);
|
||||||
const int64_t sum = TotalFileSize(overlaps);
|
const int64_t sum = TotalFileSize(overlaps);
|
||||||
if (sum > result) {
|
if (sum > result) {
|
||||||
@ -1227,8 +1203,7 @@ int64_t VersionSet::MaxNextLevelOverlappingBytes() {
|
|||||||
// *smallest, *largest.
|
// *smallest, *largest.
|
||||||
// REQUIRES: inputs is not empty
|
// REQUIRES: inputs is not empty
|
||||||
void VersionSet::GetRange(const std::vector<FileMetaData*>& inputs,
|
void VersionSet::GetRange(const std::vector<FileMetaData*>& inputs,
|
||||||
InternalKey* smallest,
|
InternalKey* smallest, InternalKey* largest) {
|
||||||
InternalKey* largest) {
|
|
||||||
assert(!inputs.empty());
|
assert(!inputs.empty());
|
||||||
smallest->Clear();
|
smallest->Clear();
|
||||||
largest->Clear();
|
largest->Clear();
|
||||||
@ -1253,8 +1228,7 @@ void VersionSet::GetRange(const std::vector<FileMetaData*>& inputs,
|
|||||||
// REQUIRES: inputs is not empty
|
// REQUIRES: inputs is not empty
|
||||||
void VersionSet::GetRange2(const std::vector<FileMetaData*>& inputs1,
|
void VersionSet::GetRange2(const std::vector<FileMetaData*>& inputs1,
|
||||||
const std::vector<FileMetaData*>& inputs2,
|
const std::vector<FileMetaData*>& inputs2,
|
||||||
InternalKey* smallest,
|
InternalKey* smallest, InternalKey* largest) {
|
||||||
InternalKey* largest) {
|
|
||||||
std::vector<FileMetaData*> all = inputs1;
|
std::vector<FileMetaData*> all = inputs1;
|
||||||
all.insert(all.end(), inputs2.begin(), inputs2.end());
|
all.insert(all.end(), inputs2.begin(), inputs2.end());
|
||||||
GetRange(all, smallest, largest);
|
GetRange(all, smallest, largest);
|
||||||
@ -1276,8 +1250,8 @@ Iterator* VersionSet::MakeInputIterator(Compaction* c) {
|
|||||||
if (c->level() + which == 0) {
|
if (c->level() + which == 0) {
|
||||||
const std::vector<FileMetaData*>& files = c->inputs_[which];
|
const std::vector<FileMetaData*>& files = c->inputs_[which];
|
||||||
for (size_t i = 0; i < files.size(); i++) {
|
for (size_t i = 0; i < files.size(); i++) {
|
||||||
list[num++] = table_cache_->NewIterator(
|
list[num++] = table_cache_->NewIterator(options, files[i]->number,
|
||||||
options, files[i]->number, files[i]->file_size);
|
files[i]->file_size);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// Create concatenating iterator for the files from this level
|
// Create concatenating iterator for the files from this level
|
||||||
@ -1304,7 +1278,7 @@ Compaction* VersionSet::PickCompaction() {
|
|||||||
if (size_compaction) {
|
if (size_compaction) {
|
||||||
level = current_->compaction_level_;
|
level = current_->compaction_level_;
|
||||||
assert(level >= 0);
|
assert(level >= 0);
|
||||||
assert(level+1 < config::kNumLevels);
|
assert(level + 1 < config::kNumLevels);
|
||||||
c = new Compaction(options_, level);
|
c = new Compaction(options_, level);
|
||||||
|
|
||||||
// Pick the first file that comes after compact_pointer_[level]
|
// Pick the first file that comes after compact_pointer_[level]
|
||||||
@ -1433,7 +1407,8 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
|
|||||||
AddBoundaryInputs(icmp_, current_->files_[level], &c->inputs_[0]);
|
AddBoundaryInputs(icmp_, current_->files_[level], &c->inputs_[0]);
|
||||||
GetRange(c->inputs_[0], &smallest, &largest);
|
GetRange(c->inputs_[0], &smallest, &largest);
|
||||||
|
|
||||||
current_->GetOverlappingInputs(level+1, &smallest, &largest, &c->inputs_[1]);
|
current_->GetOverlappingInputs(level + 1, &smallest, &largest,
|
||||||
|
&c->inputs_[1]);
|
||||||
|
|
||||||
// Get entire range covered by compaction
|
// Get entire range covered by compaction
|
||||||
InternalKey all_start, all_limit;
|
InternalKey all_start, all_limit;
|
||||||
@ -1454,18 +1429,14 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
|
|||||||
InternalKey new_start, new_limit;
|
InternalKey new_start, new_limit;
|
||||||
GetRange(expanded0, &new_start, &new_limit);
|
GetRange(expanded0, &new_start, &new_limit);
|
||||||
std::vector<FileMetaData*> expanded1;
|
std::vector<FileMetaData*> expanded1;
|
||||||
current_->GetOverlappingInputs(level+1, &new_start, &new_limit,
|
current_->GetOverlappingInputs(level + 1, &new_start, &new_limit,
|
||||||
&expanded1);
|
&expanded1);
|
||||||
if (expanded1.size() == c->inputs_[1].size()) {
|
if (expanded1.size() == c->inputs_[1].size()) {
|
||||||
Log(options_->info_log,
|
Log(options_->info_log,
|
||||||
"Expanding@%d %d+%d (%ld+%ld bytes) to %d+%d (%ld+%ld bytes)\n",
|
"Expanding@%d %d+%d (%ld+%ld bytes) to %d+%d (%ld+%ld bytes)\n",
|
||||||
level,
|
level, int(c->inputs_[0].size()), int(c->inputs_[1].size()),
|
||||||
int(c->inputs_[0].size()),
|
long(inputs0_size), long(inputs1_size), int(expanded0.size()),
|
||||||
int(c->inputs_[1].size()),
|
int(expanded1.size()), long(expanded0_size), long(inputs1_size));
|
||||||
long(inputs0_size), long(inputs1_size),
|
|
||||||
int(expanded0.size()),
|
|
||||||
int(expanded1.size()),
|
|
||||||
long(expanded0_size), long(inputs1_size));
|
|
||||||
smallest = new_start;
|
smallest = new_start;
|
||||||
largest = new_limit;
|
largest = new_limit;
|
||||||
c->inputs_[0] = expanded0;
|
c->inputs_[0] = expanded0;
|
||||||
@ -1490,10 +1461,8 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
|
|||||||
c->edit_.SetCompactPointer(level, largest);
|
c->edit_.SetCompactPointer(level, largest);
|
||||||
}
|
}
|
||||||
|
|
||||||
Compaction* VersionSet::CompactRange(
|
Compaction* VersionSet::CompactRange(int level, const InternalKey* begin,
|
||||||
int level,
|
const InternalKey* end) {
|
||||||
const InternalKey* begin,
|
|
||||||
const InternalKey* end) {
|
|
||||||
std::vector<FileMetaData*> inputs;
|
std::vector<FileMetaData*> inputs;
|
||||||
current_->GetOverlappingInputs(level, begin, end, &inputs);
|
current_->GetOverlappingInputs(level, begin, end, &inputs);
|
||||||
if (inputs.empty()) {
|
if (inputs.empty()) {
|
||||||
@ -1566,7 +1535,7 @@ bool Compaction::IsBaseLevelForKey(const Slice& user_key) {
|
|||||||
const Comparator* user_cmp = input_version_->vset_->icmp_.user_comparator();
|
const Comparator* user_cmp = input_version_->vset_->icmp_.user_comparator();
|
||||||
for (int lvl = level_ + 2; lvl < config::kNumLevels; lvl++) {
|
for (int lvl = level_ + 2; lvl < config::kNumLevels; lvl++) {
|
||||||
const std::vector<FileMetaData*>& files = input_version_->files_[lvl];
|
const std::vector<FileMetaData*>& files = input_version_->files_[lvl];
|
||||||
for (; level_ptrs_[lvl] < files.size(); ) {
|
for (; level_ptrs_[lvl] < files.size();) {
|
||||||
FileMetaData* f = files[level_ptrs_[lvl]];
|
FileMetaData* f = files[level_ptrs_[lvl]];
|
||||||
if (user_cmp->Compare(user_key, f->largest.user_key()) <= 0) {
|
if (user_cmp->Compare(user_key, f->largest.user_key()) <= 0) {
|
||||||
// We've advanced far enough
|
// We've advanced far enough
|
||||||
@@ -1587,8 +1556,9 @@ bool Compaction::ShouldStopBefore(const Slice& internal_key) {
   // Scan to find earliest grandparent file that contains key.
   const InternalKeyComparator* icmp = &vset->icmp_;
   while (grandparent_index_ < grandparents_.size() &&
          icmp->Compare(internal_key,
-                       grandparents_[grandparent_index_]->largest.Encode()) > 0) {
+                       grandparents_[grandparent_index_]->largest.Encode()) >
+             0) {
     if (seen_key_) {
       overlapped_bytes_ += grandparents_[grandparent_index_]->file_size;
     }
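The hunk ends inside the scan loop. A sketch of how `ShouldStopBefore` finishes (paraphrased from upstream; `MaxGrandParentOverlapBytes` is assumed, not shown here): once the accumulated grandparent overlap passes the limit, the current compaction output is closed so that no single output file overlaps too much of level+2.

```cpp
// Sketch of the tail of ShouldStopBefore() (assumption: upstream paraphrase).
    grandparent_index_++;
  }
  seen_key_ = true;

  if (overlapped_bytes_ > MaxGrandParentOverlapBytes(vset->options_)) {
    // Too much overlap for the current output; start a new output file.
    overlapped_bytes_ = 0;
    return true;
  }
  return false;
}
```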
|
@ -18,6 +18,7 @@
|
|||||||
#include <map>
|
#include <map>
|
||||||
#include <set>
|
#include <set>
|
||||||
#include <vector>
|
#include <vector>
|
||||||
|
|
||||||
#include "db/dbformat.h"
|
#include "db/dbformat.h"
|
||||||
#include "db/version_edit.h"
|
#include "db/version_edit.h"
|
||||||
#include "port/port.h"
|
#include "port/port.h"
|
||||||
@ -25,7 +26,9 @@
|
|||||||
|
|
||||||
namespace leveldb {
|
namespace leveldb {
|
||||||
|
|
||||||
namespace log { class Writer; }
|
namespace log {
|
||||||
|
class Writer;
|
||||||
|
}
|
||||||
|
|
||||||
class Compaction;
|
class Compaction;
|
||||||
class Iterator;
|
class Iterator;
|
||||||
@ -40,8 +43,7 @@ class WritableFile;
|
|||||||
// Return files.size() if there is no such file.
|
// Return files.size() if there is no such file.
|
||||||
// REQUIRES: "files" contains a sorted list of non-overlapping files.
|
// REQUIRES: "files" contains a sorted list of non-overlapping files.
|
||||||
int FindFile(const InternalKeyComparator& icmp,
|
int FindFile(const InternalKeyComparator& icmp,
|
||||||
const std::vector<FileMetaData*>& files,
|
const std::vector<FileMetaData*>& files, const Slice& key);
|
||||||
const Slice& key);
|
|
||||||
|
|
||||||
// Returns true iff some file in "files" overlaps the user key range
|
// Returns true iff some file in "files" overlaps the user key range
|
||||||
// [*smallest,*largest].
|
// [*smallest,*largest].
|
||||||
@ -90,16 +92,15 @@ class Version {
|
|||||||
|
|
||||||
void GetOverlappingInputs(
|
void GetOverlappingInputs(
|
||||||
int level,
|
int level,
|
||||||
const InternalKey* begin, // nullptr means before all keys
|
const InternalKey* begin, // nullptr means before all keys
|
||||||
const InternalKey* end, // nullptr means after all keys
|
const InternalKey* end, // nullptr means after all keys
|
||||||
std::vector<FileMetaData*>* inputs);
|
std::vector<FileMetaData*>* inputs);
|
||||||
|
|
||||||
// Returns true iff some file in the specified level overlaps
|
// Returns true iff some file in the specified level overlaps
|
||||||
// some part of [*smallest_user_key,*largest_user_key].
|
// some part of [*smallest_user_key,*largest_user_key].
|
||||||
// smallest_user_key==nullptr represents a key smaller than all the DB's keys.
|
// smallest_user_key==nullptr represents a key smaller than all the DB's keys.
|
||||||
// largest_user_key==nullptr represents a key largest than all the DB's keys.
|
// largest_user_key==nullptr represents a key largest than all the DB's keys.
|
||||||
bool OverlapInLevel(int level,
|
bool OverlapInLevel(int level, const Slice* smallest_user_key,
|
||||||
const Slice* smallest_user_key,
|
|
||||||
const Slice* largest_user_key);
|
const Slice* largest_user_key);
|
||||||
|
|
||||||
// Return the level at which we should place a new memtable compaction
|
// Return the level at which we should place a new memtable compaction
|
||||||
@ -124,14 +125,13 @@ class Version {
|
|||||||
// false, makes no more calls.
|
// false, makes no more calls.
|
||||||
//
|
//
|
||||||
// REQUIRES: user portion of internal_key == user_key.
|
// REQUIRES: user portion of internal_key == user_key.
|
||||||
void ForEachOverlapping(Slice user_key, Slice internal_key,
|
void ForEachOverlapping(Slice user_key, Slice internal_key, void* arg,
|
||||||
void* arg,
|
|
||||||
bool (*func)(void*, int, FileMetaData*));
|
bool (*func)(void*, int, FileMetaData*));
|
||||||
|
|
||||||
VersionSet* vset_; // VersionSet to which this Version belongs
|
VersionSet* vset_; // VersionSet to which this Version belongs
|
||||||
Version* next_; // Next version in linked list
|
Version* next_; // Next version in linked list
|
||||||
Version* prev_; // Previous version in linked list
|
Version* prev_; // Previous version in linked list
|
||||||
int refs_; // Number of live refs to this version
|
int refs_; // Number of live refs to this version
|
||||||
|
|
||||||
// List of files per level
|
// List of files per level
|
||||||
std::vector<FileMetaData*> files_[config::kNumLevels];
|
std::vector<FileMetaData*> files_[config::kNumLevels];
|
||||||
@ -147,12 +147,14 @@ class Version {
|
|||||||
int compaction_level_;
|
int compaction_level_;
|
||||||
|
|
||||||
explicit Version(VersionSet* vset)
|
explicit Version(VersionSet* vset)
|
||||||
: vset_(vset), next_(this), prev_(this), refs_(0),
|
: vset_(vset),
|
||||||
|
next_(this),
|
||||||
|
prev_(this),
|
||||||
|
refs_(0),
|
||||||
file_to_compact_(nullptr),
|
file_to_compact_(nullptr),
|
||||||
file_to_compact_level_(-1),
|
file_to_compact_level_(-1),
|
||||||
compaction_score_(-1),
|
compaction_score_(-1),
|
||||||
compaction_level_(-1) {
|
compaction_level_(-1) {}
|
||||||
}
|
|
||||||
|
|
||||||
~Version();
|
~Version();
|
||||||
|
|
||||||
@ -163,10 +165,8 @@ class Version {
|
|||||||
|
|
||||||
class VersionSet {
|
class VersionSet {
|
||||||
public:
|
public:
|
||||||
VersionSet(const std::string& dbname,
|
VersionSet(const std::string& dbname, const Options* options,
|
||||||
const Options* options,
|
TableCache* table_cache, const InternalKeyComparator*);
|
||||||
TableCache* table_cache,
|
|
||||||
const InternalKeyComparator*);
|
|
||||||
~VersionSet();
|
~VersionSet();
|
||||||
|
|
||||||
// Apply *edit to the current version to form a new descriptor that
|
// Apply *edit to the current version to form a new descriptor that
|
||||||
@ -178,7 +178,7 @@ class VersionSet {
|
|||||||
EXCLUSIVE_LOCKS_REQUIRED(mu);
|
EXCLUSIVE_LOCKS_REQUIRED(mu);
|
||||||
|
|
||||||
// Recover the last saved descriptor from persistent storage.
|
// Recover the last saved descriptor from persistent storage.
|
||||||
Status Recover(bool *save_manifest);
|
Status Recover(bool* save_manifest);
|
||||||
|
|
||||||
// Return the current version.
|
// Return the current version.
|
||||||
Version* current() const { return current_; }
|
Version* current() const { return current_; }
|
||||||
@ -233,10 +233,8 @@ class VersionSet {
|
|||||||
// the specified level. Returns nullptr if there is nothing in that
|
// the specified level. Returns nullptr if there is nothing in that
|
||||||
// level that overlaps the specified range. Caller should delete
|
// level that overlaps the specified range. Caller should delete
|
||||||
// the result.
|
// the result.
|
||||||
Compaction* CompactRange(
|
Compaction* CompactRange(int level, const InternalKey* begin,
|
||||||
int level,
|
const InternalKey* end);
|
||||||
const InternalKey* begin,
|
|
||||||
const InternalKey* end);
|
|
||||||
|
|
||||||
// Return the maximum overlapping data (in bytes) at next level for any
|
// Return the maximum overlapping data (in bytes) at next level for any
|
||||||
// file at a level >= 1.
|
// file at a level >= 1.
|
||||||
@ -277,14 +275,12 @@ class VersionSet {
|
|||||||
|
|
||||||
void Finalize(Version* v);
|
void Finalize(Version* v);
|
||||||
|
|
||||||
void GetRange(const std::vector<FileMetaData*>& inputs,
|
void GetRange(const std::vector<FileMetaData*>& inputs, InternalKey* smallest,
|
||||||
InternalKey* smallest,
|
|
||||||
InternalKey* largest);
|
InternalKey* largest);
|
||||||
|
|
||||||
void GetRange2(const std::vector<FileMetaData*>& inputs1,
|
void GetRange2(const std::vector<FileMetaData*>& inputs1,
|
||||||
const std::vector<FileMetaData*>& inputs2,
|
const std::vector<FileMetaData*>& inputs2,
|
||||||
InternalKey* smallest,
|
InternalKey* smallest, InternalKey* largest);
|
||||||
InternalKey* largest);
|
|
||||||
|
|
||||||
void SetupOtherInputs(Compaction* c);
|
void SetupOtherInputs(Compaction* c);
|
||||||
|
|
||||||
@ -373,7 +369,7 @@ class Compaction {
|
|||||||
VersionEdit edit_;
|
VersionEdit edit_;
|
||||||
|
|
||||||
// Each compaction reads inputs from "level_" and "level_+1"
|
// Each compaction reads inputs from "level_" and "level_+1"
|
||||||
std::vector<FileMetaData*> inputs_[2]; // The two sets of inputs
|
std::vector<FileMetaData*> inputs_[2]; // The two sets of inputs
|
||||||
|
|
||||||
// State used to check for number of overlapping grandparent files
|
// State used to check for number of overlapping grandparent files
|
||||||
// (parent == level_ + 1, grandparent == level_ + 2)
|
// (parent == level_ + 1, grandparent == level_ + 2)
|
||||||
|
@ -14,7 +14,7 @@ class FindFileTest {
|
|||||||
std::vector<FileMetaData*> files_;
|
std::vector<FileMetaData*> files_;
|
||||||
bool disjoint_sorted_files_;
|
bool disjoint_sorted_files_;
|
||||||
|
|
||||||
FindFileTest() : disjoint_sorted_files_(true) { }
|
FindFileTest() : disjoint_sorted_files_(true) {}
|
||||||
|
|
||||||
~FindFileTest() {
|
~FindFileTest() {
|
||||||
for (int i = 0; i < files_.size(); i++) {
|
for (int i = 0; i < files_.size(); i++) {
|
||||||
@ -50,10 +50,10 @@ class FindFileTest {
|
|||||||
|
|
||||||
TEST(FindFileTest, Empty) {
|
TEST(FindFileTest, Empty) {
|
||||||
ASSERT_EQ(0, Find("foo"));
|
ASSERT_EQ(0, Find("foo"));
|
||||||
ASSERT_TRUE(! Overlaps("a", "z"));
|
ASSERT_TRUE(!Overlaps("a", "z"));
|
||||||
ASSERT_TRUE(! Overlaps(nullptr, "z"));
|
ASSERT_TRUE(!Overlaps(nullptr, "z"));
|
||||||
ASSERT_TRUE(! Overlaps("a", nullptr));
|
ASSERT_TRUE(!Overlaps("a", nullptr));
|
||||||
ASSERT_TRUE(! Overlaps(nullptr, nullptr));
|
ASSERT_TRUE(!Overlaps(nullptr, nullptr));
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST(FindFileTest, Single) {
|
TEST(FindFileTest, Single) {
|
||||||
@ -65,8 +65,8 @@ TEST(FindFileTest, Single) {
|
|||||||
ASSERT_EQ(1, Find("q1"));
|
ASSERT_EQ(1, Find("q1"));
|
||||||
ASSERT_EQ(1, Find("z"));
|
ASSERT_EQ(1, Find("z"));
|
||||||
|
|
||||||
ASSERT_TRUE(! Overlaps("a", "b"));
|
ASSERT_TRUE(!Overlaps("a", "b"));
|
||||||
ASSERT_TRUE(! Overlaps("z1", "z2"));
|
ASSERT_TRUE(!Overlaps("z1", "z2"));
|
||||||
ASSERT_TRUE(Overlaps("a", "p"));
|
ASSERT_TRUE(Overlaps("a", "p"));
|
||||||
ASSERT_TRUE(Overlaps("a", "q"));
|
ASSERT_TRUE(Overlaps("a", "q"));
|
||||||
ASSERT_TRUE(Overlaps("a", "z"));
|
ASSERT_TRUE(Overlaps("a", "z"));
|
||||||
@ -78,15 +78,14 @@ TEST(FindFileTest, Single) {
|
|||||||
ASSERT_TRUE(Overlaps("q", "q"));
|
ASSERT_TRUE(Overlaps("q", "q"));
|
||||||
ASSERT_TRUE(Overlaps("q", "q1"));
|
ASSERT_TRUE(Overlaps("q", "q1"));
|
||||||
|
|
||||||
ASSERT_TRUE(! Overlaps(nullptr, "j"));
|
ASSERT_TRUE(!Overlaps(nullptr, "j"));
|
||||||
ASSERT_TRUE(! Overlaps("r", nullptr));
|
ASSERT_TRUE(!Overlaps("r", nullptr));
|
||||||
ASSERT_TRUE(Overlaps(nullptr, "p"));
|
ASSERT_TRUE(Overlaps(nullptr, "p"));
|
||||||
ASSERT_TRUE(Overlaps(nullptr, "p1"));
|
ASSERT_TRUE(Overlaps(nullptr, "p1"));
|
||||||
ASSERT_TRUE(Overlaps("q", nullptr));
|
ASSERT_TRUE(Overlaps("q", nullptr));
|
||||||
ASSERT_TRUE(Overlaps(nullptr, nullptr));
|
ASSERT_TRUE(Overlaps(nullptr, nullptr));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
TEST(FindFileTest, Multiple) {
|
TEST(FindFileTest, Multiple) {
|
||||||
Add("150", "200");
|
Add("150", "200");
|
||||||
Add("200", "250");
|
Add("200", "250");
|
||||||
@ -110,10 +109,10 @@ TEST(FindFileTest, Multiple) {
|
|||||||
ASSERT_EQ(3, Find("450"));
|
ASSERT_EQ(3, Find("450"));
|
||||||
ASSERT_EQ(4, Find("451"));
|
ASSERT_EQ(4, Find("451"));
|
||||||
|
|
||||||
ASSERT_TRUE(! Overlaps("100", "149"));
|
ASSERT_TRUE(!Overlaps("100", "149"));
|
||||||
ASSERT_TRUE(! Overlaps("251", "299"));
|
ASSERT_TRUE(!Overlaps("251", "299"));
|
||||||
ASSERT_TRUE(! Overlaps("451", "500"));
|
ASSERT_TRUE(!Overlaps("451", "500"));
|
||||||
ASSERT_TRUE(! Overlaps("351", "399"));
|
ASSERT_TRUE(!Overlaps("351", "399"));
|
||||||
|
|
||||||
ASSERT_TRUE(Overlaps("100", "150"));
|
ASSERT_TRUE(Overlaps("100", "150"));
|
||||||
ASSERT_TRUE(Overlaps("100", "200"));
|
ASSERT_TRUE(Overlaps("100", "200"));
|
||||||
@ -130,8 +129,8 @@ TEST(FindFileTest, MultipleNullBoundaries) {
|
|||||||
Add("200", "250");
|
Add("200", "250");
|
||||||
Add("300", "350");
|
Add("300", "350");
|
||||||
Add("400", "450");
|
Add("400", "450");
|
||||||
ASSERT_TRUE(! Overlaps(nullptr, "149"));
|
ASSERT_TRUE(!Overlaps(nullptr, "149"));
|
||||||
ASSERT_TRUE(! Overlaps("451", nullptr));
|
ASSERT_TRUE(!Overlaps("451", nullptr));
|
||||||
ASSERT_TRUE(Overlaps(nullptr, nullptr));
|
ASSERT_TRUE(Overlaps(nullptr, nullptr));
|
||||||
ASSERT_TRUE(Overlaps(nullptr, "150"));
|
ASSERT_TRUE(Overlaps(nullptr, "150"));
|
||||||
ASSERT_TRUE(Overlaps(nullptr, "199"));
|
ASSERT_TRUE(Overlaps(nullptr, "199"));
|
||||||
@ -147,8 +146,8 @@ TEST(FindFileTest, MultipleNullBoundaries) {
|
|||||||
|
|
||||||
TEST(FindFileTest, OverlapSequenceChecks) {
|
TEST(FindFileTest, OverlapSequenceChecks) {
|
||||||
Add("200", "200", 5000, 3000);
|
Add("200", "200", 5000, 3000);
|
||||||
ASSERT_TRUE(! Overlaps("199", "199"));
|
ASSERT_TRUE(!Overlaps("199", "199"));
|
||||||
ASSERT_TRUE(! Overlaps("201", "300"));
|
ASSERT_TRUE(!Overlaps("201", "300"));
|
||||||
ASSERT_TRUE(Overlaps("200", "200"));
|
ASSERT_TRUE(Overlaps("200", "200"));
|
||||||
ASSERT_TRUE(Overlaps("190", "200"));
|
ASSERT_TRUE(Overlaps("190", "200"));
|
||||||
ASSERT_TRUE(Overlaps("200", "210"));
|
ASSERT_TRUE(Overlaps("200", "210"));
|
||||||
@ -158,8 +157,8 @@ TEST(FindFileTest, OverlappingFiles) {
|
|||||||
Add("150", "600");
|
Add("150", "600");
|
||||||
Add("400", "500");
|
Add("400", "500");
|
||||||
disjoint_sorted_files_ = false;
|
disjoint_sorted_files_ = false;
|
||||||
ASSERT_TRUE(! Overlaps("100", "149"));
|
ASSERT_TRUE(!Overlaps("100", "149"));
|
||||||
ASSERT_TRUE(! Overlaps("601", "700"));
|
ASSERT_TRUE(!Overlaps("601", "700"));
|
||||||
ASSERT_TRUE(Overlaps("100", "150"));
|
ASSERT_TRUE(Overlaps("100", "150"));
|
||||||
ASSERT_TRUE(Overlaps("100", "200"));
|
ASSERT_TRUE(Overlaps("100", "200"));
|
||||||
ASSERT_TRUE(Overlaps("100", "300"));
|
ASSERT_TRUE(Overlaps("100", "300"));
|
||||||
|
db/write_batch.cc
@@ -15,10 +15,10 @@

 #include "leveldb/write_batch.h"

-#include "leveldb/db.h"
 #include "db/dbformat.h"
 #include "db/memtable.h"
 #include "db/write_batch_internal.h"
+#include "leveldb/db.h"
 #include "util/coding.h"

 namespace leveldb {
@@ -26,22 +26,18 @@ namespace leveldb {
 // WriteBatch header has an 8-byte sequence number followed by a 4-byte count.
 static const size_t kHeader = 12;

-WriteBatch::WriteBatch() {
-  Clear();
-}
+WriteBatch::WriteBatch() { Clear(); }

-WriteBatch::~WriteBatch() { }
+WriteBatch::~WriteBatch() {}

-WriteBatch::Handler::~Handler() { }
+WriteBatch::Handler::~Handler() {}

 void WriteBatch::Clear() {
   rep_.clear();
   rep_.resize(kHeader);
 }

-size_t WriteBatch::ApproximateSize() const {
-  return rep_.size();
-}
+size_t WriteBatch::ApproximateSize() const { return rep_.size(); }

 Status WriteBatch::Iterate(Handler* handler) const {
   Slice input(rep_);
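The `kHeader` constant above fixes the `rep_` layout: bytes [0,8) hold the starting sequence number, bytes [8,12) hold the operation count, and the serialized operations follow. A small sketch of reading that header with the `util/coding.h` helpers (the free functions here are illustrative stand-ins for the real `WriteBatchInternal::Sequence()` / `Count()` accessors):

```cpp
#include <cstdint>
#include <string>

#include "util/coding.h"  // DecodeFixed32 / DecodeFixed64

// Sketch: how the 12-byte WriteBatch header described above can be read.
// `rep` stands in for WriteBatch::rep_; these helper names are ours.
uint64_t HeaderSequence(const std::string& rep) {
  return leveldb::DecodeFixed64(rep.data());  // bytes [0, 8)
}

uint32_t HeaderCount(const std::string& rep) {
  return leveldb::DecodeFixed32(rep.data() + 8);  // bytes [8, 12)
}
```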
@ -112,7 +108,7 @@ void WriteBatch::Delete(const Slice& key) {
|
|||||||
PutLengthPrefixedSlice(&rep_, key);
|
PutLengthPrefixedSlice(&rep_, key);
|
||||||
}
|
}
|
||||||
|
|
||||||
void WriteBatch::Append(const WriteBatch &source) {
|
void WriteBatch::Append(const WriteBatch& source) {
|
||||||
WriteBatchInternal::Append(this, &source);
|
WriteBatchInternal::Append(this, &source);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -133,8 +129,7 @@ class MemTableInserter : public WriteBatch::Handler {
|
|||||||
};
|
};
|
||||||
} // namespace
|
} // namespace
|
||||||
|
|
||||||
Status WriteBatchInternal::InsertInto(const WriteBatch* b,
|
Status WriteBatchInternal::InsertInto(const WriteBatch* b, MemTable* memtable) {
|
||||||
MemTable* memtable) {
|
|
||||||
MemTableInserter inserter;
|
MemTableInserter inserter;
|
||||||
inserter.sequence_ = WriteBatchInternal::Sequence(b);
|
inserter.sequence_ = WriteBatchInternal::Sequence(b);
|
||||||
inserter.mem_ = memtable;
|
inserter.mem_ = memtable;
|
||||||
|
@ -29,13 +29,9 @@ class WriteBatchInternal {
|
|||||||
// this batch.
|
// this batch.
|
||||||
static void SetSequence(WriteBatch* batch, SequenceNumber seq);
|
static void SetSequence(WriteBatch* batch, SequenceNumber seq);
|
||||||
|
|
||||||
static Slice Contents(const WriteBatch* batch) {
|
static Slice Contents(const WriteBatch* batch) { return Slice(batch->rep_); }
|
||||||
return Slice(batch->rep_);
|
|
||||||
}
|
|
||||||
|
|
||||||
static size_t ByteSize(const WriteBatch* batch) {
|
static size_t ByteSize(const WriteBatch* batch) { return batch->rep_.size(); }
|
||||||
return batch->rep_.size();
|
|
||||||
}
|
|
||||||
|
|
||||||
static void SetContents(WriteBatch* batch, const Slice& contents);
|
static void SetContents(WriteBatch* batch, const Slice& contents);
|
||||||
|
|
||||||
@ -46,5 +42,4 @@ class WriteBatchInternal {
|
|||||||
|
|
||||||
} // namespace leveldb
|
} // namespace leveldb
|
||||||
|
|
||||||
|
|
||||||
#endif // STORAGE_LEVELDB_DB_WRITE_BATCH_INTERNAL_H_
|
#endif // STORAGE_LEVELDB_DB_WRITE_BATCH_INTERNAL_H_
|
||||||
|
@ -52,7 +52,7 @@ static std::string PrintContents(WriteBatch* b) {
|
|||||||
return state;
|
return state;
|
||||||
}
|
}
|
||||||
|
|
||||||
class WriteBatchTest { };
|
class WriteBatchTest {};
|
||||||
|
|
||||||
TEST(WriteBatchTest, Empty) {
|
TEST(WriteBatchTest, Empty) {
|
||||||
WriteBatch batch;
|
WriteBatch batch;
|
||||||
@ -68,10 +68,11 @@ TEST(WriteBatchTest, Multiple) {
|
|||||||
WriteBatchInternal::SetSequence(&batch, 100);
|
WriteBatchInternal::SetSequence(&batch, 100);
|
||||||
ASSERT_EQ(100, WriteBatchInternal::Sequence(&batch));
|
ASSERT_EQ(100, WriteBatchInternal::Sequence(&batch));
|
||||||
ASSERT_EQ(3, WriteBatchInternal::Count(&batch));
|
ASSERT_EQ(3, WriteBatchInternal::Count(&batch));
|
||||||
ASSERT_EQ("Put(baz, boo)@102"
|
ASSERT_EQ(
|
||||||
"Delete(box)@101"
|
"Put(baz, boo)@102"
|
||||||
"Put(foo, bar)@100",
|
"Delete(box)@101"
|
||||||
PrintContents(&batch));
|
"Put(foo, bar)@100",
|
||||||
|
PrintContents(&batch));
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST(WriteBatchTest, Corruption) {
|
TEST(WriteBatchTest, Corruption) {
|
||||||
@ -81,10 +82,11 @@ TEST(WriteBatchTest, Corruption) {
|
|||||||
WriteBatchInternal::SetSequence(&batch, 200);
|
WriteBatchInternal::SetSequence(&batch, 200);
|
||||||
Slice contents = WriteBatchInternal::Contents(&batch);
|
Slice contents = WriteBatchInternal::Contents(&batch);
|
||||||
WriteBatchInternal::SetContents(&batch,
|
WriteBatchInternal::SetContents(&batch,
|
||||||
Slice(contents.data(),contents.size()-1));
|
Slice(contents.data(), contents.size() - 1));
|
||||||
ASSERT_EQ("Put(foo, bar)@200"
|
ASSERT_EQ(
|
||||||
"ParseError()",
|
"Put(foo, bar)@200"
|
||||||
PrintContents(&batch));
|
"ParseError()",
|
||||||
|
PrintContents(&batch));
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST(WriteBatchTest, Append) {
|
TEST(WriteBatchTest, Append) {
|
||||||
@ -92,25 +94,25 @@ TEST(WriteBatchTest, Append) {
|
|||||||
WriteBatchInternal::SetSequence(&b1, 200);
|
WriteBatchInternal::SetSequence(&b1, 200);
|
||||||
WriteBatchInternal::SetSequence(&b2, 300);
|
WriteBatchInternal::SetSequence(&b2, 300);
|
||||||
b1.Append(b2);
|
b1.Append(b2);
|
||||||
ASSERT_EQ("",
|
ASSERT_EQ("", PrintContents(&b1));
|
||||||
PrintContents(&b1));
|
|
||||||
b2.Put("a", "va");
|
b2.Put("a", "va");
|
||||||
b1.Append(b2);
|
b1.Append(b2);
|
||||||
ASSERT_EQ("Put(a, va)@200",
|
ASSERT_EQ("Put(a, va)@200", PrintContents(&b1));
|
||||||
PrintContents(&b1));
|
|
||||||
b2.Clear();
|
b2.Clear();
|
||||||
b2.Put("b", "vb");
|
b2.Put("b", "vb");
|
||||||
b1.Append(b2);
|
b1.Append(b2);
|
||||||
ASSERT_EQ("Put(a, va)@200"
|
ASSERT_EQ(
|
||||||
"Put(b, vb)@201",
|
"Put(a, va)@200"
|
||||||
PrintContents(&b1));
|
"Put(b, vb)@201",
|
||||||
|
PrintContents(&b1));
|
||||||
b2.Delete("foo");
|
b2.Delete("foo");
|
||||||
b1.Append(b2);
|
b1.Append(b2);
|
||||||
ASSERT_EQ("Put(a, va)@200"
|
ASSERT_EQ(
|
||||||
"Put(b, vb)@202"
|
"Put(a, va)@200"
|
||||||
"Put(b, vb)@201"
|
"Put(b, vb)@202"
|
||||||
"Delete(foo)@203",
|
"Put(b, vb)@201"
|
||||||
PrintContents(&b1));
|
"Delete(foo)@203",
|
||||||
|
PrintContents(&b1));
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST(WriteBatchTest, ApproximateSize) {
|
TEST(WriteBatchTest, ApproximateSize) {
|
||||||
@ -132,6 +134,4 @@ TEST(WriteBatchTest, ApproximateSize) {
|
|||||||
|
|
||||||
} // namespace leveldb
|
} // namespace leveldb
|
||||||
|
|
||||||
int main(int argc, char** argv) {
|
int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
|
||||||
return leveldb::test::RunAllTests();
|
|
||||||
}
|
|
||||||
|
doc/bench/db_bench_sqlite3.cc
@@ -2,9 +2,10 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.

+#include <sqlite3.h>
 #include <stdio.h>
 #include <stdlib.h>
-#include <sqlite3.h>
+
 #include "util/histogram.h"
 #include "util/random.h"
 #include "util/testutil.h"
@ -38,8 +39,7 @@ static const char* FLAGS_benchmarks =
|
|||||||
"fillrand100K,"
|
"fillrand100K,"
|
||||||
"fillseq100K,"
|
"fillseq100K,"
|
||||||
"readseq,"
|
"readseq,"
|
||||||
"readrand100K,"
|
"readrand100K,";
|
||||||
;
|
|
||||||
|
|
||||||
// Number of key/values to place in database
|
// Number of key/values to place in database
|
||||||
static int FLAGS_num = 1000000;
|
static int FLAGS_num = 1000000;
|
||||||
@ -78,8 +78,7 @@ static bool FLAGS_WAL_enabled = true;
|
|||||||
// Use the db with the following name.
|
// Use the db with the following name.
|
||||||
static const char* FLAGS_db = nullptr;
|
static const char* FLAGS_db = nullptr;
|
||||||
|
|
||||||
inline
|
inline static void ExecErrorCheck(int status, char* err_msg) {
|
||||||
static void ExecErrorCheck(int status, char *err_msg) {
|
|
||||||
if (status != SQLITE_OK) {
|
if (status != SQLITE_OK) {
|
||||||
fprintf(stderr, "SQL error: %s\n", err_msg);
|
fprintf(stderr, "SQL error: %s\n", err_msg);
|
||||||
sqlite3_free(err_msg);
|
sqlite3_free(err_msg);
|
||||||
@ -87,24 +86,21 @@ static void ExecErrorCheck(int status, char *err_msg) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
inline
|
inline static void StepErrorCheck(int status) {
|
||||||
static void StepErrorCheck(int status) {
|
|
||||||
if (status != SQLITE_DONE) {
|
if (status != SQLITE_DONE) {
|
||||||
fprintf(stderr, "SQL step error: status = %d\n", status);
|
fprintf(stderr, "SQL step error: status = %d\n", status);
|
||||||
exit(1);
|
exit(1);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
inline
|
inline static void ErrorCheck(int status) {
|
||||||
static void ErrorCheck(int status) {
|
|
||||||
if (status != SQLITE_OK) {
|
if (status != SQLITE_OK) {
|
||||||
fprintf(stderr, "sqlite3 error: status = %d\n", status);
|
fprintf(stderr, "sqlite3 error: status = %d\n", status);
|
||||||
exit(1);
|
exit(1);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
inline
|
inline static void WalCheckpoint(sqlite3* db_) {
|
||||||
static void WalCheckpoint(sqlite3* db_) {
|
|
||||||
// Flush all writes to disk
|
// Flush all writes to disk
|
||||||
if (FLAGS_WAL_enabled) {
|
if (FLAGS_WAL_enabled) {
|
||||||
sqlite3_wal_checkpoint_v2(db_, nullptr, SQLITE_CHECKPOINT_FULL, nullptr,
|
sqlite3_wal_checkpoint_v2(db_, nullptr, SQLITE_CHECKPOINT_FULL, nullptr,
|
||||||
@ -153,7 +149,7 @@ static Slice TrimSpace(Slice s) {
|
|||||||
start++;
|
start++;
|
||||||
}
|
}
|
||||||
int limit = s.size();
|
int limit = s.size();
|
||||||
while (limit > start && isspace(s[limit-1])) {
|
while (limit > start && isspace(s[limit - 1])) {
|
||||||
limit--;
|
limit--;
|
||||||
}
|
}
|
||||||
return Slice(s.data() + start, limit - start);
|
return Slice(s.data() + start, limit - start);
|
||||||
@ -177,7 +173,7 @@ class Benchmark {
|
|||||||
|
|
||||||
// State kept for progress messages
|
// State kept for progress messages
|
||||||
int done_;
|
int done_;
|
||||||
int next_report_; // When to report next
|
int next_report_; // When to report next
|
||||||
|
|
||||||
void PrintHeader() {
|
void PrintHeader() {
|
||||||
const int kKeySize = 16;
|
const int kKeySize = 16;
|
||||||
@ -186,17 +182,17 @@ class Benchmark {
|
|||||||
fprintf(stdout, "Values: %d bytes each\n", FLAGS_value_size);
|
fprintf(stdout, "Values: %d bytes each\n", FLAGS_value_size);
|
||||||
fprintf(stdout, "Entries: %d\n", num_);
|
fprintf(stdout, "Entries: %d\n", num_);
|
||||||
fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
|
fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
|
||||||
((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_)
|
((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
|
||||||
/ 1048576.0));
|
1048576.0));
|
||||||
PrintWarnings();
|
PrintWarnings();
|
||||||
fprintf(stdout, "------------------------------------------------\n");
|
fprintf(stdout, "------------------------------------------------\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
void PrintWarnings() {
|
void PrintWarnings() {
|
||||||
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
|
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
|
||||||
fprintf(stdout,
|
fprintf(
|
||||||
"WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"
|
stdout,
|
||||||
);
|
"WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
|
||||||
#endif
|
#endif
|
||||||
#ifndef NDEBUG
|
#ifndef NDEBUG
|
||||||
fprintf(stdout,
|
fprintf(stdout,
|
||||||
@ -262,13 +258,20 @@ class Benchmark {
|
|||||||
|
|
||||||
done_++;
|
done_++;
|
||||||
if (done_ >= next_report_) {
|
if (done_ >= next_report_) {
|
||||||
if (next_report_ < 1000) next_report_ += 100;
|
if (next_report_ < 1000)
|
||||||
else if (next_report_ < 5000) next_report_ += 500;
|
next_report_ += 100;
|
||||||
else if (next_report_ < 10000) next_report_ += 1000;
|
else if (next_report_ < 5000)
|
||||||
else if (next_report_ < 50000) next_report_ += 5000;
|
next_report_ += 500;
|
||||||
else if (next_report_ < 100000) next_report_ += 10000;
|
else if (next_report_ < 10000)
|
||||||
else if (next_report_ < 500000) next_report_ += 50000;
|
next_report_ += 1000;
|
||||||
else next_report_ += 100000;
|
else if (next_report_ < 50000)
|
||||||
|
next_report_ += 5000;
|
||||||
|
else if (next_report_ < 100000)
|
||||||
|
next_report_ += 10000;
|
||||||
|
else if (next_report_ < 500000)
|
||||||
|
next_report_ += 50000;
|
||||||
|
else
|
||||||
|
next_report_ += 100000;
|
||||||
fprintf(stderr, "... finished %d ops%30s\r", done_, "");
|
fprintf(stderr, "... finished %d ops%30s\r", done_, "");
|
||||||
fflush(stderr);
|
fflush(stderr);
|
||||||
}
|
}
|
||||||
@ -286,16 +289,14 @@ class Benchmark {
|
|||||||
snprintf(rate, sizeof(rate), "%6.1f MB/s",
|
snprintf(rate, sizeof(rate), "%6.1f MB/s",
|
||||||
(bytes_ / 1048576.0) / (finish - start_));
|
(bytes_ / 1048576.0) / (finish - start_));
|
||||||
if (!message_.empty()) {
|
if (!message_.empty()) {
|
||||||
message_ = std::string(rate) + " " + message_;
|
message_ = std::string(rate) + " " + message_;
|
||||||
} else {
|
} else {
|
||||||
message_ = rate;
|
message_ = rate;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
|
fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(),
|
||||||
name.ToString().c_str(),
|
(finish - start_) * 1e6 / done_, (message_.empty() ? "" : " "),
|
||||||
(finish - start_) * 1e6 / done_,
|
|
||||||
(message_.empty() ? "" : " "),
|
|
||||||
message_.c_str());
|
message_.c_str());
|
||||||
if (FLAGS_histogram) {
|
if (FLAGS_histogram) {
|
||||||
fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
|
fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
|
||||||
@ -304,22 +305,16 @@ class Benchmark {
|
|||||||
}
|
}
|
||||||
|
|
||||||
public:
|
public:
|
||||||
enum Order {
|
enum Order { SEQUENTIAL, RANDOM };
|
||||||
SEQUENTIAL,
|
enum DBState { FRESH, EXISTING };
|
||||||
RANDOM
|
|
||||||
};
|
|
||||||
enum DBState {
|
|
||||||
FRESH,
|
|
||||||
EXISTING
|
|
||||||
};
|
|
||||||
|
|
||||||
Benchmark()
|
Benchmark()
|
||||||
: db_(nullptr),
|
: db_(nullptr),
|
||||||
db_num_(0),
|
db_num_(0),
|
||||||
num_(FLAGS_num),
|
num_(FLAGS_num),
|
||||||
reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
|
reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
|
||||||
bytes_(0),
|
bytes_(0),
|
||||||
rand_(301) {
|
rand_(301) {
|
||||||
std::vector<std::string> files;
|
std::vector<std::string> files;
|
||||||
std::string test_dir;
|
std::string test_dir;
|
||||||
Env::Default()->GetTestDirectory(&test_dir);
|
Env::Default()->GetTestDirectory(&test_dir);
|
||||||
@ -426,10 +421,8 @@ class Benchmark {
|
|||||||
// Open database
|
// Open database
|
||||||
std::string tmp_dir;
|
std::string tmp_dir;
|
||||||
Env::Default()->GetTestDirectory(&tmp_dir);
|
Env::Default()->GetTestDirectory(&tmp_dir);
|
||||||
snprintf(file_name, sizeof(file_name),
|
snprintf(file_name, sizeof(file_name), "%s/dbbench_sqlite3-%d.db",
|
||||||
"%s/dbbench_sqlite3-%d.db",
|
tmp_dir.c_str(), db_num_);
|
||||||
tmp_dir.c_str(),
|
|
||||||
db_num_);
|
|
||||||
status = sqlite3_open(file_name, &db_);
|
status = sqlite3_open(file_name, &db_);
|
||||||
if (status) {
|
if (status) {
|
||||||
fprintf(stderr, "open error: %s\n", sqlite3_errmsg(db_));
|
fprintf(stderr, "open error: %s\n", sqlite3_errmsg(db_));
|
||||||
@ -460,26 +453,26 @@ class Benchmark {
|
|||||||
std::string WAL_checkpoint = "PRAGMA wal_autocheckpoint = 4096";
|
std::string WAL_checkpoint = "PRAGMA wal_autocheckpoint = 4096";
|
||||||
status = sqlite3_exec(db_, WAL_stmt.c_str(), nullptr, nullptr, &err_msg);
|
status = sqlite3_exec(db_, WAL_stmt.c_str(), nullptr, nullptr, &err_msg);
|
||||||
ExecErrorCheck(status, err_msg);
|
ExecErrorCheck(status, err_msg);
|
||||||
status = sqlite3_exec(db_, WAL_checkpoint.c_str(), nullptr, nullptr,
|
status =
|
||||||
&err_msg);
|
sqlite3_exec(db_, WAL_checkpoint.c_str(), nullptr, nullptr, &err_msg);
|
||||||
ExecErrorCheck(status, err_msg);
|
ExecErrorCheck(status, err_msg);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Change locking mode to exclusive and create tables/index for database
|
// Change locking mode to exclusive and create tables/index for database
|
||||||
std::string locking_stmt = "PRAGMA locking_mode = EXCLUSIVE";
|
std::string locking_stmt = "PRAGMA locking_mode = EXCLUSIVE";
|
||||||
std::string create_stmt =
|
std::string create_stmt =
|
||||||
"CREATE TABLE test (key blob, value blob, PRIMARY KEY(key))";
|
"CREATE TABLE test (key blob, value blob, PRIMARY KEY(key))";
|
||||||
std::string stmt_array[] = { locking_stmt, create_stmt };
|
std::string stmt_array[] = {locking_stmt, create_stmt};
|
||||||
int stmt_array_length = sizeof(stmt_array) / sizeof(std::string);
|
int stmt_array_length = sizeof(stmt_array) / sizeof(std::string);
|
||||||
for (int i = 0; i < stmt_array_length; i++) {
|
for (int i = 0; i < stmt_array_length; i++) {
|
||||||
status = sqlite3_exec(db_, stmt_array[i].c_str(), nullptr, nullptr,
|
status =
|
||||||
&err_msg);
|
sqlite3_exec(db_, stmt_array[i].c_str(), nullptr, nullptr, &err_msg);
|
||||||
ExecErrorCheck(status, err_msg);
|
ExecErrorCheck(status, err_msg);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void Write(bool write_sync, Order order, DBState state,
|
void Write(bool write_sync, Order order, DBState state, int num_entries,
|
||||||
int num_entries, int value_size, int entries_per_batch) {
|
int value_size, int entries_per_batch) {
|
||||||
// Create new database if state == FRESH
|
// Create new database if state == FRESH
|
||||||
if (state == FRESH) {
|
if (state == FRESH) {
|
||||||
if (FLAGS_use_existing_db) {
|
if (FLAGS_use_existing_db) {
|
||||||
@ -507,20 +500,20 @@ class Benchmark {
|
|||||||
std::string end_trans_str = "END TRANSACTION;";
|
std::string end_trans_str = "END TRANSACTION;";
|
||||||
|
|
||||||
// Check for synchronous flag in options
|
// Check for synchronous flag in options
|
||||||
std::string sync_stmt = (write_sync) ? "PRAGMA synchronous = FULL" :
|
std::string sync_stmt =
|
||||||
"PRAGMA synchronous = OFF";
|
(write_sync) ? "PRAGMA synchronous = FULL" : "PRAGMA synchronous = OFF";
|
||||||
status = sqlite3_exec(db_, sync_stmt.c_str(), nullptr, nullptr, &err_msg);
|
status = sqlite3_exec(db_, sync_stmt.c_str(), nullptr, nullptr, &err_msg);
|
||||||
ExecErrorCheck(status, err_msg);
|
ExecErrorCheck(status, err_msg);
|
||||||
|
|
||||||
// Preparing sqlite3 statements
|
// Preparing sqlite3 statements
|
||||||
status = sqlite3_prepare_v2(db_, replace_str.c_str(), -1,
|
status = sqlite3_prepare_v2(db_, replace_str.c_str(), -1, &replace_stmt,
|
||||||
&replace_stmt, nullptr);
|
nullptr);
|
||||||
ErrorCheck(status);
|
ErrorCheck(status);
|
||||||
status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1,
|
status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1,
|
||||||
&begin_trans_stmt, nullptr);
|
&begin_trans_stmt, nullptr);
|
||||||
ErrorCheck(status);
|
ErrorCheck(status);
|
||||||
status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1,
|
status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1, &end_trans_stmt,
|
||||||
&end_trans_stmt, nullptr);
|
nullptr);
|
||||||
ErrorCheck(status);
|
ErrorCheck(status);
|
||||||
|
|
||||||
bool transaction = (entries_per_batch > 1);
|
bool transaction = (entries_per_batch > 1);
|
||||||
@ -538,16 +531,16 @@ class Benchmark {
|
|||||||
const char* value = gen_.Generate(value_size).data();
|
const char* value = gen_.Generate(value_size).data();
|
||||||
|
|
||||||
// Create values for key-value pair
|
// Create values for key-value pair
|
||||||
const int k = (order == SEQUENTIAL) ? i + j :
|
const int k =
|
||||||
(rand_.Next() % num_entries);
|
(order == SEQUENTIAL) ? i + j : (rand_.Next() % num_entries);
|
||||||
char key[100];
|
char key[100];
|
||||||
snprintf(key, sizeof(key), "%016d", k);
|
snprintf(key, sizeof(key), "%016d", k);
|
||||||
|
|
||||||
// Bind KV values into replace_stmt
|
// Bind KV values into replace_stmt
|
||||||
status = sqlite3_bind_blob(replace_stmt, 1, key, 16, SQLITE_STATIC);
|
status = sqlite3_bind_blob(replace_stmt, 1, key, 16, SQLITE_STATIC);
|
||||||
ErrorCheck(status);
|
ErrorCheck(status);
|
||||||
status = sqlite3_bind_blob(replace_stmt, 2, value,
|
status = sqlite3_bind_blob(replace_stmt, 2, value, value_size,
|
||||||
value_size, SQLITE_STATIC);
|
SQLITE_STATIC);
|
||||||
ErrorCheck(status);
|
ErrorCheck(status);
|
||||||
|
|
||||||
// Execute replace_stmt
|
// Execute replace_stmt
|
||||||
@ -593,8 +586,8 @@ class Benchmark {
|
|||||||
status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1,
|
status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1,
|
||||||
&begin_trans_stmt, nullptr);
|
&begin_trans_stmt, nullptr);
|
||||||
ErrorCheck(status);
|
ErrorCheck(status);
|
||||||
status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1,
|
status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1, &end_trans_stmt,
|
||||||
&end_trans_stmt, nullptr);
|
nullptr);
|
||||||
ErrorCheck(status);
|
ErrorCheck(status);
|
||||||
status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &read_stmt, nullptr);
|
status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &read_stmt, nullptr);
|
||||||
ErrorCheck(status);
|
ErrorCheck(status);
|
||||||
@ -621,7 +614,8 @@ class Benchmark {
|
|||||||
ErrorCheck(status);
|
ErrorCheck(status);
|
||||||
|
|
||||||
// Execute read statement
|
// Execute read statement
|
||||||
while ((status = sqlite3_step(read_stmt)) == SQLITE_ROW) {}
|
while ((status = sqlite3_step(read_stmt)) == SQLITE_ROW) {
|
||||||
|
}
|
||||||
StepErrorCheck(status);
|
StepErrorCheck(status);
|
||||||
|
|
||||||
// Reset SQLite statement for another use
|
// Reset SQLite statement for another use
|
||||||
@ -651,7 +645,7 @@ class Benchmark {
|
|||||||
|
|
||||||
void ReadSequential() {
|
void ReadSequential() {
|
||||||
int status;
|
int status;
|
||||||
sqlite3_stmt *pStmt;
|
sqlite3_stmt* pStmt;
|
||||||
std::string read_str = "SELECT * FROM test ORDER BY key";
|
std::string read_str = "SELECT * FROM test ORDER BY key";
|
||||||
|
|
||||||
status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &pStmt, nullptr);
|
status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &pStmt, nullptr);
|
||||||
@ -664,7 +658,6 @@ class Benchmark {
|
|||||||
status = sqlite3_finalize(pStmt);
|
status = sqlite3_finalize(pStmt);
|
||||||
ErrorCheck(status);
|
ErrorCheck(status);
|
||||||
}
|
}
|
||||||
|
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace leveldb
|
} // namespace leveldb
|
||||||
@ -710,9 +703,9 @@ int main(int argc, char** argv) {
|
|||||||
|
|
||||||
// Choose a location for the test database if none given with --db=<path>
|
// Choose a location for the test database if none given with --db=<path>
|
||||||
if (FLAGS_db == nullptr) {
|
if (FLAGS_db == nullptr) {
|
||||||
leveldb::Env::Default()->GetTestDirectory(&default_db_path);
|
leveldb::Env::Default()->GetTestDirectory(&default_db_path);
|
||||||
default_db_path += "/dbbench";
|
default_db_path += "/dbbench";
|
||||||
FLAGS_db = default_db_path.c_str();
|
FLAGS_db = default_db_path.c_str();
|
||||||
}
|
}
|
||||||
|
|
||||||
leveldb::Benchmark benchmark;
|
leveldb::Benchmark benchmark;
|
||||||
|
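The SQLite benchmark hunks above lean on the prepare/bind/step/finalize cycle. The sketch below is a minimal, self-contained illustration of that cycle, assuming an in-memory database and a made-up key/value pair; the REPLACE statement text mirrors what the benchmark prepares but is written here from scratch, and this snippet is not part of the commit.

```cpp
#include <sqlite3.h>

#include <cstdio>

int main() {
  sqlite3* db = nullptr;
  if (sqlite3_open(":memory:", &db) != SQLITE_OK) return 1;
  sqlite3_exec(db, "CREATE TABLE test (key blob, value blob, PRIMARY KEY(key))",
               nullptr, nullptr, nullptr);

  // Prepare once, then bind/step/reset per write, as the benchmark does.
  sqlite3_stmt* stmt = nullptr;
  sqlite3_prepare_v2(db, "REPLACE INTO test (key, value) VALUES (?, ?)", -1,
                     &stmt, nullptr);

  const char key[] = "0000000000000001";  // 16-byte key, like "%016d" above.
  const char value[] = "hello";
  sqlite3_bind_blob(stmt, 1, key, 16, SQLITE_STATIC);
  sqlite3_bind_blob(stmt, 2, value, sizeof(value) - 1, SQLITE_STATIC);
  if (sqlite3_step(stmt) != SQLITE_DONE) std::fprintf(stderr, "step failed\n");

  sqlite3_finalize(stmt);
  sqlite3_close(db);
  return 0;
}
```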
@@ -2,9 +2,10 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
+#include <kcpolydb.h>
 #include <stdio.h>
 #include <stdlib.h>
-#include <kcpolydb.h>
+
 #include "util/histogram.h"
 #include "util/random.h"
 #include "util/testutil.h"
@@ -34,8 +35,7 @@ static const char* FLAGS_benchmarks =
 "fillrand100K,"
 "fillseq100K,"
 "readseq100K,"
-"readrand100K,"
-;
+"readrand100K,";
 
 // Number of key/values to place in database
 static int FLAGS_num = 1000000;
@@ -71,9 +71,7 @@ static bool FLAGS_compression = true;
 // Use the db with the following name.
 static const char* FLAGS_db = nullptr;
 
-inline
-static void DBSynchronize(kyotocabinet::TreeDB* db_)
-{
+inline static void DBSynchronize(kyotocabinet::TreeDB* db_) {
 // Synchronize will flush writes to disk
 if (!db_->synchronize()) {
 fprintf(stderr, "synchronize error: %s\n", db_->error().name());
@@ -121,7 +119,7 @@ static Slice TrimSpace(Slice s) {
 start++;
 }
 int limit = s.size();
-while (limit > start && isspace(s[limit-1])) {
+while (limit > start && isspace(s[limit - 1])) {
 limit--;
 }
 return Slice(s.data() + start, limit - start);
@@ -146,7 +144,7 @@ class Benchmark {
 
 // State kept for progress messages
 int done_;
 int next_report_; // When to report next
 
 void PrintHeader() {
 const int kKeySize = 16;
@@ -157,20 +155,20 @@ class Benchmark {
 static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
 fprintf(stdout, "Entries: %d\n", num_);
 fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
-((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_)
-/ 1048576.0));
+((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
+1048576.0));
 fprintf(stdout, "FileSize: %.1f MB (estimated)\n",
-(((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_)
-/ 1048576.0));
+(((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) /
+1048576.0));
 PrintWarnings();
 fprintf(stdout, "------------------------------------------------\n");
 }
 
 void PrintWarnings() {
 #if defined(__GNUC__) && !defined(__OPTIMIZE__)
-fprintf(stdout,
-"WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"
-);
+fprintf(
+stdout,
+"WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
 #endif
 #ifndef NDEBUG
 fprintf(stdout,
@@ -237,13 +235,20 @@ class Benchmark {
 
 done_++;
 if (done_ >= next_report_) {
-if (next_report_ < 1000) next_report_ += 100;
-else if (next_report_ < 5000) next_report_ += 500;
-else if (next_report_ < 10000) next_report_ += 1000;
-else if (next_report_ < 50000) next_report_ += 5000;
-else if (next_report_ < 100000) next_report_ += 10000;
-else if (next_report_ < 500000) next_report_ += 50000;
-else next_report_ += 100000;
+if (next_report_ < 1000)
+next_report_ += 100;
+else if (next_report_ < 5000)
+next_report_ += 500;
+else if (next_report_ < 10000)
+next_report_ += 1000;
+else if (next_report_ < 50000)
+next_report_ += 5000;
+else if (next_report_ < 100000)
+next_report_ += 10000;
+else if (next_report_ < 500000)
+next_report_ += 50000;
+else
+next_report_ += 100000;
 fprintf(stderr, "... finished %d ops%30s\r", done_, "");
 fflush(stderr);
 }
@@ -261,16 +266,14 @@ class Benchmark {
 snprintf(rate, sizeof(rate), "%6.1f MB/s",
 (bytes_ / 1048576.0) / (finish - start_));
 if (!message_.empty()) {
 message_ = std::string(rate) + " " + message_;
 } else {
 message_ = rate;
 }
 }
 
-fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
-name.ToString().c_str(),
-(finish - start_) * 1e6 / done_,
-(message_.empty() ? "" : " "),
+fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(),
+(finish - start_) * 1e6 / done_, (message_.empty() ? "" : " "),
 message_.c_str());
 if (FLAGS_histogram) {
 fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
@@ -279,21 +282,15 @@ class Benchmark {
 }
 
 public:
-enum Order {
-SEQUENTIAL,
-RANDOM
-};
-enum DBState {
-FRESH,
-EXISTING
-};
+enum Order { SEQUENTIAL, RANDOM };
+enum DBState { FRESH, EXISTING };
 
 Benchmark()
 : db_(nullptr),
 num_(FLAGS_num),
 reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
 bytes_(0),
 rand_(301) {
 std::vector<std::string> files;
 std::string test_dir;
 Env::Default()->GetTestDirectory(&test_dir);
@@ -386,7 +383,7 @@ class Benchmark {
 }
 
 private:
 void Open(bool sync) {
 assert(db_ == nullptr);
 
 // Initialize db_
@@ -395,16 +392,14 @@ class Benchmark {
 db_num_++;
 std::string test_dir;
 Env::Default()->GetTestDirectory(&test_dir);
-snprintf(file_name, sizeof(file_name),
-"%s/dbbench_polyDB-%d.kct",
-test_dir.c_str(),
-db_num_);
+snprintf(file_name, sizeof(file_name), "%s/dbbench_polyDB-%d.kct",
+test_dir.c_str(), db_num_);
 
 // Create tuning options and open the database
-int open_options = kyotocabinet::PolyDB::OWRITER |
-kyotocabinet::PolyDB::OCREATE;
+int open_options =
+kyotocabinet::PolyDB::OWRITER | kyotocabinet::PolyDB::OCREATE;
-int tune_options = kyotocabinet::TreeDB::TSMALL |
-kyotocabinet::TreeDB::TLINEAR;
+int tune_options =
+kyotocabinet::TreeDB::TSMALL | kyotocabinet::TreeDB::TLINEAR;
 if (FLAGS_compression) {
 tune_options |= kyotocabinet::TreeDB::TCOMPRESS;
 db_->tune_compressor(&comp_);
@@ -412,7 +407,7 @@ class Benchmark {
 db_->tune_options(tune_options);
 db_->tune_page_cache(FLAGS_cache_size);
 db_->tune_page(FLAGS_page_size);
-db_->tune_map(256LL<<20);
+db_->tune_map(256LL << 20);
 if (sync) {
 open_options |= kyotocabinet::PolyDB::OAUTOSYNC;
 }
@@ -421,8 +416,8 @@ class Benchmark {
 }
 }
 
-void Write(bool sync, Order order, DBState state,
-int num_entries, int value_size, int entries_per_batch) {
+void Write(bool sync, Order order, DBState state, int num_entries,
+int value_size, int entries_per_batch) {
 // Create new database if state == FRESH
 if (state == FRESH) {
 if (FLAGS_use_existing_db) {
@@ -442,8 +437,7 @@ class Benchmark {
 }
 
 // Write to database
-for (int i = 0; i < num_entries; i++)
-{
+for (int i = 0; i < num_entries; i++) {
 const int k = (order == SEQUENTIAL) ? i : (rand_.Next() % num_entries);
 char key[100];
 snprintf(key, sizeof(key), "%016d", k);
@@ -517,9 +511,9 @@ int main(int argc, char** argv) {
 
 // Choose a location for the test database if none given with --db=<path>
 if (FLAGS_db == nullptr) {
 leveldb::Env::Default()->GetTestDirectory(&default_db_path);
 default_db_path += "/dbbench";
 FLAGS_db = default_db_path.c_str();
 }
 
 leveldb::Benchmark benchmark;
@@ -134,9 +134,7 @@ class FileState {
 
 private:
 // Private since only Unref() should be used to delete it.
-~FileState() {
-Truncate();
-}
+~FileState() { Truncate(); }
 
 // No copying allowed.
 FileState(const FileState&);
@@ -158,9 +156,7 @@ class SequentialFileImpl : public SequentialFile {
 file_->Ref();
 }
 
-~SequentialFileImpl() {
-file_->Unref();
-}
+~SequentialFileImpl() { file_->Unref(); }
 
 virtual Status Read(size_t n, Slice* result, char* scratch) {
 Status s = file_->Read(pos_, n, result, scratch);
@@ -189,13 +185,9 @@ class SequentialFileImpl : public SequentialFile {
 
 class RandomAccessFileImpl : public RandomAccessFile {
 public:
-explicit RandomAccessFileImpl(FileState* file) : file_(file) {
-file_->Ref();
-}
+explicit RandomAccessFileImpl(FileState* file) : file_(file) { file_->Ref(); }
 
-~RandomAccessFileImpl() {
-file_->Unref();
-}
+~RandomAccessFileImpl() { file_->Unref(); }
 
 virtual Status Read(uint64_t offset, size_t n, Slice* result,
 char* scratch) const {
@@ -208,17 +200,11 @@ class RandomAccessFileImpl : public RandomAccessFile {
 
 class WritableFileImpl : public WritableFile {
 public:
-WritableFileImpl(FileState* file) : file_(file) {
-file_->Ref();
-}
+WritableFileImpl(FileState* file) : file_(file) { file_->Ref(); }
 
-~WritableFileImpl() {
-file_->Unref();
-}
+~WritableFileImpl() { file_->Unref(); }
 
-virtual Status Append(const Slice& data) {
-return file_->Append(data);
-}
+virtual Status Append(const Slice& data) { return file_->Append(data); }
 
 virtual Status Close() { return Status::OK(); }
 virtual Status Flush() { return Status::OK(); }
@@ -230,15 +216,16 @@ class WritableFileImpl : public WritableFile {
 
 class NoOpLogger : public Logger {
 public:
-virtual void Logv(const char* format, va_list ap) { }
+virtual void Logv(const char* format, va_list ap) {}
 };
 
 class InMemoryEnv : public EnvWrapper {
 public:
-explicit InMemoryEnv(Env* base_env) : EnvWrapper(base_env) { }
+explicit InMemoryEnv(Env* base_env) : EnvWrapper(base_env) {}
 
 virtual ~InMemoryEnv() {
-for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end(); ++i){
+for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end();
+++i) {
 i->second->Unref();
 }
 }
@@ -311,7 +298,8 @@ class InMemoryEnv : public EnvWrapper {
 MutexLock lock(&mutex_);
 result->clear();
 
-for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end(); ++i){
+for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end();
+++i) {
 const std::string& filename = i->first;
 
 if (filename.size() >= dir.size() + 1 && filename[dir.size()] == '/' &&
@@ -343,13 +331,9 @@ class InMemoryEnv : public EnvWrapper {
 return Status::OK();
 }
 
-virtual Status CreateDir(const std::string& dirname) {
-return Status::OK();
-}
+virtual Status CreateDir(const std::string& dirname) { return Status::OK(); }
 
-virtual Status DeleteDir(const std::string& dirname) {
-return Status::OK();
-}
+virtual Status DeleteDir(const std::string& dirname) { return Status::OK(); }
 
 virtual Status GetFileSize(const std::string& fname, uint64_t* file_size) {
 MutexLock lock(&mutex_);
@@ -361,8 +345,7 @@ class InMemoryEnv : public EnvWrapper {
 return Status::OK();
 }
 
-virtual Status RenameFile(const std::string& src,
-const std::string& target) {
+virtual Status RenameFile(const std::string& src, const std::string& target) {
 MutexLock lock(&mutex_);
 if (file_map_.find(src) == file_map_.end()) {
 return Status::IOError(src, "File not found");
@@ -403,8 +386,6 @@ class InMemoryEnv : public EnvWrapper {
 
 } // namespace
 
-Env* NewMemEnv(Env* base_env) {
-return new InMemoryEnv(base_env);
-}
+Env* NewMemEnv(Env* base_env) { return new InMemoryEnv(base_env); }
 
 } // namespace leveldb
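For context on the helper being reformatted here, a minimal sketch of wiring the in-memory Env into a database follows. The "/dir/db" path is illustrative, error handling is abbreviated, and the snippet is not part of the commit.

```cpp
#include "helpers/memenv/memenv.h"
#include "leveldb/db.h"
#include "leveldb/env.h"

int main() {
  leveldb::Env* env = leveldb::NewMemEnv(leveldb::Env::Default());

  leveldb::Options options;
  options.create_if_missing = true;
  options.env = env;  // All files for this DB now live only in memory.

  leveldb::DB* db = nullptr;
  leveldb::Status s = leveldb::DB::Open(options, "/dir/db", &db);
  if (s.ok()) {
    db->Put(leveldb::WriteOptions(), "key", "value");
    delete db;
  }
  delete env;  // Delete the Env only after the DB that uses it.
  return s.ok() ? 0 : 1;
}
```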
@@ -4,12 +4,13 @@
 
 #include "helpers/memenv/memenv.h"
 
+#include <string>
+#include <vector>
+
 #include "db/db_impl.h"
 #include "leveldb/db.h"
 #include "leveldb/env.h"
 #include "util/testharness.h"
-#include <string>
-#include <vector>
 
 namespace leveldb {
 
@@ -17,12 +18,8 @@ class MemEnvTest {
 public:
 Env* env_;
 
-MemEnvTest()
-: env_(NewMemEnv(Env::Default())) {
-}
-~MemEnvTest() {
-delete env_;
-}
+MemEnvTest() : env_(NewMemEnv(Env::Default())) {}
+~MemEnvTest() { delete env_; }
 };
 
 TEST(MemEnvTest, Basics) {
@@ -109,25 +106,25 @@ TEST(MemEnvTest, ReadWrite) {
 
 // Read sequentially.
 ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file));
 ASSERT_OK(seq_file->Read(5, &result, scratch)); // Read "hello".
 ASSERT_EQ(0, result.compare("hello"));
 ASSERT_OK(seq_file->Skip(1));
 ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Read "world".
 ASSERT_EQ(0, result.compare("world"));
 ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Try reading past EOF.
 ASSERT_EQ(0, result.size());
 ASSERT_OK(seq_file->Skip(100)); // Try to skip past end of file.
 ASSERT_OK(seq_file->Read(1000, &result, scratch));
 ASSERT_EQ(0, result.size());
 delete seq_file;
 
 // Random reads.
 ASSERT_OK(env_->NewRandomAccessFile("/dir/f", &rand_file));
 ASSERT_OK(rand_file->Read(6, 5, &result, scratch)); // Read "world".
 ASSERT_EQ(0, result.compare("world"));
 ASSERT_OK(rand_file->Read(0, 5, &result, scratch)); // Read "hello".
 ASSERT_EQ(0, result.compare("hello"));
 ASSERT_OK(rand_file->Read(10, 100, &result, scratch)); // Read "d".
 ASSERT_EQ(0, result.compare("d"));
 
 // Too high offset.
@@ -176,7 +173,7 @@ TEST(MemEnvTest, LargeWrite) {
 SequentialFile* seq_file;
 Slice result;
 ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file));
 ASSERT_OK(seq_file->Read(3, &result, scratch)); // Read "foo".
 ASSERT_EQ(0, result.compare("foo"));
 
 size_t read = 0;
@@ -188,7 +185,7 @@ TEST(MemEnvTest, LargeWrite) {
 }
 ASSERT_TRUE(write_data == read_data);
 delete seq_file;
-delete [] scratch;
+delete[] scratch;
 }
 
 TEST(MemEnvTest, OverwriteOpenFile) {
@@ -259,6 +256,4 @@ TEST(MemEnvTest, DBTest) {
 
 } // namespace leveldb
 
-int main(int argc, char** argv) {
-return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
@@ -47,26 +47,27 @@ extern "C" {
 #include <stdarg.h>
 #include <stddef.h>
 #include <stdint.h>
+
 #include "leveldb/export.h"
 
 /* Exported types */
 
 typedef struct leveldb_t leveldb_t;
 typedef struct leveldb_cache_t leveldb_cache_t;
 typedef struct leveldb_comparator_t leveldb_comparator_t;
 typedef struct leveldb_env_t leveldb_env_t;
 typedef struct leveldb_filelock_t leveldb_filelock_t;
 typedef struct leveldb_filterpolicy_t leveldb_filterpolicy_t;
 typedef struct leveldb_iterator_t leveldb_iterator_t;
 typedef struct leveldb_logger_t leveldb_logger_t;
 typedef struct leveldb_options_t leveldb_options_t;
 typedef struct leveldb_randomfile_t leveldb_randomfile_t;
 typedef struct leveldb_readoptions_t leveldb_readoptions_t;
 typedef struct leveldb_seqfile_t leveldb_seqfile_t;
 typedef struct leveldb_snapshot_t leveldb_snapshot_t;
 typedef struct leveldb_writablefile_t leveldb_writablefile_t;
 typedef struct leveldb_writebatch_t leveldb_writebatch_t;
 typedef struct leveldb_writeoptions_t leveldb_writeoptions_t;
 
 /* DB operations */
 
@@ -189,10 +190,7 @@ LEVELDB_EXPORT void leveldb_options_set_block_restart_interval(
 LEVELDB_EXPORT void leveldb_options_set_max_file_size(leveldb_options_t*,
 size_t);
 
-enum {
-leveldb_no_compression = 0,
-leveldb_snappy_compression = 1
-};
+enum { leveldb_no_compression = 0, leveldb_snappy_compression = 1 };
 LEVELDB_EXPORT void leveldb_options_set_compression(leveldb_options_t*, int);
 
 /* Comparator */
 
@@ -266,7 +264,7 @@ LEVELDB_EXPORT int leveldb_major_version();
 LEVELDB_EXPORT int leveldb_minor_version();
 
 #ifdef __cplusplus
 } /* end extern "C" */
 #endif
 
 #endif /* STORAGE_LEVELDB_INCLUDE_C_H_ */
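Since these hunks show only a slice of the C header, the sketch below is hedged: leveldb_options_create() and leveldb_options_destroy() are assumed from the portions of leveldb/c.h that are not visible in this diff, and the file-size figure is arbitrary.

```cpp
#include <stdio.h>

#include "leveldb/c.h"

int main(void) {
  printf("leveldb %d.%d\n", leveldb_major_version(), leveldb_minor_version());

  /* Assumed constructor/destructor pair from elsewhere in the header. */
  leveldb_options_t* options = leveldb_options_create();
  leveldb_options_set_compression(options, leveldb_snappy_compression);
  leveldb_options_set_max_file_size(options, 2 * 1024 * 1024);
  leveldb_options_destroy(options);
  return 0;
}
```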
@@ -19,6 +19,7 @@
 #define STORAGE_LEVELDB_INCLUDE_CACHE_H_
 
 #include <stdint.h>
+
 #include "leveldb/export.h"
 #include "leveldb/slice.h"
 
@@ -42,7 +43,7 @@ class LEVELDB_EXPORT Cache {
 virtual ~Cache();
 
 // Opaque handle to an entry stored in the cache.
-struct Handle { };
+struct Handle {};
 
 // Insert a mapping from key->value into the cache and assign it
 // the specified charge against the total cache capacity.
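As a usage note for the Cache interface touched above, the snippet below shows the common way a block cache is plugged into Options. NewLRUCache comes from a part of this header outside the hunk, and the 8 MB capacity is an arbitrary pick.

```cpp
#include "leveldb/cache.h"
#include "leveldb/options.h"

void ConfigureBlockCache(leveldb::Options* options) {
  // Options does not take ownership; delete the cache after closing the DB.
  options->block_cache = leveldb::NewLRUCache(8 * 1048576);
}
```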
@@ -6,6 +6,7 @@
 #define STORAGE_LEVELDB_INCLUDE_COMPARATOR_H_
 
 #include <string>
+
 #include "leveldb/export.h"
 
 namespace leveldb {
@@ -44,9 +45,8 @@ class LEVELDB_EXPORT Comparator {
 // If *start < limit, changes *start to a short string in [start,limit).
 // Simple comparator implementations may return with *start unchanged,
 // i.e., an implementation of this method that does nothing is correct.
-virtual void FindShortestSeparator(
-std::string* start,
-const Slice& limit) const = 0;
+virtual void FindShortestSeparator(std::string* start,
+const Slice& limit) const = 0;
 
 // Changes *key to a short string >= *key.
 // Simple comparator implementations may return with *key unchanged,
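To make the reflowed declaration above concrete, here is a sketch of a custom comparator. Compare(), Name(), and FindShortSuccessor() are not shown in this hunk, so their signatures are assumptions based on the rest of the class; the ordering itself is invented for illustration.

```cpp
#include <string>

#include "leveldb/comparator.h"
#include "leveldb/slice.h"

// Orders keys by length first, then bytewise. Purely illustrative.
class LengthFirstComparator : public leveldb::Comparator {
 public:
  int Compare(const leveldb::Slice& a, const leveldb::Slice& b) const override {
    if (a.size() != b.size()) return a.size() < b.size() ? -1 : +1;
    return a.compare(b);
  }
  const char* Name() const override { return "LengthFirstComparator"; }
  // Doing nothing here is documented above as correct, if unoptimized.
  void FindShortestSeparator(std::string*,
                             const leveldb::Slice&) const override {}
  void FindShortSuccessor(std::string*) const override {}
};
```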
@@ -7,6 +7,7 @@
 
 #include <stdint.h>
 #include <stdio.h>
+
 #include "leveldb/export.h"
 #include "leveldb/iterator.h"
 #include "leveldb/options.h"
@@ -32,11 +33,11 @@ class LEVELDB_EXPORT Snapshot {
 
 // A range of keys
 struct LEVELDB_EXPORT Range {
 Slice start; // Included in the range
 Slice limit; // Not included in the range
 
-Range() { }
-Range(const Slice& s, const Slice& l) : start(s), limit(l) { }
+Range() {}
+Range(const Slice& s, const Slice& l) : start(s), limit(l) {}
 };
 
 // A DB is a persistent ordered map from keys to values.
@@ -49,8 +50,7 @@ class LEVELDB_EXPORT DB {
 // OK on success.
 // Stores nullptr in *dbptr and returns a non-OK status on error.
 // Caller should delete *dbptr when it is no longer needed.
-static Status Open(const Options& options,
-const std::string& name,
+static Status Open(const Options& options, const std::string& name,
 DB** dbptr);
 
 DB() = default;
@@ -63,8 +63,7 @@ class LEVELDB_EXPORT DB {
 // Set the database entry for "key" to "value". Returns OK on success,
 // and a non-OK status on error.
 // Note: consider setting options.sync = true.
-virtual Status Put(const WriteOptions& options,
-const Slice& key,
+virtual Status Put(const WriteOptions& options, const Slice& key,
 const Slice& value) = 0;
 
 // Remove the database entry (if any) for "key". Returns OK on
@@ -85,8 +84,8 @@ class LEVELDB_EXPORT DB {
 // a status for which Status::IsNotFound() returns true.
 //
 // May return some other Status on an error.
-virtual Status Get(const ReadOptions& options,
-const Slice& key, std::string* value) = 0;
+virtual Status Get(const ReadOptions& options, const Slice& key,
+std::string* value) = 0;
 
 // Return a heap-allocated iterator over the contents of the database.
 // The result of NewIterator() is initially invalid (caller must
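The Open/Put/Get declarations reformatted above are typically used as sketched below; the database path is made up and error handling is kept short.

```cpp
#include <string>

#include "leveldb/db.h"

int main() {
  leveldb::Options options;
  options.create_if_missing = true;

  leveldb::DB* db = nullptr;
  leveldb::Status status = leveldb::DB::Open(options, "/tmp/testdb", &db);
  if (!status.ok()) return 1;

  leveldb::WriteOptions write_options;
  write_options.sync = true;  // As the comment above suggests considering.
  status = db->Put(write_options, "k1", "v1");

  std::string value;
  if (status.ok()) status = db->Get(leveldb::ReadOptions(), "k1", &value);

  delete db;  // Caller deletes *dbptr when it is no longer needed.
  return status.ok() ? 0 : 1;
}
```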
@@ -6,6 +6,7 @@
 #define STORAGE_LEVELDB_INCLUDE_DUMPFILE_H_
 
 #include <string>
+
 #include "leveldb/env.h"
 #include "leveldb/export.h"
 #include "leveldb/status.h"
@@ -15,8 +15,10 @@
 
 #include <stdarg.h>
 #include <stdint.h>
+
 #include <string>
 #include <vector>
+
 #include "leveldb/export.h"
 #include "leveldb/status.h"
 
@@ -164,9 +166,7 @@ class LEVELDB_EXPORT Env {
 // added to the same Env may run concurrently in different threads.
 // I.e., the caller may not assume that background work items are
 // serialized.
-virtual void Schedule(
-void (*function)(void* arg),
-void* arg) = 0;
+virtual void Schedule(void (*function)(void* arg), void* arg) = 0;
 
 // Start a new thread, invoking "function(arg)" within the new thread.
 // When "function(arg)" returns, the thread will be destroyed.
@@ -287,9 +287,9 @@ class LEVELDB_EXPORT FileLock {
 
 // Log the specified data to *info_log if info_log is non-null.
 void Log(Logger* info_log, const char* format, ...)
-# if defined(__GNUC__) || defined(__clang__)
-__attribute__((__format__ (__printf__, 2, 3)))
-# endif
+#if defined(__GNUC__) || defined(__clang__)
+__attribute__((__format__(__printf__, 2, 3)))
+#endif
 ;
 
 // A utility routine: write "data" to the named file.
@@ -306,7 +306,7 @@ LEVELDB_EXPORT Status ReadFileToString(Env* env, const std::string& fname,
 class LEVELDB_EXPORT EnvWrapper : public Env {
 public:
 // Initialize an EnvWrapper that delegates all calls to *t.
-explicit EnvWrapper(Env* t) : target_(t) { }
+explicit EnvWrapper(Env* t) : target_(t) {}
 virtual ~EnvWrapper();
 
 // Return the target to which this Env forwards all calls.
@@ -364,9 +364,7 @@ class LEVELDB_EXPORT EnvWrapper : public Env {
 Status NewLogger(const std::string& fname, Logger** result) override {
 return target_->NewLogger(fname, result);
 }
-uint64_t NowMicros() override {
-return target_->NowMicros();
-}
+uint64_t NowMicros() override { return target_->NowMicros(); }
 void SleepForMicroseconds(int micros) override {
 target_->SleepForMicroseconds(micros);
 }
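A small sketch of the Schedule() signature and the EnvWrapper forwarding pattern shown above. The NoisyEnv class and the tag string are invented for illustration, nothing waits for the background item to finish, and the trailing sleep is only a crude stand-in for real synchronization.

```cpp
#include <cstdio>

#include "leveldb/env.h"

namespace {

void PrintHello(void* arg) {
  std::fprintf(stderr, "hello from a background thread: %s\n",
               static_cast<const char*>(arg));
}

// Forwards everything to the wrapped Env but logs sleeps. Illustrative only.
class NoisyEnv : public leveldb::EnvWrapper {
 public:
  explicit NoisyEnv(leveldb::Env* base) : EnvWrapper(base) {}
  void SleepForMicroseconds(int micros) override {
    std::fprintf(stderr, "sleeping %d us\n", micros);
    EnvWrapper::SleepForMicroseconds(micros);  // Delegate to the target Env.
  }
};

}  // namespace

int main() {
  static const char kTag[] = "example";
  leveldb::Env::Default()->Schedule(&PrintHello, const_cast<char*>(kTag));

  NoisyEnv env(leveldb::Env::Default());
  env.SleepForMicroseconds(1000);  // Give the background item a moment to run.
  return 0;
}
```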
@@ -17,6 +17,7 @@
 #define STORAGE_LEVELDB_INCLUDE_FILTER_POLICY_H_
 
 #include <string>
+
 #include "leveldb/export.h"
 
 namespace leveldb {
@@ -39,8 +40,8 @@ class LEVELDB_EXPORT FilterPolicy {
 //
 // Warning: do not change the initial contents of *dst. Instead,
 // append the newly constructed filter to *dst.
-virtual void CreateFilter(const Slice* keys, int n, std::string* dst)
-const = 0;
+virtual void CreateFilter(const Slice* keys, int n,
+std::string* dst) const = 0;
 
 // "filter" contains the data appended by a preceding call to
 // CreateFilter() on this class. This method must return true if
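In practice most callers do not subclass FilterPolicy; they attach the built-in Bloom filter, which is declared elsewhere in this header. A hedged sketch, with 10 bits per key as a commonly cited default:

```cpp
#include <string>

#include "leveldb/db.h"
#include "leveldb/filter_policy.h"
#include "leveldb/options.h"

leveldb::Status OpenWithBloomFilter(const std::string& name,
                                    leveldb::DB** db) {
  leveldb::Options options;
  options.create_if_missing = true;
  // The policy is not owned by Options; delete it after closing the DB.
  options.filter_policy = leveldb::NewBloomFilterPolicy(10);
  return leveldb::DB::Open(options, name, db);
}
```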
@@ -93,7 +93,10 @@ class LEVELDB_EXPORT Iterator {
 // True if the node is not used. Only head nodes might be unused.
 bool IsEmpty() const { return function == nullptr; }
 // Invokes the cleanup function.
-void Run() { assert(function != nullptr); (*function)(arg1, arg2); }
+void Run() {
+assert(function != nullptr);
+(*function)(arg1, arg2);
+}
 };
 CleanupNode cleanup_head_;
 };
@@ -6,6 +6,7 @@
 #define STORAGE_LEVELDB_INCLUDE_OPTIONS_H_
 
 #include <stddef.h>
+
 #include "leveldb/export.h"
 
 namespace leveldb {
@@ -24,7 +25,7 @@ class Snapshot;
 enum CompressionType {
 // NOTE: do not change the values of existing entries, as these are
 // part of the persistent format on disk.
 kNoCompression = 0x0,
 kSnappyCompression = 0x1
 };
 
|
|||||||
#include <assert.h>
|
#include <assert.h>
|
||||||
#include <stddef.h>
|
#include <stddef.h>
|
||||||
#include <string.h>
|
#include <string.h>
|
||||||
|
|
||||||
#include <string>
|
#include <string>
|
||||||
|
|
||||||
#include "leveldb/export.h"
|
#include "leveldb/export.h"
|
||||||
|
|
||||||
namespace leveldb {
|
namespace leveldb {
|
||||||
@ -26,16 +28,16 @@ namespace leveldb {
|
|||||||
class LEVELDB_EXPORT Slice {
|
class LEVELDB_EXPORT Slice {
|
||||||
public:
|
public:
|
||||||
// Create an empty slice.
|
// Create an empty slice.
|
||||||
Slice() : data_(""), size_(0) { }
|
Slice() : data_(""), size_(0) {}
|
||||||
|
|
||||||
// Create a slice that refers to d[0,n-1].
|
// Create a slice that refers to d[0,n-1].
|
||||||
Slice(const char* d, size_t n) : data_(d), size_(n) { }
|
Slice(const char* d, size_t n) : data_(d), size_(n) {}
|
||||||
|
|
||||||
// Create a slice that refers to the contents of "s"
|
// Create a slice that refers to the contents of "s"
|
||||||
Slice(const std::string& s) : data_(s.data()), size_(s.size()) { }
|
Slice(const std::string& s) : data_(s.data()), size_(s.size()) {}
|
||||||
|
|
||||||
// Create a slice that refers to s[0,strlen(s)-1]
|
// Create a slice that refers to s[0,strlen(s)-1]
|
||||||
Slice(const char* s) : data_(s), size_(strlen(s)) { }
|
Slice(const char* s) : data_(s), size_(strlen(s)) {}
|
||||||
|
|
||||||
// Intentionally copyable.
|
// Intentionally copyable.
|
||||||
Slice(const Slice&) = default;
|
Slice(const Slice&) = default;
|
||||||
@ -58,7 +60,10 @@ class LEVELDB_EXPORT Slice {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Change this slice to refer to an empty array
|
// Change this slice to refer to an empty array
|
||||||
void clear() { data_ = ""; size_ = 0; }
|
void clear() {
|
||||||
|
data_ = "";
|
||||||
|
size_ = 0;
|
||||||
|
}
|
||||||
|
|
||||||
// Drop the first "n" bytes from this slice.
|
// Drop the first "n" bytes from this slice.
|
||||||
void remove_prefix(size_t n) {
|
void remove_prefix(size_t n) {
|
||||||
@ -78,8 +83,7 @@ class LEVELDB_EXPORT Slice {
|
|||||||
|
|
||||||
// Return true iff "x" is a prefix of "*this"
|
// Return true iff "x" is a prefix of "*this"
|
||||||
bool starts_with(const Slice& x) const {
|
bool starts_with(const Slice& x) const {
|
||||||
return ((size_ >= x.size_) &&
|
return ((size_ >= x.size_) && (memcmp(data_, x.data_, x.size_) == 0));
|
||||||
(memcmp(data_, x.data_, x.size_) == 0));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private:
|
private:
|
||||||
@ -92,21 +96,20 @@ inline bool operator==(const Slice& x, const Slice& y) {
|
|||||||
(memcmp(x.data(), y.data(), x.size()) == 0));
|
(memcmp(x.data(), y.data(), x.size()) == 0));
|
||||||
}
|
}
|
||||||
|
|
||||||
inline bool operator!=(const Slice& x, const Slice& y) {
|
inline bool operator!=(const Slice& x, const Slice& y) { return !(x == y); }
|
||||||
return !(x == y);
|
|
||||||
}
|
|
||||||
|
|
||||||
inline int Slice::compare(const Slice& b) const {
|
inline int Slice::compare(const Slice& b) const {
|
||||||
const size_t min_len = (size_ < b.size_) ? size_ : b.size_;
|
const size_t min_len = (size_ < b.size_) ? size_ : b.size_;
|
||||||
int r = memcmp(data_, b.data_, min_len);
|
int r = memcmp(data_, b.data_, min_len);
|
||||||
if (r == 0) {
|
if (r == 0) {
|
||||||
if (size_ < b.size_) r = -1;
|
if (size_ < b.size_)
|
||||||
else if (size_ > b.size_) r = +1;
|
r = -1;
|
||||||
|
else if (size_ > b.size_)
|
||||||
|
r = +1;
|
||||||
}
|
}
|
||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace leveldb
|
} // namespace leveldb
|
||||||
|
|
||||||
|
|
||||||
#endif // STORAGE_LEVELDB_INCLUDE_SLICE_H_
|
#endif // STORAGE_LEVELDB_INCLUDE_SLICE_H_
|
||||||
|
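The constructors and helpers reformatted above make Slice a non-owning view over bytes it does not copy; a compact illustration, with made-up strings:

```cpp
#include <string>

#include "leveldb/slice.h"

int main() {
  std::string backing = "hello world";
  leveldb::Slice s(backing);           // Points at backing; copies no bytes.
  leveldb::Slice prefix(s.data(), 5);  // Refers to "hello".

  bool has_prefix = s.starts_with(prefix);
  int cmp = prefix.compare(leveldb::Slice("hello"));  // 0 when equal.

  s.remove_prefix(6);  // Now refers to "world"; backing is untouched.
  return (has_prefix && cmp == 0 && s == leveldb::Slice("world")) ? 0 : 1;
}
```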
@@ -15,6 +15,7 @@
 
 #include <algorithm>
 #include <string>
+
 #include "leveldb/export.h"
 #include "leveldb/slice.h"
 
@@ -23,7 +24,7 @@ namespace leveldb {
 class LEVELDB_EXPORT Status {
 public:
 // Create a success status.
-Status() noexcept : state_(nullptr) { }
+Status() noexcept : state_(nullptr) {}
 ~Status() { delete[] state_; }
 
 Status(const Status& rhs);
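A typical way the Status class above is checked at call sites; ok(), IsNotFound(), and ToString() are assumed from parts of the class outside this hunk.

```cpp
#include <cstdio>

#include "leveldb/status.h"

void Report(const leveldb::Status& s) {
  // ok(), IsNotFound(), and ToString() come from the rest of the class.
  if (s.ok()) return;
  if (s.IsNotFound()) {
    std::fprintf(stderr, "missing key\n");
  } else {
    std::fprintf(stderr, "error: %s\n", s.ToString().c_str());
  }
}
```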
@ -6,6 +6,7 @@
|
|||||||
#define STORAGE_LEVELDB_INCLUDE_TABLE_H_
|
#define STORAGE_LEVELDB_INCLUDE_TABLE_H_
|
||||||
|
|
||||||
#include <stdint.h>
|
#include <stdint.h>
|
||||||
|
|
||||||
#include "leveldb/export.h"
|
#include "leveldb/export.h"
|
||||||
#include "leveldb/iterator.h"
|
#include "leveldb/iterator.h"
|
||||||
|
|
||||||
@ -36,10 +37,8 @@ class LEVELDB_EXPORT Table {
|
|||||||
// for the duration of the returned table's lifetime.
|
// for the duration of the returned table's lifetime.
|
||||||
//
|
//
|
||||||
// *file must remain live while this Table is in use.
|
// *file must remain live while this Table is in use.
|
||||||
static Status Open(const Options& options,
|
static Status Open(const Options& options, RandomAccessFile* file,
|
||||||
RandomAccessFile* file,
|
uint64_t file_size, Table** table);
|
||||||
uint64_t file_size,
|
|
||||||
Table** table);
|
|
||||||
|
|
||||||
Table(const Table&) = delete;
|
Table(const Table&) = delete;
|
||||||
void operator=(const Table&) = delete;
|
void operator=(const Table&) = delete;
|
||||||
@ -70,11 +69,9 @@ class LEVELDB_EXPORT Table {
|
|||||||
// to Seek(key). May not make such a call if filter policy says
|
// to Seek(key). May not make such a call if filter policy says
|
||||||
// that key is not present.
|
// that key is not present.
|
||||||
friend class TableCache;
|
friend class TableCache;
|
||||||
Status InternalGet(
|
Status InternalGet(const ReadOptions&, const Slice& key, void* arg,
|
||||||
const ReadOptions&, const Slice& key,
|
void (*handle_result)(void* arg, const Slice& k,
|
||||||
void* arg,
|
const Slice& v));
|
||||||
void (*handle_result)(void* arg, const Slice& k, const Slice& v));
|
|
||||||
|
|
||||||
|
|
||||||
void ReadMeta(const Footer& footer);
|
void ReadMeta(const Footer& footer);
|
||||||
void ReadFilter(const Slice& filter_handle_value);
|
void ReadFilter(const Slice& filter_handle_value);
|
||||||
|
@@ -14,6 +14,7 @@
 #define STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_
 
 #include <stdint.h>
+
 #include "leveldb/export.h"
 #include "leveldb/options.h"
 #include "leveldb/status.h"
@@ -22,6 +22,7 @@
 #define STORAGE_LEVELDB_INCLUDE_WRITE_BATCH_H_
 
 #include <string>
+
 #include "leveldb/export.h"
 #include "leveldb/status.h"
 
@@ -35,7 +36,7 @@ class LEVELDB_EXPORT WriteBatch {
 
   // Intentionally copyable.
   WriteBatch(const WriteBatch&) = default;
-  WriteBatch& operator =(const WriteBatch&) = default;
+  WriteBatch& operator=(const WriteBatch&) = default;
 
   ~WriteBatch();
 
@@ -3,9 +3,9 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 // Test for issue 178: a manual compaction causes deleted data to reappear.
+#include <cstdlib>
 #include <iostream>
 #include <sstream>
-#include <cstdlib>
 
 #include "leveldb/db.h"
 #include "leveldb/write_batch.h"
@@ -21,11 +21,9 @@ std::string Key1(int i) {
   return buf;
 }
 
-std::string Key2(int i) {
-  return Key1(i) + "_xxx";
-}
+std::string Key2(int i) { return Key1(i) + "_xxx"; }
 
-class Issue178 { };
+class Issue178 {};
 
 TEST(Issue178, Test) {
   // Get rid of any state from an old run.
@@ -87,6 +85,4 @@ TEST(Issue178, Test) {
 
 }  // anonymous namespace
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
@@ -11,14 +11,14 @@
 
 namespace leveldb {
 
-class Issue200 { };
+class Issue200 {};
 
 TEST(Issue200, Test) {
   // Get rid of any state from an old run.
   std::string dbpath = test::TmpDir() + "/leveldb_issue200_test";
   DestroyDB(dbpath, Options());
 
-  DB *db;
+  DB* db;
   Options options;
   options.create_if_missing = true;
   ASSERT_OK(DB::Open(options, dbpath, &db));
@@ -31,7 +31,7 @@ TEST(Issue200, Test) {
   ASSERT_OK(db->Put(write_options, "5", "f"));
 
   ReadOptions read_options;
-  Iterator *iter = db->NewIterator(read_options);
+  Iterator* iter = db->NewIterator(read_options);
 
   // Add an element that should not be reflected in the iterator.
   ASSERT_OK(db->Put(write_options, "25", "cd"));
@@ -54,6 +54,4 @@ TEST(Issue200, Test) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
@@ -11,9 +11,9 @@
 // porting to a new platform, see "port_example.h" for documentation
 // of what the new port_<platform>.h file must provide.
 #if defined(LEVELDB_PLATFORM_POSIX) || defined(LEVELDB_PLATFORM_WINDOWS)
-# include "port/port_stdcxx.h"
+#include "port/port_stdcxx.h"
 #elif defined(LEVELDB_PLATFORM_CHROMIUM)
-# include "port/port_chromium.h"
+#include "port/port_chromium.h"
 #endif
 
 #endif  // STORAGE_LEVELDB_PORT_PORT_H_
@@ -30,10 +30,10 @@
 #endif  // HAVE_SNAPPY
 
 #include <cassert>
+#include <condition_variable>  // NOLINT
 #include <cstddef>
 #include <cstdint>
-#include <condition_variable>  // NOLINT
 #include <mutex>  // NOLINT
 #include <string>
 
 #include "port/thread_annotations.h"
@@ -56,7 +56,7 @@ class LOCKABLE Mutex {
 
   void Lock() EXCLUSIVE_LOCK_FUNCTION() { mu_.lock(); }
   void Unlock() UNLOCK_FUNCTION() { mu_.unlock(); }
-  void AssertHeld() ASSERT_EXCLUSIVE_LOCK() { }
+  void AssertHeld() ASSERT_EXCLUSIVE_LOCK() {}
 
  private:
   friend class CondVar;
@@ -79,6 +79,7 @@ class CondVar {
   }
   void Signal() { cv_.notify_one(); }
   void SignalAll() { cv_.notify_all(); }
+
  private:
   std::condition_variable cv_;
   Mutex* const mu_;
@@ -94,7 +95,9 @@ inline bool Snappy_Compress(const char* input, size_t length,
   return true;
 #else
   // Silence compiler warnings about unused arguments.
-  (void)input; (void)length; (void)output;
+  (void)input;
+  (void)length;
+  (void)output;
 #endif  // HAVE_SNAPPY
 
   return false;
@@ -106,7 +109,9 @@ inline bool Snappy_GetUncompressedLength(const char* input, size_t length,
   return snappy::GetUncompressedLength(input, length, result);
 #else
   // Silence compiler warnings about unused arguments.
-  (void)input; (void)length; (void)result;
+  (void)input;
+  (void)length;
+  (void)result;
   return false;
 #endif  // HAVE_SNAPPY
 }
@@ -116,14 +121,17 @@ inline bool Snappy_Uncompress(const char* input, size_t length, char* output) {
   return snappy::RawUncompress(input, length, output);
 #else
   // Silence compiler warnings about unused arguments.
-  (void)input; (void)length; (void)output;
+  (void)input;
+  (void)length;
+  (void)output;
   return false;
 #endif  // HAVE_SNAPPY
 }
 
 inline bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg) {
   // Silence compiler warnings about unused arguments.
-  (void)func; (void)arg;
+  (void)func;
+  (void)arg;
   return false;
 }
 
@@ -132,7 +140,9 @@ inline uint32_t AcceleratedCRC32C(uint32_t crc, const char* buf, size_t size) {
   return ::crc32c::Extend(crc, reinterpret_cast<const uint8_t*>(buf), size);
 #else
   // Silence compiler warnings about unused arguments.
-  (void)crc; (void)buf; (void)size;
+  (void)crc;
+  (void)buf;
+  (void)size;
   return 0;
 #endif  // HAVE_CRC32C
 }
@@ -13,9 +13,9 @@
 
 #if defined(__clang__)
 
 #define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
 #else
 #define THREAD_ANNOTATION_ATTRIBUTE__(x)  // no-op
 #endif
 
 #endif  // !defined(THREAD_ANNOTATION_ATTRIBUTE__)
@@ -54,18 +54,15 @@
 #endif
 
 #ifndef LOCK_RETURNED
-#define LOCK_RETURNED(x) \
-  THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
+#define LOCK_RETURNED(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
 #endif
 
 #ifndef LOCKABLE
-#define LOCKABLE \
-  THREAD_ANNOTATION_ATTRIBUTE__(lockable)
+#define LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(lockable)
 #endif
 
 #ifndef SCOPED_LOCKABLE
-#define SCOPED_LOCKABLE \
-  THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
+#define SCOPED_LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
 #endif
 
 #ifndef EXCLUSIVE_LOCK_FUNCTION
@@ -6,8 +6,9 @@
 #include "table/block.h"
 
-#include <vector>
 #include <algorithm>
+#include <vector>
+
 #include "leveldb/comparator.h"
 #include "table/format.h"
 #include "util/coding.h"
@@ -27,7 +28,7 @@ Block::Block(const BlockContents& contents)
   if (size_ < sizeof(uint32_t)) {
     size_ = 0;  // Error marker
   } else {
-    size_t max_restarts_allowed = (size_-sizeof(uint32_t)) / sizeof(uint32_t);
+    size_t max_restarts_allowed = (size_ - sizeof(uint32_t)) / sizeof(uint32_t);
     if (NumRestarts() > max_restarts_allowed) {
       // The size is too small for NumRestarts()
       size_ = 0;
@@ -51,8 +52,7 @@ Block::~Block() {
 // If any errors are detected, returns nullptr.  Otherwise, returns a
 // pointer to the key delta (just past the three decoded values).
 static inline const char* DecodeEntry(const char* p, const char* limit,
-                                      uint32_t* shared,
-                                      uint32_t* non_shared,
+                                      uint32_t* shared, uint32_t* non_shared,
                                       uint32_t* value_length) {
   if (limit - p < 3) return nullptr;
   *shared = reinterpret_cast<const unsigned char*>(p)[0];
@@ -76,9 +76,9 @@ static inline const char* DecodeEntry(const char* p, const char* limit,
 class Block::Iter : public Iterator {
  private:
   const Comparator* const comparator_;
   const char* const data_;       // underlying block contents
   uint32_t const restarts_;      // Offset of restart array (list of fixed32)
   uint32_t const num_restarts_;  // Number of uint32_t entries in restart array
 
   // current_ is offset in data_ of current entry.  >= restarts_ if !Valid
   uint32_t current_;
@@ -112,9 +112,7 @@ class Block::Iter : public Iterator {
   }
 
  public:
-  Iter(const Comparator* comparator,
-       const char* data,
-       uint32_t restarts,
+  Iter(const Comparator* comparator, const char* data, uint32_t restarts,
        uint32_t num_restarts)
       : comparator_(comparator),
         data_(data),
@@ -171,9 +169,9 @@ class Block::Iter : public Iterator {
       uint32_t mid = (left + right + 1) / 2;
       uint32_t region_offset = GetRestartPoint(mid);
       uint32_t shared, non_shared, value_length;
-      const char* key_ptr = DecodeEntry(data_ + region_offset,
-                                        data_ + restarts_,
-                                        &shared, &non_shared, &value_length);
+      const char* key_ptr =
+          DecodeEntry(data_ + region_offset, data_ + restarts_, &shared,
+                      &non_shared, &value_length);
       if (key_ptr == nullptr || (shared != 0)) {
         CorruptionError();
         return;
@@ -253,7 +251,7 @@ class Block::Iter : public Iterator {
   }
 };
 
-Iterator* Block::NewIterator(const Comparator* cmp) {
+Iterator* Block::NewIterator(const Comparator* comparator) {
   if (size_ < sizeof(uint32_t)) {
     return NewErrorIterator(Status::Corruption("bad block contents"));
   }
@@ -261,7 +259,7 @@ Iterator* Block::NewIterator(const Comparator* cmp) {
   if (num_restarts == 0) {
     return NewEmptyIterator();
   } else {
-    return new Iter(cmp, data_, restart_offset_, num_restarts);
+    return new Iter(comparator, data_, restart_offset_, num_restarts);
   }
 }
 
@@ -7,6 +7,7 @@
 
 #include <stddef.h>
 #include <stdint.h>
+
 #include "leveldb/iterator.h"
 
 namespace leveldb {
@@ -29,8 +30,8 @@ class Block {
 
   const char* data_;
   size_t size_;
   uint32_t restart_offset_;  // Offset in data_ of restart array
   bool owned_;               // Block owns data_[]
 
   // No copying allowed
   Block(const Block&);
@@ -28,8 +28,10 @@
 #include "table/block_builder.h"
 
-#include <algorithm>
 #include <assert.h>
+
+#include <algorithm>
+
 #include "leveldb/comparator.h"
 #include "leveldb/table_builder.h"
 #include "util/coding.h"
@@ -37,27 +39,24 @@
 namespace leveldb {
 
 BlockBuilder::BlockBuilder(const Options* options)
-    : options_(options),
-      restarts_(),
-      counter_(0),
-      finished_(false) {
+    : options_(options), restarts_(), counter_(0), finished_(false) {
   assert(options->block_restart_interval >= 1);
   restarts_.push_back(0);  // First restart point is at offset 0
 }
 
 void BlockBuilder::Reset() {
   buffer_.clear();
   restarts_.clear();
   restarts_.push_back(0);  // First restart point is at offset 0
   counter_ = 0;
   finished_ = false;
   last_key_.clear();
 }
 
 size_t BlockBuilder::CurrentSizeEstimate() const {
   return (buffer_.size() +                       // Raw data buffer
           restarts_.size() * sizeof(uint32_t) +  // Restart array
           sizeof(uint32_t));                     // Restart array length
 }
 
 Slice BlockBuilder::Finish() {
@@ -74,7 +73,7 @@ void BlockBuilder::Add(const Slice& key, const Slice& value) {
   Slice last_key_piece(last_key_);
   assert(!finished_);
   assert(counter_ <= options_->block_restart_interval);
   assert(buffer_.empty()  // No values yet?
          || options_->comparator->Compare(key, last_key_piece) > 0);
   size_t shared = 0;
   if (counter_ < options_->block_restart_interval) {
@@ -5,9 +5,10 @@
 #ifndef STORAGE_LEVELDB_TABLE_BLOCK_BUILDER_H_
 #define STORAGE_LEVELDB_TABLE_BLOCK_BUILDER_H_
 
+#include <stdint.h>
+
 #include <vector>
 
-#include <stdint.h>
 #include "leveldb/slice.h"
 
 namespace leveldb {
@@ -35,17 +36,15 @@ class BlockBuilder {
   size_t CurrentSizeEstimate() const;
 
   // Return true iff no entries have been added since the last Reset()
-  bool empty() const {
-    return buffer_.empty();
-  }
+  bool empty() const { return buffer_.empty(); }
 
  private:
   const Options* options_;
   std::string buffer_;              // Destination buffer
   std::vector<uint32_t> restarts_;  // Restart points
   int counter_;                     // Number of entries emitted since restart
   bool finished_;                   // Has Finish() been called?
   std::string last_key_;
 
   // No copying allowed
   BlockBuilder(const BlockBuilder&);
@@ -16,8 +16,7 @@ static const size_t kFilterBaseLg = 11;
 static const size_t kFilterBase = 1 << kFilterBaseLg;
 
 FilterBlockBuilder::FilterBlockBuilder(const FilterPolicy* policy)
-    : policy_(policy) {
-}
+    : policy_(policy) {}
 
 void FilterBlockBuilder::StartBlock(uint64_t block_offset) {
   uint64_t filter_index = (block_offset / kFilterBase);
@@ -62,7 +61,7 @@ void FilterBlockBuilder::GenerateFilter() {
   tmp_keys_.resize(num_keys);
   for (size_t i = 0; i < num_keys; i++) {
     const char* base = keys_.data() + start_[i];
-    size_t length = start_[i+1] - start_[i];
+    size_t length = start_[i + 1] - start_[i];
     tmp_keys_[i] = Slice(base, length);
   }
 
@@ -77,14 +76,10 @@ void FilterBlockBuilder::GenerateFilter() {
 
 FilterBlockReader::FilterBlockReader(const FilterPolicy* policy,
                                      const Slice& contents)
-    : policy_(policy),
-      data_(nullptr),
-      offset_(nullptr),
-      num_(0),
-      base_lg_(0) {
+    : policy_(policy), data_(nullptr), offset_(nullptr), num_(0), base_lg_(0) {
   size_t n = contents.size();
   if (n < 5) return;  // 1 byte for base_lg_ and 4 for start of offset array
-  base_lg_ = contents[n-1];
+  base_lg_ = contents[n - 1];
   uint32_t last_word = DecodeFixed32(contents.data() + n - 5);
   if (last_word > n - 5) return;
   data_ = contents.data();
@@ -95,8 +90,8 @@ FilterBlockReader::FilterBlockReader(const FilterPolicy* policy,
 bool FilterBlockReader::KeyMayMatch(uint64_t block_offset, const Slice& key) {
   uint64_t index = block_offset >> base_lg_;
   if (index < num_) {
-    uint32_t start = DecodeFixed32(offset_ + index*4);
-    uint32_t limit = DecodeFixed32(offset_ + index*4 + 4);
+    uint32_t start = DecodeFixed32(offset_ + index * 4);
+    uint32_t limit = DecodeFixed32(offset_ + index * 4 + 4);
     if (start <= limit && limit <= static_cast<size_t>(offset_ - data_)) {
       Slice filter = Slice(data_ + start, limit - start);
       return policy_->KeyMayMatch(key, filter);
@@ -108,4 +103,4 @@ bool FilterBlockReader::KeyMayMatch(uint64_t block_offset, const Slice& key) {
   return true;  // Errors are treated as potential matches
 }
 
-}
+}  // namespace leveldb
@@ -11,8 +11,10 @@
 
 #include <stddef.h>
 #include <stdint.h>
+
 #include <string>
 #include <vector>
+
 #include "leveldb/slice.h"
 #include "util/hash.h"
 
@@ -38,10 +40,10 @@ class FilterBlockBuilder {
   void GenerateFilter();
 
   const FilterPolicy* policy_;
   std::string keys_;             // Flattened key contents
   std::vector<size_t> start_;    // Starting index in keys_ of each key
   std::string result_;           // Filter data computed so far
   std::vector<Slice> tmp_keys_;  // policy_->CreateFilter() argument
   std::vector<uint32_t> filter_offsets_;
 
   // No copying allowed
@@ -51,7 +53,7 @@ class FilterBlockBuilder {
 
 class FilterBlockReader {
  public:
   // REQUIRES: "contents" and *policy must stay live while *this is live.
   FilterBlockReader(const FilterPolicy* policy, const Slice& contents);
   bool KeyMayMatch(uint64_t block_offset, const Slice& key);
 
@@ -63,6 +65,6 @@ class FilterBlockReader {
   size_t base_lg_;  // Encoding parameter (see kFilterBaseLg in .cc file)
 };
 
-}
+}  // namespace leveldb
 
 #endif  // STORAGE_LEVELDB_TABLE_FILTER_BLOCK_H_
@@ -16,9 +16,7 @@ namespace leveldb {
 // For testing: emit an array with one hash value per key
 class TestHashFilter : public FilterPolicy {
  public:
-  virtual const char* Name() const {
-    return "TestHashFilter";
-  }
+  virtual const char* Name() const { return "TestHashFilter"; }
 
   virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const {
     for (int i = 0; i < n; i++) {
@@ -69,8 +67,8 @@ TEST(FilterBlockTest, SingleChunk) {
   ASSERT_TRUE(reader.KeyMayMatch(100, "box"));
   ASSERT_TRUE(reader.KeyMayMatch(100, "hello"));
   ASSERT_TRUE(reader.KeyMayMatch(100, "foo"));
-  ASSERT_TRUE(! reader.KeyMayMatch(100, "missing"));
-  ASSERT_TRUE(! reader.KeyMayMatch(100, "other"));
+  ASSERT_TRUE(!reader.KeyMayMatch(100, "missing"));
+  ASSERT_TRUE(!reader.KeyMayMatch(100, "other"));
 }
 
 TEST(FilterBlockTest, MultiChunk) {
@@ -99,30 +97,28 @@ TEST(FilterBlockTest, MultiChunk) {
   // Check first filter
   ASSERT_TRUE(reader.KeyMayMatch(0, "foo"));
   ASSERT_TRUE(reader.KeyMayMatch(2000, "bar"));
-  ASSERT_TRUE(! reader.KeyMayMatch(0, "box"));
-  ASSERT_TRUE(! reader.KeyMayMatch(0, "hello"));
+  ASSERT_TRUE(!reader.KeyMayMatch(0, "box"));
+  ASSERT_TRUE(!reader.KeyMayMatch(0, "hello"));
 
   // Check second filter
   ASSERT_TRUE(reader.KeyMayMatch(3100, "box"));
-  ASSERT_TRUE(! reader.KeyMayMatch(3100, "foo"));
-  ASSERT_TRUE(! reader.KeyMayMatch(3100, "bar"));
-  ASSERT_TRUE(! reader.KeyMayMatch(3100, "hello"));
+  ASSERT_TRUE(!reader.KeyMayMatch(3100, "foo"));
+  ASSERT_TRUE(!reader.KeyMayMatch(3100, "bar"));
+  ASSERT_TRUE(!reader.KeyMayMatch(3100, "hello"));
 
   // Check third filter (empty)
-  ASSERT_TRUE(! reader.KeyMayMatch(4100, "foo"));
-  ASSERT_TRUE(! reader.KeyMayMatch(4100, "bar"));
-  ASSERT_TRUE(! reader.KeyMayMatch(4100, "box"));
-  ASSERT_TRUE(! reader.KeyMayMatch(4100, "hello"));
+  ASSERT_TRUE(!reader.KeyMayMatch(4100, "foo"));
+  ASSERT_TRUE(!reader.KeyMayMatch(4100, "bar"));
+  ASSERT_TRUE(!reader.KeyMayMatch(4100, "box"));
+  ASSERT_TRUE(!reader.KeyMayMatch(4100, "hello"));
 
   // Check last filter
   ASSERT_TRUE(reader.KeyMayMatch(9000, "box"));
   ASSERT_TRUE(reader.KeyMayMatch(9000, "hello"));
-  ASSERT_TRUE(! reader.KeyMayMatch(9000, "foo"));
-  ASSERT_TRUE(! reader.KeyMayMatch(9000, "bar"));
+  ASSERT_TRUE(!reader.KeyMayMatch(9000, "foo"));
+  ASSERT_TRUE(!reader.KeyMayMatch(9000, "bar"));
 }
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
@@ -21,8 +21,7 @@ void BlockHandle::EncodeTo(std::string* dst) const {
 }
 
 Status BlockHandle::DecodeFrom(Slice* input) {
-  if (GetVarint64(input, &offset_) &&
-      GetVarint64(input, &size_)) {
+  if (GetVarint64(input, &offset_) && GetVarint64(input, &size_)) {
     return Status::OK();
   } else {
     return Status::Corruption("bad block handle");
@@ -62,10 +61,8 @@ Status Footer::DecodeFrom(Slice* input) {
   return result;
 }
 
-Status ReadBlock(RandomAccessFile* file,
-                 const ReadOptions& options,
-                 const BlockHandle& handle,
-                 BlockContents* result) {
+Status ReadBlock(RandomAccessFile* file, const ReadOptions& options,
+                 const BlockHandle& handle, BlockContents* result) {
   result->data = Slice();
   result->cachable = false;
   result->heap_allocated = false;
@@ -86,7 +83,7 @@ Status ReadBlock(RandomAccessFile* file,
   }
 
   // Check the crc of the type and the block contents
   const char* data = contents.data();  // Pointer to where Read put the data
   if (options.verify_checksums) {
     const uint32_t crc = crc32c::Unmask(DecodeFixed32(data + n + 1));
     const uint32_t actual = crc32c::Value(data, n + 1);
@@ -5,8 +5,10 @@
 #ifndef STORAGE_LEVELDB_TABLE_FORMAT_H_
 #define STORAGE_LEVELDB_TABLE_FORMAT_H_
 
-#include <string>
 #include <stdint.h>
+
+#include <string>
+
 #include "leveldb/slice.h"
 #include "leveldb/status.h"
 #include "leveldb/table_builder.h"
@@ -46,19 +48,15 @@ class BlockHandle {
 // end of every table file.
 class Footer {
  public:
-  Footer() { }
+  Footer() {}
 
   // The block handle for the metaindex block of the table
   const BlockHandle& metaindex_handle() const { return metaindex_handle_; }
   void set_metaindex_handle(const BlockHandle& h) { metaindex_handle_ = h; }
 
   // The block handle for the index block of the table
-  const BlockHandle& index_handle() const {
-    return index_handle_;
-  }
-  void set_index_handle(const BlockHandle& h) {
-    index_handle_ = h;
-  }
+  const BlockHandle& index_handle() const { return index_handle_; }
+  void set_index_handle(const BlockHandle& h) { index_handle_ = h; }
 
   void EncodeTo(std::string* dst) const;
   Status DecodeFrom(Slice* input);
@@ -66,9 +64,7 @@ class Footer {
   // Encoded length of a Footer.  Note that the serialization of a
   // Footer will always occupy exactly this many bytes.  It consists
   // of two block handles and a magic number.
-  enum {
-    kEncodedLength = 2*BlockHandle::kMaxEncodedLength + 8
-  };
+  enum { kEncodedLength = 2 * BlockHandle::kMaxEncodedLength + 8 };
 
  private:
   BlockHandle metaindex_handle_;
@@ -91,17 +87,13 @@ struct BlockContents {
 
 // Read the block identified by "handle" from "file".  On failure
 // return non-OK.  On success fill *result and return OK.
-Status ReadBlock(RandomAccessFile* file,
-                 const ReadOptions& options,
-                 const BlockHandle& handle,
-                 BlockContents* result);
+Status ReadBlock(RandomAccessFile* file, const ReadOptions& options,
+                 const BlockHandle& handle, BlockContents* result);
 
 // Implementation details follow.  Clients should ignore,
 
 inline BlockHandle::BlockHandle()
-    : offset_(~static_cast<uint64_t>(0)),
-      size_(~static_cast<uint64_t>(0)) {
-}
+    : offset_(~static_cast<uint64_t>(0)), size_(~static_cast<uint64_t>(0)) {}
 
 }  // namespace leveldb
 
@@ -14,7 +14,7 @@ Iterator::Iterator() {
 Iterator::~Iterator() {
   if (!cleanup_head_.IsEmpty()) {
     cleanup_head_.Run();
-    for (CleanupNode* node = cleanup_head_.next; node != nullptr; ) {
+    for (CleanupNode* node = cleanup_head_.next; node != nullptr;) {
       node->Run();
       CleanupNode* next_node = node->next;
       delete node;
@@ -42,17 +42,23 @@ namespace {
 
 class EmptyIterator : public Iterator {
  public:
-  EmptyIterator(const Status& s) : status_(s) { }
+  EmptyIterator(const Status& s) : status_(s) {}
   ~EmptyIterator() override = default;
 
   bool Valid() const override { return false; }
-  void Seek(const Slice& target) override { }
-  void SeekToFirst() override { }
-  void SeekToLast() override { }
+  void Seek(const Slice& target) override {}
+  void SeekToFirst() override {}
+  void SeekToLast() override {}
   void Next() override { assert(false); }
   void Prev() override { assert(false); }
-  Slice key() const override { assert(false); return Slice(); }
-  Slice value() const override { assert(false); return Slice(); }
+  Slice key() const override {
+    assert(false);
+    return Slice();
+  }
+  Slice value() const override {
+    assert(false);
+    return Slice();
+  }
   Status status() const override { return status_; }
 
  private:
@@ -61,9 +67,7 @@ class EmptyIterator : public Iterator {
 
 }  // anonymous namespace
 
-Iterator* NewEmptyIterator() {
-  return new EmptyIterator(Status::OK());
-}
+Iterator* NewEmptyIterator() { return new EmptyIterator(Status::OK()); }
 
 Iterator* NewErrorIterator(const Status& status) {
   return new EmptyIterator(status);
@@ -16,10 +16,8 @@ namespace leveldb {
 // cache locality.
 class IteratorWrapper {
  public:
-  IteratorWrapper(): iter_(nullptr), valid_(false) { }
-  explicit IteratorWrapper(Iterator* iter): iter_(nullptr) {
-    Set(iter);
-  }
+  IteratorWrapper() : iter_(nullptr), valid_(false) {}
+  explicit IteratorWrapper(Iterator* iter) : iter_(nullptr) { Set(iter); }
   ~IteratorWrapper() { delete iter_; }
   Iterator* iter() const { return iter_; }
 
@@ -35,18 +33,46 @@ class IteratorWrapper {
     }
   }
 
   // Iterator interface methods
   bool Valid() const { return valid_; }
-  Slice key() const { assert(Valid()); return key_; }
-  Slice value() const { assert(Valid()); return iter_->value(); }
+  Slice key() const {
+    assert(Valid());
+    return key_;
+  }
+  Slice value() const {
+    assert(Valid());
+    return iter_->value();
+  }
   // Methods below require iter() != nullptr
-  Status status() const { assert(iter_); return iter_->status(); }
-  void Next() { assert(iter_); iter_->Next(); Update(); }
-  void Prev() { assert(iter_); iter_->Prev(); Update(); }
-  void Seek(const Slice& k) { assert(iter_); iter_->Seek(k); Update(); }
-  void SeekToFirst() { assert(iter_); iter_->SeekToFirst(); Update(); }
-  void SeekToLast() { assert(iter_); iter_->SeekToLast(); Update(); }
+  Status status() const {
+    assert(iter_);
+    return iter_->status();
+  }
+  void Next() {
+    assert(iter_);
+    iter_->Next();
+    Update();
+  }
+  void Prev() {
+    assert(iter_);
+    iter_->Prev();
+    Update();
+  }
+  void Seek(const Slice& k) {
+    assert(iter_);
+    iter_->Seek(k);
+    Update();
+  }
+  void SeekToFirst() {
+    assert(iter_);
+    iter_->SeekToFirst();
+    Update();
+  }
+  void SeekToLast() {
+    assert(iter_);
+    iter_->SeekToLast();
+    Update();
+  }
 
  private:
   void Update() {
@@ -24,13 +24,9 @@ class MergingIterator : public Iterator {
     }
   }
 
-  virtual ~MergingIterator() {
-    delete[] children_;
-  }
+  virtual ~MergingIterator() { delete[] children_; }
 
-  virtual bool Valid() const {
-    return (current_ != nullptr);
-  }
+  virtual bool Valid() const { return (current_ != nullptr); }
 
   virtual void SeekToFirst() {
     for (int i = 0; i < n_; i++) {
@@ -145,10 +141,7 @@ class MergingIterator : public Iterator {
   IteratorWrapper* current_;
 
   // Which direction is the iterator moving?
-  enum Direction {
-    kForward,
-    kReverse
-  };
+  enum Direction { kForward, kReverse };
   Direction direction_;
 };
 
@@ -169,7 +162,7 @@ void MergingIterator::FindSmallest() {
 
 void MergingIterator::FindLargest() {
   IteratorWrapper* largest = nullptr;
-  for (int i = n_-1; i >= 0; i--) {
+  for (int i = n_ - 1; i >= 0; i--) {
     IteratorWrapper* child = &children_[i];
     if (child->Valid()) {
       if (largest == nullptr) {
@@ -183,14 +176,15 @@ void MergingIterator::FindLargest() {
 }
 }  // namespace
 
-Iterator* NewMergingIterator(const Comparator* cmp, Iterator** list, int n) {
+Iterator* NewMergingIterator(const Comparator* comparator, Iterator** children,
+                             int n) {
   assert(n >= 0);
   if (n == 0) {
     return NewEmptyIterator();
   } else if (n == 1) {
-    return list[0];
+    return children[0];
   } else {
-    return new MergingIterator(cmp, list, n);
+    return new MergingIterator(comparator, children, n);
   }
 }
 
@@ -18,8 +18,8 @@ class Iterator;
 // key is present in K child iterators, it will be yielded K times.
 //
 // REQUIRES: n >= 0
-Iterator* NewMergingIterator(
-    const Comparator* comparator, Iterator** children, int n);
+Iterator* NewMergingIterator(const Comparator* comparator, Iterator** children,
+                             int n);
 
 }  // namespace leveldb
 
@@ -20,7 +20,7 @@ namespace leveldb {
 struct Table::Rep {
   ~Rep() {
     delete filter;
-    delete [] filter_data;
+    delete[] filter_data;
     delete index_block;
   }
 
@@ -35,10 +35,8 @@ struct Table::Rep {
   Block* index_block;
 };
 
-Status Table::Open(const Options& options,
-                   RandomAccessFile* file,
-                   uint64_t size,
-                   Table** table) {
+Status Table::Open(const Options& options, RandomAccessFile* file,
+                   uint64_t size, Table** table) {
   *table = nullptr;
   if (size < Footer::kEncodedLength) {
     return Status::Corruption("file is too short to be an sstable");
@@ -130,14 +128,12 @@ void Table::ReadFilter(const Slice& filter_handle_value) {
     return;
   }
   if (block.heap_allocated) {
    rep_->filter_data = block.data.data();  // Will need to delete later
   }
   rep_->filter = new FilterBlockReader(rep_->options.filter_policy, block.data);
 }
 
-Table::~Table() {
-  delete rep_;
-}
+Table::~Table() { delete rep_; }
 
 static void DeleteBlock(void* arg, void* ignored) {
   delete reinterpret_cast<Block*>(arg);
@@ -156,8 +152,7 @@ static void ReleaseBlock(void* arg, void* h) {
 
 // Convert an index iterator value (i.e., an encoded BlockHandle)
 // into an iterator over the contents of the corresponding block.
-Iterator* Table::BlockReader(void* arg,
-                             const ReadOptions& options,
+Iterator* Table::BlockReader(void* arg, const ReadOptions& options,
                              const Slice& index_value) {
   Table* table = reinterpret_cast<Table*>(arg);
   Cache* block_cache = table->rep_->options.block_cache;
@@ -175,7 +170,7 @@ Iterator* Table::BlockReader(void* arg,
     if (block_cache != nullptr) {
       char cache_key_buffer[16];
       EncodeFixed64(cache_key_buffer, table->rep_->cache_id);
-      EncodeFixed64(cache_key_buffer+8, handle.offset());
+      EncodeFixed64(cache_key_buffer + 8, handle.offset());
       Slice key(cache_key_buffer, sizeof(cache_key_buffer));
       cache_handle = block_cache->Lookup(key);
       if (cache_handle != nullptr) {
@@ -185,8 +180,8 @@ Iterator* Table::BlockReader(void* arg,
       if (s.ok()) {
         block = new Block(contents);
         if (contents.cachable && options.fill_cache) {
-          cache_handle = block_cache->Insert(
-              key, block, block->size(), &DeleteCachedBlock);
+          cache_handle = block_cache->Insert(key, block, block->size(),
+                                             &DeleteCachedBlock);
         }
       }
     }
@@ -218,9 +213,9 @@ Iterator* Table::NewIterator(const ReadOptions& options) const {
       &Table::BlockReader, const_cast<Table*>(this), options);
 }
 
-Status Table::InternalGet(const ReadOptions& options, const Slice& k,
-                          void* arg,
-                          void (*saver)(void*, const Slice&, const Slice&)) {
+Status Table::InternalGet(const ReadOptions& options, const Slice& k, void* arg,
+                          void (*handle_result)(void*, const Slice&,
+                                                const Slice&)) {
   Status s;
   Iterator* iiter = rep_->index_block->NewIterator(rep_->options.comparator);
   iiter->Seek(k);
@@ -228,15 +223,14 @@ Status Table::InternalGet(const ReadOptions& options, const Slice& k,
     Slice handle_value = iiter->value();
     FilterBlockReader* filter = rep_->filter;
     BlockHandle handle;
-    if (filter != nullptr &&
-        handle.DecodeFrom(&handle_value).ok() &&
+    if (filter != nullptr && handle.DecodeFrom(&handle_value).ok() &&
         !filter->KeyMayMatch(handle.offset(), k)) {
       // Not found
     } else {
       Iterator* block_iter = BlockReader(this, options, iiter->value());
       block_iter->Seek(k);
       if (block_iter->Valid()) {
-        (*saver)(arg, block_iter->key(), block_iter->value());
+        (*handle_result)(arg, block_iter->key(), block_iter->value());
       }
       s = block_iter->status();
       delete block_iter;
@@ -249,7 +243,6 @@ Status Table::InternalGet(const ReadOptions& options, const Slice& k,
   return s;
 }
 
-
 uint64_t Table::ApproximateOffsetOf(const Slice& key) const {
   Iterator* index_iter =
       rep_->index_block->NewIterator(rep_->options.comparator);
@@ -5,6 +5,7 @@
 #include "leveldb/table_builder.h"
 
 #include <assert.h>
+
 #include "leveldb/comparator.h"
 #include "leveldb/env.h"
 #include "leveldb/filter_policy.h"
@@ -27,7 +28,7 @@ struct TableBuilder::Rep {
   BlockBuilder index_block;
   std::string last_key;
   int64_t num_entries;
   bool closed;  // Either Finish() or Abandon() has been called.
   FilterBlockBuilder* filter_block;
 
   // We do not emit the index entry for a block until we have seen the
@@ -53,8 +54,9 @@ struct TableBuilder::Rep {
         index_block(&index_block_options),
         num_entries(0),
         closed(false),
-        filter_block(opt.filter_policy == nullptr ? nullptr
-                     : new FilterBlockBuilder(opt.filter_policy)),
+        filter_block(opt.filter_policy == nullptr
+                         ? nullptr
+                         : new FilterBlockBuilder(opt.filter_policy)),
         pending_index_entry(false) {
     index_block_options.block_restart_interval = 1;
   }
@@ -173,8 +175,7 @@ void TableBuilder::WriteBlock(BlockBuilder* block, BlockHandle* handle) {
 }
 
 void TableBuilder::WriteRawBlock(const Slice& block_contents,
-                                 CompressionType type,
-                                 BlockHandle* handle) {
+                                 CompressionType type, BlockHandle* handle) {
   Rep* r = rep_;
   handle->set_offset(r->offset);
   handle->set_size(block_contents.size());
@@ -184,7 +185,7 @@ void TableBuilder::WriteRawBlock(const Slice& block_contents,
   trailer[0] = type;
   uint32_t crc = crc32c::Value(block_contents.data(), block_contents.size());
   crc = crc32c::Extend(crc, trailer, 1);  // Extend crc to cover block type
-  EncodeFixed32(trailer+1, crc32c::Mask(crc));
+  EncodeFixed32(trailer + 1, crc32c::Mask(crc));
   r->status = r->file->Append(Slice(trailer, kBlockTrailerSize));
   if (r->status.ok()) {
     r->offset += block_contents.size() + kBlockTrailerSize;
@@ -192,9 +193,7 @@ void TableBuilder::WriteRawBlock(const Slice& block_contents,
   }
 }
 
-Status TableBuilder::status() const {
-  return rep_->status;
-}
+Status TableBuilder::status() const { return rep_->status; }
 
 Status TableBuilder::Finish() {
   Rep* r = rep_;
@@ -259,12 +258,8 @@ void TableBuilder::Abandon() {
   r->closed = true;
 }
 
-uint64_t TableBuilder::NumEntries() const {
-  return rep_->num_entries;
-}
+uint64_t TableBuilder::NumEntries() const { return rep_->num_entries; }
 
-uint64_t TableBuilder::FileSize() const {
-  return rep_->offset;
-}
+uint64_t TableBuilder::FileSize() const { return rep_->offset; }
 
 }  // namespace leveldb
|
@ -6,6 +6,7 @@

 #include <map>
 #include <string>
+
 #include "db/dbformat.h"
 #include "db/memtable.h"
 #include "db/write_batch_internal.h"
@ -27,8 +28,8 @@ namespace leveldb {
 static std::string Reverse(const Slice& key) {
   std::string str(key.ToString());
   std::string rev("");
-  for (std::string::reverse_iterator rit = str.rbegin();
-       rit != str.rend(); ++rit) {
+  for (std::string::reverse_iterator rit = str.rbegin(); rit != str.rend();
+       ++rit) {
     rev.push_back(*rit);
   }
   return rev;
@ -45,9 +46,8 @@ class ReverseKeyComparator : public Comparator {
     return BytewiseComparator()->Compare(Reverse(a), Reverse(b));
   }

-  virtual void FindShortestSeparator(
-      std::string* start,
-      const Slice& limit) const {
+  virtual void FindShortestSeparator(std::string* start,
+                                     const Slice& limit) const {
     std::string s = Reverse(*start);
     std::string l = Reverse(limit);
     BytewiseComparator()->FindShortestSeparator(&s, l);
@ -79,17 +79,17 @@ namespace {
 struct STLLessThan {
   const Comparator* cmp;

-  STLLessThan() : cmp(BytewiseComparator()) { }
-  STLLessThan(const Comparator* c) : cmp(c) { }
+  STLLessThan() : cmp(BytewiseComparator()) {}
+  STLLessThan(const Comparator* c) : cmp(c) {}
   bool operator()(const std::string& a, const std::string& b) const {
     return cmp->Compare(Slice(a), Slice(b)) < 0;
   }
 };
 }  // namespace

-class StringSink: public WritableFile {
+class StringSink : public WritableFile {
  public:
-  ~StringSink() { }
+  ~StringSink() {}

   const std::string& contents() const { return contents_; }

@ -106,19 +106,17 @@ class StringSink: public WritableFile {
   std::string contents_;
 };

-class StringSource: public RandomAccessFile {
+class StringSource : public RandomAccessFile {
  public:
   StringSource(const Slice& contents)
-      : contents_(contents.data(), contents.size()) {
-  }
+      : contents_(contents.data(), contents.size()) {}

-  virtual ~StringSource() { }
+  virtual ~StringSource() {}

   uint64_t Size() const { return contents_.size(); }

   virtual Status Read(uint64_t offset, size_t n, Slice* result,
                       char* scratch) const {
     if (offset >= contents_.size()) {
       return Status::InvalidArgument("invalid Read offset");
     }
@ -140,8 +138,8 @@ typedef std::map<std::string, std::string, STLLessThan> KVMap;
 // BlockBuilder/TableBuilder and Block/Table.
 class Constructor {
  public:
-  explicit Constructor(const Comparator* cmp) : data_(STLLessThan(cmp)) { }
-  virtual ~Constructor() { }
+  explicit Constructor(const Comparator* cmp) : data_(STLLessThan(cmp)) {}
+  virtual ~Constructor() {}

   void Add(const std::string& key, const Slice& value) {
     data_[key] = value.ToString();
@ -150,14 +148,11 @@ class Constructor {
   // Finish constructing the data structure with all the keys that have
   // been added so far.  Returns the keys in sorted order in "*keys"
   // and stores the key/value pairs in "*kvmap"
-  void Finish(const Options& options,
-              std::vector<std::string>* keys,
+  void Finish(const Options& options, std::vector<std::string>* keys,
               KVMap* kvmap) {
     *kvmap = data_;
     keys->clear();
-    for (KVMap::const_iterator it = data_.begin();
-         it != data_.end();
-         ++it) {
+    for (KVMap::const_iterator it = data_.begin(); it != data_.end(); ++it) {
       keys->push_back(it->first);
     }
     data_.clear();
@ -178,23 +173,17 @@ class Constructor {
   KVMap data_;
 };

-class BlockConstructor: public Constructor {
+class BlockConstructor : public Constructor {
  public:
   explicit BlockConstructor(const Comparator* cmp)
-      : Constructor(cmp),
-        comparator_(cmp),
-        block_(nullptr) { }
-  ~BlockConstructor() {
-    delete block_;
-  }
+      : Constructor(cmp), comparator_(cmp), block_(nullptr) {}
+  ~BlockConstructor() { delete block_; }
   virtual Status FinishImpl(const Options& options, const KVMap& data) {
     delete block_;
     block_ = nullptr;
     BlockBuilder builder(&options);

-    for (KVMap::const_iterator it = data.begin();
-         it != data.end();
-         ++it) {
+    for (KVMap::const_iterator it = data.begin(); it != data.end(); ++it) {
       builder.Add(it->first, it->second);
     }
     // Open the block
@ -218,23 +207,17 @@ class BlockConstructor: public Constructor {
   BlockConstructor();
 };

-class TableConstructor: public Constructor {
+class TableConstructor : public Constructor {
  public:
   TableConstructor(const Comparator* cmp)
-      : Constructor(cmp),
-        source_(nullptr), table_(nullptr) {
-  }
-  ~TableConstructor() {
-    Reset();
-  }
+      : Constructor(cmp), source_(nullptr), table_(nullptr) {}
+  ~TableConstructor() { Reset(); }
   virtual Status FinishImpl(const Options& options, const KVMap& data) {
     Reset();
     StringSink sink;
     TableBuilder builder(options, &sink);

-    for (KVMap::const_iterator it = data.begin();
-         it != data.end();
-         ++it) {
+    for (KVMap::const_iterator it = data.begin(); it != data.end(); ++it) {
       builder.Add(it->first, it->second);
       ASSERT_TRUE(builder.status().ok());
     }
@ -273,9 +256,9 @@ class TableConstructor: public Constructor {
 };

 // A helper class that converts internal format keys into user keys
-class KeyConvertingIterator: public Iterator {
+class KeyConvertingIterator : public Iterator {
  public:
-  explicit KeyConvertingIterator(Iterator* iter) : iter_(iter) { }
+  explicit KeyConvertingIterator(Iterator* iter) : iter_(iter) {}
   virtual ~KeyConvertingIterator() { delete iter_; }
   virtual bool Valid() const { return iter_->Valid(); }
   virtual void Seek(const Slice& target) {
@ -313,25 +296,20 @@ class KeyConvertingIterator: public Iterator {
   void operator=(const KeyConvertingIterator&);
 };

-class MemTableConstructor: public Constructor {
+class MemTableConstructor : public Constructor {
  public:
   explicit MemTableConstructor(const Comparator* cmp)
-      : Constructor(cmp),
-        internal_comparator_(cmp) {
+      : Constructor(cmp), internal_comparator_(cmp) {
     memtable_ = new MemTable(internal_comparator_);
     memtable_->Ref();
   }
-  ~MemTableConstructor() {
-    memtable_->Unref();
-  }
+  ~MemTableConstructor() { memtable_->Unref(); }
   virtual Status FinishImpl(const Options& options, const KVMap& data) {
     memtable_->Unref();
     memtable_ = new MemTable(internal_comparator_);
     memtable_->Ref();
     int seq = 1;
-    for (KVMap::const_iterator it = data.begin();
-         it != data.end();
-         ++it) {
+    for (KVMap::const_iterator it = data.begin(); it != data.end(); ++it) {
       memtable_->Add(seq, kTypeValue, it->first, it->second);
       seq++;
     }
@ -346,24 +324,19 @@ class MemTableConstructor: public Constructor {
   MemTable* memtable_;
 };

-class DBConstructor: public Constructor {
+class DBConstructor : public Constructor {
  public:
   explicit DBConstructor(const Comparator* cmp)
-      : Constructor(cmp),
-        comparator_(cmp) {
+      : Constructor(cmp), comparator_(cmp) {
     db_ = nullptr;
     NewDB();
   }
-  ~DBConstructor() {
-    delete db_;
-  }
+  ~DBConstructor() { delete db_; }
   virtual Status FinishImpl(const Options& options, const KVMap& data) {
     delete db_;
     db_ = nullptr;
     NewDB();
-    for (KVMap::const_iterator it = data.begin();
-         it != data.end();
-         ++it) {
+    for (KVMap::const_iterator it = data.begin(); it != data.end(); ++it) {
       WriteBatch batch;
       batch.Put(it->first, it->second);
       ASSERT_TRUE(db_->Write(WriteOptions(), &batch).ok());
@ -396,12 +369,7 @@ class DBConstructor: public Constructor {
   DB* db_;
 };

-enum TestType {
-  TABLE_TEST,
-  BLOCK_TEST,
-  MEMTABLE_TEST,
-  DB_TEST
-};
+enum TestType { TABLE_TEST, BLOCK_TEST, MEMTABLE_TEST, DB_TEST };

 struct TestArgs {
   TestType type;
@ -410,33 +378,33 @@ struct TestArgs {
 };

 static const TestArgs kTestArgList[] = {
-  { TABLE_TEST, false, 16 },
-  { TABLE_TEST, false, 1 },
-  { TABLE_TEST, false, 1024 },
-  { TABLE_TEST, true, 16 },
-  { TABLE_TEST, true, 1 },
-  { TABLE_TEST, true, 1024 },
+    {TABLE_TEST, false, 16},
+    {TABLE_TEST, false, 1},
+    {TABLE_TEST, false, 1024},
+    {TABLE_TEST, true, 16},
+    {TABLE_TEST, true, 1},
+    {TABLE_TEST, true, 1024},

-  // Restart interval does not matter for memtables
-  { MEMTABLE_TEST, false, 16 },
-  { MEMTABLE_TEST, true, 16 },
+    {BLOCK_TEST, false, 16},
+    {BLOCK_TEST, false, 1},
+    {BLOCK_TEST, false, 1024},
+    {BLOCK_TEST, true, 16},
+    {BLOCK_TEST, true, 1},
+    {BLOCK_TEST, true, 1024},

-  { BLOCK_TEST, false, 16 },
-  { BLOCK_TEST, false, 1 },
-  { BLOCK_TEST, false, 1024 },
-  { BLOCK_TEST, true, 16 },
-  { BLOCK_TEST, true, 1 },
-  { BLOCK_TEST, true, 1024 },
+    // Restart interval does not matter for memtables
+    {MEMTABLE_TEST, false, 16},
+    {MEMTABLE_TEST, true, 16},

-  // Do not bother with restart interval variations for DB
-  { DB_TEST, false, 16 },
-  { DB_TEST, true, 16 },
+    // Do not bother with restart interval variations for DB
+    {DB_TEST, false, 16},
+    {DB_TEST, true, 16},
 };
 static const int kNumTestArgs = sizeof(kTestArgList) / sizeof(kTestArgList[0]);

 class Harness {
  public:
-  Harness() : constructor_(nullptr) { }
+  Harness() : constructor_(nullptr) {}

   void Init(const TestArgs& args) {
     delete constructor_;
@ -466,9 +434,7 @@ class Harness {
     }
   }

-  ~Harness() {
-    delete constructor_;
-  }
+  ~Harness() { delete constructor_; }

   void Add(const std::string& key, const std::string& value) {
     constructor_->Add(key, value);
@ -490,8 +456,7 @@ class Harness {
     ASSERT_TRUE(!iter->Valid());
     iter->SeekToFirst();
     for (KVMap::const_iterator model_iter = data.begin();
-         model_iter != data.end();
-         ++model_iter) {
+         model_iter != data.end(); ++model_iter) {
       ASSERT_EQ(ToString(data, model_iter), ToString(iter));
       iter->Next();
     }
@ -505,8 +470,7 @@ class Harness {
     ASSERT_TRUE(!iter->Valid());
     iter->SeekToLast();
     for (KVMap::const_reverse_iterator model_iter = data.rbegin();
-         model_iter != data.rend();
-         ++model_iter) {
+         model_iter != data.rend(); ++model_iter) {
       ASSERT_EQ(ToString(data, model_iter), ToString(iter));
       iter->Prev();
     }
@ -514,8 +478,7 @@ class Harness {
     delete iter;
   }

-  void TestRandomAccess(Random* rnd,
-                        const std::vector<std::string>& keys,
+  void TestRandomAccess(Random* rnd, const std::vector<std::string>& keys,
                         const KVMap& data) {
     static const bool kVerbose = false;
     Iterator* iter = constructor_->NewIterator();
@ -546,8 +509,8 @@ class Harness {
         case 2: {
           std::string key = PickRandomKey(rnd, keys);
           model_iter = data.lower_bound(key);
-          if (kVerbose) fprintf(stderr, "Seek '%s'\n",
-                                EscapeString(key).c_str());
+          if (kVerbose)
+            fprintf(stderr, "Seek '%s'\n", EscapeString(key).c_str());
           iter->Seek(Slice(key));
           ASSERT_EQ(ToString(data, model_iter), ToString(iter));
           break;
@ -558,7 +521,7 @@ class Harness {
           if (kVerbose) fprintf(stderr, "Prev\n");
           iter->Prev();
           if (model_iter == data.begin()) {
-            model_iter = data.end();   // Wrap around to invalid value
+            model_iter = data.end();  // Wrap around to invalid value
           } else {
             --model_iter;
           }
@ -621,8 +584,8 @@ class Harness {
         break;
       case 1: {
         // Attempt to return something smaller than an existing key
-        if (result.size() > 0 && result[result.size()-1] > '\0') {
-          result[result.size()-1]--;
+        if (!result.empty() && result[result.size() - 1] > '\0') {
+          result[result.size() - 1]--;
         }
         break;
       }
@ -720,8 +683,8 @@ TEST(Harness, Randomized) {
   for (int num_entries = 0; num_entries < 2000;
        num_entries += (num_entries < 50 ? 1 : 200)) {
     if ((num_entries % 10) == 0) {
-      fprintf(stderr, "case %d of %d: num_entries = %d\n",
-              (i + 1), int(kNumTestArgs), num_entries);
+      fprintf(stderr, "case %d of %d: num_entries = %d\n", (i + 1),
+              int(kNumTestArgs), num_entries);
     }
     for (int e = 0; e < num_entries; e++) {
       std::string v;
@ -735,7 +698,7 @@ TEST(Harness, Randomized) {

 TEST(Harness, RandomizedLongDB) {
   Random rnd(test::RandomSeed());
-  TestArgs args = { DB_TEST, false, 16 };
+  TestArgs args = {DB_TEST, false, 16};
   Init(args);
   int num_entries = 100000;
   for (int e = 0; e < num_entries; e++) {
@ -757,7 +720,7 @@ TEST(Harness, RandomizedLongDB) {
   ASSERT_GT(files, 0);
 }

-class MemTableTest { };
+class MemTableTest {};

 TEST(MemTableTest, Simple) {
   InternalKeyComparator cmp(BytewiseComparator());
@ -774,8 +737,7 @@ TEST(MemTableTest, Simple) {
   Iterator* iter = memtable->NewIterator();
   iter->SeekToFirst();
   while (iter->Valid()) {
-    fprintf(stderr, "key: '%s' -> '%s'\n",
-            iter->key().ToString().c_str(),
+    fprintf(stderr, "key: '%s' -> '%s'\n", iter->key().ToString().c_str(),
             iter->value().ToString().c_str());
     iter->Next();
   }
@ -788,14 +750,13 @@ static bool Between(uint64_t val, uint64_t low, uint64_t high) {
   bool result = (val >= low) && (val <= high);
   if (!result) {
     fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
-            (unsigned long long)(val),
-            (unsigned long long)(low),
+            (unsigned long long)(val), (unsigned long long)(low),
             (unsigned long long)(high));
   }
   return result;
 }

-class TableTest { };
+class TableTest {};

 TEST(TableTest, ApproximateOffsetOfPlain) {
   TableConstructor c(BytewiseComparator());
@ -813,18 +774,17 @@ TEST(TableTest, ApproximateOffsetOfPlain) {
   options.compression = kNoCompression;
   c.Finish(options, &keys, &kvmap);

   ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0));
   ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0));
   ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01a"), 0, 0));
   ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, 0));
   ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 0, 0));
   ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 10000, 11000));
   ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04a"), 210000, 211000));
   ASSERT_TRUE(Between(c.ApproximateOffsetOf("k05"), 210000, 211000));
   ASSERT_TRUE(Between(c.ApproximateOffsetOf("k06"), 510000, 511000));
   ASSERT_TRUE(Between(c.ApproximateOffsetOf("k07"), 510000, 511000));
   ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 610000, 612000));
-
 }

 static bool SnappyCompressionSupported() {
@ -855,7 +815,7 @@ TEST(TableTest, ApproximateOffsetOfCompressed) {

   // Expected upper and lower bounds of space used by compressible strings.
   static const int kSlop = 1000;  // Compressor effectiveness varies.
   const int expected = 2500;  // 10000 * compression ratio (0.25)
   const int min_z = expected - kSlop;
   const int max_z = expected + kSlop;

@ -871,6 +831,4 @@ TEST(TableTest, ApproximateOffsetOfCompressed) {

 }  // namespace leveldb

-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
@ -15,13 +15,10 @@

 typedef Iterator* (*BlockFunction)(void*, const ReadOptions&, const Slice&);

-class TwoLevelIterator: public Iterator {
+class TwoLevelIterator : public Iterator {
  public:
-  TwoLevelIterator(
-      Iterator* index_iter,
-      BlockFunction block_function,
-      void* arg,
-      const ReadOptions& options);
+  TwoLevelIterator(Iterator* index_iter, BlockFunction block_function,
+                   void* arg, const ReadOptions& options);

   virtual ~TwoLevelIterator();

@ -31,9 +28,7 @@ class TwoLevelIterator: public Iterator {
   virtual void Next();
   virtual void Prev();

-  virtual bool Valid() const {
-    return data_iter_.Valid();
-  }
+  virtual bool Valid() const { return data_iter_.Valid(); }
   virtual Slice key() const {
     assert(Valid());
     return data_iter_.key();
@ -67,26 +62,22 @@ class TwoLevelIterator: public Iterator {
   const ReadOptions options_;
   Status status_;
   IteratorWrapper index_iter_;
   IteratorWrapper data_iter_;  // May be nullptr
   // If data_iter_ is non-null, then "data_block_handle_" holds the
   // "index_value" passed to block_function_ to create the data_iter_.
   std::string data_block_handle_;
 };

-TwoLevelIterator::TwoLevelIterator(
-    Iterator* index_iter,
-    BlockFunction block_function,
-    void* arg,
-    const ReadOptions& options)
+TwoLevelIterator::TwoLevelIterator(Iterator* index_iter,
+                                   BlockFunction block_function, void* arg,
+                                   const ReadOptions& options)
     : block_function_(block_function),
       arg_(arg),
       options_(options),
       index_iter_(index_iter),
-      data_iter_(nullptr) {
-}
+      data_iter_(nullptr) {}

-TwoLevelIterator::~TwoLevelIterator() {
-}
+TwoLevelIterator::~TwoLevelIterator() {}

 void TwoLevelIterator::Seek(const Slice& target) {
   index_iter_.Seek(target);
@ -121,7 +112,6 @@ void TwoLevelIterator::Prev() {
   SkipEmptyDataBlocksBackward();
 }

-
 void TwoLevelIterator::SkipEmptyDataBlocksForward() {
   while (data_iter_.iter() == nullptr || !data_iter_.Valid()) {
     // Move to next block
@ -158,7 +148,8 @@ void TwoLevelIterator::InitDataBlock() {
     SetDataIterator(nullptr);
   } else {
     Slice handle = index_iter_.value();
-    if (data_iter_.iter() != nullptr && handle.compare(data_block_handle_) == 0) {
+    if (data_iter_.iter() != nullptr &&
+        handle.compare(data_block_handle_) == 0) {
       // data_iter_ is already constructed with this iterator, so
       // no need to change anything
     } else {
@ -171,11 +162,9 @@ void TwoLevelIterator::InitDataBlock() {

 }  // namespace

-Iterator* NewTwoLevelIterator(
-    Iterator* index_iter,
-    BlockFunction block_function,
-    void* arg,
-    const ReadOptions& options) {
+Iterator* NewTwoLevelIterator(Iterator* index_iter,
+                              BlockFunction block_function, void* arg,
+                              const ReadOptions& options) {
   return new TwoLevelIterator(index_iter, block_function, arg, options);
 }

@ -22,12 +22,9 @@ struct ReadOptions;
 // an iterator over the contents of the corresponding block.
 Iterator* NewTwoLevelIterator(
     Iterator* index_iter,
-    Iterator* (*block_function)(
-        void* arg,
-        const ReadOptions& options,
-        const Slice& index_value),
-    void* arg,
-    const ReadOptions& options);
+    Iterator* (*block_function)(void* arg, const ReadOptions& options,
+                                const Slice& index_value),
+    void* arg, const ReadOptions& options);

 }  // namespace leveldb

@ -39,8 +39,9 @@ char* Arena::AllocateFallback(size_t bytes) {

 char* Arena::AllocateAligned(size_t bytes) {
   const int align = (sizeof(void*) > 8) ? sizeof(void*) : 8;
-  assert((align & (align-1)) == 0);   // Pointer size should be a power of 2
-  size_t current_mod = reinterpret_cast<uintptr_t>(alloc_ptr_) & (align-1);
+  static_assert((align & (align - 1)) == 0,
+                "Pointer size should be a power of 2");
+  size_t current_mod = reinterpret_cast<uintptr_t>(alloc_ptr_) & (align - 1);
   size_t slop = (current_mod == 0 ? 0 : align - current_mod);
   size_t needed = bytes + slop;
   char* result;
@ -52,7 +53,7 @@ char* Arena::AllocateAligned(size_t bytes) {
     // AllocateFallback always returned aligned memory
     result = AllocateFallback(bytes);
   }
-  assert((reinterpret_cast<uintptr_t>(result) & (align-1)) == 0);
+  assert((reinterpret_cast<uintptr_t>(result) & (align - 1)) == 0);
   return result;
 }

@ -9,11 +9,9 @@

 namespace leveldb {

-class ArenaTest { };
+class ArenaTest {};

-TEST(ArenaTest, Empty) {
-  Arena arena;
-}
+TEST(ArenaTest, Empty) { Arena arena; }

 TEST(ArenaTest, Simple) {
   std::vector<std::pair<size_t, char*> > allocated;
@ -26,8 +24,9 @@ TEST(ArenaTest, Simple) {
     if (i % (N / 10) == 0) {
       s = i;
     } else {
-      s = rnd.OneIn(4000) ? rnd.Uniform(6000) :
-          (rnd.OneIn(10) ? rnd.Uniform(100) : rnd.Uniform(20));
+      s = rnd.OneIn(4000)
+              ? rnd.Uniform(6000)
+              : (rnd.OneIn(10) ? rnd.Uniform(100) : rnd.Uniform(20));
     }
     if (s == 0) {
       // Our arena disallows size 0 allocations.
@ -47,7 +46,7 @@ TEST(ArenaTest, Simple) {
     bytes += s;
     allocated.push_back(std::make_pair(s, r));
     ASSERT_GE(arena.MemoryUsage(), bytes);
-    if (i > N/10) {
+    if (i > N / 10) {
       ASSERT_LE(arena.MemoryUsage(), bytes * 1.10);
     }
   }
@ -63,6 +62,4 @@ TEST(ArenaTest, Simple) {

 }  // namespace leveldb

-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
@ -20,17 +20,14 @@ class BloomFilterPolicy : public FilterPolicy {
   size_t k_;

  public:
-  explicit BloomFilterPolicy(int bits_per_key)
-      : bits_per_key_(bits_per_key) {
+  explicit BloomFilterPolicy(int bits_per_key) : bits_per_key_(bits_per_key) {
     // We intentionally round down to reduce probing cost a little bit
     k_ = static_cast<size_t>(bits_per_key * 0.69);  // 0.69 =~ ln(2)
     if (k_ < 1) k_ = 1;
     if (k_ > 30) k_ = 30;
   }

-  virtual const char* Name() const {
-    return "leveldb.BuiltinBloomFilter2";
-  }
+  virtual const char* Name() const { return "leveldb.BuiltinBloomFilter2"; }

   virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const {
     // Compute bloom filter size (in both bits and bytes)
@ -54,7 +51,7 @@ class BloomFilterPolicy : public FilterPolicy {
       const uint32_t delta = (h >> 17) | (h << 15);  // Rotate right 17 bits
       for (size_t j = 0; j < k_; j++) {
         const uint32_t bitpos = h % bits;
-        array[bitpos/8] |= (1 << (bitpos % 8));
+        array[bitpos / 8] |= (1 << (bitpos % 8));
         h += delta;
       }
     }
@ -69,7 +66,7 @@ class BloomFilterPolicy : public FilterPolicy {

     // Use the encoded k so that we can read filters generated by
     // bloom filters created using different parameters.
-    const size_t k = array[len-1];
+    const size_t k = array[len - 1];
     if (k > 30) {
       // Reserved for potentially new encodings for short bloom filters.
       // Consider it a match.
@ -80,13 +77,13 @@ class BloomFilterPolicy : public FilterPolicy {
     const uint32_t delta = (h >> 17) | (h << 15);  // Rotate right 17 bits
     for (size_t j = 0; j < k; j++) {
       const uint32_t bitpos = h % bits;
-      if ((array[bitpos/8] & (1 << (bitpos % 8))) == 0) return false;
+      if ((array[bitpos / 8] & (1 << (bitpos % 8))) == 0) return false;
       h += delta;
     }
     return true;
   }
 };
-}
+}  // namespace

 const FilterPolicy* NewBloomFilterPolicy(int bits_per_key) {
   return new BloomFilterPolicy(bits_per_key);
@ -25,20 +25,16 @@ class BloomTest {
   std::vector<std::string> keys_;

  public:
-  BloomTest() : policy_(NewBloomFilterPolicy(10)) { }
+  BloomTest() : policy_(NewBloomFilterPolicy(10)) {}

-  ~BloomTest() {
-    delete policy_;
-  }
+  ~BloomTest() { delete policy_; }

   void Reset() {
     keys_.clear();
     filter_.clear();
   }

-  void Add(const Slice& s) {
-    keys_.push_back(s.ToString());
-  }
+  void Add(const Slice& s) { keys_.push_back(s.ToString()); }

   void Build() {
     std::vector<Slice> key_slices;
@ -52,16 +48,14 @@ class BloomTest {
     if (kVerbose >= 2) DumpFilter();
   }

-  size_t FilterSize() const {
-    return filter_.size();
-  }
+  size_t FilterSize() const { return filter_.size(); }

   void DumpFilter() {
     fprintf(stderr, "F(");
-    for (size_t i = 0; i+1 < filter_.size(); i++) {
+    for (size_t i = 0; i + 1 < filter_.size(); i++) {
       const unsigned int c = static_cast<unsigned int>(filter_[i]);
       for (int j = 0; j < 8; j++) {
-        fprintf(stderr, "%c", (c & (1 <<j)) ? '1' : '.');
+        fprintf(stderr, "%c", (c & (1 << j)) ? '1' : '.');
       }
     }
     fprintf(stderr, ")\n");
@ -87,8 +81,8 @@ class BloomTest {
 };

 TEST(BloomTest, EmptyFilter) {
-  ASSERT_TRUE(! Matches("hello"));
-  ASSERT_TRUE(! Matches("world"));
+  ASSERT_TRUE(!Matches("hello"));
+  ASSERT_TRUE(!Matches("world"));
 }

 TEST(BloomTest, Small) {
@ -96,8 +90,8 @@ TEST(BloomTest, Small) {
   Add("world");
   ASSERT_TRUE(Matches("hello"));
   ASSERT_TRUE(Matches("world"));
-  ASSERT_TRUE(! Matches("x"));
-  ASSERT_TRUE(! Matches("foo"));
+  ASSERT_TRUE(!Matches("x"));
+  ASSERT_TRUE(!Matches("foo"));
 }

 static int NextLength(int length) {
@ -140,23 +134,23 @@ TEST(BloomTest, VaryingLengths) {
     double rate = FalsePositiveRate();
     if (kVerbose >= 1) {
       fprintf(stderr, "False positives: %5.2f%% @ length = %6d ; bytes = %6d\n",
-              rate*100.0, length, static_cast<int>(FilterSize()));
+              rate * 100.0, length, static_cast<int>(FilterSize()));
     }
     ASSERT_LE(rate, 0.02);  // Must not be over 2%
-    if (rate > 0.0125) mediocre_filters++;  // Allowed, but not too often
-    else good_filters++;
+    if (rate > 0.0125)
+      mediocre_filters++;  // Allowed, but not too often
+    else
+      good_filters++;
   }
   if (kVerbose >= 1) {
-    fprintf(stderr, "Filters: %d good, %d mediocre\n",
-            good_filters, mediocre_filters);
+    fprintf(stderr, "Filters: %d good, %d mediocre\n", good_filters,
+            mediocre_filters);
   }
-  ASSERT_LE(mediocre_filters, good_filters/5);
+  ASSERT_LE(mediocre_filters, good_filters / 5);
 }

 // Different bits-per-byte

 }  // namespace leveldb

-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
@ -14,8 +14,7 @@

 namespace leveldb {

-Cache::~Cache() {
-}
+Cache::~Cache() {}

 namespace {

@ -46,12 +45,12 @@ struct LRUHandle {
   LRUHandle* next_hash;
   LRUHandle* next;
   LRUHandle* prev;
   size_t charge;  // TODO(opt): Only allow uint32_t?
   size_t key_length;
   bool in_cache;     // Whether entry is in the cache.
   uint32_t refs;     // References, including cache reference, if present.
   uint32_t hash;     // Hash of key(); used for fast sharding and comparisons
   char key_data[1];  // Beginning of key

   Slice key() const {
     // next_ is only equal to this if the LRU handle is the list head of an
@ -114,8 +113,7 @@ class HandleTable {
   // pointer to the trailing slot in the corresponding linked list.
   LRUHandle** FindPointer(const Slice& key, uint32_t hash) {
     LRUHandle** ptr = &list_[hash & (length_ - 1)];
-    while (*ptr != nullptr &&
-           ((*ptr)->hash != hash || key != (*ptr)->key())) {
+    while (*ptr != nullptr && ((*ptr)->hash != hash || key != (*ptr)->key())) {
       ptr = &(*ptr)->next_hash;
     }
     return ptr;
@ -158,8 +156,8 @@ class LRUCache {
   void SetCapacity(size_t capacity) { capacity_ = capacity; }

   // Like Cache methods, but with an extra "hash" parameter.
-  Cache::Handle* Insert(const Slice& key, uint32_t hash,
-                        void* value, size_t charge,
+  Cache::Handle* Insert(const Slice& key, uint32_t hash, void* value,
+                        size_t charge,
                         void (*deleter)(const Slice& key, void* value));
   Cache::Handle* Lookup(const Slice& key, uint32_t hash);
   void Release(Cache::Handle* handle);
@ -172,7 +170,7 @@ class LRUCache {

  private:
   void LRU_Remove(LRUHandle* e);
-  void LRU_Append(LRUHandle*list, LRUHandle* e);
+  void LRU_Append(LRUHandle* list, LRUHandle* e);
   void Ref(LRUHandle* e);
   void Unref(LRUHandle* e);
   bool FinishErase(LRUHandle* e) EXCLUSIVE_LOCKS_REQUIRED(mutex_);
@ -206,7 +204,7 @@ LRUCache::LRUCache() : capacity_(0), usage_(0) {

 LRUCache::~LRUCache() {
   assert(in_use_.next == &in_use_);  // Error if caller has an unreleased handle
-  for (LRUHandle* e = lru_.next; e != &lru_; ) {
+  for (LRUHandle* e = lru_.next; e != &lru_;) {
     LRUHandle* next = e->next;
     assert(e->in_cache);
     e->in_cache = false;
@ -265,13 +263,14 @@ void LRUCache::Release(Cache::Handle* handle) {
   Unref(reinterpret_cast<LRUHandle*>(handle));
 }

-Cache::Handle* LRUCache::Insert(
-    const Slice& key, uint32_t hash, void* value, size_t charge,
-    void (*deleter)(const Slice& key, void* value)) {
+Cache::Handle* LRUCache::Insert(const Slice& key, uint32_t hash, void* value,
+                                size_t charge,
+                                void (*deleter)(const Slice& key,
+                                                void* value)) {
   MutexLock l(&mutex_);

-  LRUHandle* e = reinterpret_cast<LRUHandle*>(
-      malloc(sizeof(LRUHandle)-1 + key.size()));
+  LRUHandle* e =
+      reinterpret_cast<LRUHandle*>(malloc(sizeof(LRUHandle) - 1 + key.size()));
   e->value = value;
   e->deleter = deleter;
   e->charge = charge;
@ -346,19 +345,16 @@ class ShardedLRUCache : public Cache {
     return Hash(s.data(), s.size(), 0);
   }

-  static uint32_t Shard(uint32_t hash) {
-    return hash >> (32 - kNumShardBits);
-  }
+  static uint32_t Shard(uint32_t hash) { return hash >> (32 - kNumShardBits); }

  public:
-  explicit ShardedLRUCache(size_t capacity)
-      : last_id_(0) {
+  explicit ShardedLRUCache(size_t capacity) : last_id_(0) {
     const size_t per_shard = (capacity + (kNumShards - 1)) / kNumShards;
     for (int s = 0; s < kNumShards; s++) {
       shard_[s].SetCapacity(per_shard);
     }
   }
-  virtual ~ShardedLRUCache() { }
+  virtual ~ShardedLRUCache() {}
   virtual Handle* Insert(const Slice& key, void* value, size_t charge,
                          void (*deleter)(const Slice& key, void* value)) {
     const uint32_t hash = HashSlice(key);
@ -399,8 +395,6 @@ class ShardedLRUCache : public Cache {

 }  // end anonymous namespace

-Cache* NewLRUCache(size_t capacity) {
-  return new ShardedLRUCache(capacity);
-}
+Cache* NewLRUCache(size_t capacity) { return new ShardedLRUCache(capacity); }

 }  // namespace leveldb
@ -37,13 +37,9 @@ class CacheTest {
   std::vector<int> deleted_values_;
   Cache* cache_;

-  CacheTest() : cache_(NewLRUCache(kCacheSize)) {
-    current_ = this;
-  }
+  CacheTest() : cache_(NewLRUCache(kCacheSize)) { current_ = this; }

-  ~CacheTest() {
-    delete cache_;
-  }
+  ~CacheTest() { delete cache_; }

   int Lookup(int key) {
     Cache::Handle* handle = cache_->Lookup(EncodeKey(key));
@ -64,9 +60,7 @@ class CacheTest {
                    &CacheTest::Deleter);
   }

-  void Erase(int key) {
-    cache_->Erase(EncodeKey(key));
-  }
+  void Erase(int key) { cache_->Erase(EncodeKey(key)); }
 };
 CacheTest* CacheTest::current_;

@ -75,18 +69,18 @@ TEST(CacheTest, HitAndMiss) {
   Insert(100, 101);
   ASSERT_EQ(101, Lookup(100));
   ASSERT_EQ(-1, Lookup(200));
   ASSERT_EQ(-1, Lookup(300));

   Insert(200, 201);
   ASSERT_EQ(101, Lookup(100));
   ASSERT_EQ(201, Lookup(200));
   ASSERT_EQ(-1, Lookup(300));

   Insert(100, 102);
   ASSERT_EQ(102, Lookup(100));
   ASSERT_EQ(201, Lookup(200));
   ASSERT_EQ(-1, Lookup(300));

   ASSERT_EQ(1, deleted_keys_.size());
   ASSERT_EQ(100, deleted_keys_[0]);
@ -100,14 +94,14 @@ TEST(CacheTest, Erase) {
   Insert(100, 101);
   Insert(200, 201);
   Erase(100);
   ASSERT_EQ(-1, Lookup(100));
   ASSERT_EQ(201, Lookup(200));
   ASSERT_EQ(1, deleted_keys_.size());
   ASSERT_EQ(100, deleted_keys_[0]);
   ASSERT_EQ(101, deleted_values_[0]);

   Erase(100);
   ASSERT_EQ(-1, Lookup(100));
   ASSERT_EQ(201, Lookup(200));
   ASSERT_EQ(1, deleted_keys_.size());
 }
@ -146,8 +140,8 @@ TEST(CacheTest, EvictionPolicy) {
   // Frequently used entry must be kept around,
   // as must things that are still in use.
   for (int i = 0; i < kCacheSize + 100; i++) {
-    Insert(1000+i, 2000+i);
-    ASSERT_EQ(2000+i, Lookup(1000+i));
+    Insert(1000 + i, 2000 + i);
+    ASSERT_EQ(2000 + i, Lookup(1000 + i));
     ASSERT_EQ(101, Lookup(100));
   }
   ASSERT_EQ(101, Lookup(100));
@ -160,12 +154,12 @@ TEST(CacheTest, UseExceedsCacheSize) {
   // Overfill the cache, keeping handles on all inserted entries.
   std::vector<Cache::Handle*> h;
   for (int i = 0; i < kCacheSize + 100; i++) {
-    h.push_back(InsertAndReturnHandle(1000+i, 2000+i));
+    h.push_back(InsertAndReturnHandle(1000 + i, 2000 + i));
   }

   // Check that all the entries can be found in the cache.
   for (int i = 0; i < h.size(); i++) {
-    ASSERT_EQ(2000+i, Lookup(1000+i));
+    ASSERT_EQ(2000 + i, Lookup(1000 + i));
   }

   for (int i = 0; i < h.size(); i++) {
@ -181,9 +175,9 @@ TEST(CacheTest, HeavyEntries) {
   const int kHeavy = 10;
   int added = 0;
   int index = 0;
-  while (added < 2*kCacheSize) {
+  while (added < 2 * kCacheSize) {
     const int weight = (index & 1) ? kLight : kHeavy;
-    Insert(index, 1000+index, weight);
+    Insert(index, 1000 + index, weight);
     added += weight;
     index++;
   }
@ -194,10 +188,10 @@ TEST(CacheTest, HeavyEntries) {
     int r = Lookup(i);
     if (r >= 0) {
       cached_weight += weight;
-      ASSERT_EQ(1000+i, r);
+      ASSERT_EQ(1000 + i, r);
     }
   }
-  ASSERT_LE(cached_weight, kCacheSize + kCacheSize/10);
+  ASSERT_LE(cached_weight, kCacheSize + kCacheSize / 10);
 }

 TEST(CacheTest, NewId) {
@ -229,6 +223,4 @@ TEST(CacheTest, ZeroSizeCache) {

 }  // namespace leveldb

-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
@ -6,29 +6,29 @@

 namespace leveldb {

-void EncodeFixed32(char* buf, uint32_t value) {
+void EncodeFixed32(char* dst, uint32_t value) {
   if (port::kLittleEndian) {
-    memcpy(buf, &value, sizeof(value));
+    memcpy(dst, &value, sizeof(value));
   } else {
-    buf[0] = value & 0xff;
-    buf[1] = (value >> 8) & 0xff;
-    buf[2] = (value >> 16) & 0xff;
-    buf[3] = (value >> 24) & 0xff;
+    dst[0] = value & 0xff;
+    dst[1] = (value >> 8) & 0xff;
+    dst[2] = (value >> 16) & 0xff;
+    dst[3] = (value >> 24) & 0xff;
   }
 }

-void EncodeFixed64(char* buf, uint64_t value) {
+void EncodeFixed64(char* dst, uint64_t value) {
   if (port::kLittleEndian) {
-    memcpy(buf, &value, sizeof(value));
+    memcpy(dst, &value, sizeof(value));
   } else {
-    buf[0] = value & 0xff;
-    buf[1] = (value >> 8) & 0xff;
-    buf[2] = (value >> 16) & 0xff;
-    buf[3] = (value >> 24) & 0xff;
-    buf[4] = (value >> 32) & 0xff;
-    buf[5] = (value >> 40) & 0xff;
-    buf[6] = (value >> 48) & 0xff;
-    buf[7] = (value >> 56) & 0xff;
+    dst[0] = value & 0xff;
+    dst[1] = (value >> 8) & 0xff;
+    dst[2] = (value >> 16) & 0xff;
+    dst[3] = (value >> 24) & 0xff;
+    dst[4] = (value >> 32) & 0xff;
+    dst[5] = (value >> 40) & 0xff;
+    dst[6] = (value >> 48) & 0xff;
+    dst[7] = (value >> 56) & 0xff;
   }
 }

@ -48,26 +48,26 @@ char* EncodeVarint32(char* dst, uint32_t v) {
   // Operate on characters as unsigneds
   unsigned char* ptr = reinterpret_cast<unsigned char*>(dst);
   static const int B = 128;
-  if (v < (1<<7)) {
+  if (v < (1 << 7)) {
     *(ptr++) = v;
-  } else if (v < (1<<14)) {
+  } else if (v < (1 << 14)) {
     *(ptr++) = v | B;
-    *(ptr++) = v>>7;
-  } else if (v < (1<<21)) {
+    *(ptr++) = v >> 7;
+  } else if (v < (1 << 21)) {
     *(ptr++) = v | B;
-    *(ptr++) = (v>>7) | B;
-    *(ptr++) = v>>14;
-  } else if (v < (1<<28)) {
+    *(ptr++) = (v >> 7) | B;
+    *(ptr++) = v >> 14;
+  } else if (v < (1 << 28)) {
     *(ptr++) = v | B;
-    *(ptr++) = (v>>7) | B;
-    *(ptr++) = (v>>14) | B;
-    *(ptr++) = v>>21;
+    *(ptr++) = (v >> 7) | B;
+    *(ptr++) = (v >> 14) | B;
+    *(ptr++) = v >> 21;
   } else {
     *(ptr++) = v | B;
-    *(ptr++) = (v>>7) | B;
-    *(ptr++) = (v>>14) | B;
-    *(ptr++) = (v>>21) | B;
-    *(ptr++) = v>>28;
+    *(ptr++) = (v >> 7) | B;
+    *(ptr++) = (v >> 14) | B;
+    *(ptr++) = (v >> 21) | B;
+    *(ptr++) = v >> 28;
   }
   return reinterpret_cast<char*>(ptr);
 }
@ -109,8 +109,7 @@ int VarintLength(uint64_t v) {
   return len;
 }

-const char* GetVarint32PtrFallback(const char* p,
-                                   const char* limit,
+const char* GetVarint32PtrFallback(const char* p, const char* limit,
                                    uint32_t* value) {
   uint32_t result = 0;
   for (uint32_t shift = 0; shift <= 28 && p < limit; shift += 7) {
@ -181,8 +180,7 @@ const char* GetLengthPrefixedSlice(const char* p, const char* limit,

 bool GetLengthPrefixedSlice(Slice* input, Slice* result) {
   uint32_t len;
-  if (GetVarint32(input, &len) &&
-      input->size() >= len) {
+  if (GetVarint32(input, &len) && input->size() >= len) {
     *result = Slice(input->data(), len);
     input->remove_prefix(len);
     return true;
|
|||||||
memcpy(&result, ptr, sizeof(result)); // gcc optimizes this to a plain load
|
memcpy(&result, ptr, sizeof(result)); // gcc optimizes this to a plain load
|
||||||
return result;
|
return result;
|
||||||
} else {
|
} else {
|
||||||
return ((static_cast<uint32_t>(static_cast<unsigned char>(ptr[0])))
|
return ((static_cast<uint32_t>(static_cast<unsigned char>(ptr[0]))) |
|
||||||
| (static_cast<uint32_t>(static_cast<unsigned char>(ptr[1])) << 8)
|
(static_cast<uint32_t>(static_cast<unsigned char>(ptr[1])) << 8) |
|
||||||
| (static_cast<uint32_t>(static_cast<unsigned char>(ptr[2])) << 16)
|
(static_cast<uint32_t>(static_cast<unsigned char>(ptr[2])) << 16) |
|
||||||
| (static_cast<uint32_t>(static_cast<unsigned char>(ptr[3])) << 24));
|
(static_cast<uint32_t>(static_cast<unsigned char>(ptr[3])) << 24));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -85,11 +85,9 @@ inline uint64_t DecodeFixed64(const char* ptr) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Internal routine for use by fallback path of GetVarint32Ptr
|
// Internal routine for use by fallback path of GetVarint32Ptr
|
||||||
const char* GetVarint32PtrFallback(const char* p,
|
const char* GetVarint32PtrFallback(const char* p, const char* limit,
|
||||||
const char* limit,
|
|
||||||
uint32_t* value);
|
uint32_t* value);
|
||||||
inline const char* GetVarint32Ptr(const char* p,
|
inline const char* GetVarint32Ptr(const char* p, const char* limit,
|
||||||
const char* limit,
|
|
||||||
uint32_t* value) {
|
uint32_t* value) {
|
||||||
if (p < limit) {
|
if (p < limit) {
|
||||||
uint32_t result = *(reinterpret_cast<const unsigned char*>(p));
|
uint32_t result = *(reinterpret_cast<const unsigned char*>(p));
|
||||||
|
@ -9,7 +9,7 @@

 namespace leveldb {

-class Coding { };
+class Coding {};

 TEST(Coding, Fixed32) {
   std::string s;
@ -39,15 +39,15 @@ TEST(Coding, Fixed64) {
     uint64_t v = static_cast<uint64_t>(1) << power;
     uint64_t actual;
     actual = DecodeFixed64(p);
-    ASSERT_EQ(v-1, actual);
+    ASSERT_EQ(v - 1, actual);
     p += sizeof(uint64_t);

     actual = DecodeFixed64(p);
-    ASSERT_EQ(v+0, actual);
+    ASSERT_EQ(v + 0, actual);
     p += sizeof(uint64_t);

     actual = DecodeFixed64(p);
-    ASSERT_EQ(v+1, actual);
+    ASSERT_EQ(v + 1, actual);
     p += sizeof(uint64_t);
   }
 }
@ -108,8 +108,8 @@ TEST(Coding, Varint64) {
     // Test values near powers of two
     const uint64_t power = 1ull << k;
     values.push_back(power);
-    values.push_back(power-1);
-    values.push_back(power+1);
+    values.push_back(power - 1);
+    values.push_back(power + 1);
   }

   std::string s;
@ -134,8 +134,8 @@ TEST(Coding, Varint64) {
 TEST(Coding, Varint32Overflow) {
   uint32_t result;
   std::string input("\x81\x82\x83\x84\x85\x11");
-  ASSERT_TRUE(GetVarint32Ptr(input.data(), input.data() + input.size(), &result)
-              == nullptr);
+  ASSERT_TRUE(GetVarint32Ptr(input.data(), input.data() + input.size(),
+                             &result) == nullptr);
 }

 TEST(Coding, Varint32Truncation) {
@ -146,16 +146,16 @@ TEST(Coding, Varint32Truncation) {
   for (size_t len = 0; len < s.size() - 1; len++) {
     ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + len, &result) == nullptr);
   }
-  ASSERT_TRUE(
-      GetVarint32Ptr(s.data(), s.data() + s.size(), &result) != nullptr);
+  ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + s.size(), &result) !=
+              nullptr);
   ASSERT_EQ(large_value, result);
 }

 TEST(Coding, Varint64Overflow) {
   uint64_t result;
   std::string input("\x81\x82\x83\x84\x85\x81\x82\x83\x84\x85\x11");
-  ASSERT_TRUE(GetVarint64Ptr(input.data(), input.data() + input.size(), &result)
-              == nullptr);
+  ASSERT_TRUE(GetVarint64Ptr(input.data(), input.data() + input.size(),
+                             &result) == nullptr);
 }

 TEST(Coding, Varint64Truncation) {
@ -166,8 +166,8 @@ TEST(Coding, Varint64Truncation) {
   for (size_t len = 0; len < s.size() - 1; len++) {
     ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == nullptr);
   }
-  ASSERT_TRUE(
-      GetVarint64Ptr(s.data(), s.data() + s.size(), &result) != nullptr);
+  ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + s.size(), &result) !=
+              nullptr);
   ASSERT_EQ(large_value, result);
 }

@ -193,6 +193,4 @@ TEST(Coding, Strings) {

 }  // namespace leveldb

-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
@@ -13,24 +13,21 @@
 
 namespace leveldb {
 
-Comparator::~Comparator() { }
+Comparator::~Comparator() {}
 
 namespace {
 class BytewiseComparatorImpl : public Comparator {
  public:
-  BytewiseComparatorImpl() { }
+  BytewiseComparatorImpl() {}
 
-  virtual const char* Name() const {
-    return "leveldb.BytewiseComparator";
-  }
+  virtual const char* Name() const { return "leveldb.BytewiseComparator"; }
 
   virtual int Compare(const Slice& a, const Slice& b) const {
     return a.compare(b);
   }
 
-  virtual void FindShortestSeparator(
-      std::string* start,
-      const Slice& limit) const {
+  virtual void FindShortestSeparator(std::string* start,
+                                     const Slice& limit) const {
     // Find length of common prefix
     size_t min_length = std::min(start->size(), limit.size());
     size_t diff_index = 0;
@@ -59,7 +56,7 @@ class BytewiseComparatorImpl : public Comparator {
       const uint8_t byte = (*key)[i];
      if (byte != static_cast<uint8_t>(0xff)) {
        (*key)[i] = byte + 1;
-        key->resize(i+1);
+        key->resize(i + 1);
        return;
      }
    }
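The comparator hunks above are likewise whitespace-only; `BytewiseComparatorImpl` is the implementation behind the public `BytewiseComparator()` accessor. A small usage sketch against the public header (illustrative only, not from the commit; the sample keys are made up):

```cpp
#include <cassert>
#include <string>

#include "leveldb/comparator.h"
#include "leveldb/slice.h"

int main() {
  const leveldb::Comparator* cmp = leveldb::BytewiseComparator();

  // Plain byte-wise ordering, as in Compare() above.
  assert(cmp->Compare(leveldb::Slice("abc"), leveldb::Slice("abd")) < 0);

  // FindShortestSeparator() may shorten start to something still >= start
  // and < limit; the exact result is an implementation detail.
  std::string start = "abcdef";
  cmp->FindShortestSeparator(&start, leveldb::Slice("abzzz"));
  assert(cmp->Compare(leveldb::Slice(start), leveldb::Slice("abzzz")) < 0);
  return 0;
}
```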
@@ -256,8 +256,8 @@ inline uint32_t ReadUint32LE(const uint8_t* buffer) {
 template <int N>
 constexpr inline const uint8_t* RoundUp(const uint8_t* pointer) {
   return reinterpret_cast<uint8_t*>(
-      (reinterpret_cast<uintptr_t>(pointer) + (N - 1))
-      & ~static_cast<uintptr_t>(N - 1));
+      (reinterpret_cast<uintptr_t>(pointer) + (N - 1)) &
+      ~static_cast<uintptr_t>(N - 1));
 }
 
 }  // namespace
@@ -273,14 +273,14 @@ static bool CanAccelerateCRC32C() {
   return port::AcceleratedCRC32C(0, kTestCRCBuffer, kBufSize) == kTestCRCValue;
 }
 
-uint32_t Extend(uint32_t crc, const char* buf, size_t size) {
+uint32_t Extend(uint32_t crc, const char* data, size_t n) {
   static bool accelerate = CanAccelerateCRC32C();
   if (accelerate) {
-    return port::AcceleratedCRC32C(crc, buf, size);
+    return port::AcceleratedCRC32C(crc, data, n);
   }
 
-  const uint8_t* p = reinterpret_cast<const uint8_t*>(buf);
-  const uint8_t* e = p + size;
+  const uint8_t* p = reinterpret_cast<const uint8_t*>(data);
+  const uint8_t* e = p + n;
   uint32_t l = crc ^ kCRC32Xor;
 
   // Process one byte at a time.
@@ -17,9 +17,7 @@ namespace crc32c {
 uint32_t Extend(uint32_t init_crc, const char* data, size_t n);
 
 // Return the crc32c of data[0,n-1]
-inline uint32_t Value(const char* data, size_t n) {
-  return Extend(0, data, n);
-}
+inline uint32_t Value(const char* data, size_t n) { return Extend(0, data, n); }
 
 static const uint32_t kMaskDelta = 0xa282ead8ul;
 
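As the one-liner above makes explicit, `Value()` is just `Extend(0, data, n)`, and `kMaskDelta` feeds the `Mask()`/`Unmask()` helpers declared nearby in the same header. A short sketch of the intended usage (illustrative only; assumes the internal `util/crc32c.h` header is on the include path):

```cpp
#include <cassert>
#include <cstdint>

#include "util/crc32c.h"

int main() {
  const char kData[] = "hello world";

  // Value(data, n) == Extend(0, data, n).
  uint32_t crc = leveldb::crc32c::Value(kData, 11);
  assert(crc == leveldb::crc32c::Extend(0, kData, 11));

  // Stored checksums are masked so that computing a CRC over data that
  // itself contains CRCs stays well behaved.
  uint32_t stored = leveldb::crc32c::Mask(crc);
  assert(leveldb::crc32c::Unmask(stored) == crc);
  return 0;
}
```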
@@ -8,7 +8,7 @@
 namespace leveldb {
 namespace crc32c {
 
-class CRC { };
+class CRC {};
 
 TEST(CRC, StandardResults) {
   // From rfc3720 section B.4.
@@ -31,29 +31,18 @@ TEST(CRC, StandardResults) {
   ASSERT_EQ(0x113fdb5c, Value(buf, sizeof(buf)));
 
   unsigned char data[48] = {
-    0x01, 0xc0, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
-    0x14, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x04, 0x00,
-    0x00, 0x00, 0x00, 0x14,
-    0x00, 0x00, 0x00, 0x18,
-    0x28, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
-    0x02, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
+      0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+      0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
+      0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
+      0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
   };
   ASSERT_EQ(0xd9963a56, Value(reinterpret_cast<char*>(data), sizeof(data)));
 }
 
-TEST(CRC, Values) {
-  ASSERT_NE(Value("a", 1), Value("foo", 3));
-}
+TEST(CRC, Values) { ASSERT_NE(Value("a", 1), Value("foo", 3)); }
 
 TEST(CRC, Extend) {
-  ASSERT_EQ(Value("hello world", 11),
-            Extend(Value("hello ", 6), "world", 5));
+  ASSERT_EQ(Value("hello world", 11), Extend(Value("hello ", 6), "world", 5));
 }
 
 TEST(CRC, Mask) {
@@ -67,6 +56,4 @@ TEST(CRC, Mask) {
 }  // namespace crc32c
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
24
util/env.cc
@@ -6,27 +6,21 @@
 
 namespace leveldb {
 
-Env::~Env() {
-}
+Env::~Env() {}
 
 Status Env::NewAppendableFile(const std::string& fname, WritableFile** result) {
   return Status::NotSupported("NewAppendableFile", fname);
 }
 
-SequentialFile::~SequentialFile() {
-}
+SequentialFile::~SequentialFile() {}
 
-RandomAccessFile::~RandomAccessFile() {
-}
+RandomAccessFile::~RandomAccessFile() {}
 
-WritableFile::~WritableFile() {
-}
+WritableFile::~WritableFile() {}
 
-Logger::~Logger() {
-}
+Logger::~Logger() {}
 
-FileLock::~FileLock() {
-}
+FileLock::~FileLock() {}
 
 void Log(Logger* info_log, const char* format, ...) {
   if (info_log != nullptr) {
@@ -38,8 +32,7 @@ void Log(Logger* info_log, const char* format, ...) {
 }
 
 static Status DoWriteStringToFile(Env* env, const Slice& data,
-                                  const std::string& fname,
-                                  bool should_sync) {
+                                  const std::string& fname, bool should_sync) {
   WritableFile* file;
   Status s = env->NewWritableFile(fname, &file);
   if (!s.ok()) {
@@ -94,7 +87,6 @@ Status ReadFileToString(Env* env, const std::string& fname, std::string* data) {
   return s;
 }
 
-EnvWrapper::~EnvWrapper() {
-}
+EnvWrapper::~EnvWrapper() {}
 
 }  // namespace leveldb
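The util/env.cc changes are again cosmetic (empty destructor bodies pulled onto one line). For context, the surrounding helpers are exercised through the public `leveldb/env.h` API roughly as sketched below, under the API as it existed at this revision; the temp-file path is made up for illustration:

```cpp
#include <cassert>
#include <string>

#include "leveldb/env.h"
#include "leveldb/slice.h"

int main() {
  leveldb::Env* env = leveldb::Env::Default();
  const std::string fname = "/tmp/leveldb_env_demo.txt";  // hypothetical path

  // WriteStringToFile() funnels into the DoWriteStringToFile() shown above.
  leveldb::Status s =
      leveldb::WriteStringToFile(env, leveldb::Slice("hello"), fname);
  assert(s.ok());

  std::string contents;
  s = leveldb::ReadFileToString(env, fname, &contents);
  assert(s.ok() && contents == "hello");

  env->DeleteFile(fname);  // clean up (the method's name at this revision)
  return 0;
}
```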
Some files were not shown because too many files have changed in this diff.