Inline defaults in options.
This CL moves default values for leveldb::{Options,ReadOptions,WriteOptions} from constructors to member declarations, and removes the now-redundant comments stating those defaults.

Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=239271242
This commit is contained in:
parent
9ce30510d4
commit
201f77d137
@ -469,7 +469,6 @@ class FaultInjectionTest {
|
||||
|
||||
void DeleteAllData() {
|
||||
Iterator* iter = db_->NewIterator(ReadOptions());
|
||||
WriteOptions options;
|
||||
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
|
||||
ASSERT_OK(db_->Delete(WriteOptions(), iter->key()));
|
||||
}
|
||||
|
@ -42,20 +42,17 @@ struct LEVELDB_EXPORT Options {
|
||||
const Comparator* comparator;
|
||||
|
||||
// If true, the database will be created if it is missing.
|
||||
// Default: false
|
||||
bool create_if_missing;
|
||||
bool create_if_missing = false;
|
||||
|
||||
// If true, an error is raised if the database already exists.
|
||||
// Default: false
|
||||
bool error_if_exists;
|
||||
bool error_if_exists = false;
|
||||
|
||||
// If true, the implementation will do aggressive checking of the
|
||||
// data it is processing and will stop early if it detects any
|
||||
// errors. This may have unforeseen ramifications: for example, a
|
||||
// corruption of one DB entry may cause a large number of entries to
|
||||
// become unreadable or for the entire DB to become unopenable.
|
||||
// Default: false
|
||||
bool paranoid_checks;
|
||||
bool paranoid_checks = false;
|
||||
|
||||
// Use the specified object to interact with the environment,
|
||||
// e.g. to read/write files, schedule background work, etc.
|
||||
@ -65,8 +62,7 @@ struct LEVELDB_EXPORT Options {
|
||||
// Any internal progress/error information generated by the db will
|
||||
// be written to info_log if it is non-null, or to a file stored
|
||||
// in the same directory as the DB contents if info_log is null.
|
||||
// Default: nullptr
|
||||
Logger* info_log;
|
||||
Logger* info_log = nullptr;
|
||||
|
||||
// -------------------
|
||||
// Parameters that affect performance
|
||||
@ -79,39 +75,30 @@ struct LEVELDB_EXPORT Options {
|
||||
// so you may wish to adjust this parameter to control memory usage.
|
||||
// Also, a larger write buffer will result in a longer recovery time
|
||||
// the next time the database is opened.
|
||||
//
|
||||
// Default: 4MB
|
||||
size_t write_buffer_size;
|
||||
size_t write_buffer_size = 4 * 1024 * 1024;
|
||||
|
||||
// Number of open files that can be used by the DB. You may need to
|
||||
// increase this if your database has a large working set (budget
|
||||
// one open file per 2MB of working set).
|
||||
//
|
||||
// Default: 1000
|
||||
int max_open_files;
|
||||
int max_open_files = 1000;
|
||||
|
||||
// Control over blocks (user data is stored in a set of blocks, and
|
||||
// a block is the unit of reading from disk).
|
||||
|
||||
// If non-null, use the specified cache for blocks.
|
||||
// If null, leveldb will automatically create and use an 8MB internal cache.
|
||||
// Default: nullptr
|
||||
Cache* block_cache;
|
||||
Cache* block_cache = nullptr;
|
||||
|
||||
// Approximate size of user data packed per block. Note that the
|
||||
// block size specified here corresponds to uncompressed data. The
|
||||
// actual size of the unit read from disk may be smaller if
|
||||
// compression is enabled. This parameter can be changed dynamically.
|
||||
//
|
||||
// Default: 4K
|
||||
size_t block_size;
|
||||
size_t block_size = 4 * 1024;
|
||||
|
||||
// Number of keys between restart points for delta encoding of keys.
|
||||
// This parameter can be changed dynamically. Most clients should
|
||||
// leave this parameter alone.
|
||||
//
|
||||
// Default: 16
|
||||
int block_restart_interval;
|
||||
int block_restart_interval = 16;
|
||||
|
||||
// Leveldb will write up to this amount of bytes to a file before
|
||||
// switching to a new one.
|
||||
@ -121,9 +108,7 @@ struct LEVELDB_EXPORT Options {
|
||||
// compactions and hence longer latency/performance hiccups.
|
||||
// Another reason to increase this parameter might be when you are
|
||||
// initially populating a large database.
|
||||
//
|
||||
// Default: 2MB
|
||||
size_t max_file_size;
|
||||
size_t max_file_size = 2 * 1024 * 1024;
|
||||
|
||||
// Compress blocks using the specified compression algorithm. This
|
||||
// parameter can be changed dynamically.
|
||||
@ -139,20 +124,18 @@ struct LEVELDB_EXPORT Options {
|
||||
// worth switching to kNoCompression. Even if the input data is
|
||||
// incompressible, the kSnappyCompression implementation will
|
||||
// efficiently detect that and will switch to uncompressed mode.
|
||||
CompressionType compression;
|
||||
CompressionType compression = kSnappyCompression;
|
||||
|
||||
// EXPERIMENTAL: If true, append to existing MANIFEST and log files
|
||||
// when a database is opened. This can significantly speed up open.
|
||||
//
|
||||
// Default: currently false, but may become true later.
|
||||
bool reuse_logs;
|
||||
bool reuse_logs = false;
|
||||
|
||||
// If non-null, use the specified filter policy to reduce disk reads.
|
||||
// Many applications will benefit from passing the result of
|
||||
// NewBloomFilterPolicy() here.
|
||||
//
|
||||
// Default: nullptr
|
||||
const FilterPolicy* filter_policy;
|
||||
const FilterPolicy* filter_policy = nullptr;
|
||||
|
||||
// Create an Options object with default values for all fields.
|
||||
Options();
|
||||
@ -162,26 +145,19 @@ struct LEVELDB_EXPORT Options {
|
||||
struct LEVELDB_EXPORT ReadOptions {
|
||||
// If true, all data read from underlying storage will be
|
||||
// verified against corresponding checksums.
|
||||
// Default: false
|
||||
bool verify_checksums;
|
||||
bool verify_checksums = false;
|
||||
|
||||
// Should the data read for this iteration be cached in memory?
|
||||
// Callers may wish to set this field to false for bulk scans.
|
||||
// Default: true
|
||||
bool fill_cache;
|
||||
bool fill_cache = true;
|
||||
|
||||
// If "snapshot" is non-null, read as of the supplied snapshot
|
||||
// (which must belong to the DB that is being read and which must
|
||||
// not have been released). If "snapshot" is null, use an implicit
|
||||
// snapshot of the state at the beginning of this read operation.
|
||||
// Default: nullptr
|
||||
const Snapshot* snapshot;
|
||||
const Snapshot* snapshot = nullptr;
|
||||
|
||||
ReadOptions()
|
||||
: verify_checksums(false),
|
||||
fill_cache(true),
|
||||
snapshot(nullptr) {
|
||||
}
|
||||
ReadOptions() = default;
|
||||
};
|
||||
|
||||
// Options that control write operations
|
||||
@ -200,13 +176,9 @@ struct LEVELDB_EXPORT WriteOptions {
|
||||
// crash semantics as the "write()" system call. A DB write
|
||||
// with sync==true has similar crash semantics to a "write()"
|
||||
// system call followed by "fsync()".
|
||||
//
|
||||
// Default: false
|
||||
bool sync;
|
||||
bool sync = false;
|
||||
|
||||
WriteOptions()
|
||||
: sync(false) {
|
||||
}
|
||||
WriteOptions() = default;
|
||||
};
|
||||
|
||||
} // namespace leveldb
|
||||
|
@ -11,20 +11,7 @@ namespace leveldb {
|
||||
|
||||
// Only the two environment-dependent members need constructor
// initialization; every other field has an in-class default
// (see include/leveldb/options.h).
Options::Options() : comparator(BytewiseComparator()), env(Env::Default()) {}
|
||||
|
||||
} // namespace leveldb
|
||||
|
Loading…
x
Reference in New Issue
Block a user