2011-03-19 06:37:00 +08:00
|
|
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
|
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
|
|
|
|
|
|
#include "db/log_reader.h"
|
|
|
|
#include "db/log_writer.h"
|
2011-03-31 02:35:40 +08:00
|
|
|
#include "leveldb/env.h"
|
2011-03-19 06:37:00 +08:00
|
|
|
#include "util/coding.h"
|
|
|
|
#include "util/crc32c.h"
|
|
|
|
#include "util/random.h"
|
|
|
|
#include "util/testharness.h"
|
|
|
|
|
|
|
|
namespace leveldb {
|
|
|
|
namespace log {
|
|
|
|
|
|
|
|
// Construct a string of the specified length made out of the supplied
|
|
|
|
// partial string.
|
|
|
|
// Construct a string of the specified length made out of the supplied
// partial string (repeated, then truncated to exactly n bytes).
static std::string BigString(const std::string& partial_string, size_t n) {
  std::string result;
  if (partial_string.empty()) {
    // Guard: an empty pattern would never grow result, so the append loop
    // below would spin forever.  Return n NUL bytes instead.
    result.resize(n);
    return result;
  }
  // Reserve up front: at most one extra copy of the pattern is appended
  // past n, so this avoids repeated reallocation during the loop.
  result.reserve(n + partial_string.size());
  while (result.size() < n) {
    result.append(partial_string);
  }
  result.resize(n);
  return result;
}
|
|
|
|
|
|
|
|
// Construct a string from a number
|
|
|
|
// Construct a string from a number: decimal digits followed by a dot,
// e.g. NumberString(7) == "7.".
static std::string NumberString(int n) {
  return std::to_string(n) + ".";
}
|
|
|
|
|
|
|
|
// Return a skewed potentially long string
|
|
|
|
static std::string RandomSkewedString(int i, Random* rnd) {
|
|
|
|
return BigString(NumberString(i), rnd->Skewed(17));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Test harness for log reading and writing.  Records are accumulated in an
// in-memory WritableFile (dest_); the first Read() snapshots dest_'s bytes
// into an in-memory SequentialFile (source_) and flips reading_, after which
// further Write() calls are rejected.
class LogTest {
 public:
  LogTest()
      : reading_(false),
        writer_(new Writer(&dest_)),
        reader_(new Reader(&source_, &report_, true /*checksum*/,
                           0 /*initial_offset*/)) {}

  ~LogTest() {
    delete writer_;
    delete reader_;
  }

  // Re-create the writer positioned at the current end of the accumulated
  // contents, mirroring reopening an existing log file for append.
  void ReopenForAppend() {
    delete writer_;
    writer_ = new Writer(&dest_, dest_.contents_.size());
  }

  // Append msg as one logical record.  Must not be called after the first
  // Read() (enforced via reading_).
  void Write(const std::string& msg) {
    ASSERT_TRUE(!reading_) << "Write() after starting to read";
    writer_->AddRecord(Slice(msg));
  }

  // Total bytes emitted so far, including record headers and any padding.
  size_t WrittenBytes() const { return dest_.contents_.size(); }

  // Read the next logical record, or "EOF" when none remains.  The first
  // call snapshots dest_'s contents into source_.
  std::string Read() {
    if (!reading_) {
      reading_ = true;
      source_.contents_ = Slice(dest_.contents_);
    }
    std::string scratch;
    Slice record;
    if (reader_->ReadRecord(&record, &scratch)) {
      return record.ToString();
    } else {
      return "EOF";
    }
  }

  // Corrupt one byte of the written log by adding delta to it.
  void IncrementByte(int offset, int delta) {
    dest_.contents_[offset] += delta;
  }

  // Overwrite one byte of the written log.
  void SetByte(int offset, char new_byte) {
    dest_.contents_[offset] = new_byte;
  }

  // Truncate the written log by the given number of trailing bytes.
  void ShrinkSize(int bytes) {
    dest_.contents_.resize(dest_.contents_.size() - bytes);
  }

  // Recompute and store the CRC for the record header at header_offset so
  // that a deliberately edited record still passes the checksum check.
  void FixChecksum(int header_offset, int len) {
    // Compute crc of type/len/data
    uint32_t crc = crc32c::Value(&dest_.contents_[header_offset + 6], 1 + len);
    crc = crc32c::Mask(crc);
    EncodeFixed32(&dest_.contents_[header_offset], crc);
  }

  // Make the next source read fail with Status::Corruption (one-shot).
  void ForceError() { source_.force_error_ = true; }

  // Total bytes the reader has reported dropped due to corruption.
  size_t DroppedBytes() const { return report_.dropped_bytes_; }

  // Concatenation of all corruption status messages reported so far.
  std::string ReportMessage() const { return report_.message_; }

  // Returns OK iff recorded error message contains "msg"
  std::string MatchError(const std::string& msg) const {
    if (report_.message_.find(msg) == std::string::npos) {
      return report_.message_;
    } else {
      return "OK";
    }
  }

  // Write the fixed record set described by initial_offset_record_sizes_;
  // record i is filled with the character 'a' + i so tests can identify it.
  void WriteInitialOffsetLog() {
    for (int i = 0; i < num_initial_offset_records_; i++) {
      std::string record(initial_offset_record_sizes_[i],
                         static_cast<char>('a' + i));
      Write(record);
    }
  }

  // Replace the reader with one that starts at initial_offset.
  void StartReadingAt(uint64_t initial_offset) {
    delete reader_;
    reader_ = new Reader(&source_, &report_, true /*checksum*/, initial_offset);
  }

  // Verify that a reader positioned offset_past_end bytes beyond the end of
  // the log returns no records at all.
  void CheckOffsetPastEndReturnsNoRecords(uint64_t offset_past_end) {
    WriteInitialOffsetLog();
    reading_ = true;
    source_.contents_ = Slice(dest_.contents_);
    Reader* offset_reader = new Reader(&source_, &report_, true /*checksum*/,
                                       WrittenBytes() + offset_past_end);
    Slice record;
    std::string scratch;
    ASSERT_TRUE(!offset_reader->ReadRecord(&record, &scratch));
    delete offset_reader;
  }

  // Verify that a reader starting at initial_offset resumes at record
  // expected_record_offset and then reads every remaining record, each with
  // the correct size, first byte, and LastRecordOffset().
  void CheckInitialOffsetRecord(uint64_t initial_offset,
                                int expected_record_offset) {
    WriteInitialOffsetLog();
    reading_ = true;
    source_.contents_ = Slice(dest_.contents_);
    Reader* offset_reader =
        new Reader(&source_, &report_, true /*checksum*/, initial_offset);

    // Read all records from expected_record_offset through the last one.
    ASSERT_LT(expected_record_offset, num_initial_offset_records_);
    for (; expected_record_offset < num_initial_offset_records_;
         ++expected_record_offset) {
      Slice record;
      std::string scratch;
      ASSERT_TRUE(offset_reader->ReadRecord(&record, &scratch));
      ASSERT_EQ(initial_offset_record_sizes_[expected_record_offset],
                record.size());
      ASSERT_EQ(initial_offset_last_record_offsets_[expected_record_offset],
                offset_reader->LastRecordOffset());
      ASSERT_EQ((char)('a' + expected_record_offset), record.data()[0]);
    }
    delete offset_reader;
  }

 private:
  // In-memory WritableFile that appends everything to contents_.
  class StringDest : public WritableFile {
   public:
    Status Close() override { return Status::OK(); }
    Status Flush() override { return Status::OK(); }
    Status Sync() override { return Status::OK(); }
    Status Append(const Slice& slice) override {
      contents_.append(slice.data(), slice.size());
      return Status::OK();
    }

    std::string contents_;
  };

  // In-memory SequentialFile over contents_.  Can be forced to return one
  // read error via force_error_; refuses further reads after eof/error.
  class StringSource : public SequentialFile {
   public:
    StringSource() : force_error_(false), returned_partial_(false) {}

    Status Read(size_t n, Slice* result, char* scratch) override {
      ASSERT_TRUE(!returned_partial_) << "must not Read() after eof/error";

      if (force_error_) {
        force_error_ = false;
        returned_partial_ = true;
        return Status::Corruption("read error");
      }

      if (contents_.size() < n) {
        n = contents_.size();
        returned_partial_ = true;
      }
      *result = Slice(contents_.data(), n);
      contents_.remove_prefix(n);
      return Status::OK();
    }

    Status Skip(uint64_t n) override {
      if (n > contents_.size()) {
        contents_.clear();
        return Status::NotFound("in-memory file skipped past end");
      }

      contents_.remove_prefix(n);

      return Status::OK();
    }

    Slice contents_;
    bool force_error_;       // When set, the next Read() reports corruption.
    bool returned_partial_;  // Set once eof or an error has been returned.
  };

  // Collects corruption reports from the Reader: total dropped bytes and
  // the concatenation of every status message.
  class ReportCollector : public Reader::Reporter {
   public:
    ReportCollector() : dropped_bytes_(0) {}
    void Corruption(size_t bytes, const Status& status) override {
      dropped_bytes_ += bytes;
      message_.append(status.ToString());
    }

    size_t dropped_bytes_;
    std::string message_;
  };

  // Record metadata for testing initial offset functionality
  static size_t initial_offset_record_sizes_[];
  static uint64_t initial_offset_last_record_offsets_[];
  static int num_initial_offset_records_;

  StringDest dest_;
  StringSource source_;
  ReportCollector report_;
  bool reading_;    // True once Read() has been called; Write() then asserts.
  Writer* writer_;  // Owned; deleted in ~LogTest()/ReopenForAppend().
  Reader* reader_;  // Owned; deleted in ~LogTest()/StartReadingAt().
};
|
|
|
|
|
2019-05-03 02:01:00 +08:00
|
|
|
// Sizes (in bytes) of the records written by WriteInitialOffsetLog();
// record i is filled with 'a' + i.
size_t LogTest::initial_offset_record_sizes_[] = {
    10000,  // Two sizable records in first block
    10000,
    2 * log::kBlockSize - 1000,  // Span three blocks
    1,
    13716,                          // Consume all but two bytes of block 3.
    log::kBlockSize - kHeaderSize,  // Consume the entirety of block 4.
};

// Expected byte offset at which each of the records above begins; used to
// validate Reader::LastRecordOffset().  The third record spans three blocks,
// hence the extra 3 * kHeaderSize of fragment headers before record 4.
uint64_t LogTest::initial_offset_last_record_offsets_[] = {
    0,
    kHeaderSize + 10000,
    2 * (kHeaderSize + 10000),
    2 * (kHeaderSize + 10000) + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize,
    2 * (kHeaderSize + 10000) + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize +
        kHeaderSize + 1,
    3 * log::kBlockSize,  // Starts exactly at the beginning of block 3.
};

// LogTest::initial_offset_last_record_offsets_ must be defined before this.
int LogTest::num_initial_offset_records_ =
    sizeof(LogTest::initial_offset_last_record_offsets_) / sizeof(uint64_t);
|
2011-05-21 10:17:43 +08:00
|
|
|
|
2019-05-03 02:01:00 +08:00
|
|
|
TEST(LogTest, Empty) {
  // A log with no records reads straight to EOF.
  ASSERT_EQ("EOF", Read());
}
|
2011-03-19 06:37:00 +08:00
|
|
|
|
|
|
|
TEST(LogTest, ReadWrite) {
  // Round-trip a handful of records, including an empty one, and confirm
  // they come back in order.
  const std::string records[] = {"foo", "bar", "", "xxxx"};
  for (const std::string& rec : records) {
    Write(rec);
  }
  for (const std::string& rec : records) {
    ASSERT_EQ(rec, Read());
  }
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ("EOF", Read());  // Make sure reads at eof work
}
|
|
|
|
|
|
|
|
TEST(LogTest, ManyBlocks) {
  // Enough small records to force the log across many blocks.
  const int kNumRecords = 100000;
  for (int i = 0; i < kNumRecords; i++) {
    Write(NumberString(i));
  }
  for (int i = 0; i < kNumRecords; i++) {
    ASSERT_EQ(NumberString(i), Read());
  }
  ASSERT_EQ("EOF", Read());
}
|
|
|
|
|
|
|
|
TEST(LogTest, Fragmentation) {
  // A small, a medium, and a block-spanning record must all survive the
  // fragment/reassemble round trip.
  const std::string small = "small";
  const std::string medium = BigString("medium", 50000);
  const std::string large = BigString("large", 100000);
  Write(small);
  Write(medium);
  Write(large);
  ASSERT_EQ(small, Read());
  ASSERT_EQ(medium, Read());
  ASSERT_EQ(large, Read());
  ASSERT_EQ("EOF", Read());
}
|
|
|
|
|
|
|
|
TEST(LogTest, MarginalTrailer) {
  // Make a trailer that is exactly the same length as an empty record.
  const int n = kBlockSize - 2 * kHeaderSize;
  Write(BigString("foo", n));
  // After "foo", exactly kHeaderSize bytes remain in the first block.
  ASSERT_EQ(kBlockSize - kHeaderSize, WrittenBytes());
  Write("");
  Write("bar");
  ASSERT_EQ(BigString("foo", n), Read());
  ASSERT_EQ("", Read());
  ASSERT_EQ("bar", Read());
  ASSERT_EQ("EOF", Read());
}
|
|
|
|
|
2011-05-21 10:17:43 +08:00
|
|
|
TEST(LogTest, MarginalTrailer2) {
  // Make a trailer that is exactly the same length as an empty record.
  const int n = kBlockSize - 2 * kHeaderSize;
  Write(BigString("foo", n));
  ASSERT_EQ(kBlockSize - kHeaderSize, WrittenBytes());
  Write("bar");
  ASSERT_EQ(BigString("foo", n), Read());
  ASSERT_EQ("bar", Read());
  ASSERT_EQ("EOF", Read());
  // No corruption should be reported for the marginal trailer.
  ASSERT_EQ(0, DroppedBytes());
  ASSERT_EQ("", ReportMessage());
}
|
|
|
|
|
2011-03-19 06:37:00 +08:00
|
|
|
TEST(LogTest, ShortTrailer) {
  // Leave fewer than kHeaderSize bytes free at the end of the first block;
  // too few for another record header, so they must be skipped as padding.
  const int n = kBlockSize - 2 * kHeaderSize + 4;
  Write(BigString("foo", n));
  ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes());
  Write("");
  Write("bar");
  ASSERT_EQ(BigString("foo", n), Read());
  ASSERT_EQ("", Read());
  ASSERT_EQ("bar", Read());
  ASSERT_EQ("EOF", Read());
}
|
|
|
|
|
|
|
|
TEST(LogTest, AlignedEof) {
  // The log ends exactly where block-trailer padding would begin; the
  // reader must report clean EOF, not an error.
  const int n = kBlockSize - 2 * kHeaderSize + 4;
  Write(BigString("foo", n));
  ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes());
  ASSERT_EQ(BigString("foo", n), Read());
  ASSERT_EQ("EOF", Read());
}
|
|
|
|
|
2014-12-12 00:13:18 +08:00
|
|
|
TEST(LogTest, OpenForAppend) {
  // Records written before and after reopening the log in append mode must
  // both be readable.
  Write("hello");
  ReopenForAppend();
  Write("world");
  ASSERT_EQ("hello", Read());
  ASSERT_EQ("world", Read());
  ASSERT_EQ("EOF", Read());
}
|
|
|
|
|
2011-03-19 06:37:00 +08:00
|
|
|
TEST(LogTest, RandomRead) {
  // Write records with skewed-random sizes, then regenerate the identical
  // sequence with an identically-seeded generator to verify the reads.
  const int kNumRecords = 500;
  Random writer_rng(301);
  for (int i = 0; i < kNumRecords; i++) {
    Write(RandomSkewedString(i, &writer_rng));
  }
  Random reader_rng(301);
  for (int i = 0; i < kNumRecords; i++) {
    ASSERT_EQ(RandomSkewedString(i, &reader_rng), Read());
  }
  ASSERT_EQ("EOF", Read());
}
|
|
|
|
|
|
|
|
// Tests of all the error paths in log_reader.cc follow:
|
|
|
|
|
|
|
|
TEST(LogTest, ReadError) {
  Write("foo");
  ForceError();
  ASSERT_EQ("EOF", Read());
  // The entire first block is reported dropped when the read fails.
  ASSERT_EQ(kBlockSize, DroppedBytes());
  ASSERT_EQ("OK", MatchError("read error"));
}
|
|
|
|
|
|
|
|
TEST(LogTest, BadRecordType) {
  Write("foo");
  // Type is stored in header[6]
  IncrementByte(6, 100);
  FixChecksum(0, 3);  // Re-sign the CRC so only the type byte is bad.
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ(3, DroppedBytes());  // The 3-byte payload is dropped.
  ASSERT_EQ("OK", MatchError("unknown record type"));
}
|
|
|
|
|
2014-02-11 03:36:06 +08:00
|
|
|
TEST(LogTest, TruncatedTrailingRecordIsIgnored) {
  Write("foo");
  ShrinkSize(4);  // Drop all payload as well as a header byte
  ASSERT_EQ("EOF", Read());
  // Truncated last record is ignored, not treated as an error.
  ASSERT_EQ(0, DroppedBytes());
  ASSERT_EQ("", ReportMessage());
}
|
|
|
|
|
|
|
|
TEST(LogTest, BadLength) {
  // Fill block 1 exactly, corrupt its record's length field, and verify the
  // reader drops that whole block but resynchronizes on block 2.
  const int kPayloadSize = kBlockSize - kHeaderSize;
  Write(BigString("bar", kPayloadSize));
  Write("foo");
  // Least significant size byte is stored in header[4].
  IncrementByte(4, 1);
  ASSERT_EQ("foo", Read());
  ASSERT_EQ(kBlockSize, DroppedBytes());
  ASSERT_EQ("OK", MatchError("bad record length"));
}
|
|
|
|
|
|
|
|
TEST(LogTest, BadLengthAtEndIsIgnored) {
  Write("foo");
  ShrinkSize(1);  // Header now claims more payload than the file holds.
  ASSERT_EQ("EOF", Read());
  // A short record at the tail looks like an in-progress write: ignored.
  ASSERT_EQ(0, DroppedBytes());
  ASSERT_EQ("", ReportMessage());
}
|
|
|
|
|
|
|
|
TEST(LogTest, ChecksumMismatch) {
  Write("foo");
  IncrementByte(0, 10);  // Corrupt the stored CRC (header bytes 0-3).
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ(10, DroppedBytes());  // The whole record (header + payload).
  ASSERT_EQ("OK", MatchError("checksum mismatch"));
}
|
|
|
|
|
|
|
|
TEST(LogTest, UnexpectedMiddleType) {
  Write("foo");
  SetByte(6, kMiddleType);  // A MIDDLE fragment with no preceding FIRST.
  FixChecksum(0, 3);
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ(3, DroppedBytes());
  ASSERT_EQ("OK", MatchError("missing start"));
}
|
|
|
|
|
|
|
|
TEST(LogTest, UnexpectedLastType) {
  Write("foo");
  SetByte(6, kLastType);  // A LAST fragment with no preceding FIRST.
  FixChecksum(0, 3);
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ(3, DroppedBytes());
  ASSERT_EQ("OK", MatchError("missing start"));
}
|
|
|
|
|
|
|
|
TEST(LogTest, UnexpectedFullType) {
  Write("foo");
  Write("bar");
  SetByte(6, kFirstType);  // Turn record 1 into a dangling FIRST fragment.
  FixChecksum(0, 3);
  ASSERT_EQ("bar", Read());  // "bar" supersedes the unfinished fragment.
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ(3, DroppedBytes());
  ASSERT_EQ("OK", MatchError("partial record without end"));
}
|
|
|
|
|
|
|
|
TEST(LogTest, UnexpectedFirstType) {
  Write("foo");
  Write(BigString("bar", 100000));
  SetByte(6, kFirstType);  // "foo" becomes a FIRST fragment with no end.
  FixChecksum(0, 3);
  ASSERT_EQ(BigString("bar", 100000), Read());
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ(3, DroppedBytes());
  ASSERT_EQ("OK", MatchError("partial record without end"));
}
|
|
|
|
|
2014-02-11 03:36:06 +08:00
|
|
|
TEST(LogTest, MissingLastIsIgnored) {
  Write(BigString("bar", kBlockSize));
  // Remove the LAST block, including header.
  ShrinkSize(14);
  ASSERT_EQ("EOF", Read());
  // A record whose final fragment is missing looks like an in-progress
  // write; no error should be reported.
  ASSERT_EQ("", ReportMessage());
  ASSERT_EQ(0, DroppedBytes());
}
|
|
|
|
|
|
|
|
TEST(LogTest, PartialLastIsIgnored) {
  Write(BigString("bar", kBlockSize));
  // Cause a bad record length in the LAST block.
  ShrinkSize(1);
  ASSERT_EQ("EOF", Read());
  // Tail truncation is treated as an unfinished write, not an error.
  ASSERT_EQ("", ReportMessage());
  ASSERT_EQ(0, DroppedBytes());
}
|
|
|
|
|
2015-08-12 06:36:45 +08:00
|
|
|
TEST(LogTest, SkipIntoMultiRecord) {
  // Consider a fragmented record:
  //    first(R1), middle(R1), last(R1), first(R2)
  // If initial_offset points to a record after first(R1) but before first(R2)
  // incomplete fragment errors are not actual errors, and must be suppressed
  // until a new first or full record is encountered.
  Write(BigString("foo", 3 * kBlockSize));
  Write("correct");
  StartReadingAt(kBlockSize);

  ASSERT_EQ("correct", Read());
  ASSERT_EQ("", ReportMessage());
  ASSERT_EQ(0, DroppedBytes());
  ASSERT_EQ("EOF", Read());
}
|
|
|
|
|
2011-03-19 06:37:00 +08:00
|
|
|
TEST(LogTest, ErrorJoinsRecords) {
  // Consider two fragmented records:
  //    first(R1) last(R1) first(R2) last(R2)
  // where the middle two fragments disappear.  We do not want
  // first(R1),last(R2) to get joined and returned as a valid record.

  // Write records that span two blocks
  Write(BigString("foo", kBlockSize));
  Write(BigString("bar", kBlockSize));
  Write("correct");

  // Wipe the middle block
  for (int offset = kBlockSize; offset < 2 * kBlockSize; offset++) {
    SetByte(offset, 'x');
  }

  ASSERT_EQ("correct", Read());
  ASSERT_EQ("EOF", Read());
  // Both damaged records should be reported dropped; allow a little slack
  // above 2 blocks for their headers.
  const size_t dropped = DroppedBytes();
  ASSERT_LE(dropped, 2 * kBlockSize + 100);
  ASSERT_GE(dropped, 2 * kBlockSize);
}
|
|
|
|
|
2019-05-03 02:01:00 +08:00
|
|
|
// Each test below starts a reader at a given byte offset into the log
// written by WriteInitialOffsetLog() and checks (via
// CheckInitialOffsetRecord) which record reading resumes at.
TEST(LogTest, ReadStart) { CheckInitialOffsetRecord(0, 0); }

TEST(LogTest, ReadSecondOneOff) { CheckInitialOffsetRecord(1, 1); }

TEST(LogTest, ReadSecondTenThousand) { CheckInitialOffsetRecord(10000, 1); }

TEST(LogTest, ReadSecondStart) { CheckInitialOffsetRecord(10007, 1); }

TEST(LogTest, ReadThirdOneOff) { CheckInitialOffsetRecord(10008, 2); }

TEST(LogTest, ReadThirdStart) { CheckInitialOffsetRecord(20014, 2); }

TEST(LogTest, ReadFourthOneOff) { CheckInitialOffsetRecord(20015, 3); }
|
2011-05-21 10:17:43 +08:00
|
|
|
|
|
|
|
TEST(LogTest, ReadFourthFirstBlockTrailer) {
  // Start inside the trailer of block 0; reading resumes at record 3.
  CheckInitialOffsetRecord(log::kBlockSize - 4, 3);
}

TEST(LogTest, ReadFourthMiddleBlock) {
  CheckInitialOffsetRecord(log::kBlockSize + 1, 3);
}

TEST(LogTest, ReadFourthLastBlock) {
  CheckInitialOffsetRecord(2 * log::kBlockSize + 1, 3);
}

TEST(LogTest, ReadFourthStart) {
  // NOTE(review): the first term looks like it should mirror
  // initial_offset_last_record_offsets_[3], which uses (kHeaderSize + 10000);
  // "1000" appears to be a typo.  The resulting offset still falls inside
  // the large third record, so reading still resumes at record 3 and the
  // test passes — confirm intent before changing.
  CheckInitialOffsetRecord(
      2 * (kHeaderSize + 1000) + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize,
      3);
}
|
|
|
|
|
This CL fixes a bug encountered when reading records from leveldb files that have been split, as in a [] input task split.
Detailed description:
Suppose an input split is generated between two leveldb record blocks and the preceding block ends with null padding.
A reader that previously read at least 1 record within the first block (before encountering the padding) upon trying to read the next record, will successfully and correctly read the next logical record from the subsequent block, but will return a last record offset pointing to the padding in the first block.
When this happened in a [], it resulted in duplicate records being handled at what appeared to be different offsets that were separated by only a few bytes.
This behavior is only observed when at least 1 record was read from the first block before encountering the padding. If the initial offset for a reader was within the padding, the correct record offset would be reported, namely the offset within the second block.
The tests failed to catch this scenario/bug, because each read test only read a single record with an initial offset. This CL adds an explicit test case for this scenario, and modifies the test structure to read all remaining records in the test case after an initial offset is specified. Thus an initial offset that jumps to record #3, with 5 total records in the test file, will result in reading 2 records, and validating the offset of each of them in order to pass successfully.
-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=115338487
2016-02-23 23:36:39 +08:00
|
|
|
TEST(LogTest, ReadInitialOffsetIntoBlockPadding) {
  // The offset lands in the padding at the end of block 2; reading must
  // resume at the record that starts exactly at 3 * kBlockSize (record 5).
  CheckInitialOffsetRecord(3 * log::kBlockSize - 3, 5);
}
|
|
|
|
|
2019-05-03 02:01:00 +08:00
|
|
|
// Starting at or past the end of the log must yield no records at all.
TEST(LogTest, ReadEnd) { CheckOffsetPastEndReturnsNoRecords(0); }

TEST(LogTest, ReadPastEnd) { CheckOffsetPastEndReturnsNoRecords(5); }
|
2011-05-21 10:17:43 +08:00
|
|
|
|
2011-11-01 01:22:06 +08:00
|
|
|
} // namespace log
|
|
|
|
} // namespace leveldb
|
2011-03-19 06:37:00 +08:00
|
|
|
|
2019-05-03 02:01:00 +08:00
|
|
|
int main(int argc, char** argv) {
  // Every LogTest case above is registered via the TEST macro; run them all.
  return leveldb::test::RunAllTests();
}
|