Merge branch 'master' into patch-1

Victor Costan 2022-01-09 23:08:24 -08:00 committed by GitHub
commit 3180f9cb40
112 changed files with 1951 additions and 1859 deletions

.appveyor.yml (deleted file)

@@ -1,35 +0,0 @@
# Build matrix / environment variables are explained on:
# https://www.appveyor.com/docs/appveyor-yml/
# This file can be validated on: https://ci.appveyor.com/tools/validate-yaml
version: "{build}"
environment:
matrix:
# AppVeyor currently has no custom job name feature.
# http://help.appveyor.com/discussions/questions/1623-can-i-provide-a-friendly-name-for-jobs
- JOB: Visual Studio 2017
APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
CMAKE_GENERATOR: Visual Studio 15 2017
platform:
- x86
- x64
configuration:
- RelWithDebInfo
- Debug
build_script:
- git submodule update --init --recursive
- mkdir build
- cd build
- if "%platform%"=="x64" set CMAKE_GENERATOR=%CMAKE_GENERATOR% Win64
- cmake --version
- cmake .. -G "%CMAKE_GENERATOR%"
-DCMAKE_CONFIGURATION_TYPES="%CONFIGURATION%"
- cmake --build . --config "%CONFIGURATION%"
- cd ..
test_script:
- cd build && ctest --verbose --build-config "%CONFIGURATION%" && cd ..

.github/workflows/build.yml (new file)

@@ -0,0 +1,101 @@
# Copyright 2021 The LevelDB Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file. See the AUTHORS file for names of contributors.
name: ci
on: [push, pull_request]
permissions:
contents: read
jobs:
build-and-test:
name: >-
CI
${{ matrix.os }}
${{ matrix.compiler }}
${{ matrix.optimized && 'release' || 'debug' }}
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
compiler: [clang, gcc, msvc]
os: [ubuntu-latest, macos-latest, windows-latest]
optimized: [true, false]
exclude:
# MSVC only works on Windows.
- os: ubuntu-latest
compiler: msvc
- os: macos-latest
compiler: msvc
# Not testing with GCC on macOS.
- os: macos-latest
compiler: gcc
# Only testing with MSVC on Windows.
- os: windows-latest
compiler: clang
- os: windows-latest
compiler: gcc
include:
- compiler: clang
CC: clang
CXX: clang++
- compiler: gcc
CC: gcc
CXX: g++
- compiler: msvc
CC:
CXX:
env:
CMAKE_BUILD_DIR: ${{ github.workspace }}/build
CMAKE_BUILD_TYPE: ${{ matrix.optimized && 'RelWithDebInfo' || 'Debug' }}
CC: ${{ matrix.CC }}
CXX: ${{ matrix.CXX }}
BINARY_SUFFIX: ${{ startsWith(matrix.os, 'windows') && '.exe' || '' }}
BINARY_PATH: >-
${{ format(
startsWith(matrix.os, 'windows') && '{0}\build\{1}\' || '{0}/build/',
github.workspace,
matrix.optimized && 'RelWithDebInfo' || 'Debug') }}
steps:
- uses: actions/checkout@v2
with:
submodules: true
- name: Install dependencies on Linux
if: ${{ runner.os == 'Linux' }}
run: |
sudo apt-get update
sudo apt-get install libgoogle-perftools-dev libkyotocabinet-dev \
libsnappy-dev libsqlite3-dev
- name: Generate build config
run: >-
cmake -S "${{ github.workspace }}" -B "${{ env.CMAKE_BUILD_DIR }}"
-DCMAKE_BUILD_TYPE=${{ env.CMAKE_BUILD_TYPE }}
-DCMAKE_INSTALL_PREFIX=${{ runner.temp }}/install_test/
- name: Build
run: >-
cmake --build "${{ env.CMAKE_BUILD_DIR }}"
--config "${{ env.CMAKE_BUILD_TYPE }}"
- name: Run Tests
working-directory: ${{ github.workspace }}/build
run: ctest -C "${{ env.CMAKE_BUILD_TYPE }}" --verbose
- name: Run LevelDB Benchmarks
run: ${{ env.BINARY_PATH }}db_bench${{ env.BINARY_SUFFIX }}
- name: Run SQLite Benchmarks
if: ${{ runner.os != 'Windows' }}
run: ${{ env.BINARY_PATH }}db_bench_sqlite3${{ env.BINARY_SUFFIX }}
- name: Run Kyoto Cabinet Benchmarks
if: ${{ runner.os == 'Linux' && matrix.compiler == 'clang' }}
run: ${{ env.BINARY_PATH }}db_bench_tree_db${{ env.BINARY_SUFFIX }}
- name: Test CMake installation
run: cmake --build "${{ env.CMAKE_BUILD_DIR }}" --target install

.gitmodules (new file)

@@ -0,0 +1,6 @@
[submodule "third_party/googletest"]
path = third_party/googletest
url = https://github.com/google/googletest.git
[submodule "third_party/benchmark"]
path = third_party/benchmark
url = https://github.com/google/benchmark

.travis.yml (deleted file)

@@ -1,80 +0,0 @@
# Build matrix / environment variables are explained on:
# http://about.travis-ci.org/docs/user/build-configuration/
# This file can be validated on: http://lint.travis-ci.org/
language: cpp
dist: bionic
osx_image: xcode10.3
compiler:
- gcc
- clang
os:
- linux
- osx
env:
- BUILD_TYPE=Debug
- BUILD_TYPE=RelWithDebInfo
addons:
apt:
sources:
- sourceline: 'deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main'
key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key'
- sourceline: 'ppa:ubuntu-toolchain-r/test'
packages:
- clang-9
- cmake
- gcc-9
- g++-9
- libgoogle-perftools-dev
- libkyotocabinet-dev
- libsnappy-dev
- libsqlite3-dev
- ninja-build
homebrew:
packages:
- cmake
- crc32c
- gcc@9
- gperftools
- kyoto-cabinet
- llvm@9
- ninja
- snappy
- sqlite3
update: true
install:
# The following Homebrew packages aren't linked by default, and need to be
# prepended to the path explicitly.
- if [ "$TRAVIS_OS_NAME" = "osx" ]; then
export PATH="$(brew --prefix llvm)/bin:$PATH";
fi
# /usr/bin/gcc points to an older compiler on both Linux and macOS.
- if [ "$CXX" = "g++" ]; then export CXX="g++-9" CC="gcc-9"; fi
# /usr/bin/clang points to an older compiler on both Linux and macOS.
#
# Homebrew's llvm package doesn't ship a versioned clang++ binary, so the values
# below don't work on macOS. Fortunately, the path change above makes the
# default values (clang and clang++) resolve to the correct compiler on macOS.
- if [ "$TRAVIS_OS_NAME" = "linux" ]; then
if [ "$CXX" = "clang++" ]; then export CXX="clang++-9" CC="clang-9"; fi;
fi
- echo ${CC}
- echo ${CXX}
- ${CXX} --version
- cmake --version
before_script:
- mkdir -p build && cd build
- cmake .. -G Ninja -DCMAKE_BUILD_TYPE=$BUILD_TYPE
- cmake --build .
- cd ..
script:
- cd build && ctest --verbose && cd ..
- "if [ -f build/db_bench ] ; then build/db_bench ; fi"
- "if [ -f build/db_bench_sqlite3 ] ; then build/db_bench_sqlite3 ; fi"
- "if [ -f build/db_bench_tree_db ] ; then build/db_bench_tree_db ; fi"

CMakeLists.txt

@@ -4,17 +4,23 @@
cmake_minimum_required(VERSION 3.9)
# Keep the version below in sync with the one in db.h
- project(leveldb VERSION 1.22.0 LANGUAGES C CXX)
+ project(leveldb VERSION 1.23.0 LANGUAGES C CXX)
- # This project can use C11, but will gracefully decay down to C89.
- set(CMAKE_C_STANDARD 11)
- set(CMAKE_C_STANDARD_REQUIRED OFF)
- set(CMAKE_C_EXTENSIONS OFF)
+ # C standard can be overridden when this is used as a sub-project.
+ if(NOT CMAKE_C_STANDARD)
+   # This project can use C11, but will gracefully decay down to C89.
+   set(CMAKE_C_STANDARD 11)
+   set(CMAKE_C_STANDARD_REQUIRED OFF)
+   set(CMAKE_C_EXTENSIONS OFF)
+ endif(NOT CMAKE_C_STANDARD)
- # This project requires C++11.
- set(CMAKE_CXX_STANDARD 11)
- set(CMAKE_CXX_STANDARD_REQUIRED ON)
- set(CMAKE_CXX_EXTENSIONS OFF)
+ # C++ standard can be overridden when this is used as a sub-project.
+ if(NOT CMAKE_CXX_STANDARD)
+   # This project requires C++11.
+   set(CMAKE_CXX_STANDARD 11)
+   set(CMAKE_CXX_STANDARD_REQUIRED ON)
+   set(CMAKE_CXX_EXTENSIONS OFF)
+ endif(NOT CMAKE_CXX_STANDARD)
if (WIN32)
  set(LEVELDB_PLATFORM_NAME LEVELDB_PLATFORM_WINDOWS)
@@ -28,9 +34,6 @@ option(LEVELDB_BUILD_TESTS "Build LevelDB's unit tests" ON)
option(LEVELDB_BUILD_BENCHMARKS "Build LevelDB's benchmarks" ON)
option(LEVELDB_INSTALL "Install LevelDB's header and library" ON)
- include(TestBigEndian)
- test_big_endian(LEVELDB_IS_BIG_ENDIAN)
include(CheckIncludeFile)
check_include_file("unistd.h" HAVE_UNISTD_H)
@@ -78,6 +81,10 @@ endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
include(CheckCXXCompilerFlag)
check_cxx_compiler_flag(-Wthread-safety HAVE_CLANG_THREAD_SAFETY)
+ # Used by googletest.
+ check_cxx_compiler_flag(-Wno-missing-field-initializers
+   LEVELDB_HAVE_NO_MISSING_FIELD_INITIALIZERS)
include(CheckCXXSourceCompiles)
# Test whether C++17 __has_include is available.
@@ -92,13 +99,13 @@ set(LEVELDB_PUBLIC_INCLUDE_DIR "include/leveldb")
set(LEVELDB_PORT_CONFIG_DIR "include/port")
configure_file(
- "${PROJECT_SOURCE_DIR}/port/port_config.h.in"
+ "port/port_config.h.in"
"${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
)
include_directories(
"${PROJECT_BINARY_DIR}/include"
- "${PROJECT_SOURCE_DIR}"
+ "."
)
if(BUILD_SHARED_LIBS)
@@ -106,79 +113,82 @@ if(BUILD_SHARED_LIBS)
add_compile_options(-fvisibility=hidden)
endif(BUILD_SHARED_LIBS)
+ # Must be included before CMAKE_INSTALL_INCLUDEDIR is used.
+ include(GNUInstallDirs)
add_library(leveldb "")
target_sources(leveldb
PRIVATE
"${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
- "${PROJECT_SOURCE_DIR}/db/builder.cc"
+ "db/builder.cc"
- "${PROJECT_SOURCE_DIR}/db/builder.h"
+ "db/builder.h"
- "${PROJECT_SOURCE_DIR}/db/c.cc"
+ "db/c.cc"
- "${PROJECT_SOURCE_DIR}/db/db_impl.cc"
+ "db/db_impl.cc"
- "${PROJECT_SOURCE_DIR}/db/db_impl.h"
+ "db/db_impl.h"
- "${PROJECT_SOURCE_DIR}/db/db_iter.cc"
+ "db/db_iter.cc"
- "${PROJECT_SOURCE_DIR}/db/db_iter.h"
+ "db/db_iter.h"
- "${PROJECT_SOURCE_DIR}/db/dbformat.cc"
+ "db/dbformat.cc"
- "${PROJECT_SOURCE_DIR}/db/dbformat.h"
+ "db/dbformat.h"
- "${PROJECT_SOURCE_DIR}/db/dumpfile.cc"
+ "db/dumpfile.cc"
- "${PROJECT_SOURCE_DIR}/db/filename.cc"
+ "db/filename.cc"
- "${PROJECT_SOURCE_DIR}/db/filename.h"
+ "db/filename.h"
- "${PROJECT_SOURCE_DIR}/db/log_format.h"
+ "db/log_format.h"
- "${PROJECT_SOURCE_DIR}/db/log_reader.cc"
+ "db/log_reader.cc"
- "${PROJECT_SOURCE_DIR}/db/log_reader.h"
+ "db/log_reader.h"
- "${PROJECT_SOURCE_DIR}/db/log_writer.cc"
+ "db/log_writer.cc"
- "${PROJECT_SOURCE_DIR}/db/log_writer.h"
+ "db/log_writer.h"
- "${PROJECT_SOURCE_DIR}/db/memtable.cc"
+ "db/memtable.cc"
- "${PROJECT_SOURCE_DIR}/db/memtable.h"
+ "db/memtable.h"
- "${PROJECT_SOURCE_DIR}/db/repair.cc"
+ "db/repair.cc"
- "${PROJECT_SOURCE_DIR}/db/skiplist.h"
+ "db/skiplist.h"
- "${PROJECT_SOURCE_DIR}/db/snapshot.h"
+ "db/snapshot.h"
- "${PROJECT_SOURCE_DIR}/db/table_cache.cc"
+ "db/table_cache.cc"
- "${PROJECT_SOURCE_DIR}/db/table_cache.h"
+ "db/table_cache.h"
- "${PROJECT_SOURCE_DIR}/db/version_edit.cc"
+ "db/version_edit.cc"
- "${PROJECT_SOURCE_DIR}/db/version_edit.h"
+ "db/version_edit.h"
- "${PROJECT_SOURCE_DIR}/db/version_set.cc"
+ "db/version_set.cc"
- "${PROJECT_SOURCE_DIR}/db/version_set.h"
+ "db/version_set.h"
- "${PROJECT_SOURCE_DIR}/db/write_batch_internal.h"
+ "db/write_batch_internal.h"
- "${PROJECT_SOURCE_DIR}/db/write_batch.cc"
+ "db/write_batch.cc"
- "${PROJECT_SOURCE_DIR}/port/port_stdcxx.h"
+ "port/port_stdcxx.h"
- "${PROJECT_SOURCE_DIR}/port/port.h"
+ "port/port.h"
- "${PROJECT_SOURCE_DIR}/port/thread_annotations.h"
+ "port/thread_annotations.h"
- "${PROJECT_SOURCE_DIR}/table/block_builder.cc"
+ "table/block_builder.cc"
- "${PROJECT_SOURCE_DIR}/table/block_builder.h"
+ "table/block_builder.h"
- "${PROJECT_SOURCE_DIR}/table/block.cc"
+ "table/block.cc"
- "${PROJECT_SOURCE_DIR}/table/block.h"
+ "table/block.h"
- "${PROJECT_SOURCE_DIR}/table/filter_block.cc"
+ "table/filter_block.cc"
- "${PROJECT_SOURCE_DIR}/table/filter_block.h"
+ "table/filter_block.h"
- "${PROJECT_SOURCE_DIR}/table/format.cc"
+ "table/format.cc"
- "${PROJECT_SOURCE_DIR}/table/format.h"
+ "table/format.h"
- "${PROJECT_SOURCE_DIR}/table/iterator_wrapper.h"
+ "table/iterator_wrapper.h"
- "${PROJECT_SOURCE_DIR}/table/iterator.cc"
+ "table/iterator.cc"
- "${PROJECT_SOURCE_DIR}/table/merger.cc"
+ "table/merger.cc"
- "${PROJECT_SOURCE_DIR}/table/merger.h"
+ "table/merger.h"
- "${PROJECT_SOURCE_DIR}/table/table_builder.cc"
+ "table/table_builder.cc"
- "${PROJECT_SOURCE_DIR}/table/table.cc"
+ "table/table.cc"
- "${PROJECT_SOURCE_DIR}/table/two_level_iterator.cc"
+ "table/two_level_iterator.cc"
- "${PROJECT_SOURCE_DIR}/table/two_level_iterator.h"
+ "table/two_level_iterator.h"
- "${PROJECT_SOURCE_DIR}/util/arena.cc"
+ "util/arena.cc"
- "${PROJECT_SOURCE_DIR}/util/arena.h"
+ "util/arena.h"
- "${PROJECT_SOURCE_DIR}/util/bloom.cc"
+ "util/bloom.cc"
- "${PROJECT_SOURCE_DIR}/util/cache.cc"
+ "util/cache.cc"
- "${PROJECT_SOURCE_DIR}/util/coding.cc"
+ "util/coding.cc"
- "${PROJECT_SOURCE_DIR}/util/coding.h"
+ "util/coding.h"
- "${PROJECT_SOURCE_DIR}/util/comparator.cc"
+ "util/comparator.cc"
- "${PROJECT_SOURCE_DIR}/util/crc32c.cc"
+ "util/crc32c.cc"
- "${PROJECT_SOURCE_DIR}/util/crc32c.h"
+ "util/crc32c.h"
- "${PROJECT_SOURCE_DIR}/util/env.cc"
+ "util/env.cc"
- "${PROJECT_SOURCE_DIR}/util/filter_policy.cc"
+ "util/filter_policy.cc"
- "${PROJECT_SOURCE_DIR}/util/hash.cc"
+ "util/hash.cc"
- "${PROJECT_SOURCE_DIR}/util/hash.h"
+ "util/hash.h"
- "${PROJECT_SOURCE_DIR}/util/logging.cc"
+ "util/logging.cc"
- "${PROJECT_SOURCE_DIR}/util/logging.h"
+ "util/logging.h"
- "${PROJECT_SOURCE_DIR}/util/mutexlock.h"
+ "util/mutexlock.h"
- "${PROJECT_SOURCE_DIR}/util/no_destructor.h"
+ "util/no_destructor.h"
- "${PROJECT_SOURCE_DIR}/util/options.cc"
+ "util/options.cc"
- "${PROJECT_SOURCE_DIR}/util/random.h"
+ "util/random.h"
- "${PROJECT_SOURCE_DIR}/util/status.cc"
+ "util/status.cc"
# Only CMake 3.3+ supports PUBLIC sources in targets exported by "install".
$<$<VERSION_GREATER:CMAKE_VERSION,3.2>:PUBLIC>
@@ -202,22 +212,22 @@ target_sources(leveldb
if (WIN32)
target_sources(leveldb
PRIVATE
- "${PROJECT_SOURCE_DIR}/util/env_windows.cc"
- "${PROJECT_SOURCE_DIR}/util/windows_logger.h"
+ "util/env_windows.cc"
+ "util/windows_logger.h"
)
else (WIN32)
target_sources(leveldb
PRIVATE
- "${PROJECT_SOURCE_DIR}/util/env_posix.cc"
- "${PROJECT_SOURCE_DIR}/util/posix_logger.h"
+ "util/env_posix.cc"
+ "util/posix_logger.h"
)
endif (WIN32)
# MemEnv is not part of the interface and could be pulled to a separate library.
target_sources(leveldb
PRIVATE
- "${PROJECT_SOURCE_DIR}/helpers/memenv/memenv.cc"
- "${PROJECT_SOURCE_DIR}/helpers/memenv/memenv.h"
+ "helpers/memenv/memenv.cc"
+ "helpers/memenv/memenv.h"
)
target_include_directories(leveldb
@@ -272,13 +282,84 @@ find_package(Threads REQUIRED)
target_link_libraries(leveldb Threads::Threads)
add_executable(leveldbutil
- "${PROJECT_SOURCE_DIR}/db/leveldbutil.cc"
+ "db/leveldbutil.cc"
)
target_link_libraries(leveldbutil leveldb)
if(LEVELDB_BUILD_TESTS)
enable_testing()
# Prevent overriding the parent project's compiler/linker settings on Windows.
set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
set(install_gtest OFF)
set(install_gmock OFF)
set(build_gmock ON)
# This project is tested using GoogleTest.
add_subdirectory("third_party/googletest")
# GoogleTest triggers a missing field initializers warning.
if(LEVELDB_HAVE_NO_MISSING_FIELD_INITIALIZERS)
set_property(TARGET gtest
APPEND PROPERTY COMPILE_OPTIONS -Wno-missing-field-initializers)
set_property(TARGET gmock
APPEND PROPERTY COMPILE_OPTIONS -Wno-missing-field-initializers)
endif(LEVELDB_HAVE_NO_MISSING_FIELD_INITIALIZERS)
add_executable(leveldb_tests "")
target_sources(leveldb_tests
PRIVATE
# "db/fault_injection_test.cc"
# "issues/issue178_test.cc"
# "issues/issue200_test.cc"
# "issues/issue320_test.cc"
"${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
# "util/env_test.cc"
"util/status_test.cc"
"util/no_destructor_test.cc"
"util/testutil.cc"
"util/testutil.h"
)
if(NOT BUILD_SHARED_LIBS)
target_sources(leveldb_tests
PRIVATE
"db/autocompact_test.cc"
"db/corruption_test.cc"
"db/db_test.cc"
"db/dbformat_test.cc"
"db/filename_test.cc"
"db/log_test.cc"
"db/recovery_test.cc"
"db/skiplist_test.cc"
"db/version_edit_test.cc"
"db/version_set_test.cc"
"db/write_batch_test.cc"
"helpers/memenv/memenv_test.cc"
"table/filter_block_test.cc"
"table/table_test.cc"
"util/arena_test.cc"
"util/bloom_test.cc"
"util/cache_test.cc"
"util/coding_test.cc"
"util/crc32c_test.cc"
"util/hash_test.cc"
"util/logging_test.cc"
)
endif(NOT BUILD_SHARED_LIBS)
target_link_libraries(leveldb_tests leveldb gmock gtest gtest_main)
target_compile_definitions(leveldb_tests
PRIVATE
${LEVELDB_PLATFORM_NAME}=1
)
if (NOT HAVE_CXX17_HAS_INCLUDE)
target_compile_definitions(leveldb_tests
PRIVATE
LEVELDB_HAS_PORT_CONFIG_H=1
)
endif(NOT HAVE_CXX17_HAS_INCLUDE)
add_test(NAME "leveldb_tests" COMMAND "leveldb_tests")
function(leveldb_test test_file)
get_filename_component(test_target_name "${test_file}" NAME_WE)
@@ -286,14 +367,12 @@ if(LEVELDB_BUILD_TESTS)
target_sources("${test_target_name}"
PRIVATE
"${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
- "${PROJECT_SOURCE_DIR}/util/testharness.cc"
- "${PROJECT_SOURCE_DIR}/util/testharness.h"
- "${PROJECT_SOURCE_DIR}/util/testutil.cc"
- "${PROJECT_SOURCE_DIR}/util/testutil.h"
+ "util/testutil.cc"
+ "util/testutil.h"
"${test_file}"
)
- target_link_libraries("${test_target_name}" leveldb)
+ target_link_libraries("${test_target_name}" leveldb gmock gtest)
target_compile_definitions("${test_target_name}"
PRIVATE
${LEVELDB_PLATFORM_NAME}=1
@@ -308,54 +387,25 @@ if(LEVELDB_BUILD_TESTS)
add_test(NAME "${test_target_name}" COMMAND "${test_target_name}")
endfunction(leveldb_test)
- leveldb_test("${PROJECT_SOURCE_DIR}/db/c_test.c")
+ leveldb_test("db/c_test.c")
- leveldb_test("${PROJECT_SOURCE_DIR}/db/fault_injection_test.cc")
- leveldb_test("${PROJECT_SOURCE_DIR}/issues/issue178_test.cc")
- leveldb_test("${PROJECT_SOURCE_DIR}/issues/issue200_test.cc")
- leveldb_test("${PROJECT_SOURCE_DIR}/issues/issue320_test.cc")
- leveldb_test("${PROJECT_SOURCE_DIR}/util/env_test.cc")
- leveldb_test("${PROJECT_SOURCE_DIR}/util/status_test.cc")
- leveldb_test("${PROJECT_SOURCE_DIR}/util/no_destructor_test.cc")
if(NOT BUILD_SHARED_LIBS)
- leveldb_test("${PROJECT_SOURCE_DIR}/db/autocompact_test.cc")
- leveldb_test("${PROJECT_SOURCE_DIR}/db/corruption_test.cc")
- leveldb_test("${PROJECT_SOURCE_DIR}/db/db_test.cc")
- leveldb_test("${PROJECT_SOURCE_DIR}/db/dbformat_test.cc")
- leveldb_test("${PROJECT_SOURCE_DIR}/db/filename_test.cc")
- leveldb_test("${PROJECT_SOURCE_DIR}/db/log_test.cc")
- leveldb_test("${PROJECT_SOURCE_DIR}/db/recovery_test.cc")
- leveldb_test("${PROJECT_SOURCE_DIR}/db/skiplist_test.cc")
- leveldb_test("${PROJECT_SOURCE_DIR}/db/version_edit_test.cc")
- leveldb_test("${PROJECT_SOURCE_DIR}/db/version_set_test.cc")
- leveldb_test("${PROJECT_SOURCE_DIR}/db/write_batch_test.cc")
- leveldb_test("${PROJECT_SOURCE_DIR}/helpers/memenv/memenv_test.cc")
- leveldb_test("${PROJECT_SOURCE_DIR}/table/filter_block_test.cc")
- leveldb_test("${PROJECT_SOURCE_DIR}/table/table_test.cc")
- leveldb_test("${PROJECT_SOURCE_DIR}/util/arena_test.cc")
- leveldb_test("${PROJECT_SOURCE_DIR}/util/bloom_test.cc")
- leveldb_test("${PROJECT_SOURCE_DIR}/util/cache_test.cc")
- leveldb_test("${PROJECT_SOURCE_DIR}/util/coding_test.cc")
- leveldb_test("${PROJECT_SOURCE_DIR}/util/crc32c_test.cc")
- leveldb_test("${PROJECT_SOURCE_DIR}/util/hash_test.cc")
- leveldb_test("${PROJECT_SOURCE_DIR}/util/logging_test.cc")
# TODO(costan): This test also uses
- # "${PROJECT_SOURCE_DIR}/util/env_{posix|windows}_test_helper.h"
+ # "util/env_{posix|windows}_test_helper.h"
if (WIN32)
- leveldb_test("${PROJECT_SOURCE_DIR}/util/env_windows_test.cc")
+ leveldb_test("util/env_windows_test.cc")
else (WIN32)
- leveldb_test("${PROJECT_SOURCE_DIR}/util/env_posix_test.cc")
+ leveldb_test("util/env_posix_test.cc")
endif (WIN32)
endif(NOT BUILD_SHARED_LIBS)
endif(LEVELDB_BUILD_TESTS)
if(LEVELDB_BUILD_BENCHMARKS)
+ # This project uses Google benchmark for benchmarking.
+ set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "" FORCE)
+ set(BENCHMARK_ENABLE_EXCEPTIONS OFF CACHE BOOL "" FORCE)
+ add_subdirectory("third_party/benchmark")
function(leveldb_benchmark bench_file)
get_filename_component(bench_target_name "${bench_file}" NAME_WE)
@@ -363,16 +413,14 @@ if(LEVELDB_BUILD_BENCHMARKS)
target_sources("${bench_target_name}"
PRIVATE
"${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
- "${PROJECT_SOURCE_DIR}/util/histogram.cc"
- "${PROJECT_SOURCE_DIR}/util/histogram.h"
- "${PROJECT_SOURCE_DIR}/util/testharness.cc"
- "${PROJECT_SOURCE_DIR}/util/testharness.h"
- "${PROJECT_SOURCE_DIR}/util/testutil.cc"
- "${PROJECT_SOURCE_DIR}/util/testutil.h"
+ "util/histogram.cc"
+ "util/histogram.h"
+ "util/testutil.cc"
+ "util/testutil.h"
"${bench_file}"
)
- target_link_libraries("${bench_target_name}" leveldb)
+ target_link_libraries("${bench_target_name}" leveldb gmock gtest benchmark)
target_compile_definitions("${bench_target_name}"
PRIVATE
${LEVELDB_PLATFORM_NAME}=1
@@ -386,12 +434,12 @@ if(LEVELDB_BUILD_BENCHMARKS)
endfunction(leveldb_benchmark)
if(NOT BUILD_SHARED_LIBS)
- leveldb_benchmark("${PROJECT_SOURCE_DIR}/benchmarks/db_bench.cc")
+ leveldb_benchmark("benchmarks/db_bench.cc")
endif(NOT BUILD_SHARED_LIBS)
check_library_exists(sqlite3 sqlite3_open "" HAVE_SQLITE3)
if(HAVE_SQLITE3)
- leveldb_benchmark("${PROJECT_SOURCE_DIR}/benchmarks/db_bench_sqlite3.cc")
+ leveldb_benchmark("benchmarks/db_bench_sqlite3.cc")
target_link_libraries(db_bench_sqlite3 sqlite3)
endif(HAVE_SQLITE3)
@@ -411,13 +459,12 @@ int main() {
" HAVE_KYOTOCABINET)
set(CMAKE_REQUIRED_LIBRARIES ${OLD_CMAKE_REQURED_LIBRARIES})
if(HAVE_KYOTOCABINET)
- leveldb_benchmark("${PROJECT_SOURCE_DIR}/benchmarks/db_bench_tree_db.cc")
+ leveldb_benchmark("benchmarks/db_bench_tree_db.cc")
target_link_libraries(db_bench_tree_db kyotocabinet)
endif(HAVE_KYOTOCABINET)
endif(LEVELDB_BUILD_BENCHMARKS)
if(LEVELDB_INSTALL)
- include(GNUInstallDirs)
install(TARGETS leveldb
EXPORT leveldbTargets
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
@@ -426,38 +473,43 @@ if(LEVELDB_INSTALL)
)
install(
FILES
- "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/c.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/c.h"
- "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/cache.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/cache.h"
- "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/comparator.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/comparator.h"
- "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/db.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/db.h"
- "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/dumpfile.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/dumpfile.h"
- "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/env.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/env.h"
- "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/export.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/export.h"
- "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/filter_policy.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/filter_policy.h"
- "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/iterator.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/iterator.h"
- "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/options.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/options.h"
- "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/slice.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/slice.h"
- "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/status.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/status.h"
- "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/table_builder.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/table_builder.h"
- "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/table.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/table.h"
- "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/write_batch.h"
+ "${LEVELDB_PUBLIC_INCLUDE_DIR}/write_batch.h"
- DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/leveldb
+ DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/leveldb"
)
include(CMakePackageConfigHelpers)
+ configure_package_config_file(
+ "cmake/${PROJECT_NAME}Config.cmake.in"
+ "${PROJECT_BINARY_DIR}/cmake/${PROJECT_NAME}Config.cmake"
+ INSTALL_DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}"
+ )
write_basic_package_version_file(
- "${PROJECT_BINARY_DIR}/leveldbConfigVersion.cmake"
+ "${PROJECT_BINARY_DIR}/cmake/${PROJECT_NAME}ConfigVersion.cmake"
COMPATIBILITY SameMajorVersion
)
install(
EXPORT leveldbTargets
NAMESPACE leveldb::
- DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/leveldb"
+ DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}"
)
install(
FILES
- "${PROJECT_SOURCE_DIR}/cmake/leveldbConfig.cmake"
+ "${PROJECT_BINARY_DIR}/cmake/${PROJECT_NAME}Config.cmake"
- "${PROJECT_BINARY_DIR}/leveldbConfigVersion.cmake"
+ "${PROJECT_BINARY_DIR}/cmake/${PROJECT_NAME}ConfigVersion.cmake"
- DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/leveldb"
+ DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}"
)
endif(LEVELDB_INSTALL)

CONTRIBUTING.md

@@ -32,5 +32,5 @@ the CLA.
## Writing Code ##
If your contribution contains code, please make sure that it follows
- [the style guide](http://google.github.io/styleguide/cppguide.html).
+ [the style guide](https://google.github.io/styleguide/cppguide.html).
Otherwise we will have to ask you to make changes, and that's no fun for anyone.

README.md

@@ -1,7 +1,6 @@
**LevelDB is a fast key-value storage library written at Google that provides an ordered mapping from string keys to string values.**
- [![Build Status](https://travis-ci.org/google/leveldb.svg?branch=master)](https://travis-ci.org/google/leveldb)
- [![Build status](https://ci.appveyor.com/api/projects/status/g2j5j4rfkda6eyw5/branch/master?svg=true)](https://ci.appveyor.com/project/pwnall/leveldb)
+ [![ci](https://github.com/google/leveldb/actions/workflows/build.yml/badge.svg)](https://github.com/google/leveldb/actions/workflows/build.yml)
Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com)
@@ -14,7 +13,7 @@ Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com)
* Multiple changes can be made in one atomic batch.
* Users can create a transient snapshot to get a consistent view of data.
* Forward and backward iteration is supported over the data.
- * Data is automatically compressed using the [Snappy compression library](http://google.github.io/snappy/).
+ * Data is automatically compressed using the [Snappy compression library](https://google.github.io/snappy/).
* External activity (file system operations etc.) is relayed through a virtual interface so users can customize the operating system interactions.
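The feature list above maps onto the public API declared in `include/leveldb/db.h`. As a quick orientation, typical usage looks like the following minimal sketch (not part of this change; the database path is arbitrary):

```cpp
#include <cassert>
#include <string>

#include "leveldb/db.h"

int main() {
  leveldb::DB* db;
  leveldb::Options options;
  options.create_if_missing = true;

  // Open (or create) a database directory.
  leveldb::Status status = leveldb::DB::Open(options, "/tmp/testdb", &db);
  assert(status.ok());

  // Keys and values are arbitrary byte arrays.
  status = db->Put(leveldb::WriteOptions(), "key1", "value1");
  assert(status.ok());

  std::string value;
  status = db->Get(leveldb::ReadOptions(), "key1", &value);
  assert(status.ok() && value == "value1");

  delete db;  // Closes the database.
  return 0;
}
```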
# Documentation
@@ -27,6 +26,12 @@ Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com)
* Only a single process (possibly multi-threaded) can access a particular database at a time.
* There is no client-server support builtin to the library. An application that needs such support will have to wrap their own server around the library.
# Getting the Source
```bash
git clone --recurse-submodules https://github.com/google/leveldb.git
```
# Building
This project supports [CMake](https://cmake.org/) out of the box.

benchmarks/db_bench.cc

@@ -2,11 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
- #include <stdio.h>
- #include <stdlib.h>
#include <sys/types.h>
+ #include <atomic>
+ #include <cstdio>
+ #include <cstdlib>
#include "leveldb/cache.h"
+ #include "leveldb/comparator.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "leveldb/filter_policy.h"
@@ -33,6 +36,7 @@
// readmissing -- read N missing keys in random order
// readhot -- read N times in random order from 1% section of DB
// seekrandom -- N random seeks
+ // seekordered -- N ordered seeks
// open -- cost of opening a DB
// crc32c -- repeated crc32c of 4K of data
// Meta operations:
@@ -77,6 +81,9 @@ static double FLAGS_compression_ratio = 0.5;
// Print histogram of operation timings
static bool FLAGS_histogram = false;
+ // Count the number of string comparisons performed
+ static bool FLAGS_comparisons = false;
// Number of bytes to buffer in memtable before compacting
// (initialized to default value by "main")
static int FLAGS_write_buffer_size = 0;
@@ -100,6 +107,9 @@ static int FLAGS_open_files = 0;
// Negative means use default settings.
static int FLAGS_bloom_bits = -1;
+ // Common key prefix length.
+ static int FLAGS_key_prefix = 0;
// If true, do not destroy the existing database. If you set this
// flag and also specify a benchmark that wants a fresh database, that
// benchmark will fail.
@@ -116,6 +126,33 @@ namespace leveldb {
namespace {
leveldb::Env* g_env = nullptr;
class CountComparator : public Comparator {
public:
CountComparator(const Comparator* wrapped) : wrapped_(wrapped) {}
~CountComparator() override {}
int Compare(const Slice& a, const Slice& b) const override {
count_.fetch_add(1, std::memory_order_relaxed);
return wrapped_->Compare(a, b);
}
const char* Name() const override { return wrapped_->Name(); }
void FindShortestSeparator(std::string* start,
const Slice& limit) const override {
wrapped_->FindShortestSeparator(start, limit);
}
void FindShortSuccessor(std::string* key) const override {
return wrapped_->FindShortSuccessor(key);
}
size_t comparisons() const { return count_.load(std::memory_order_relaxed); }
void reset() { count_.store(0, std::memory_order_relaxed); }
private:
mutable std::atomic<size_t> count_{0};
const Comparator* const wrapped_;
};
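The CountComparator above forwards every call to the wrapped comparator while counting Compare() invocations; further down in this diff, db_bench installs it through `options.comparator` when `--comparisons=1` is passed. The following standalone sketch (not part of the commit; the database path is made up) shows the same wrapping idea against the public LevelDB API:

```cpp
#include <atomic>
#include <cstdio>
#include <string>

#include "leveldb/comparator.h"
#include "leveldb/db.h"

// Counts how many key comparisons the DB performs, delegating the actual
// ordering to the built-in bytewise comparator.
class CountingComparator : public leveldb::Comparator {
 public:
  CountingComparator() : wrapped_(leveldb::BytewiseComparator()) {}

  int Compare(const leveldb::Slice& a, const leveldb::Slice& b) const override {
    count_.fetch_add(1, std::memory_order_relaxed);
    return wrapped_->Compare(a, b);
  }
  const char* Name() const override { return wrapped_->Name(); }
  void FindShortestSeparator(std::string* start,
                             const leveldb::Slice& limit) const override {
    wrapped_->FindShortestSeparator(start, limit);
  }
  void FindShortSuccessor(std::string* key) const override {
    wrapped_->FindShortSuccessor(key);
  }
  size_t comparisons() const { return count_.load(std::memory_order_relaxed); }

 private:
  mutable std::atomic<size_t> count_{0};
  const leveldb::Comparator* const wrapped_;
};

int main() {
  CountingComparator counter;
  leveldb::Options options;
  options.create_if_missing = true;
  options.comparator = &counter;  // Must outlive the DB and stay consistent.

  leveldb::DB* db;
  if (leveldb::DB::Open(options, "/tmp/count_demo", &db).ok()) {
    db->Put(leveldb::WriteOptions(), "a", "1");
    db->Put(leveldb::WriteOptions(), "b", "2");
    std::string value;
    db->Get(leveldb::ReadOptions(), "a", &value);
    delete db;
  }
  std::printf("comparisons: %zu\n", counter.comparisons());
}
```

Because Name() is forwarded to the bytewise comparator, the counting wrapper remains compatible with databases created without it, which is what lets db_bench toggle counting with a flag.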
// Helper for quickly generating random data.
class RandomGenerator {
private:
@@ -148,6 +185,26 @@ class RandomGenerator {
}
};
class KeyBuffer {
public:
KeyBuffer() {
assert(FLAGS_key_prefix < sizeof(buffer_));
memset(buffer_, 'a', FLAGS_key_prefix);
}
KeyBuffer& operator=(KeyBuffer& other) = delete;
KeyBuffer(KeyBuffer& other) = delete;
void Set(int k) {
std::snprintf(buffer_ + FLAGS_key_prefix,
sizeof(buffer_) - FLAGS_key_prefix, "%016d", k);
}
Slice slice() const { return Slice(buffer_, FLAGS_key_prefix + 16); }
private:
char buffer_[1024];
};
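KeyBuffer replaces the old per-call `char key[100]` / `snprintf` pattern: the first `FLAGS_key_prefix` bytes are `'a'` padding and the next 16 bytes are the zero-padded key number, so `slice()` always returns `FLAGS_key_prefix + 16` bytes. A small standalone sketch of the resulting key layout (the prefix length is hard-coded here instead of read from the flag):

```cpp
#include <cstdio>
#include <cstring>

int main() {
  const int kPrefix = 4;  // stands in for FLAGS_key_prefix
  char buffer[1024];
  std::memset(buffer, 'a', kPrefix);

  const int keys[] = {0, 7, 12345};
  for (int k : keys) {
    std::snprintf(buffer + kPrefix, sizeof(buffer) - kPrefix, "%016d", k);
    // The key is the first kPrefix + 16 bytes, e.g. "aaaa0000000000012345".
    std::printf("%.*s\n", kPrefix + 16, buffer);
  }
}
```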
#if defined(__linux)
static Slice TrimSpace(Slice s) {
size_t start = 0;
@@ -220,8 +277,8 @@ class Stats {
double micros = now - last_op_finish_;
hist_.Add(micros);
if (micros > 20000) {
- fprintf(stderr, "long op: %.1f micros%30s\r", micros, "");
- fflush(stderr);
+ std::fprintf(stderr, "long op: %.1f micros%30s\r", micros, "");
+ std::fflush(stderr);
}
last_op_finish_ = now;
}
@@ -242,8 +299,8 @@ class Stats {
next_report_ += 50000;
else
next_report_ += 100000;
- fprintf(stderr, "... finished %d ops%30s\r", done_, "");
- fflush(stderr);
+ std::fprintf(stderr, "... finished %d ops%30s\r", done_, "");
+ std::fflush(stderr);
}
}
@@ -260,18 +317,20 @@ class Stats {
// elapsed times.
double elapsed = (finish_ - start_) * 1e-6;
char rate[100];
- snprintf(rate, sizeof(rate), "%6.1f MB/s",
+ std::snprintf(rate, sizeof(rate), "%6.1f MB/s",
(bytes_ / 1048576.0) / elapsed);
extra = rate;
}
AppendWithSpace(&extra, message_);
- fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(),
- seconds_ * 1e6 / done_, (extra.empty() ? "" : " "), extra.c_str());
+ std::fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
+ name.ToString().c_str(), seconds_ * 1e6 / done_,
+ (extra.empty() ? "" : " "), extra.c_str());
if (FLAGS_histogram) {
- fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
+ std::fprintf(stdout, "Microseconds per op:\n%s\n",
+ hist_.ToString().c_str());
}
- fflush(stdout);
+ std::fflush(stdout);
}
};
@@ -302,7 +361,7 @@ struct ThreadState {
Stats stats;
SharedState* shared;
- ThreadState(int index) : tid(index), rand(1000 + index), shared(nullptr) {}
+ ThreadState(int index, int seed) : tid(index), rand(seed), shared(nullptr) {}
};
} // namespace
@@ -318,55 +377,61 @@ class Benchmark {
WriteOptions write_options_;
int reads_;
int heap_counter_;
+ CountComparator count_comparator_;
+ int total_thread_count_;
void PrintHeader() {
- const int kKeySize = 16;
+ const int kKeySize = 16 + FLAGS_key_prefix;
PrintEnvironment();
- fprintf(stdout, "Keys: %d bytes each\n", kKeySize);
- fprintf(stdout, "Values: %d bytes each (%d bytes after compression)\n",
- FLAGS_value_size,
- static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
- fprintf(stdout, "Entries: %d\n", num_);
- fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
- ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
- 1048576.0));
- fprintf(stdout, "FileSize: %.1f MB (estimated)\n",
- (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) /
- 1048576.0));
+ std::fprintf(stdout, "Keys: %d bytes each\n", kKeySize);
+ std::fprintf(
+ stdout, "Values: %d bytes each (%d bytes after compression)\n",
+ FLAGS_value_size,
+ static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
+ std::fprintf(stdout, "Entries: %d\n", num_);
+ std::fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
+ ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
+ 1048576.0));
+ std::fprintf(
+ stdout, "FileSize: %.1f MB (estimated)\n",
+ (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) /
+ 1048576.0));
PrintWarnings();
- fprintf(stdout, "------------------------------------------------\n");
+ std::fprintf(stdout, "------------------------------------------------\n");
}
void PrintWarnings() {
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
- fprintf(
+ std::fprintf(
stdout,
"WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
#endif
#ifndef NDEBUG
- fprintf(stdout,
- "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
+ std::fprintf(
+ stdout,
+ "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
#endif
// See if snappy is working by attempting to compress a compressible string
const char text[] = "yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy";
std::string compressed;
if (!port::Snappy_Compress(text, sizeof(text), &compressed)) {
- fprintf(stdout, "WARNING: Snappy compression is not enabled\n");
+ std::fprintf(stdout, "WARNING: Snappy compression is not enabled\n");
} else if (compressed.size() >= sizeof(text)) {
- fprintf(stdout, "WARNING: Snappy compression is not effective\n");
+ std::fprintf(stdout, "WARNING: Snappy compression is not effective\n");
}
}
void PrintEnvironment() {
- fprintf(stderr, "LevelDB: version %d.%d\n", kMajorVersion,
+ std::fprintf(stderr, "LevelDB: version %d.%d\n", kMajorVersion,
kMinorVersion);
#if defined(__linux)
time_t now = time(nullptr);
- fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline
+ std::fprintf(stderr, "Date: %s",
+ ctime(&now)); // ctime() adds newline
- FILE* cpuinfo = fopen("/proc/cpuinfo", "r");
+ FILE* cpuinfo = std::fopen("/proc/cpuinfo", "r");
if (cpuinfo != nullptr) {
char line[1000];
int num_cpus = 0;
@@ -386,9 +451,9 @@ class Benchmark {
cache_size = val.ToString();
}
}
- fclose(cpuinfo);
+ std::fclose(cpuinfo);
- fprintf(stderr, "CPU: %d * %s\n", num_cpus, cpu_type.c_str());
- fprintf(stderr, "CPUCache: %s\n", cache_size.c_str());
+ std::fprintf(stderr, "CPU: %d * %s\n", num_cpus, cpu_type.c_str());
+ std::fprintf(stderr, "CPUCache: %s\n", cache_size.c_str());
}
#endif
}
@@ -404,12 +469,14 @@ class Benchmark {
value_size_(FLAGS_value_size),
entries_per_batch_(1),
reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
- heap_counter_(0) {
+ heap_counter_(0),
+ count_comparator_(BytewiseComparator()),
+ total_thread_count_(0) {
std::vector<std::string> files;
g_env->GetChildren(FLAGS_db, &files);
for (size_t i = 0; i < files.size(); i++) {
if (Slice(files[i]).starts_with("heap-")) {
- g_env->DeleteFile(std::string(FLAGS_db) + "/" + files[i]);
+ g_env->RemoveFile(std::string(FLAGS_db) + "/" + files[i]);
}
}
if (!FLAGS_use_existing_db) {
@@ -487,6 +554,8 @@ class Benchmark {
method = &Benchmark::ReadMissing;
} else if (name == Slice("seekrandom")) {
method = &Benchmark::SeekRandom;
+ } else if (name == Slice("seekordered")) {
+ method = &Benchmark::SeekOrdered;
} else if (name == Slice("readhot")) {
method = &Benchmark::ReadHot;
} else if (name == Slice("readrandomsmall")) {
@@ -515,14 +584,15 @@ class Benchmark {
PrintStats("leveldb.sstables");
} else {
if (!name.empty()) { // No error message for empty name
- fprintf(stderr, "unknown benchmark '%s'\n", name.ToString().c_str());
+ std::fprintf(stderr, "unknown benchmark '%s'\n",
+ name.ToString().c_str());
}
}
if (fresh_db) {
if (FLAGS_use_existing_db) {
- fprintf(stdout, "%-12s : skipped (--use_existing_db is true)\n",
+ std::fprintf(stdout, "%-12s : skipped (--use_existing_db is true)\n",
name.ToString().c_str());
method = nullptr;
} else {
delete db_;
@@ -583,7 +653,11 @@ class Benchmark {
arg[i].bm = this;
arg[i].method = method;
arg[i].shared = &shared;
- arg[i].thread = new ThreadState(i);
+ ++total_thread_count_;
+ // Seed the thread's random state deterministically based upon thread
+ // creation across all benchmarks. This ensures that the seeds are unique
+ // but reproducible when rerunning the same set of benchmarks.
+ arg[i].thread = new ThreadState(i, /*seed=*/1000 + total_thread_count_);
arg[i].thread->shared = &shared;
g_env->StartThread(ThreadBody, &arg[i]);
}
@@ -604,6 +678,11 @@ class Benchmark {
arg[0].thread->stats.Merge(arg[i].thread->stats);
}
arg[0].thread->stats.Report(name);
+ if (FLAGS_comparisons) {
+ fprintf(stdout, "Comparisons: %zu\n", count_comparator_.comparisons());
+ count_comparator_.reset();
+ fflush(stdout);
+ }
for (int i = 0; i < n; i++) {
delete arg[i].thread;
@@ -624,7 +703,7 @@ class Benchmark {
bytes += size;
}
// Print so result is not dead
- fprintf(stderr, "... crc=0x%x\r", static_cast<unsigned int>(crc));
+ std::fprintf(stderr, "... crc=0x%x\r", static_cast<unsigned int>(crc));
thread->stats.AddBytes(bytes);
thread->stats.AddMessage(label);
@@ -648,8 +727,8 @@ class Benchmark {
thread->stats.AddMessage("(snappy failure)");
} else {
char buf[100];
- snprintf(buf, sizeof(buf), "(output: %.1f%%)",
+ std::snprintf(buf, sizeof(buf), "(output: %.1f%%)",
(produced * 100.0) / bytes);
thread->stats.AddMessage(buf);
thread->stats.AddBytes(bytes);
}
@@ -686,13 +765,16 @@ class Benchmark {
options.write_buffer_size = FLAGS_write_buffer_size;
options.max_file_size = FLAGS_max_file_size;
options.block_size = FLAGS_block_size;
+ if (FLAGS_comparisons) {
+ options.comparator = &count_comparator_;
+ }
options.max_open_files = FLAGS_open_files;
options.filter_policy = filter_policy_;
options.reuse_logs = FLAGS_reuse_logs;
Status s = DB::Open(options, FLAGS_db, &db_);
if (!s.ok()) {
- fprintf(stderr, "open error: %s\n", s.ToString().c_str());
- exit(1);
+ std::fprintf(stderr, "open error: %s\n", s.ToString().c_str());
+ std::exit(1);
}
}
@@ -711,7 +793,7 @@ class Benchmark {
void DoWrite(ThreadState* thread, bool seq) {
if (num_ != FLAGS_num) {
char msg[100];
- snprintf(msg, sizeof(msg), "(%d ops)", num_);
+ std::snprintf(msg, sizeof(msg), "(%d ops)", num_);
thread->stats.AddMessage(msg);
}
@@ -719,20 +801,20 @@
WriteBatch batch;
Status s;
int64_t bytes = 0;
+ KeyBuffer key;
for (int i = 0; i < num_; i += entries_per_batch_) {
batch.Clear();
for (int j = 0; j < entries_per_batch_; j++) {
- const int k = seq ? i + j : (thread->rand.Next() % FLAGS_num);
- char key[100];
- snprintf(key, sizeof(key), "%016d", k);
- batch.Put(key, gen.Generate(value_size_));
- bytes += value_size_ + strlen(key);
+ const int k = seq ? i + j : thread->rand.Uniform(FLAGS_num);
+ key.Set(k);
+ batch.Put(key.slice(), gen.Generate(value_size_));
+ bytes += value_size_ + key.slice().size();
thread->stats.FinishedSingleOp();
}
s = db_->Write(write_options_, &batch);
if (!s.ok()) {
- fprintf(stderr, "put error: %s\n", s.ToString().c_str());
- exit(1);
+ std::fprintf(stderr, "put error: %s\n", s.ToString().c_str());
+ std::exit(1);
}
}
thread->stats.AddBytes(bytes);
@@ -768,28 +850,29 @@ class Benchmark {
ReadOptions options;
std::string value;
int found = 0;
+ KeyBuffer key;
for (int i = 0; i < reads_; i++) {
- char key[100];
- const int k = thread->rand.Next() % FLAGS_num;
- snprintf(key, sizeof(key), "%016d", k);
- if (db_->Get(options, key, &value).ok()) {
+ const int k = thread->rand.Uniform(FLAGS_num);
+ key.Set(k);
+ if (db_->Get(options, key.slice(), &value).ok()) {
found++;
}
thread->stats.FinishedSingleOp();
}
char msg[100];
- snprintf(msg, sizeof(msg), "(%d of %d found)", found, num_);
+ std::snprintf(msg, sizeof(msg), "(%d of %d found)", found, num_);
thread->stats.AddMessage(msg);
}
void ReadMissing(ThreadState* thread) {
ReadOptions options;
std::string value;
+ KeyBuffer key;
for (int i = 0; i < reads_; i++) {
- char key[100];
- const int k = thread->rand.Next() % FLAGS_num;
- snprintf(key, sizeof(key), "%016d.", k);
- db_->Get(options, key, &value);
+ const int k = thread->rand.Uniform(FLAGS_num);
+ key.Set(k);
+ Slice s = Slice(key.slice().data(), key.slice().size() - 1);
+ db_->Get(options, s, &value);
thread->stats.FinishedSingleOp();
}
}
@@ -798,11 +881,11 @@ class Benchmark {
ReadOptions options;
std::string value;
const int range = (FLAGS_num + 99) / 100;
+ KeyBuffer key;
for (int i = 0; i < reads_; i++) {
- char key[100];
- const int k = thread->rand.Next() % range;
- snprintf(key, sizeof(key), "%016d", k);
- db_->Get(options, key, &value);
+ const int k = thread->rand.Uniform(range);
+ key.Set(k);
+ db_->Get(options, key.slice(), &value);
thread->stats.FinishedSingleOp();
}
}
@@ -810,13 +893,13 @@ class Benchmark {
void SeekRandom(ThreadState* thread) {
ReadOptions options;
int found = 0;
+ KeyBuffer key;
for (int i = 0; i < reads_; i++) {
Iterator* iter = db_->NewIterator(options);
- char key[100];
- const int k = thread->rand.Next() % FLAGS_num;
- snprintf(key, sizeof(key), "%016d", k);
- iter->Seek(key);
- if (iter->Valid() && iter->key() == key) found++;
+ const int k = thread->rand.Uniform(FLAGS_num);
+ key.Set(k);
+ iter->Seek(key.slice());
+ if (iter->Valid() && iter->key() == key.slice()) found++;
delete iter;
thread->stats.FinishedSingleOp();
}
@@ -825,23 +908,42 @@
thread->stats.AddMessage(msg);
}
void SeekOrdered(ThreadState* thread) {
ReadOptions options;
Iterator* iter = db_->NewIterator(options);
int found = 0;
int k = 0;
KeyBuffer key;
for (int i = 0; i < reads_; i++) {
k = (k + (thread->rand.Uniform(100))) % FLAGS_num;
key.Set(k);
iter->Seek(key.slice());
if (iter->Valid() && iter->key() == key.slice()) found++;
thread->stats.FinishedSingleOp();
}
delete iter;
char msg[100];
std::snprintf(msg, sizeof(msg), "(%d of %d found)", found, num_);
thread->stats.AddMessage(msg);
}
void DoDelete(ThreadState* thread, bool seq) {
RandomGenerator gen;
WriteBatch batch;
Status s;
+ KeyBuffer key;
for (int i = 0; i < num_; i += entries_per_batch_) {
batch.Clear();
for (int j = 0; j < entries_per_batch_; j++) {
- const int k = seq ? i + j : (thread->rand.Next() % FLAGS_num);
- char key[100];
- snprintf(key, sizeof(key), "%016d", k);
- batch.Delete(key);
+ const int k = seq ? i + j : (thread->rand.Uniform(FLAGS_num));
+ key.Set(k);
+ batch.Delete(key.slice());
thread->stats.FinishedSingleOp();
}
s = db_->Write(write_options_, &batch);
if (!s.ok()) {
- fprintf(stderr, "del error: %s\n", s.ToString().c_str());
- exit(1);
+ std::fprintf(stderr, "del error: %s\n", s.ToString().c_str());
+ std::exit(1);
}
}
}
@@ -856,6 +958,7 @@ class Benchmark {
} else {
// Special thread that keeps writing until other threads are done.
RandomGenerator gen;
+ KeyBuffer key;
while (true) {
{
MutexLock l(&thread->shared->mu);
@@ -865,13 +968,13 @@ class Benchmark {
}
}
- const int k = thread->rand.Next() % FLAGS_num;
- char key[100];
- snprintf(key, sizeof(key), "%016d", k);
- Status s = db_->Put(write_options_, key, gen.Generate(value_size_));
+ const int k = thread->rand.Uniform(FLAGS_num);
+ key.Set(k);
+ Status s =
+ db_->Put(write_options_, key.slice(), gen.Generate(value_size_));
if (!s.ok()) {
- fprintf(stderr, "put error: %s\n", s.ToString().c_str());
- exit(1);
+ std::fprintf(stderr, "put error: %s\n", s.ToString().c_str());
+ std::exit(1);
}
}
@@ -887,7 +990,7 @@ class Benchmark {
if (!db_->GetProperty(key, &stats)) {
stats = "(failed)";
}
- fprintf(stdout, "\n%s\n", stats.c_str());
+ std::fprintf(stdout, "\n%s\n", stats.c_str());
}
static void WriteToFile(void* arg, const char* buf, int n) {
@@ -896,18 +999,19 @@ class Benchmark {
void HeapProfile() {
char fname[100];
- snprintf(fname, sizeof(fname), "%s/heap-%04d", FLAGS_db, ++heap_counter_);
+ std::snprintf(fname, sizeof(fname), "%s/heap-%04d", FLAGS_db,
+ ++heap_counter_);
WritableFile* file;
Status s = g_env->NewWritableFile(fname, &file);
if (!s.ok()) {
- fprintf(stderr, "%s\n", s.ToString().c_str());
+ std::fprintf(stderr, "%s\n", s.ToString().c_str());
return;
}
bool ok = port::GetHeapProfile(WriteToFile, file);
delete file;
if (!ok) {
- fprintf(stderr, "heap profiling not supported\n");
- g_env->DeleteFile(fname);
+ std::fprintf(stderr, "heap profiling not supported\n");
+ g_env->RemoveFile(fname);
}
}
};
@@ -932,6 +1036,9 @@ int main(int argc, char** argv) {
} else if (sscanf(argv[i], "--histogram=%d%c", &n, &junk) == 1 &&
(n == 0 || n == 1)) {
FLAGS_histogram = n;
+ } else if (sscanf(argv[i], "--comparisons=%d%c", &n, &junk) == 1 &&
+ (n == 0 || n == 1)) {
+ FLAGS_comparisons = n;
} else if (sscanf(argv[i], "--use_existing_db=%d%c", &n, &junk) == 1 &&
(n == 0 || n == 1)) {
FLAGS_use_existing_db = n;
@@ -952,6 +1059,8 @@ int main(int argc, char** argv) {
FLAGS_max_file_size = n;
} else if (sscanf(argv[i], "--block_size=%d%c", &n, &junk) == 1) {
FLAGS_block_size = n;
+ } else if (sscanf(argv[i], "--key_prefix=%d%c", &n, &junk) == 1) {
+ FLAGS_key_prefix = n;
} else if (sscanf(argv[i], "--cache_size=%d%c", &n, &junk) == 1) {
FLAGS_cache_size = n;
} else if (sscanf(argv[i], "--bloom_bits=%d%c", &n, &junk) == 1) {
@@ -961,8 +1070,8 @@ int main(int argc, char** argv) {
} else if (strncmp(argv[i], "--db=", 5) == 0) {
FLAGS_db = argv[i] + 5;
} else {
- fprintf(stderr, "Invalid flag '%s'\n", argv[i]);
- exit(1);
+ std::fprintf(stderr, "Invalid flag '%s'\n", argv[i]);
+ std::exit(1);
}
}

(new benchmark file: a Google Benchmark harness for VersionSet::LogAndApply)

@@ -0,0 +1,92 @@
// Copyright (c) 2019 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include <cinttypes>
#include <cstdio>
#include <string>
#include "gtest/gtest.h"
#include "benchmark/benchmark.h"
#include "db/version_set.h"
#include "leveldb/comparator.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "leveldb/options.h"
#include "port/port.h"
#include "util/mutexlock.h"
#include "util/testutil.h"
namespace leveldb {
namespace {
std::string MakeKey(unsigned int num) {
char buf[30];
std::snprintf(buf, sizeof(buf), "%016u", num);
return std::string(buf);
}
void BM_LogAndApply(benchmark::State& state) {
const int num_base_files = state.range(0);
std::string dbname = testing::TempDir() + "leveldb_test_benchmark";
DestroyDB(dbname, Options());
DB* db = nullptr;
Options opts;
opts.create_if_missing = true;
Status s = DB::Open(opts, dbname, &db);
ASSERT_LEVELDB_OK(s);
ASSERT_TRUE(db != nullptr);
delete db;
db = nullptr;
Env* env = Env::Default();
port::Mutex mu;
MutexLock l(&mu);
InternalKeyComparator cmp(BytewiseComparator());
Options options;
VersionSet vset(dbname, &options, nullptr, &cmp);
bool save_manifest;
ASSERT_LEVELDB_OK(vset.Recover(&save_manifest));
VersionEdit vbase;
uint64_t fnum = 1;
for (int i = 0; i < num_base_files; i++) {
InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
vbase.AddFile(2, fnum++, 1 /* file size */, start, limit);
}
ASSERT_LEVELDB_OK(vset.LogAndApply(&vbase, &mu));
uint64_t start_micros = env->NowMicros();
for (auto st : state) {
VersionEdit vedit;
vedit.RemoveFile(2, fnum);
InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
vedit.AddFile(2, fnum++, 1 /* file size */, start, limit);
vset.LogAndApply(&vedit, &mu);
}
uint64_t stop_micros = env->NowMicros();
unsigned int us = stop_micros - start_micros;
char buf[16];
std::snprintf(buf, sizeof(buf), "%d", num_base_files);
std::fprintf(stderr,
"BM_LogAndApply/%-6s %8" PRIu64
" iters : %9u us (%7.0f us / iter)\n",
buf, state.iterations(), us, ((float)us) / state.iterations());
}
BENCHMARK(BM_LogAndApply)->Arg(1)->Arg(100)->Arg(10000)->Arg(100000);
} // namespace
} // namespace leveldb
BENCHMARK_MAIN();

View File

@ -3,8 +3,9 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors. // found in the LICENSE file. See the AUTHORS file for names of contributors.
#include <sqlite3.h> #include <sqlite3.h>
#include <stdio.h>
#include <stdlib.h> #include <cstdio>
#include <cstdlib>
#include "util/histogram.h" #include "util/histogram.h"
#include "util/random.h" #include "util/random.h"
@ -69,6 +70,9 @@ static int FLAGS_num_pages = 4096;
// benchmark will fail. // benchmark will fail.
static bool FLAGS_use_existing_db = false; static bool FLAGS_use_existing_db = false;
// If true, the SQLite table has ROWIDs.
static bool FLAGS_use_rowids = false;
// If true, we allow batch writes to occur // If true, we allow batch writes to occur
static bool FLAGS_transaction = true; static bool FLAGS_transaction = true;
@ -80,23 +84,23 @@ static const char* FLAGS_db = nullptr;
inline static void ExecErrorCheck(int status, char* err_msg) { inline static void ExecErrorCheck(int status, char* err_msg) {
if (status != SQLITE_OK) { if (status != SQLITE_OK) {
fprintf(stderr, "SQL error: %s\n", err_msg); std::fprintf(stderr, "SQL error: %s\n", err_msg);
sqlite3_free(err_msg); sqlite3_free(err_msg);
exit(1); std::exit(1);
} }
} }
inline static void StepErrorCheck(int status) { inline static void StepErrorCheck(int status) {
if (status != SQLITE_DONE) { if (status != SQLITE_DONE) {
fprintf(stderr, "SQL step error: status = %d\n", status); std::fprintf(stderr, "SQL step error: status = %d\n", status);
exit(1); std::exit(1);
} }
} }
inline static void ErrorCheck(int status) { inline static void ErrorCheck(int status) {
if (status != SQLITE_OK) { if (status != SQLITE_OK) {
fprintf(stderr, "sqlite3 error: status = %d\n", status); std::fprintf(stderr, "sqlite3 error: status = %d\n", status);
exit(1); std::exit(1);
} }
} }
@ -178,36 +182,38 @@ class Benchmark {
void PrintHeader() { void PrintHeader() {
const int kKeySize = 16; const int kKeySize = 16;
PrintEnvironment(); PrintEnvironment();
fprintf(stdout, "Keys: %d bytes each\n", kKeySize); std::fprintf(stdout, "Keys: %d bytes each\n", kKeySize);
fprintf(stdout, "Values: %d bytes each\n", FLAGS_value_size); std::fprintf(stdout, "Values: %d bytes each\n", FLAGS_value_size);
fprintf(stdout, "Entries: %d\n", num_); std::fprintf(stdout, "Entries: %d\n", num_);
fprintf(stdout, "RawSize: %.1f MB (estimated)\n", std::fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) / ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
1048576.0)); 1048576.0));
PrintWarnings(); PrintWarnings();
fprintf(stdout, "------------------------------------------------\n"); std::fprintf(stdout, "------------------------------------------------\n");
} }
void PrintWarnings() { void PrintWarnings() {
#if defined(__GNUC__) && !defined(__OPTIMIZE__) #if defined(__GNUC__) && !defined(__OPTIMIZE__)
fprintf( std::fprintf(
stdout, stdout,
"WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"); "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
#endif #endif
#ifndef NDEBUG #ifndef NDEBUG
fprintf(stdout, std::fprintf(
"WARNING: Assertions are enabled; benchmarks unnecessarily slow\n"); stdout,
"WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
#endif #endif
} }
void PrintEnvironment() { void PrintEnvironment() {
fprintf(stderr, "SQLite: version %s\n", SQLITE_VERSION); std::fprintf(stderr, "SQLite: version %s\n", SQLITE_VERSION);
#if defined(__linux) #if defined(__linux)
time_t now = time(nullptr); time_t now = time(nullptr);
fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline std::fprintf(stderr, "Date: %s",
ctime(&now)); // ctime() adds newline
FILE* cpuinfo = fopen("/proc/cpuinfo", "r"); FILE* cpuinfo = std::fopen("/proc/cpuinfo", "r");
if (cpuinfo != nullptr) { if (cpuinfo != nullptr) {
char line[1000]; char line[1000];
int num_cpus = 0; int num_cpus = 0;
@ -227,9 +233,9 @@ class Benchmark {
cache_size = val.ToString(); cache_size = val.ToString();
} }
} }
fclose(cpuinfo); std::fclose(cpuinfo);
fprintf(stderr, "CPU: %d * %s\n", num_cpus, cpu_type.c_str()); std::fprintf(stderr, "CPU: %d * %s\n", num_cpus, cpu_type.c_str());
fprintf(stderr, "CPUCache: %s\n", cache_size.c_str()); std::fprintf(stderr, "CPUCache: %s\n", cache_size.c_str());
} }
#endif #endif
} }
@ -250,8 +256,8 @@ class Benchmark {
double micros = (now - last_op_finish_) * 1e6; double micros = (now - last_op_finish_) * 1e6;
hist_.Add(micros); hist_.Add(micros);
if (micros > 20000) { if (micros > 20000) {
fprintf(stderr, "long op: %.1f micros%30s\r", micros, ""); std::fprintf(stderr, "long op: %.1f micros%30s\r", micros, "");
fflush(stderr); std::fflush(stderr);
} }
last_op_finish_ = now; last_op_finish_ = now;
} }
@ -272,8 +278,8 @@ class Benchmark {
next_report_ += 50000; next_report_ += 50000;
else else
next_report_ += 100000; next_report_ += 100000;
fprintf(stderr, "... finished %d ops%30s\r", done_, ""); std::fprintf(stderr, "... finished %d ops%30s\r", done_, "");
fflush(stderr); std::fflush(stderr);
} }
} }
@ -286,8 +292,8 @@ class Benchmark {
if (bytes_ > 0) { if (bytes_ > 0) {
char rate[100]; char rate[100];
snprintf(rate, sizeof(rate), "%6.1f MB/s", std::snprintf(rate, sizeof(rate), "%6.1f MB/s",
(bytes_ / 1048576.0) / (finish - start_)); (bytes_ / 1048576.0) / (finish - start_));
if (!message_.empty()) { if (!message_.empty()) {
message_ = std::string(rate) + " " + message_; message_ = std::string(rate) + " " + message_;
} else { } else {
@ -295,13 +301,14 @@ class Benchmark {
} }
} }
fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(), std::fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
(finish - start_) * 1e6 / done_, (message_.empty() ? "" : " "), name.ToString().c_str(), (finish - start_) * 1e6 / done_,
message_.c_str()); (message_.empty() ? "" : " "), message_.c_str());
if (FLAGS_histogram) { if (FLAGS_histogram) {
fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str()); std::fprintf(stdout, "Microseconds per op:\n%s\n",
hist_.ToString().c_str());
} }
fflush(stdout); std::fflush(stdout);
} }
public: public:
@ -325,7 +332,7 @@ class Benchmark {
std::string file_name(test_dir); std::string file_name(test_dir);
file_name += "/"; file_name += "/";
file_name += files[i]; file_name += files[i];
Env::Default()->DeleteFile(file_name.c_str()); Env::Default()->RemoveFile(file_name.c_str());
} }
} }
} }
@ -401,7 +408,8 @@ class Benchmark {
} else { } else {
known = false; known = false;
if (name != Slice()) { // No error message for empty name if (name != Slice()) { // No error message for empty name
fprintf(stderr, "unknown benchmark '%s'\n", name.ToString().c_str()); std::fprintf(stderr, "unknown benchmark '%s'\n",
name.ToString().c_str());
} }
} }
if (known) { if (known) {
@ -421,26 +429,26 @@ class Benchmark {
// Open database // Open database
std::string tmp_dir; std::string tmp_dir;
Env::Default()->GetTestDirectory(&tmp_dir); Env::Default()->GetTestDirectory(&tmp_dir);
snprintf(file_name, sizeof(file_name), "%s/dbbench_sqlite3-%d.db", std::snprintf(file_name, sizeof(file_name), "%s/dbbench_sqlite3-%d.db",
tmp_dir.c_str(), db_num_); tmp_dir.c_str(), db_num_);
status = sqlite3_open(file_name, &db_); status = sqlite3_open(file_name, &db_);
if (status) { if (status) {
fprintf(stderr, "open error: %s\n", sqlite3_errmsg(db_)); std::fprintf(stderr, "open error: %s\n", sqlite3_errmsg(db_));
exit(1); std::exit(1);
} }
// Change SQLite cache size // Change SQLite cache size
char cache_size[100]; char cache_size[100];
snprintf(cache_size, sizeof(cache_size), "PRAGMA cache_size = %d", std::snprintf(cache_size, sizeof(cache_size), "PRAGMA cache_size = %d",
FLAGS_num_pages); FLAGS_num_pages);
status = sqlite3_exec(db_, cache_size, nullptr, nullptr, &err_msg); status = sqlite3_exec(db_, cache_size, nullptr, nullptr, &err_msg);
ExecErrorCheck(status, err_msg); ExecErrorCheck(status, err_msg);
// FLAGS_page_size is defaulted to 1024 // FLAGS_page_size is defaulted to 1024
if (FLAGS_page_size != 1024) { if (FLAGS_page_size != 1024) {
char page_size[100]; char page_size[100];
snprintf(page_size, sizeof(page_size), "PRAGMA page_size = %d", std::snprintf(page_size, sizeof(page_size), "PRAGMA page_size = %d",
FLAGS_page_size); FLAGS_page_size);
status = sqlite3_exec(db_, page_size, nullptr, nullptr, &err_msg); status = sqlite3_exec(db_, page_size, nullptr, nullptr, &err_msg);
ExecErrorCheck(status, err_msg); ExecErrorCheck(status, err_msg);
} }
@ -462,6 +470,7 @@ class Benchmark {
std::string locking_stmt = "PRAGMA locking_mode = EXCLUSIVE"; std::string locking_stmt = "PRAGMA locking_mode = EXCLUSIVE";
std::string create_stmt = std::string create_stmt =
"CREATE TABLE test (key blob, value blob, PRIMARY KEY(key))"; "CREATE TABLE test (key blob, value blob, PRIMARY KEY(key))";
if (!FLAGS_use_rowids) create_stmt += " WITHOUT ROWID";
std::string stmt_array[] = {locking_stmt, create_stmt}; std::string stmt_array[] = {locking_stmt, create_stmt};
int stmt_array_length = sizeof(stmt_array) / sizeof(std::string); int stmt_array_length = sizeof(stmt_array) / sizeof(std::string);
for (int i = 0; i < stmt_array_length; i++) { for (int i = 0; i < stmt_array_length; i++) {
@ -487,7 +496,7 @@ class Benchmark {
if (num_entries != num_) { if (num_entries != num_) {
char msg[100]; char msg[100];
snprintf(msg, sizeof(msg), "(%d ops)", num_entries); std::snprintf(msg, sizeof(msg), "(%d ops)", num_entries);
message_ = msg; message_ = msg;
} }
@ -534,7 +543,7 @@ class Benchmark {
const int k = const int k =
(order == SEQUENTIAL) ? i + j : (rand_.Next() % num_entries); (order == SEQUENTIAL) ? i + j : (rand_.Next() % num_entries);
char key[100]; char key[100];
snprintf(key, sizeof(key), "%016d", k); std::snprintf(key, sizeof(key), "%016d", k);
// Bind KV values into replace_stmt // Bind KV values into replace_stmt
status = sqlite3_bind_blob(replace_stmt, 1, key, 16, SQLITE_STATIC); status = sqlite3_bind_blob(replace_stmt, 1, key, 16, SQLITE_STATIC);
@ -607,7 +616,7 @@ class Benchmark {
// Create key value // Create key value
char key[100]; char key[100];
int k = (order == SEQUENTIAL) ? i + j : (rand_.Next() % reads_); int k = (order == SEQUENTIAL) ? i + j : (rand_.Next() % reads_);
snprintf(key, sizeof(key), "%016d", k); std::snprintf(key, sizeof(key), "%016d", k);
// Bind key value into read_stmt // Bind key value into read_stmt
status = sqlite3_bind_blob(read_stmt, 1, key, 16, SQLITE_STATIC); status = sqlite3_bind_blob(read_stmt, 1, key, 16, SQLITE_STATIC);
@ -678,6 +687,9 @@ int main(int argc, char** argv) {
} else if (sscanf(argv[i], "--use_existing_db=%d%c", &n, &junk) == 1 && } else if (sscanf(argv[i], "--use_existing_db=%d%c", &n, &junk) == 1 &&
(n == 0 || n == 1)) { (n == 0 || n == 1)) {
FLAGS_use_existing_db = n; FLAGS_use_existing_db = n;
} else if (sscanf(argv[i], "--use_rowids=%d%c", &n, &junk) == 1 &&
(n == 0 || n == 1)) {
FLAGS_use_rowids = n;
} else if (sscanf(argv[i], "--num=%d%c", &n, &junk) == 1) { } else if (sscanf(argv[i], "--num=%d%c", &n, &junk) == 1) {
FLAGS_num = n; FLAGS_num = n;
} else if (sscanf(argv[i], "--reads=%d%c", &n, &junk) == 1) { } else if (sscanf(argv[i], "--reads=%d%c", &n, &junk) == 1) {
@ -696,8 +708,8 @@ int main(int argc, char** argv) {
} else if (strncmp(argv[i], "--db=", 5) == 0) { } else if (strncmp(argv[i], "--db=", 5) == 0) {
FLAGS_db = argv[i] + 5; FLAGS_db = argv[i] + 5;
} else { } else {
fprintf(stderr, "Invalid flag '%s'\n", argv[i]); std::fprintf(stderr, "Invalid flag '%s'\n", argv[i]);
exit(1); std::exit(1);
} }
} }
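The new --use_rowids flag above defaults to false, so the benchmark table is now created WITHOUT ROWID and rows are stored directly in the primary-key B-tree; passing --use_rowids=1 restores the previous rowid-table layout. A minimal sketch of the two table shapes, as a hypothetical standalone program using only the public sqlite3 API:

#include <sqlite3.h>
#include <cstdio>

int main() {
  sqlite3* db = nullptr;
  if (sqlite3_open(":memory:", &db) != SQLITE_OK) return 1;
  char* err = nullptr;
  // Rowid table: PRIMARY KEY(key) becomes a separate index over the implicit
  // rowid B-tree that actually stores the rows.
  sqlite3_exec(db, "CREATE TABLE t1 (key BLOB, value BLOB, PRIMARY KEY(key))",
               nullptr, nullptr, &err);
  // WITHOUT ROWID table: rows live in the primary-key B-tree itself, which is
  // the layout db_bench_sqlite3 now measures by default.
  sqlite3_exec(db,
               "CREATE TABLE t2 (key BLOB, value BLOB, PRIMARY KEY(key)) "
               "WITHOUT ROWID",
               nullptr, nullptr, &err);
  std::printf("tables created\n");
  sqlite3_close(db);
  return 0;
}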

View File

@ -3,8 +3,9 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors. // found in the LICENSE file. See the AUTHORS file for names of contributors.
#include <kcpolydb.h> #include <kcpolydb.h>
#include <stdio.h>
#include <stdlib.h> #include <cstdio>
#include <cstdlib>
#include "util/histogram.h" #include "util/histogram.h"
#include "util/random.h" #include "util/random.h"
@ -74,7 +75,7 @@ static const char* FLAGS_db = nullptr;
inline static void DBSynchronize(kyotocabinet::TreeDB* db_) { inline static void DBSynchronize(kyotocabinet::TreeDB* db_) {
// Synchronize will flush writes to disk // Synchronize will flush writes to disk
if (!db_->synchronize()) { if (!db_->synchronize()) {
fprintf(stderr, "synchronize error: %s\n", db_->error().name()); std::fprintf(stderr, "synchronize error: %s\n", db_->error().name());
} }
} }
@ -149,42 +150,47 @@ class Benchmark {
void PrintHeader() { void PrintHeader() {
const int kKeySize = 16; const int kKeySize = 16;
PrintEnvironment(); PrintEnvironment();
fprintf(stdout, "Keys: %d bytes each\n", kKeySize); std::fprintf(stdout, "Keys: %d bytes each\n", kKeySize);
fprintf(stdout, "Values: %d bytes each (%d bytes after compression)\n", std::fprintf(
FLAGS_value_size, stdout, "Values: %d bytes each (%d bytes after compression)\n",
static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5)); FLAGS_value_size,
fprintf(stdout, "Entries: %d\n", num_); static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
fprintf(stdout, "RawSize: %.1f MB (estimated)\n", std::fprintf(stdout, "Entries: %d\n", num_);
((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) / std::fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
1048576.0)); ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
fprintf(stdout, "FileSize: %.1f MB (estimated)\n", 1048576.0));
(((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) / std::fprintf(
1048576.0)); stdout, "FileSize: %.1f MB (estimated)\n",
(((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) /
1048576.0));
PrintWarnings(); PrintWarnings();
fprintf(stdout, "------------------------------------------------\n"); std::fprintf(stdout, "------------------------------------------------\n");
} }
void PrintWarnings() { void PrintWarnings() {
#if defined(__GNUC__) && !defined(__OPTIMIZE__) #if defined(__GNUC__) && !defined(__OPTIMIZE__)
fprintf( std::fprintf(
stdout, stdout,
"WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"); "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
#endif #endif
#ifndef NDEBUG #ifndef NDEBUG
fprintf(stdout, std::fprintf(
"WARNING: Assertions are enabled; benchmarks unnecessarily slow\n"); stdout,
"WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
#endif #endif
} }
void PrintEnvironment() { void PrintEnvironment() {
fprintf(stderr, "Kyoto Cabinet: version %s, lib ver %d, lib rev %d\n", std::fprintf(
kyotocabinet::VERSION, kyotocabinet::LIBVER, kyotocabinet::LIBREV); stderr, "Kyoto Cabinet: version %s, lib ver %d, lib rev %d\n",
kyotocabinet::VERSION, kyotocabinet::LIBVER, kyotocabinet::LIBREV);
#if defined(__linux) #if defined(__linux)
time_t now = time(nullptr); time_t now = time(nullptr);
fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline std::fprintf(stderr, "Date: %s",
ctime(&now)); // ctime() adds newline
FILE* cpuinfo = fopen("/proc/cpuinfo", "r"); FILE* cpuinfo = std::fopen("/proc/cpuinfo", "r");
if (cpuinfo != nullptr) { if (cpuinfo != nullptr) {
char line[1000]; char line[1000];
int num_cpus = 0; int num_cpus = 0;
@ -204,9 +210,10 @@ class Benchmark {
cache_size = val.ToString(); cache_size = val.ToString();
} }
} }
fclose(cpuinfo); std::fclose(cpuinfo);
fprintf(stderr, "CPU: %d * %s\n", num_cpus, cpu_type.c_str()); std::fprintf(stderr, "CPU: %d * %s\n", num_cpus,
fprintf(stderr, "CPUCache: %s\n", cache_size.c_str()); cpu_type.c_str());
std::fprintf(stderr, "CPUCache: %s\n", cache_size.c_str());
} }
#endif #endif
} }
@ -227,8 +234,8 @@ class Benchmark {
double micros = (now - last_op_finish_) * 1e6; double micros = (now - last_op_finish_) * 1e6;
hist_.Add(micros); hist_.Add(micros);
if (micros > 20000) { if (micros > 20000) {
fprintf(stderr, "long op: %.1f micros%30s\r", micros, ""); std::fprintf(stderr, "long op: %.1f micros%30s\r", micros, "");
fflush(stderr); std::fflush(stderr);
} }
last_op_finish_ = now; last_op_finish_ = now;
} }
@ -249,8 +256,8 @@ class Benchmark {
next_report_ += 50000; next_report_ += 50000;
else else
next_report_ += 100000; next_report_ += 100000;
fprintf(stderr, "... finished %d ops%30s\r", done_, ""); std::fprintf(stderr, "... finished %d ops%30s\r", done_, "");
fflush(stderr); std::fflush(stderr);
} }
} }
@ -263,8 +270,8 @@ class Benchmark {
if (bytes_ > 0) { if (bytes_ > 0) {
char rate[100]; char rate[100];
snprintf(rate, sizeof(rate), "%6.1f MB/s", std::snprintf(rate, sizeof(rate), "%6.1f MB/s",
(bytes_ / 1048576.0) / (finish - start_)); (bytes_ / 1048576.0) / (finish - start_));
if (!message_.empty()) { if (!message_.empty()) {
message_ = std::string(rate) + " " + message_; message_ = std::string(rate) + " " + message_;
} else { } else {
@ -272,13 +279,14 @@ class Benchmark {
} }
} }
fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(), std::fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
(finish - start_) * 1e6 / done_, (message_.empty() ? "" : " "), name.ToString().c_str(), (finish - start_) * 1e6 / done_,
message_.c_str()); (message_.empty() ? "" : " "), message_.c_str());
if (FLAGS_histogram) { if (FLAGS_histogram) {
fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str()); std::fprintf(stdout, "Microseconds per op:\n%s\n",
hist_.ToString().c_str());
} }
fflush(stdout); std::fflush(stdout);
} }
public: public:
@ -301,7 +309,7 @@ class Benchmark {
std::string file_name(test_dir); std::string file_name(test_dir);
file_name += "/"; file_name += "/";
file_name += files[i]; file_name += files[i];
Env::Default()->DeleteFile(file_name.c_str()); Env::Default()->RemoveFile(file_name.c_str());
} }
} }
} }
@ -309,7 +317,7 @@ class Benchmark {
~Benchmark() { ~Benchmark() {
if (!db_->close()) { if (!db_->close()) {
fprintf(stderr, "close error: %s\n", db_->error().name()); std::fprintf(stderr, "close error: %s\n", db_->error().name());
} }
} }
@ -373,7 +381,8 @@ class Benchmark {
} else { } else {
known = false; known = false;
if (name != Slice()) { // No error message for empty name if (name != Slice()) { // No error message for empty name
fprintf(stderr, "unknown benchmark '%s'\n", name.ToString().c_str()); std::fprintf(stderr, "unknown benchmark '%s'\n",
name.ToString().c_str());
} }
} }
if (known) { if (known) {
@ -392,8 +401,8 @@ class Benchmark {
db_num_++; db_num_++;
std::string test_dir; std::string test_dir;
Env::Default()->GetTestDirectory(&test_dir); Env::Default()->GetTestDirectory(&test_dir);
snprintf(file_name, sizeof(file_name), "%s/dbbench_polyDB-%d.kct", std::snprintf(file_name, sizeof(file_name), "%s/dbbench_polyDB-%d.kct",
test_dir.c_str(), db_num_); test_dir.c_str(), db_num_);
// Create tuning options and open the database // Create tuning options and open the database
int open_options = int open_options =
@ -412,7 +421,7 @@ class Benchmark {
open_options |= kyotocabinet::PolyDB::OAUTOSYNC; open_options |= kyotocabinet::PolyDB::OAUTOSYNC;
} }
if (!db_->open(file_name, open_options)) { if (!db_->open(file_name, open_options)) {
fprintf(stderr, "open error: %s\n", db_->error().name()); std::fprintf(stderr, "open error: %s\n", db_->error().name());
} }
} }
@ -432,7 +441,7 @@ class Benchmark {
if (num_entries != num_) { if (num_entries != num_) {
char msg[100]; char msg[100];
snprintf(msg, sizeof(msg), "(%d ops)", num_entries); std::snprintf(msg, sizeof(msg), "(%d ops)", num_entries);
message_ = msg; message_ = msg;
} }
@ -440,11 +449,11 @@ class Benchmark {
for (int i = 0; i < num_entries; i++) { for (int i = 0; i < num_entries; i++) {
const int k = (order == SEQUENTIAL) ? i : (rand_.Next() % num_entries); const int k = (order == SEQUENTIAL) ? i : (rand_.Next() % num_entries);
char key[100]; char key[100];
snprintf(key, sizeof(key), "%016d", k); std::snprintf(key, sizeof(key), "%016d", k);
bytes_ += value_size + strlen(key); bytes_ += value_size + strlen(key);
std::string cpp_key = key; std::string cpp_key = key;
if (!db_->set(cpp_key, gen_.Generate(value_size).ToString())) { if (!db_->set(cpp_key, gen_.Generate(value_size).ToString())) {
fprintf(stderr, "set error: %s\n", db_->error().name()); std::fprintf(stderr, "set error: %s\n", db_->error().name());
} }
FinishedSingleOp(); FinishedSingleOp();
} }
@ -466,7 +475,7 @@ class Benchmark {
for (int i = 0; i < reads_; i++) { for (int i = 0; i < reads_; i++) {
char key[100]; char key[100];
const int k = rand_.Next() % reads_; const int k = rand_.Next() % reads_;
snprintf(key, sizeof(key), "%016d", k); std::snprintf(key, sizeof(key), "%016d", k);
db_->get(key, &value); db_->get(key, &value);
FinishedSingleOp(); FinishedSingleOp();
} }
@ -504,8 +513,8 @@ int main(int argc, char** argv) {
} else if (strncmp(argv[i], "--db=", 5) == 0) { } else if (strncmp(argv[i], "--db=", 5) == 0) {
FLAGS_db = argv[i] + 5; FLAGS_db = argv[i] + 5;
} else { } else {
fprintf(stderr, "Invalid flag '%s'\n", argv[i]); std::fprintf(stderr, "Invalid flag '%s'\n", argv[i]);
exit(1); std::exit(1);
} }
} }

View File

@ -1 +0,0 @@
include("${CMAKE_CURRENT_LIST_DIR}/leveldbTargets.cmake")

View File

@ -0,0 +1,9 @@
# Copyright 2019 The LevelDB Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file. See the AUTHORS file for names of contributors.
@PACKAGE_INIT@
include("${CMAKE_CURRENT_LIST_DIR}/leveldbTargets.cmake")
check_required_components(leveldb)

View File

@ -2,24 +2,24 @@
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors. // found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "gtest/gtest.h"
#include "db/db_impl.h" #include "db/db_impl.h"
#include "leveldb/cache.h" #include "leveldb/cache.h"
#include "leveldb/db.h" #include "leveldb/db.h"
#include "util/testharness.h"
#include "util/testutil.h" #include "util/testutil.h"
namespace leveldb { namespace leveldb {
class AutoCompactTest { class AutoCompactTest : public testing::Test {
public: public:
AutoCompactTest() { AutoCompactTest() {
dbname_ = test::TmpDir() + "/autocompact_test"; dbname_ = testing::TempDir() + "autocompact_test";
tiny_cache_ = NewLRUCache(100); tiny_cache_ = NewLRUCache(100);
options_.block_cache = tiny_cache_; options_.block_cache = tiny_cache_;
DestroyDB(dbname_, options_); DestroyDB(dbname_, options_);
options_.create_if_missing = true; options_.create_if_missing = true;
options_.compression = kNoCompression; options_.compression = kNoCompression;
ASSERT_OK(DB::Open(options_, dbname_, &db_)); EXPECT_LEVELDB_OK(DB::Open(options_, dbname_, &db_));
} }
~AutoCompactTest() { ~AutoCompactTest() {
@ -30,7 +30,7 @@ class AutoCompactTest {
std::string Key(int i) { std::string Key(int i) {
char buf[100]; char buf[100];
snprintf(buf, sizeof(buf), "key%06d", i); std::snprintf(buf, sizeof(buf), "key%06d", i);
return std::string(buf); return std::string(buf);
} }
@ -62,15 +62,15 @@ void AutoCompactTest::DoReads(int n) {
// Fill database // Fill database
for (int i = 0; i < kCount; i++) { for (int i = 0; i < kCount; i++) {
ASSERT_OK(db_->Put(WriteOptions(), Key(i), value)); ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), Key(i), value));
} }
ASSERT_OK(dbi->TEST_CompactMemTable()); ASSERT_LEVELDB_OK(dbi->TEST_CompactMemTable());
// Delete everything // Delete everything
for (int i = 0; i < kCount; i++) { for (int i = 0; i < kCount; i++) {
ASSERT_OK(db_->Delete(WriteOptions(), Key(i))); ASSERT_LEVELDB_OK(db_->Delete(WriteOptions(), Key(i)));
} }
ASSERT_OK(dbi->TEST_CompactMemTable()); ASSERT_LEVELDB_OK(dbi->TEST_CompactMemTable());
// Get initial measurement of the space we will be reading. // Get initial measurement of the space we will be reading.
const int64_t initial_size = Size(Key(0), Key(n)); const int64_t initial_size = Size(Key(0), Key(n));
@ -89,8 +89,8 @@ void AutoCompactTest::DoReads(int n) {
// Wait a little bit to allow any triggered compactions to complete. // Wait a little bit to allow any triggered compactions to complete.
Env::Default()->SleepForMicroseconds(1000000); Env::Default()->SleepForMicroseconds(1000000);
uint64_t size = Size(Key(0), Key(n)); uint64_t size = Size(Key(0), Key(n));
fprintf(stderr, "iter %3d => %7.3f MB [other %7.3f MB]\n", read + 1, std::fprintf(stderr, "iter %3d => %7.3f MB [other %7.3f MB]\n", read + 1,
size / 1048576.0, Size(Key(n), Key(kCount)) / 1048576.0); size / 1048576.0, Size(Key(n), Key(kCount)) / 1048576.0);
if (size <= initial_size / 10) { if (size <= initial_size / 10) {
break; break;
} }
@ -103,10 +103,8 @@ void AutoCompactTest::DoReads(int n) {
ASSERT_GE(final_other_size, initial_other_size / 5 - 1048576); ASSERT_GE(final_other_size, initial_other_size / 5 - 1048576);
} }
TEST(AutoCompactTest, ReadAll) { DoReads(kCount); } TEST_F(AutoCompactTest, ReadAll) { DoReads(kCount); }
TEST(AutoCompactTest, ReadHalf) { DoReads(kCount / 2); } TEST_F(AutoCompactTest, ReadHalf) { DoReads(kCount / 2); }
} // namespace leveldb } // namespace leveldb
int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
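The changes above migrate the test to googletest: the fixture now derives from testing::Test, TEST becomes TEST_F, test::TmpDir() becomes testing::TempDir(), ASSERT_OK becomes ASSERT_LEVELDB_OK, and the hand-rolled main() is removed. A minimal sketch of the same fixture pattern (a hypothetical test, not part of LevelDB):

#include <string>
#include "gtest/gtest.h"

class ScratchDirTest : public testing::Test {
 protected:
  // testing::TempDir() ends with a path separator, so a name can be appended
  // directly, as the diff above does with "autocompact_test".
  std::string dir_ = testing::TempDir() + "scratch_test";
};

// Link against gtest_main (or supply a main calling RUN_ALL_TESTS) to run.
TEST_F(ScratchDirTest, PathIsNonEmpty) { EXPECT_FALSE(dir_.empty()); }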

View File

@ -30,11 +30,14 @@ Status BuildTable(const std::string& dbname, Env* env, const Options& options,
TableBuilder* builder = new TableBuilder(options, file); TableBuilder* builder = new TableBuilder(options, file);
meta->smallest.DecodeFrom(iter->key()); meta->smallest.DecodeFrom(iter->key());
Slice key;
for (; iter->Valid(); iter->Next()) { for (; iter->Valid(); iter->Next()) {
Slice key = iter->key(); key = iter->key();
meta->largest.DecodeFrom(key);
builder->Add(key, iter->value()); builder->Add(key, iter->value());
} }
if (!key.empty()) {
meta->largest.DecodeFrom(key);
}
// Finish and check for builder errors // Finish and check for builder errors
s = builder->Finish(); s = builder->Finish();
@ -71,7 +74,7 @@ Status BuildTable(const std::string& dbname, Env* env, const Options& options,
if (s.ok() && meta->file_size > 0) { if (s.ok() && meta->file_size > 0) {
// Keep it // Keep it
} else { } else {
env->DeleteFile(fname); env->RemoveFile(fname);
} }
return s; return s;
} }
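The BuildTable hunk above stops re-decoding meta->largest on every iteration: it remembers only the most recent key and records the upper bound once after the loop, guarded by key.empty() so an empty iterator leaves largest untouched. A minimal sketch of the pattern, with hypothetical stand-ins for the LevelDB types:

#include <string>
#include <vector>

// Hypothetical stand-in for the FileMetaData::largest field in this sketch.
struct Bounds {
  std::string largest;
};

void FillBounds(const std::vector<std::string>& sorted_keys, Bounds* meta) {
  std::string last;              // tracks the most recent key, like `Slice key`
  for (const std::string& k : sorted_keys) {
    last = k;                    // cheap bookkeeping inside the loop
    // builder->Add(k, value) happens here in the real BuildTable.
  }
  if (!last.empty()) {
    meta->largest = last;        // record the upper bound exactly once
  }
}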


db/c.cc
View File

@ -4,6 +4,8 @@
#include "leveldb/c.h" #include "leveldb/c.h"
#include <string.h>
#include <cstdint> #include <cstdint>
#include <cstdlib> #include <cstdlib>
@ -119,7 +121,7 @@ struct leveldb_filterpolicy_t : public FilterPolicy {
size_t len; size_t len;
char* filter = (*create_)(state_, &key_pointers[0], &key_sizes[0], n, &len); char* filter = (*create_)(state_, &key_pointers[0], &key_sizes[0], n, &len);
dst->append(filter, len); dst->append(filter, len);
free(filter); std::free(filter);
} }
bool KeyMayMatch(const Slice& key, const Slice& filter) const override { bool KeyMayMatch(const Slice& key, const Slice& filter) const override {
@ -150,15 +152,16 @@ static bool SaveError(char** errptr, const Status& s) {
*errptr = strdup(s.ToString().c_str()); *errptr = strdup(s.ToString().c_str());
} else { } else {
// TODO(sanjay): Merge with existing error? // TODO(sanjay): Merge with existing error?
free(*errptr); std::free(*errptr);
*errptr = strdup(s.ToString().c_str()); *errptr = strdup(s.ToString().c_str());
} }
return true; return true;
} }
static char* CopyString(const std::string& str) { static char* CopyString(const std::string& str) {
char* result = reinterpret_cast<char*>(malloc(sizeof(char) * str.size())); char* result =
memcpy(result, str.data(), sizeof(char) * str.size()); reinterpret_cast<char*>(std::malloc(sizeof(char) * str.size()));
std::memcpy(result, str.data(), sizeof(char) * str.size());
return result; return result;
} }
@ -547,13 +550,13 @@ char* leveldb_env_get_test_directory(leveldb_env_t* env) {
return nullptr; return nullptr;
} }
char* buffer = static_cast<char*>(malloc(result.size() + 1)); char* buffer = static_cast<char*>(std::malloc(result.size() + 1));
memcpy(buffer, result.data(), result.size()); std::memcpy(buffer, result.data(), result.size());
buffer[result.size()] = '\0'; buffer[result.size()] = '\0';
return buffer; return buffer;
} }
void leveldb_free(void* ptr) { free(ptr); } void leveldb_free(void* ptr) { std::free(ptr); }
int leveldb_major_version() { return kMajorVersion; } int leveldb_major_version() { return kMajorVersion; }

View File

@ -4,6 +4,7 @@
#include <sys/types.h> #include <sys/types.h>
#include "gtest/gtest.h"
#include "db/db_impl.h" #include "db/db_impl.h"
#include "db/filename.h" #include "db/filename.h"
#include "db/log_format.h" #include "db/log_format.h"
@ -13,14 +14,13 @@
#include "leveldb/table.h" #include "leveldb/table.h"
#include "leveldb/write_batch.h" #include "leveldb/write_batch.h"
#include "util/logging.h" #include "util/logging.h"
#include "util/testharness.h"
#include "util/testutil.h" #include "util/testutil.h"
namespace leveldb { namespace leveldb {
static const int kValueSize = 1000; static const int kValueSize = 1000;
class CorruptionTest { class CorruptionTest : public testing::Test {
public: public:
CorruptionTest() CorruptionTest()
: db_(nullptr), : db_(nullptr),
@ -46,19 +46,19 @@ class CorruptionTest {
return DB::Open(options_, dbname_, &db_); return DB::Open(options_, dbname_, &db_);
} }
void Reopen() { ASSERT_OK(TryReopen()); } void Reopen() { ASSERT_LEVELDB_OK(TryReopen()); }
void RepairDB() { void RepairDB() {
delete db_; delete db_;
db_ = nullptr; db_ = nullptr;
ASSERT_OK(::leveldb::RepairDB(dbname_, options_)); ASSERT_LEVELDB_OK(::leveldb::RepairDB(dbname_, options_));
} }
void Build(int n) { void Build(int n) {
std::string key_space, value_space; std::string key_space, value_space;
WriteBatch batch; WriteBatch batch;
for (int i = 0; i < n; i++) { for (int i = 0; i < n; i++) {
// if ((i % 100) == 0) fprintf(stderr, "@ %d of %d\n", i, n); // if ((i % 100) == 0) std::fprintf(stderr, "@ %d of %d\n", i, n);
Slice key = Key(i, &key_space); Slice key = Key(i, &key_space);
batch.Clear(); batch.Clear();
batch.Put(key, Value(i, &value_space)); batch.Put(key, Value(i, &value_space));
@ -68,7 +68,7 @@ class CorruptionTest {
if (i == n - 1) { if (i == n - 1) {
options.sync = true; options.sync = true;
} }
ASSERT_OK(db_->Write(options, &batch)); ASSERT_LEVELDB_OK(db_->Write(options, &batch));
} }
} }
@ -102,9 +102,10 @@ class CorruptionTest {
} }
delete iter; delete iter;
fprintf(stderr, std::fprintf(
"expected=%d..%d; got=%d; bad_keys=%d; bad_values=%d; missed=%d\n", stderr,
min_expected, max_expected, correct, bad_keys, bad_values, missed); "expected=%d..%d; got=%d; bad_keys=%d; bad_values=%d; missed=%d\n",
min_expected, max_expected, correct, bad_keys, bad_values, missed);
ASSERT_LE(min_expected, correct); ASSERT_LE(min_expected, correct);
ASSERT_GE(max_expected, correct); ASSERT_GE(max_expected, correct);
} }
@ -112,7 +113,7 @@ class CorruptionTest {
void Corrupt(FileType filetype, int offset, int bytes_to_corrupt) { void Corrupt(FileType filetype, int offset, int bytes_to_corrupt) {
// Pick file to corrupt // Pick file to corrupt
std::vector<std::string> filenames; std::vector<std::string> filenames;
ASSERT_OK(env_.target()->GetChildren(dbname_, &filenames)); ASSERT_LEVELDB_OK(env_.target()->GetChildren(dbname_, &filenames));
uint64_t number; uint64_t number;
FileType type; FileType type;
std::string fname; std::string fname;
@ -127,7 +128,7 @@ class CorruptionTest {
ASSERT_TRUE(!fname.empty()) << filetype; ASSERT_TRUE(!fname.empty()) << filetype;
uint64_t file_size; uint64_t file_size;
ASSERT_OK(env_.target()->GetFileSize(fname, &file_size)); ASSERT_LEVELDB_OK(env_.target()->GetFileSize(fname, &file_size));
if (offset < 0) { if (offset < 0) {
// Relative to end of file; make it absolute // Relative to end of file; make it absolute
@ -169,7 +170,7 @@ class CorruptionTest {
// Return the ith key // Return the ith key
Slice Key(int i, std::string* storage) { Slice Key(int i, std::string* storage) {
char buf[100]; char buf[100];
snprintf(buf, sizeof(buf), "%016d", i); std::snprintf(buf, sizeof(buf), "%016d", i);
storage->assign(buf, strlen(buf)); storage->assign(buf, strlen(buf));
return Slice(*storage); return Slice(*storage);
} }
@ -189,7 +190,7 @@ class CorruptionTest {
Cache* tiny_cache_; Cache* tiny_cache_;
}; };
TEST(CorruptionTest, Recovery) { TEST_F(CorruptionTest, Recovery) {
Build(100); Build(100);
Check(100, 100); Check(100, 100);
Corrupt(kLogFile, 19, 1); // WriteBatch tag for first record Corrupt(kLogFile, 19, 1); // WriteBatch tag for first record
@ -200,13 +201,13 @@ TEST(CorruptionTest, Recovery) {
Check(36, 36); Check(36, 36);
} }
TEST(CorruptionTest, RecoverWriteError) { TEST_F(CorruptionTest, RecoverWriteError) {
env_.writable_file_error_ = true; env_.writable_file_error_ = true;
Status s = TryReopen(); Status s = TryReopen();
ASSERT_TRUE(!s.ok()); ASSERT_TRUE(!s.ok());
} }
TEST(CorruptionTest, NewFileErrorDuringWrite) { TEST_F(CorruptionTest, NewFileErrorDuringWrite) {
// Do enough writing to force minor compaction // Do enough writing to force minor compaction
env_.writable_file_error_ = true; env_.writable_file_error_ = true;
const int num = 3 + (Options().write_buffer_size / kValueSize); const int num = 3 + (Options().write_buffer_size / kValueSize);
@ -223,7 +224,7 @@ TEST(CorruptionTest, NewFileErrorDuringWrite) {
Reopen(); Reopen();
} }
TEST(CorruptionTest, TableFile) { TEST_F(CorruptionTest, TableFile) {
Build(100); Build(100);
DBImpl* dbi = reinterpret_cast<DBImpl*>(db_); DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
dbi->TEST_CompactMemTable(); dbi->TEST_CompactMemTable();
@ -234,7 +235,7 @@ TEST(CorruptionTest, TableFile) {
Check(90, 99); Check(90, 99);
} }
TEST(CorruptionTest, TableFileRepair) { TEST_F(CorruptionTest, TableFileRepair) {
options_.block_size = 2 * kValueSize; // Limit scope of corruption options_.block_size = 2 * kValueSize; // Limit scope of corruption
options_.paranoid_checks = true; options_.paranoid_checks = true;
Reopen(); Reopen();
@ -250,7 +251,7 @@ TEST(CorruptionTest, TableFileRepair) {
Check(95, 99); Check(95, 99);
} }
TEST(CorruptionTest, TableFileIndexData) { TEST_F(CorruptionTest, TableFileIndexData) {
Build(10000); // Enough to build multiple Tables Build(10000); // Enough to build multiple Tables
DBImpl* dbi = reinterpret_cast<DBImpl*>(db_); DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
dbi->TEST_CompactMemTable(); dbi->TEST_CompactMemTable();
@ -260,36 +261,36 @@ TEST(CorruptionTest, TableFileIndexData) {
Check(5000, 9999); Check(5000, 9999);
} }
TEST(CorruptionTest, MissingDescriptor) { TEST_F(CorruptionTest, MissingDescriptor) {
Build(1000); Build(1000);
RepairDB(); RepairDB();
Reopen(); Reopen();
Check(1000, 1000); Check(1000, 1000);
} }
TEST(CorruptionTest, SequenceNumberRecovery) { TEST_F(CorruptionTest, SequenceNumberRecovery) {
ASSERT_OK(db_->Put(WriteOptions(), "foo", "v1")); ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v1"));
ASSERT_OK(db_->Put(WriteOptions(), "foo", "v2")); ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v2"));
ASSERT_OK(db_->Put(WriteOptions(), "foo", "v3")); ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v3"));
ASSERT_OK(db_->Put(WriteOptions(), "foo", "v4")); ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v4"));
ASSERT_OK(db_->Put(WriteOptions(), "foo", "v5")); ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v5"));
RepairDB(); RepairDB();
Reopen(); Reopen();
std::string v; std::string v;
ASSERT_OK(db_->Get(ReadOptions(), "foo", &v)); ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), "foo", &v));
ASSERT_EQ("v5", v); ASSERT_EQ("v5", v);
// Write something. If sequence number was not recovered properly, // Write something. If sequence number was not recovered properly,
// it will be hidden by an earlier write. // it will be hidden by an earlier write.
ASSERT_OK(db_->Put(WriteOptions(), "foo", "v6")); ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v6"));
ASSERT_OK(db_->Get(ReadOptions(), "foo", &v)); ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), "foo", &v));
ASSERT_EQ("v6", v); ASSERT_EQ("v6", v);
Reopen(); Reopen();
ASSERT_OK(db_->Get(ReadOptions(), "foo", &v)); ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), "foo", &v));
ASSERT_EQ("v6", v); ASSERT_EQ("v6", v);
} }
TEST(CorruptionTest, CorruptedDescriptor) { TEST_F(CorruptionTest, CorruptedDescriptor) {
ASSERT_OK(db_->Put(WriteOptions(), "foo", "hello")); ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "hello"));
DBImpl* dbi = reinterpret_cast<DBImpl*>(db_); DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
dbi->TEST_CompactMemTable(); dbi->TEST_CompactMemTable();
dbi->TEST_CompactRange(0, nullptr, nullptr); dbi->TEST_CompactRange(0, nullptr, nullptr);
@ -301,11 +302,11 @@ TEST(CorruptionTest, CorruptedDescriptor) {
RepairDB(); RepairDB();
Reopen(); Reopen();
std::string v; std::string v;
ASSERT_OK(db_->Get(ReadOptions(), "foo", &v)); ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), "foo", &v));
ASSERT_EQ("hello", v); ASSERT_EQ("hello", v);
} }
TEST(CorruptionTest, CompactionInputError) { TEST_F(CorruptionTest, CompactionInputError) {
Build(10); Build(10);
DBImpl* dbi = reinterpret_cast<DBImpl*>(db_); DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
dbi->TEST_CompactMemTable(); dbi->TEST_CompactMemTable();
@ -320,7 +321,7 @@ TEST(CorruptionTest, CompactionInputError) {
Check(10000, 10000); Check(10000, 10000);
} }
TEST(CorruptionTest, CompactionInputErrorParanoid) { TEST_F(CorruptionTest, CompactionInputErrorParanoid) {
options_.paranoid_checks = true; options_.paranoid_checks = true;
options_.write_buffer_size = 512 << 10; options_.write_buffer_size = 512 << 10;
Reopen(); Reopen();
@ -341,22 +342,21 @@ TEST(CorruptionTest, CompactionInputErrorParanoid) {
ASSERT_TRUE(!s.ok()) << "write did not fail in corrupted paranoid db"; ASSERT_TRUE(!s.ok()) << "write did not fail in corrupted paranoid db";
} }
TEST(CorruptionTest, UnrelatedKeys) { TEST_F(CorruptionTest, UnrelatedKeys) {
Build(10); Build(10);
DBImpl* dbi = reinterpret_cast<DBImpl*>(db_); DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
dbi->TEST_CompactMemTable(); dbi->TEST_CompactMemTable();
Corrupt(kTableFile, 100, 1); Corrupt(kTableFile, 100, 1);
std::string tmp1, tmp2; std::string tmp1, tmp2;
ASSERT_OK(db_->Put(WriteOptions(), Key(1000, &tmp1), Value(1000, &tmp2))); ASSERT_LEVELDB_OK(
db_->Put(WriteOptions(), Key(1000, &tmp1), Value(1000, &tmp2)));
std::string v; std::string v;
ASSERT_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v)); ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v));
ASSERT_EQ(Value(1000, &tmp2).ToString(), v); ASSERT_EQ(Value(1000, &tmp2).ToString(), v);
dbi->TEST_CompactMemTable(); dbi->TEST_CompactMemTable();
ASSERT_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v)); ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v));
ASSERT_EQ(Value(1000, &tmp2).ToString(), v); ASSERT_EQ(Value(1000, &tmp2).ToString(), v);
} }
} // namespace leveldb } // namespace leveldb
int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }

View File

@ -4,11 +4,10 @@
#include "db/db_impl.h" #include "db/db_impl.h"
#include <stdint.h>
#include <stdio.h>
#include <algorithm> #include <algorithm>
#include <atomic> #include <atomic>
#include <cstdint>
#include <cstdio>
#include <set> #include <set>
#include <string> #include <string>
#include <vector> #include <vector>
@ -197,6 +196,9 @@ Status DBImpl::NewDB() {
std::string record; std::string record;
new_db.EncodeTo(&record); new_db.EncodeTo(&record);
s = log.AddRecord(record); s = log.AddRecord(record);
if (s.ok()) {
s = file->Sync();
}
if (s.ok()) { if (s.ok()) {
s = file->Close(); s = file->Close();
} }
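The Sync() added above forces the manifest's initial record to stable storage before the file is closed and before SetCurrentFile points CURRENT at it, the likely motivation being that a crash immediately afterwards should not leave CURRENT referencing a manifest whose first record never reached disk. A sketch of the same append-sync-close ordering, assuming LevelDB's WritableFile interface and keeping error handling minimal:

#include "leveldb/env.h"
#include "leveldb/slice.h"
#include "leveldb/status.h"

// Write one record durably: append it, force it to disk, then close the file.
leveldb::Status WriteDurably(leveldb::WritableFile* file,
                             const leveldb::Slice& record) {
  leveldb::Status s = file->Append(record);
  if (s.ok()) {
    s = file->Sync();   // the step the diff inserts before Close()
  }
  if (s.ok()) {
    s = file->Close();
  }
  return s;
}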
@ -206,7 +208,7 @@ Status DBImpl::NewDB() {
// Make "CURRENT" file that points to the new manifest file. // Make "CURRENT" file that points to the new manifest file.
s = SetCurrentFile(env_, dbname_, 1); s = SetCurrentFile(env_, dbname_, 1);
} else { } else {
env_->DeleteFile(manifest); env_->RemoveFile(manifest);
} }
return s; return s;
} }
@ -220,7 +222,7 @@ void DBImpl::MaybeIgnoreError(Status* s) const {
} }
} }
void DBImpl::DeleteObsoleteFiles() { void DBImpl::RemoveObsoleteFiles() {
mutex_.AssertHeld(); mutex_.AssertHeld();
if (!bg_error_.ok()) { if (!bg_error_.ok()) {
@ -282,7 +284,7 @@ void DBImpl::DeleteObsoleteFiles() {
// are therefore safe to delete while allowing other threads to proceed. // are therefore safe to delete while allowing other threads to proceed.
mutex_.Unlock(); mutex_.Unlock();
for (const std::string& filename : files_to_delete) { for (const std::string& filename : files_to_delete) {
env_->DeleteFile(dbname_ + "/" + filename); env_->RemoveFile(dbname_ + "/" + filename);
} }
mutex_.Lock(); mutex_.Lock();
} }
@ -302,6 +304,8 @@ Status DBImpl::Recover(VersionEdit* edit, bool* save_manifest) {
if (!env_->FileExists(CurrentFileName(dbname_))) { if (!env_->FileExists(CurrentFileName(dbname_))) {
if (options_.create_if_missing) { if (options_.create_if_missing) {
Log(options_.info_log, "Creating DB %s since it was missing.",
dbname_.c_str());
s = NewDB(); s = NewDB();
if (!s.ok()) { if (!s.ok()) {
return s; return s;
@ -351,8 +355,8 @@ Status DBImpl::Recover(VersionEdit* edit, bool* save_manifest) {
} }
if (!expected.empty()) { if (!expected.empty()) {
char buf[50]; char buf[50];
snprintf(buf, sizeof(buf), "%d missing files; e.g.", std::snprintf(buf, sizeof(buf), "%d missing files; e.g.",
static_cast<int>(expected.size())); static_cast<int>(expected.size()));
return Status::Corruption(buf, TableFileName(dbname_, *(expected.begin()))); return Status::Corruption(buf, TableFileName(dbname_, *(expected.begin())));
} }
@ -569,7 +573,7 @@ void DBImpl::CompactMemTable() {
imm_->Unref(); imm_->Unref();
imm_ = nullptr; imm_ = nullptr;
has_imm_.store(false, std::memory_order_release); has_imm_.store(false, std::memory_order_release);
DeleteObsoleteFiles(); RemoveObsoleteFiles();
} else { } else {
RecordBackgroundError(s); RecordBackgroundError(s);
} }
@ -729,7 +733,7 @@ void DBImpl::BackgroundCompaction() {
// Move file to next level // Move file to next level
assert(c->num_input_files(0) == 1); assert(c->num_input_files(0) == 1);
FileMetaData* f = c->input(0, 0); FileMetaData* f = c->input(0, 0);
c->edit()->DeleteFile(c->level(), f->number); c->edit()->RemoveFile(c->level(), f->number);
c->edit()->AddFile(c->level() + 1, f->number, f->file_size, f->smallest, c->edit()->AddFile(c->level() + 1, f->number, f->file_size, f->smallest,
f->largest); f->largest);
status = versions_->LogAndApply(c->edit(), &mutex_); status = versions_->LogAndApply(c->edit(), &mutex_);
@ -749,7 +753,7 @@ void DBImpl::BackgroundCompaction() {
} }
CleanupCompaction(compact); CleanupCompaction(compact);
c->ReleaseInputs(); c->ReleaseInputs();
DeleteObsoleteFiles(); RemoveObsoleteFiles();
} }
delete c; delete c;
@ -1213,9 +1217,9 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* updates) {
uint64_t last_sequence = versions_->LastSequence(); uint64_t last_sequence = versions_->LastSequence();
Writer* last_writer = &w; Writer* last_writer = &w;
if (status.ok() && updates != nullptr) { // nullptr batch is for compactions if (status.ok() && updates != nullptr) { // nullptr batch is for compactions
WriteBatch* updates = BuildBatchGroup(&last_writer); WriteBatch* write_batch = BuildBatchGroup(&last_writer);
WriteBatchInternal::SetSequence(updates, last_sequence + 1); WriteBatchInternal::SetSequence(write_batch, last_sequence + 1);
last_sequence += WriteBatchInternal::Count(updates); last_sequence += WriteBatchInternal::Count(write_batch);
// Add to log and apply to memtable. We can release the lock // Add to log and apply to memtable. We can release the lock
// during this phase since &w is currently responsible for logging // during this phase since &w is currently responsible for logging
@ -1223,7 +1227,7 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* updates) {
// into mem_. // into mem_.
{ {
mutex_.Unlock(); mutex_.Unlock();
status = log_->AddRecord(WriteBatchInternal::Contents(updates)); status = log_->AddRecord(WriteBatchInternal::Contents(write_batch));
bool sync_error = false; bool sync_error = false;
if (status.ok() && options.sync) { if (status.ok() && options.sync) {
status = logfile_->Sync(); status = logfile_->Sync();
@ -1232,7 +1236,7 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* updates) {
} }
} }
if (status.ok()) { if (status.ok()) {
status = WriteBatchInternal::InsertInto(updates, mem_); status = WriteBatchInternal::InsertInto(write_batch, mem_);
} }
mutex_.Lock(); mutex_.Lock();
if (sync_error) { if (sync_error) {
@ -1242,7 +1246,7 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* updates) {
RecordBackgroundError(status); RecordBackgroundError(status);
} }
} }
if (updates == tmp_batch_) tmp_batch_->Clear(); if (write_batch == tmp_batch_) tmp_batch_->Clear();
versions_->SetLastSequence(last_sequence); versions_->SetLastSequence(last_sequence);
} }
@ -1397,26 +1401,26 @@ bool DBImpl::GetProperty(const Slice& property, std::string* value) {
return false; return false;
} else { } else {
char buf[100]; char buf[100];
snprintf(buf, sizeof(buf), "%d", std::snprintf(buf, sizeof(buf), "%d",
versions_->NumLevelFiles(static_cast<int>(level))); versions_->NumLevelFiles(static_cast<int>(level)));
*value = buf; *value = buf;
return true; return true;
} }
} else if (in == "stats") { } else if (in == "stats") {
char buf[200]; char buf[200];
snprintf(buf, sizeof(buf), std::snprintf(buf, sizeof(buf),
" Compactions\n" " Compactions\n"
"Level Files Size(MB) Time(sec) Read(MB) Write(MB)\n" "Level Files Size(MB) Time(sec) Read(MB) Write(MB)\n"
"--------------------------------------------------\n"); "--------------------------------------------------\n");
value->append(buf); value->append(buf);
for (int level = 0; level < config::kNumLevels; level++) { for (int level = 0; level < config::kNumLevels; level++) {
int files = versions_->NumLevelFiles(level); int files = versions_->NumLevelFiles(level);
if (stats_[level].micros > 0 || files > 0) { if (stats_[level].micros > 0 || files > 0) {
snprintf(buf, sizeof(buf), "%3d %8d %8.0f %9.0f %8.0f %9.0f\n", level, std::snprintf(buf, sizeof(buf), "%3d %8d %8.0f %9.0f %8.0f %9.0f\n",
files, versions_->NumLevelBytes(level) / 1048576.0, level, files, versions_->NumLevelBytes(level) / 1048576.0,
stats_[level].micros / 1e6, stats_[level].micros / 1e6,
stats_[level].bytes_read / 1048576.0, stats_[level].bytes_read / 1048576.0,
stats_[level].bytes_written / 1048576.0); stats_[level].bytes_written / 1048576.0);
value->append(buf); value->append(buf);
} }
} }
@ -1433,8 +1437,8 @@ bool DBImpl::GetProperty(const Slice& property, std::string* value) {
total_usage += imm_->ApproximateMemoryUsage(); total_usage += imm_->ApproximateMemoryUsage();
} }
char buf[50]; char buf[50];
snprintf(buf, sizeof(buf), "%llu", std::snprintf(buf, sizeof(buf), "%llu",
static_cast<unsigned long long>(total_usage)); static_cast<unsigned long long>(total_usage));
value->append(buf); value->append(buf);
return true; return true;
} }
@ -1506,7 +1510,7 @@ Status DB::Open(const Options& options, const std::string& dbname, DB** dbptr) {
s = impl->versions_->LogAndApply(&edit, &impl->mutex_); s = impl->versions_->LogAndApply(&edit, &impl->mutex_);
} }
if (s.ok()) { if (s.ok()) {
impl->DeleteObsoleteFiles(); impl->RemoveObsoleteFiles();
impl->MaybeScheduleCompaction(); impl->MaybeScheduleCompaction();
} }
impl->mutex_.Unlock(); impl->mutex_.Unlock();
@ -1539,15 +1543,15 @@ Status DestroyDB(const std::string& dbname, const Options& options) {
for (size_t i = 0; i < filenames.size(); i++) { for (size_t i = 0; i < filenames.size(); i++) {
if (ParseFileName(filenames[i], &number, &type) && if (ParseFileName(filenames[i], &number, &type) &&
type != kDBLockFile) { // Lock file will be deleted at end type != kDBLockFile) { // Lock file will be deleted at end
Status del = env->DeleteFile(dbname + "/" + filenames[i]); Status del = env->RemoveFile(dbname + "/" + filenames[i]);
if (result.ok() && !del.ok()) { if (result.ok() && !del.ok()) {
result = del; result = del;
} }
} }
} }
env->UnlockFile(lock); // Ignore error since state is already gone env->UnlockFile(lock); // Ignore error since state is already gone
env->DeleteFile(lockname); env->RemoveFile(lockname);
env->DeleteDir(dbname); // Ignore error in case dir contains other files env->RemoveDir(dbname); // Ignore error in case dir contains other files
} }
return result; return result;
} }

View File

@ -116,7 +116,7 @@ class DBImpl : public DB {
void MaybeIgnoreError(Status* s) const; void MaybeIgnoreError(Status* s) const;
// Delete any unneeded files and stale in-memory entries. // Delete any unneeded files and stale in-memory entries.
void DeleteObsoleteFiles() EXCLUSIVE_LOCKS_REQUIRED(mutex_); void RemoveObsoleteFiles() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
// Compact the in-memory write buffer to disk. Switches to a new // Compact the in-memory write buffer to disk. Switches to a new
// log-file/memtable and writes a new descriptor iff successful. // log-file/memtable and writes a new descriptor iff successful.

View File

@ -21,9 +21,9 @@ static void DumpInternalIter(Iterator* iter) {
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
ParsedInternalKey k; ParsedInternalKey k;
if (!ParseInternalKey(iter->key(), &k)) { if (!ParseInternalKey(iter->key(), &k)) {
fprintf(stderr, "Corrupt '%s'\n", EscapeString(iter->key()).c_str()); std::fprintf(stderr, "Corrupt '%s'\n", EscapeString(iter->key()).c_str());
} else { } else {
fprintf(stderr, "@ '%s'\n", k.DebugString().c_str()); std::fprintf(stderr, "@ '%s'\n", k.DebugString().c_str());
} }
} }
} }

View File

@ -5,7 +5,7 @@
#ifndef STORAGE_LEVELDB_DB_DB_ITER_H_ #ifndef STORAGE_LEVELDB_DB_DB_ITER_H_
#define STORAGE_LEVELDB_DB_DB_ITER_H_ #define STORAGE_LEVELDB_DB_DB_ITER_H_
#include <stdint.h> #include <cstdint>
#include "db/dbformat.h" #include "db/dbformat.h"
#include "leveldb/db.h" #include "leveldb/db.h"

File diff suppressed because it is too large

View File

@ -4,8 +4,7 @@
#include "db/dbformat.h" #include "db/dbformat.h"
#include <stdio.h> #include <cstdio>
#include <sstream> #include <sstream>
#include "port/port.h" #include "port/port.h"
@ -127,7 +126,7 @@ LookupKey::LookupKey(const Slice& user_key, SequenceNumber s) {
start_ = dst; start_ = dst;
dst = EncodeVarint32(dst, usize + 8); dst = EncodeVarint32(dst, usize + 8);
kstart_ = dst; kstart_ = dst;
memcpy(dst, user_key.data(), usize); std::memcpy(dst, user_key.data(), usize);
dst += usize; dst += usize;
EncodeFixed64(dst, PackSequenceAndType(s, kValueTypeForSeek)); EncodeFixed64(dst, PackSequenceAndType(s, kValueTypeForSeek));
dst += 8; dst += 8;

View File

@ -3,8 +3,9 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors. // found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "db/dbformat.h" #include "db/dbformat.h"
#include "gtest/gtest.h"
#include "util/logging.h" #include "util/logging.h"
#include "util/testharness.h"
namespace leveldb { namespace leveldb {
@ -41,8 +42,6 @@ static void TestKey(const std::string& key, uint64_t seq, ValueType vt) {
ASSERT_TRUE(!ParseInternalKey(Slice("bar"), &decoded)); ASSERT_TRUE(!ParseInternalKey(Slice("bar"), &decoded));
} }
class FormatTest {};
TEST(FormatTest, InternalKey_EncodeDecode) { TEST(FormatTest, InternalKey_EncodeDecode) {
const char* keys[] = {"", "k", "hello", "longggggggggggggggggggggg"}; const char* keys[] = {"", "k", "hello", "longggggggggggggggggggggg"};
const uint64_t seq[] = {1, const uint64_t seq[] = {1,
@ -127,5 +126,3 @@ TEST(FormatTest, InternalKeyDebugString) {
} }
} // namespace leveldb } // namespace leveldb
int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }

View File

@ -4,7 +4,7 @@
#include "leveldb/dumpfile.h" #include "leveldb/dumpfile.h"
#include <stdio.h> #include <cstdio>
#include "db/dbformat.h" #include "db/dbformat.h"
#include "db/filename.h" #include "db/filename.h"

View File

@ -9,6 +9,7 @@
#include <map> #include <map>
#include <set> #include <set>
#include "gtest/gtest.h"
#include "db/db_impl.h" #include "db/db_impl.h"
#include "db/filename.h" #include "db/filename.h"
#include "db/log_format.h" #include "db/log_format.h"
@ -22,7 +23,6 @@
#include "port/thread_annotations.h" #include "port/thread_annotations.h"
#include "util/logging.h" #include "util/logging.h"
#include "util/mutexlock.h" #include "util/mutexlock.h"
#include "util/testharness.h"
#include "util/testutil.h" #include "util/testutil.h"
namespace leveldb { namespace leveldb {
@ -72,7 +72,7 @@ Status Truncate(const std::string& filename, uint64_t length) {
if (s.ok()) { if (s.ok()) {
s = env->RenameFile(tmp_name, filename); s = env->RenameFile(tmp_name, filename);
} else { } else {
env->DeleteFile(tmp_name); env->RemoveFile(tmp_name);
} }
} }
} }
@ -133,12 +133,12 @@ class FaultInjectionTestEnv : public EnvWrapper {
WritableFile** result) override; WritableFile** result) override;
Status NewAppendableFile(const std::string& fname, Status NewAppendableFile(const std::string& fname,
WritableFile** result) override; WritableFile** result) override;
Status DeleteFile(const std::string& f) override; Status RemoveFile(const std::string& f) override;
Status RenameFile(const std::string& s, const std::string& t) override; Status RenameFile(const std::string& s, const std::string& t) override;
void WritableFileClosed(const FileState& state); void WritableFileClosed(const FileState& state);
Status DropUnsyncedFileData(); Status DropUnsyncedFileData();
Status DeleteFilesCreatedAfterLastDirSync(); Status RemoveFilesCreatedAfterLastDirSync();
void DirWasSynced(); void DirWasSynced();
bool IsFileCreatedSinceLastDirSync(const std::string& filename); bool IsFileCreatedSinceLastDirSync(const std::string& filename);
void ResetState(); void ResetState();
@ -298,9 +298,9 @@ void FaultInjectionTestEnv::UntrackFile(const std::string& f) {
new_files_since_last_dir_sync_.erase(f); new_files_since_last_dir_sync_.erase(f);
} }
Status FaultInjectionTestEnv::DeleteFile(const std::string& f) { Status FaultInjectionTestEnv::RemoveFile(const std::string& f) {
Status s = EnvWrapper::DeleteFile(f); Status s = EnvWrapper::RemoveFile(f);
ASSERT_OK(s); EXPECT_LEVELDB_OK(s);
if (s.ok()) { if (s.ok()) {
UntrackFile(f); UntrackFile(f);
} }
@ -335,17 +335,17 @@ void FaultInjectionTestEnv::ResetState() {
SetFilesystemActive(true); SetFilesystemActive(true);
} }
Status FaultInjectionTestEnv::DeleteFilesCreatedAfterLastDirSync() { Status FaultInjectionTestEnv::RemoveFilesCreatedAfterLastDirSync() {
// Because DeleteFile access this container make a copy to avoid deadlock // Because RemoveFile access this container make a copy to avoid deadlock
mutex_.Lock(); mutex_.Lock();
std::set<std::string> new_files(new_files_since_last_dir_sync_.begin(), std::set<std::string> new_files(new_files_since_last_dir_sync_.begin(),
new_files_since_last_dir_sync_.end()); new_files_since_last_dir_sync_.end());
mutex_.Unlock(); mutex_.Unlock();
Status status; Status status;
for (const auto& new_file : new_files) { for (const auto& new_file : new_files) {
Status delete_status = DeleteFile(new_file); Status remove_status = RemoveFile(new_file);
if (!delete_status.ok() && status.ok()) { if (!remove_status.ok() && status.ok()) {
status = std::move(delete_status); status = std::move(remove_status);
} }
} }
return status; return status;
@ -361,7 +361,7 @@ Status FileState::DropUnsyncedData() const {
return Truncate(filename_, sync_pos); return Truncate(filename_, sync_pos);
} }
class FaultInjectionTest { class FaultInjectionTest : public testing::Test {
public: public:
enum ExpectedVerifResult { VAL_EXPECT_NO_ERROR, VAL_EXPECT_ERROR }; enum ExpectedVerifResult { VAL_EXPECT_NO_ERROR, VAL_EXPECT_ERROR };
enum ResetMethod { RESET_DROP_UNSYNCED_DATA, RESET_DELETE_UNSYNCED_FILES }; enum ResetMethod { RESET_DROP_UNSYNCED_DATA, RESET_DELETE_UNSYNCED_FILES };
@ -376,7 +376,7 @@ class FaultInjectionTest {
: env_(new FaultInjectionTestEnv), : env_(new FaultInjectionTestEnv),
tiny_cache_(NewLRUCache(100)), tiny_cache_(NewLRUCache(100)),
db_(nullptr) { db_(nullptr) {
dbname_ = test::TmpDir() + "/fault_test"; dbname_ = testing::TempDir() + "fault_test";
DestroyDB(dbname_, Options()); // Destroy any db from earlier run DestroyDB(dbname_, Options()); // Destroy any db from earlier run
options_.reuse_logs = true; options_.reuse_logs = true;
options_.env = env_; options_.env = env_;
@ -402,7 +402,7 @@ class FaultInjectionTest {
batch.Clear(); batch.Clear();
batch.Put(key, Value(i, &value_space)); batch.Put(key, Value(i, &value_space));
WriteOptions options; WriteOptions options;
ASSERT_OK(db_->Write(options, &batch)); ASSERT_LEVELDB_OK(db_->Write(options, &batch));
} }
} }
@ -424,10 +424,10 @@ class FaultInjectionTest {
s = ReadValue(i, &val); s = ReadValue(i, &val);
if (expected == VAL_EXPECT_NO_ERROR) { if (expected == VAL_EXPECT_NO_ERROR) {
if (s.ok()) { if (s.ok()) {
ASSERT_EQ(value_space, val); EXPECT_EQ(value_space, val);
} }
} else if (s.ok()) { } else if (s.ok()) {
fprintf(stderr, "Expected an error at %d, but was OK\n", i); std::fprintf(stderr, "Expected an error at %d, but was OK\n", i);
s = Status::IOError(dbname_, "Expected value error:"); s = Status::IOError(dbname_, "Expected value error:");
} else { } else {
s = Status::OK(); // An expected error s = Status::OK(); // An expected error
@ -439,7 +439,7 @@ class FaultInjectionTest {
// Return the ith key // Return the ith key
Slice Key(int i, std::string* storage) const { Slice Key(int i, std::string* storage) const {
char buf[100]; char buf[100];
snprintf(buf, sizeof(buf), "%016d", i); std::snprintf(buf, sizeof(buf), "%016d", i);
storage->assign(buf, strlen(buf)); storage->assign(buf, strlen(buf));
return Slice(*storage); return Slice(*storage);
} }
@ -465,7 +465,7 @@ class FaultInjectionTest {
void DeleteAllData() { void DeleteAllData() {
Iterator* iter = db_->NewIterator(ReadOptions()); Iterator* iter = db_->NewIterator(ReadOptions());
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
ASSERT_OK(db_->Delete(WriteOptions(), iter->key())); ASSERT_LEVELDB_OK(db_->Delete(WriteOptions(), iter->key()));
} }
delete iter; delete iter;
@ -474,10 +474,10 @@ class FaultInjectionTest {
void ResetDBState(ResetMethod reset_method) { void ResetDBState(ResetMethod reset_method) {
switch (reset_method) { switch (reset_method) {
case RESET_DROP_UNSYNCED_DATA: case RESET_DROP_UNSYNCED_DATA:
ASSERT_OK(env_->DropUnsyncedFileData()); ASSERT_LEVELDB_OK(env_->DropUnsyncedFileData());
break; break;
case RESET_DELETE_UNSYNCED_FILES: case RESET_DELETE_UNSYNCED_FILES:
ASSERT_OK(env_->DeleteFilesCreatedAfterLastDirSync()); ASSERT_LEVELDB_OK(env_->RemoveFilesCreatedAfterLastDirSync());
break; break;
default: default:
assert(false); assert(false);
@ -496,10 +496,11 @@ class FaultInjectionTest {
env_->SetFilesystemActive(false); env_->SetFilesystemActive(false);
CloseDB(); CloseDB();
ResetDBState(reset_method); ResetDBState(reset_method);
ASSERT_OK(OpenDB()); ASSERT_LEVELDB_OK(OpenDB());
ASSERT_OK(Verify(0, num_pre_sync, FaultInjectionTest::VAL_EXPECT_NO_ERROR)); ASSERT_LEVELDB_OK(
ASSERT_OK(Verify(num_pre_sync, num_post_sync, Verify(0, num_pre_sync, FaultInjectionTest::VAL_EXPECT_NO_ERROR));
FaultInjectionTest::VAL_EXPECT_ERROR)); ASSERT_LEVELDB_OK(Verify(num_pre_sync, num_post_sync,
FaultInjectionTest::VAL_EXPECT_ERROR));
} }
void NoWriteTestPreFault() {} void NoWriteTestPreFault() {}
@ -507,12 +508,12 @@ class FaultInjectionTest {
void NoWriteTestReopenWithFault(ResetMethod reset_method) { void NoWriteTestReopenWithFault(ResetMethod reset_method) {
CloseDB(); CloseDB();
ResetDBState(reset_method); ResetDBState(reset_method);
ASSERT_OK(OpenDB()); ASSERT_LEVELDB_OK(OpenDB());
} }
void DoTest() { void DoTest() {
Random rnd(0); Random rnd(0);
ASSERT_OK(OpenDB()); ASSERT_LEVELDB_OK(OpenDB());
for (size_t idx = 0; idx < kNumIterations; idx++) { for (size_t idx = 0; idx < kNumIterations; idx++) {
int num_pre_sync = rnd.Uniform(kMaxNumValues); int num_pre_sync = rnd.Uniform(kMaxNumValues);
int num_post_sync = rnd.Uniform(kMaxNumValues); int num_post_sync = rnd.Uniform(kMaxNumValues);
@ -536,16 +537,14 @@ class FaultInjectionTest {
} }
}; };
TEST(FaultInjectionTest, FaultTestNoLogReuse) { TEST_F(FaultInjectionTest, FaultTestNoLogReuse) {
ReuseLogs(false); ReuseLogs(false);
DoTest(); DoTest();
} }
TEST(FaultInjectionTest, FaultTestWithLogReuse) { TEST_F(FaultInjectionTest, FaultTestWithLogReuse) {
ReuseLogs(true); ReuseLogs(true);
DoTest(); DoTest();
} }
} // namespace leveldb } // namespace leveldb
int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }

View File

@ -4,8 +4,8 @@
#include "db/filename.h" #include "db/filename.h"
#include <ctype.h> #include <cassert>
#include <stdio.h> #include <cstdio>
#include "db/dbformat.h" #include "db/dbformat.h"
#include "leveldb/env.h" #include "leveldb/env.h"
@ -20,8 +20,8 @@ Status WriteStringToFileSync(Env* env, const Slice& data,
static std::string MakeFileName(const std::string& dbname, uint64_t number, static std::string MakeFileName(const std::string& dbname, uint64_t number,
const char* suffix) { const char* suffix) {
char buf[100]; char buf[100];
snprintf(buf, sizeof(buf), "/%06llu.%s", std::snprintf(buf, sizeof(buf), "/%06llu.%s",
static_cast<unsigned long long>(number), suffix); static_cast<unsigned long long>(number), suffix);
return dbname + buf; return dbname + buf;
} }
@ -43,8 +43,8 @@ std::string SSTTableFileName(const std::string& dbname, uint64_t number) {
std::string DescriptorFileName(const std::string& dbname, uint64_t number) { std::string DescriptorFileName(const std::string& dbname, uint64_t number) {
assert(number > 0); assert(number > 0);
char buf[100]; char buf[100];
snprintf(buf, sizeof(buf), "/MANIFEST-%06llu", std::snprintf(buf, sizeof(buf), "/MANIFEST-%06llu",
static_cast<unsigned long long>(number)); static_cast<unsigned long long>(number));
return dbname + buf; return dbname + buf;
} }
@ -133,7 +133,7 @@ Status SetCurrentFile(Env* env, const std::string& dbname,
s = env->RenameFile(tmp, CurrentFileName(dbname)); s = env->RenameFile(tmp, CurrentFileName(dbname));
} }
if (!s.ok()) { if (!s.ok()) {
env->DeleteFile(tmp); env->RemoveFile(tmp);
} }
return s; return s;
} }

View File

@ -7,8 +7,7 @@
#ifndef STORAGE_LEVELDB_DB_FILENAME_H_ #ifndef STORAGE_LEVELDB_DB_FILENAME_H_
#define STORAGE_LEVELDB_DB_FILENAME_H_ #define STORAGE_LEVELDB_DB_FILENAME_H_
#include <stdint.h> #include <cstdint>
#include <string> #include <string>
#include "leveldb/slice.h" #include "leveldb/slice.h"

View File

@ -4,15 +4,13 @@
#include "db/filename.h" #include "db/filename.h"
#include "gtest/gtest.h"
#include "db/dbformat.h" #include "db/dbformat.h"
#include "port/port.h" #include "port/port.h"
#include "util/logging.h" #include "util/logging.h"
#include "util/testharness.h"
namespace leveldb { namespace leveldb {
class FileNameTest {};
TEST(FileNameTest, Parse) { TEST(FileNameTest, Parse) {
Slice db; Slice db;
FileType type; FileType type;
@ -127,5 +125,3 @@ TEST(FileNameTest, Construction) {
} }
} // namespace leveldb } // namespace leveldb
int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors. // found in the LICENSE file. See the AUTHORS file for names of contributors.
#include <stdio.h> #include <cstdio>
#include "leveldb/dumpfile.h" #include "leveldb/dumpfile.h"
#include "leveldb/env.h" #include "leveldb/env.h"
@ -28,7 +28,7 @@ bool HandleDumpCommand(Env* env, char** files, int num) {
for (int i = 0; i < num; i++) { for (int i = 0; i < num; i++) {
Status s = DumpFile(env, files[i], &printer); Status s = DumpFile(env, files[i], &printer);
if (!s.ok()) { if (!s.ok()) {
fprintf(stderr, "%s\n", s.ToString().c_str()); std::fprintf(stderr, "%s\n", s.ToString().c_str());
ok = false; ok = false;
} }
} }
@ -39,9 +39,10 @@ bool HandleDumpCommand(Env* env, char** files, int num) {
} // namespace leveldb } // namespace leveldb
static void Usage() { static void Usage() {
fprintf(stderr, std::fprintf(
"Usage: leveldbutil command...\n" stderr,
" dump files... -- dump contents of specified files\n"); "Usage: leveldbutil command...\n"
" dump files... -- dump contents of specified files\n");
} }
int main(int argc, char** argv) { int main(int argc, char** argv) {

View File

@ -4,7 +4,7 @@
#include "db/log_reader.h" #include "db/log_reader.h"
#include <stdio.h> #include <cstdio>
#include "leveldb/env.h" #include "leveldb/env.h"
#include "util/coding.h" #include "util/coding.h"
@ -160,7 +160,7 @@ bool Reader::ReadRecord(Slice* record, std::string* scratch) {
default: { default: {
char buf[40]; char buf[40];
snprintf(buf, sizeof(buf), "unknown record type %u", record_type); std::snprintf(buf, sizeof(buf), "unknown record type %u", record_type);
ReportCorruption( ReportCorruption(
(fragment.size() + (in_fragmented_record ? scratch->size() : 0)), (fragment.size() + (in_fragmented_record ? scratch->size() : 0)),
buf); buf);

View File

@ -5,7 +5,7 @@
#ifndef STORAGE_LEVELDB_DB_LOG_READER_H_ #ifndef STORAGE_LEVELDB_DB_LOG_READER_H_
#define STORAGE_LEVELDB_DB_LOG_READER_H_ #define STORAGE_LEVELDB_DB_LOG_READER_H_
#include <stdint.h> #include <cstdint>
#include "db/log_format.h" #include "db/log_format.h"
#include "leveldb/slice.h" #include "leveldb/slice.h"
@ -24,7 +24,7 @@ class Reader {
public: public:
virtual ~Reporter(); virtual ~Reporter();
// Some corruption was detected. "size" is the approximate number // Some corruption was detected. "bytes" is the approximate number
// of bytes dropped due to the corruption. // of bytes dropped due to the corruption.
virtual void Corruption(size_t bytes, const Status& status) = 0; virtual void Corruption(size_t bytes, const Status& status) = 0;
}; };

View File

@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors. // found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "gtest/gtest.h"
#include "db/log_reader.h" #include "db/log_reader.h"
#include "db/log_writer.h" #include "db/log_writer.h"
#include "leveldb/env.h" #include "leveldb/env.h"
#include "util/coding.h" #include "util/coding.h"
#include "util/crc32c.h" #include "util/crc32c.h"
#include "util/random.h" #include "util/random.h"
#include "util/testharness.h"
namespace leveldb { namespace leveldb {
namespace log { namespace log {
@ -27,7 +27,7 @@ static std::string BigString(const std::string& partial_string, size_t n) {
// Construct a string from a number // Construct a string from a number
static std::string NumberString(int n) { static std::string NumberString(int n) {
char buf[50]; char buf[50];
snprintf(buf, sizeof(buf), "%d.", n); std::snprintf(buf, sizeof(buf), "%d.", n);
return std::string(buf); return std::string(buf);
} }
@ -36,7 +36,7 @@ static std::string RandomSkewedString(int i, Random* rnd) {
return BigString(NumberString(i), rnd->Skewed(17)); return BigString(NumberString(i), rnd->Skewed(17));
} }
class LogTest { class LogTest : public testing::Test {
public: public:
LogTest() LogTest()
: reading_(false), : reading_(false),
@ -177,7 +177,7 @@ class LogTest {
StringSource() : force_error_(false), returned_partial_(false) {} StringSource() : force_error_(false), returned_partial_(false) {}
Status Read(size_t n, Slice* result, char* scratch) override { Status Read(size_t n, Slice* result, char* scratch) override {
ASSERT_TRUE(!returned_partial_) << "must not Read() after eof/error"; EXPECT_TRUE(!returned_partial_) << "must not Read() after eof/error";
if (force_error_) { if (force_error_) {
force_error_ = false; force_error_ = false;
@ -258,9 +258,9 @@ uint64_t LogTest::initial_offset_last_record_offsets_[] = {
int LogTest::num_initial_offset_records_ = int LogTest::num_initial_offset_records_ =
sizeof(LogTest::initial_offset_last_record_offsets_) / sizeof(uint64_t); sizeof(LogTest::initial_offset_last_record_offsets_) / sizeof(uint64_t);
TEST(LogTest, Empty) { ASSERT_EQ("EOF", Read()); } TEST_F(LogTest, Empty) { ASSERT_EQ("EOF", Read()); }
TEST(LogTest, ReadWrite) { TEST_F(LogTest, ReadWrite) {
Write("foo"); Write("foo");
Write("bar"); Write("bar");
Write(""); Write("");
@ -273,7 +273,7 @@ TEST(LogTest, ReadWrite) {
ASSERT_EQ("EOF", Read()); // Make sure reads at eof work ASSERT_EQ("EOF", Read()); // Make sure reads at eof work
} }
TEST(LogTest, ManyBlocks) { TEST_F(LogTest, ManyBlocks) {
for (int i = 0; i < 100000; i++) { for (int i = 0; i < 100000; i++) {
Write(NumberString(i)); Write(NumberString(i));
} }
@ -283,7 +283,7 @@ TEST(LogTest, ManyBlocks) {
ASSERT_EQ("EOF", Read()); ASSERT_EQ("EOF", Read());
} }
TEST(LogTest, Fragmentation) { TEST_F(LogTest, Fragmentation) {
Write("small"); Write("small");
Write(BigString("medium", 50000)); Write(BigString("medium", 50000));
Write(BigString("large", 100000)); Write(BigString("large", 100000));
@ -293,7 +293,7 @@ TEST(LogTest, Fragmentation) {
ASSERT_EQ("EOF", Read()); ASSERT_EQ("EOF", Read());
} }
TEST(LogTest, MarginalTrailer) { TEST_F(LogTest, MarginalTrailer) {
// Make a trailer that is exactly the same length as an empty record. // Make a trailer that is exactly the same length as an empty record.
const int n = kBlockSize - 2 * kHeaderSize; const int n = kBlockSize - 2 * kHeaderSize;
Write(BigString("foo", n)); Write(BigString("foo", n));
@ -306,7 +306,7 @@ TEST(LogTest, MarginalTrailer) {
ASSERT_EQ("EOF", Read()); ASSERT_EQ("EOF", Read());
} }
TEST(LogTest, MarginalTrailer2) { TEST_F(LogTest, MarginalTrailer2) {
// Make a trailer that is exactly the same length as an empty record. // Make a trailer that is exactly the same length as an empty record.
const int n = kBlockSize - 2 * kHeaderSize; const int n = kBlockSize - 2 * kHeaderSize;
Write(BigString("foo", n)); Write(BigString("foo", n));
@ -319,7 +319,7 @@ TEST(LogTest, MarginalTrailer2) {
ASSERT_EQ("", ReportMessage()); ASSERT_EQ("", ReportMessage());
} }
TEST(LogTest, ShortTrailer) { TEST_F(LogTest, ShortTrailer) {
const int n = kBlockSize - 2 * kHeaderSize + 4; const int n = kBlockSize - 2 * kHeaderSize + 4;
Write(BigString("foo", n)); Write(BigString("foo", n));
ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes()); ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes());
@ -331,7 +331,7 @@ TEST(LogTest, ShortTrailer) {
ASSERT_EQ("EOF", Read()); ASSERT_EQ("EOF", Read());
} }
TEST(LogTest, AlignedEof) { TEST_F(LogTest, AlignedEof) {
const int n = kBlockSize - 2 * kHeaderSize + 4; const int n = kBlockSize - 2 * kHeaderSize + 4;
Write(BigString("foo", n)); Write(BigString("foo", n));
ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes()); ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes());
@ -339,7 +339,7 @@ TEST(LogTest, AlignedEof) {
ASSERT_EQ("EOF", Read()); ASSERT_EQ("EOF", Read());
} }
TEST(LogTest, OpenForAppend) { TEST_F(LogTest, OpenForAppend) {
Write("hello"); Write("hello");
ReopenForAppend(); ReopenForAppend();
Write("world"); Write("world");
@ -348,7 +348,7 @@ TEST(LogTest, OpenForAppend) {
ASSERT_EQ("EOF", Read()); ASSERT_EQ("EOF", Read());
} }
TEST(LogTest, RandomRead) { TEST_F(LogTest, RandomRead) {
const int N = 500; const int N = 500;
Random write_rnd(301); Random write_rnd(301);
for (int i = 0; i < N; i++) { for (int i = 0; i < N; i++) {
@ -363,7 +363,7 @@ TEST(LogTest, RandomRead) {
// Tests of all the error paths in log_reader.cc follow: // Tests of all the error paths in log_reader.cc follow:
TEST(LogTest, ReadError) { TEST_F(LogTest, ReadError) {
Write("foo"); Write("foo");
ForceError(); ForceError();
ASSERT_EQ("EOF", Read()); ASSERT_EQ("EOF", Read());
@ -371,7 +371,7 @@ TEST(LogTest, ReadError) {
ASSERT_EQ("OK", MatchError("read error")); ASSERT_EQ("OK", MatchError("read error"));
} }
TEST(LogTest, BadRecordType) { TEST_F(LogTest, BadRecordType) {
Write("foo"); Write("foo");
// Type is stored in header[6] // Type is stored in header[6]
IncrementByte(6, 100); IncrementByte(6, 100);
@ -381,7 +381,7 @@ TEST(LogTest, BadRecordType) {
ASSERT_EQ("OK", MatchError("unknown record type")); ASSERT_EQ("OK", MatchError("unknown record type"));
} }
TEST(LogTest, TruncatedTrailingRecordIsIgnored) { TEST_F(LogTest, TruncatedTrailingRecordIsIgnored) {
Write("foo"); Write("foo");
ShrinkSize(4); // Drop all payload as well as a header byte ShrinkSize(4); // Drop all payload as well as a header byte
ASSERT_EQ("EOF", Read()); ASSERT_EQ("EOF", Read());
@ -390,7 +390,7 @@ TEST(LogTest, TruncatedTrailingRecordIsIgnored) {
ASSERT_EQ("", ReportMessage()); ASSERT_EQ("", ReportMessage());
} }
TEST(LogTest, BadLength) { TEST_F(LogTest, BadLength) {
const int kPayloadSize = kBlockSize - kHeaderSize; const int kPayloadSize = kBlockSize - kHeaderSize;
Write(BigString("bar", kPayloadSize)); Write(BigString("bar", kPayloadSize));
Write("foo"); Write("foo");
@ -401,7 +401,7 @@ TEST(LogTest, BadLength) {
ASSERT_EQ("OK", MatchError("bad record length")); ASSERT_EQ("OK", MatchError("bad record length"));
} }
TEST(LogTest, BadLengthAtEndIsIgnored) { TEST_F(LogTest, BadLengthAtEndIsIgnored) {
Write("foo"); Write("foo");
ShrinkSize(1); ShrinkSize(1);
ASSERT_EQ("EOF", Read()); ASSERT_EQ("EOF", Read());
@ -409,7 +409,7 @@ TEST(LogTest, BadLengthAtEndIsIgnored) {
ASSERT_EQ("", ReportMessage()); ASSERT_EQ("", ReportMessage());
} }
TEST(LogTest, ChecksumMismatch) { TEST_F(LogTest, ChecksumMismatch) {
Write("foo"); Write("foo");
IncrementByte(0, 10); IncrementByte(0, 10);
ASSERT_EQ("EOF", Read()); ASSERT_EQ("EOF", Read());
@ -417,7 +417,7 @@ TEST(LogTest, ChecksumMismatch) {
ASSERT_EQ("OK", MatchError("checksum mismatch")); ASSERT_EQ("OK", MatchError("checksum mismatch"));
} }
TEST(LogTest, UnexpectedMiddleType) { TEST_F(LogTest, UnexpectedMiddleType) {
Write("foo"); Write("foo");
SetByte(6, kMiddleType); SetByte(6, kMiddleType);
FixChecksum(0, 3); FixChecksum(0, 3);
@ -426,7 +426,7 @@ TEST(LogTest, UnexpectedMiddleType) {
ASSERT_EQ("OK", MatchError("missing start")); ASSERT_EQ("OK", MatchError("missing start"));
} }
TEST(LogTest, UnexpectedLastType) { TEST_F(LogTest, UnexpectedLastType) {
Write("foo"); Write("foo");
SetByte(6, kLastType); SetByte(6, kLastType);
FixChecksum(0, 3); FixChecksum(0, 3);
@ -435,7 +435,7 @@ TEST(LogTest, UnexpectedLastType) {
ASSERT_EQ("OK", MatchError("missing start")); ASSERT_EQ("OK", MatchError("missing start"));
} }
TEST(LogTest, UnexpectedFullType) { TEST_F(LogTest, UnexpectedFullType) {
Write("foo"); Write("foo");
Write("bar"); Write("bar");
SetByte(6, kFirstType); SetByte(6, kFirstType);
@ -446,7 +446,7 @@ TEST(LogTest, UnexpectedFullType) {
ASSERT_EQ("OK", MatchError("partial record without end")); ASSERT_EQ("OK", MatchError("partial record without end"));
} }
TEST(LogTest, UnexpectedFirstType) { TEST_F(LogTest, UnexpectedFirstType) {
Write("foo"); Write("foo");
Write(BigString("bar", 100000)); Write(BigString("bar", 100000));
SetByte(6, kFirstType); SetByte(6, kFirstType);
@ -457,7 +457,7 @@ TEST(LogTest, UnexpectedFirstType) {
ASSERT_EQ("OK", MatchError("partial record without end")); ASSERT_EQ("OK", MatchError("partial record without end"));
} }
TEST(LogTest, MissingLastIsIgnored) { TEST_F(LogTest, MissingLastIsIgnored) {
Write(BigString("bar", kBlockSize)); Write(BigString("bar", kBlockSize));
// Remove the LAST block, including header. // Remove the LAST block, including header.
ShrinkSize(14); ShrinkSize(14);
@ -466,7 +466,7 @@ TEST(LogTest, MissingLastIsIgnored) {
ASSERT_EQ(0, DroppedBytes()); ASSERT_EQ(0, DroppedBytes());
} }
TEST(LogTest, PartialLastIsIgnored) { TEST_F(LogTest, PartialLastIsIgnored) {
Write(BigString("bar", kBlockSize)); Write(BigString("bar", kBlockSize));
// Cause a bad record length in the LAST block. // Cause a bad record length in the LAST block.
ShrinkSize(1); ShrinkSize(1);
@ -475,7 +475,7 @@ TEST(LogTest, PartialLastIsIgnored) {
ASSERT_EQ(0, DroppedBytes()); ASSERT_EQ(0, DroppedBytes());
} }
TEST(LogTest, SkipIntoMultiRecord) { TEST_F(LogTest, SkipIntoMultiRecord) {
// Consider a fragmented record: // Consider a fragmented record:
// first(R1), middle(R1), last(R1), first(R2) // first(R1), middle(R1), last(R1), first(R2)
// If initial_offset points to a record after first(R1) but before first(R2) // If initial_offset points to a record after first(R1) but before first(R2)
@ -491,7 +491,7 @@ TEST(LogTest, SkipIntoMultiRecord) {
ASSERT_EQ("EOF", Read()); ASSERT_EQ("EOF", Read());
} }
TEST(LogTest, ErrorJoinsRecords) { TEST_F(LogTest, ErrorJoinsRecords) {
// Consider two fragmented records: // Consider two fragmented records:
// first(R1) last(R1) first(R2) last(R2) // first(R1) last(R1) first(R2) last(R2)
// where the middle two fragments disappear. We do not want // where the middle two fragments disappear. We do not want
@ -514,47 +514,45 @@ TEST(LogTest, ErrorJoinsRecords) {
ASSERT_GE(dropped, 2 * kBlockSize); ASSERT_GE(dropped, 2 * kBlockSize);
} }
TEST(LogTest, ReadStart) { CheckInitialOffsetRecord(0, 0); } TEST_F(LogTest, ReadStart) { CheckInitialOffsetRecord(0, 0); }
TEST(LogTest, ReadSecondOneOff) { CheckInitialOffsetRecord(1, 1); } TEST_F(LogTest, ReadSecondOneOff) { CheckInitialOffsetRecord(1, 1); }
TEST(LogTest, ReadSecondTenThousand) { CheckInitialOffsetRecord(10000, 1); } TEST_F(LogTest, ReadSecondTenThousand) { CheckInitialOffsetRecord(10000, 1); }
TEST(LogTest, ReadSecondStart) { CheckInitialOffsetRecord(10007, 1); } TEST_F(LogTest, ReadSecondStart) { CheckInitialOffsetRecord(10007, 1); }
TEST(LogTest, ReadThirdOneOff) { CheckInitialOffsetRecord(10008, 2); } TEST_F(LogTest, ReadThirdOneOff) { CheckInitialOffsetRecord(10008, 2); }
TEST(LogTest, ReadThirdStart) { CheckInitialOffsetRecord(20014, 2); } TEST_F(LogTest, ReadThirdStart) { CheckInitialOffsetRecord(20014, 2); }
TEST(LogTest, ReadFourthOneOff) { CheckInitialOffsetRecord(20015, 3); } TEST_F(LogTest, ReadFourthOneOff) { CheckInitialOffsetRecord(20015, 3); }
TEST(LogTest, ReadFourthFirstBlockTrailer) { TEST_F(LogTest, ReadFourthFirstBlockTrailer) {
CheckInitialOffsetRecord(log::kBlockSize - 4, 3); CheckInitialOffsetRecord(log::kBlockSize - 4, 3);
} }
TEST(LogTest, ReadFourthMiddleBlock) { TEST_F(LogTest, ReadFourthMiddleBlock) {
CheckInitialOffsetRecord(log::kBlockSize + 1, 3); CheckInitialOffsetRecord(log::kBlockSize + 1, 3);
} }
TEST(LogTest, ReadFourthLastBlock) { TEST_F(LogTest, ReadFourthLastBlock) {
CheckInitialOffsetRecord(2 * log::kBlockSize + 1, 3); CheckInitialOffsetRecord(2 * log::kBlockSize + 1, 3);
} }
TEST(LogTest, ReadFourthStart) { TEST_F(LogTest, ReadFourthStart) {
CheckInitialOffsetRecord( CheckInitialOffsetRecord(
2 * (kHeaderSize + 1000) + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize, 2 * (kHeaderSize + 1000) + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize,
3); 3);
} }
TEST(LogTest, ReadInitialOffsetIntoBlockPadding) { TEST_F(LogTest, ReadInitialOffsetIntoBlockPadding) {
CheckInitialOffsetRecord(3 * log::kBlockSize - 3, 5); CheckInitialOffsetRecord(3 * log::kBlockSize - 3, 5);
} }
TEST(LogTest, ReadEnd) { CheckOffsetPastEndReturnsNoRecords(0); } TEST_F(LogTest, ReadEnd) { CheckOffsetPastEndReturnsNoRecords(0); }
TEST(LogTest, ReadPastEnd) { CheckOffsetPastEndReturnsNoRecords(5); } TEST_F(LogTest, ReadPastEnd) { CheckOffsetPastEndReturnsNoRecords(5); }
} // namespace log } // namespace log
} // namespace leveldb } // namespace leveldb
int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }

View File

@ -4,7 +4,7 @@
#include "db/log_writer.h" #include "db/log_writer.h"
#include <stdint.h> #include <cstdint>
#include "leveldb/env.h" #include "leveldb/env.h"
#include "util/coding.h" #include "util/coding.h"

View File

@ -5,7 +5,7 @@
#ifndef STORAGE_LEVELDB_DB_LOG_WRITER_H_ #ifndef STORAGE_LEVELDB_DB_LOG_WRITER_H_
#define STORAGE_LEVELDB_DB_LOG_WRITER_H_ #define STORAGE_LEVELDB_DB_LOG_WRITER_H_
#include <stdint.h> #include <cstdint>
#include "db/log_format.h" #include "db/log_format.h"
#include "leveldb/slice.h" #include "leveldb/slice.h"

View File

@ -88,12 +88,12 @@ void MemTable::Add(SequenceNumber s, ValueType type, const Slice& key,
val_size; val_size;
char* buf = arena_.Allocate(encoded_len); char* buf = arena_.Allocate(encoded_len);
char* p = EncodeVarint32(buf, internal_key_size); char* p = EncodeVarint32(buf, internal_key_size);
memcpy(p, key.data(), key_size); std::memcpy(p, key.data(), key_size);
p += key_size; p += key_size;
EncodeFixed64(p, (s << 8) | type); EncodeFixed64(p, (s << 8) | type);
p += 8; p += 8;
p = EncodeVarint32(p, val_size); p = EncodeVarint32(p, val_size);
memcpy(p, value.data(), val_size); std::memcpy(p, value.data(), val_size);
assert(p + val_size == buf + encoded_len); assert(p + val_size == buf + encoded_len);
table_.Insert(buf); table_.Insert(buf);
} }

View File

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors. // found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "gtest/gtest.h"
#include "db/db_impl.h" #include "db/db_impl.h"
#include "db/filename.h" #include "db/filename.h"
#include "db/version_set.h" #include "db/version_set.h"
@ -10,15 +11,14 @@
#include "leveldb/env.h" #include "leveldb/env.h"
#include "leveldb/write_batch.h" #include "leveldb/write_batch.h"
#include "util/logging.h" #include "util/logging.h"
#include "util/testharness.h"
#include "util/testutil.h" #include "util/testutil.h"
namespace leveldb { namespace leveldb {
class RecoveryTest { class RecoveryTest : public testing::Test {
public: public:
RecoveryTest() : env_(Env::Default()), db_(nullptr) { RecoveryTest() : env_(Env::Default()), db_(nullptr) {
dbname_ = test::TmpDir() + "/recovery_test"; dbname_ = testing::TempDir() + "recovery_test";
DestroyDB(dbname_, Options()); DestroyDB(dbname_, Options());
Open(); Open();
} }
@ -63,7 +63,7 @@ class RecoveryTest {
} }
void Open(Options* options = nullptr) { void Open(Options* options = nullptr) {
ASSERT_OK(OpenWithStatus(options)); ASSERT_LEVELDB_OK(OpenWithStatus(options));
ASSERT_EQ(1, NumLogs()); ASSERT_EQ(1, NumLogs());
} }
@ -84,7 +84,8 @@ class RecoveryTest {
std::string ManifestFileName() { std::string ManifestFileName() {
std::string current; std::string current;
ASSERT_OK(ReadFileToString(env_, CurrentFileName(dbname_), &current)); EXPECT_LEVELDB_OK(
ReadFileToString(env_, CurrentFileName(dbname_), &current));
size_t len = current.size(); size_t len = current.size();
if (len > 0 && current[len - 1] == '\n') { if (len > 0 && current[len - 1] == '\n') {
current.resize(len - 1); current.resize(len - 1);
@ -94,24 +95,26 @@ class RecoveryTest {
std::string LogName(uint64_t number) { return LogFileName(dbname_, number); } std::string LogName(uint64_t number) { return LogFileName(dbname_, number); }
size_t DeleteLogFiles() { size_t RemoveLogFiles() {
// Linux allows unlinking open files, but Windows does not. // Linux allows unlinking open files, but Windows does not.
// Closing the db allows for file deletion. // Closing the db allows for file deletion.
Close(); Close();
std::vector<uint64_t> logs = GetFiles(kLogFile); std::vector<uint64_t> logs = GetFiles(kLogFile);
for (size_t i = 0; i < logs.size(); i++) { for (size_t i = 0; i < logs.size(); i++) {
ASSERT_OK(env_->DeleteFile(LogName(logs[i]))) << LogName(logs[i]); EXPECT_LEVELDB_OK(env_->RemoveFile(LogName(logs[i]))) << LogName(logs[i]);
} }
return logs.size(); return logs.size();
} }
void DeleteManifestFile() { ASSERT_OK(env_->DeleteFile(ManifestFileName())); } void RemoveManifestFile() {
ASSERT_LEVELDB_OK(env_->RemoveFile(ManifestFileName()));
}
uint64_t FirstLogFile() { return GetFiles(kLogFile)[0]; } uint64_t FirstLogFile() { return GetFiles(kLogFile)[0]; }
std::vector<uint64_t> GetFiles(FileType t) { std::vector<uint64_t> GetFiles(FileType t) {
std::vector<std::string> filenames; std::vector<std::string> filenames;
ASSERT_OK(env_->GetChildren(dbname_, &filenames)); EXPECT_LEVELDB_OK(env_->GetChildren(dbname_, &filenames));
std::vector<uint64_t> result; std::vector<uint64_t> result;
for (size_t i = 0; i < filenames.size(); i++) { for (size_t i = 0; i < filenames.size(); i++) {
uint64_t number; uint64_t number;
@ -129,7 +132,7 @@ class RecoveryTest {
uint64_t FileSize(const std::string& fname) { uint64_t FileSize(const std::string& fname) {
uint64_t result; uint64_t result;
ASSERT_OK(env_->GetFileSize(fname, &result)) << fname; EXPECT_LEVELDB_OK(env_->GetFileSize(fname, &result)) << fname;
return result; return result;
} }
@ -139,13 +142,13 @@ class RecoveryTest {
void MakeLogFile(uint64_t lognum, SequenceNumber seq, Slice key, Slice val) { void MakeLogFile(uint64_t lognum, SequenceNumber seq, Slice key, Slice val) {
std::string fname = LogFileName(dbname_, lognum); std::string fname = LogFileName(dbname_, lognum);
WritableFile* file; WritableFile* file;
ASSERT_OK(env_->NewWritableFile(fname, &file)); ASSERT_LEVELDB_OK(env_->NewWritableFile(fname, &file));
log::Writer writer(file); log::Writer writer(file);
WriteBatch batch; WriteBatch batch;
batch.Put(key, val); batch.Put(key, val);
WriteBatchInternal::SetSequence(&batch, seq); WriteBatchInternal::SetSequence(&batch, seq);
ASSERT_OK(writer.AddRecord(WriteBatchInternal::Contents(&batch))); ASSERT_LEVELDB_OK(writer.AddRecord(WriteBatchInternal::Contents(&batch)));
ASSERT_OK(file->Flush()); ASSERT_LEVELDB_OK(file->Flush());
delete file; delete file;
} }
@ -155,12 +158,13 @@ class RecoveryTest {
DB* db_; DB* db_;
}; };
TEST(RecoveryTest, ManifestReused) { TEST_F(RecoveryTest, ManifestReused) {
if (!CanAppend()) { if (!CanAppend()) {
fprintf(stderr, "skipping test because env does not support appending\n"); std::fprintf(stderr,
"skipping test because env does not support appending\n");
return; return;
} }
ASSERT_OK(Put("foo", "bar")); ASSERT_LEVELDB_OK(Put("foo", "bar"));
Close(); Close();
std::string old_manifest = ManifestFileName(); std::string old_manifest = ManifestFileName();
Open(); Open();
@ -171,12 +175,13 @@ TEST(RecoveryTest, ManifestReused) {
ASSERT_EQ("bar", Get("foo")); ASSERT_EQ("bar", Get("foo"));
} }
TEST(RecoveryTest, LargeManifestCompacted) { TEST_F(RecoveryTest, LargeManifestCompacted) {
if (!CanAppend()) { if (!CanAppend()) {
fprintf(stderr, "skipping test because env does not support appending\n"); std::fprintf(stderr,
"skipping test because env does not support appending\n");
return; return;
} }
ASSERT_OK(Put("foo", "bar")); ASSERT_LEVELDB_OK(Put("foo", "bar"));
Close(); Close();
std::string old_manifest = ManifestFileName(); std::string old_manifest = ManifestFileName();
@ -184,10 +189,10 @@ TEST(RecoveryTest, LargeManifestCompacted) {
{ {
uint64_t len = FileSize(old_manifest); uint64_t len = FileSize(old_manifest);
WritableFile* file; WritableFile* file;
ASSERT_OK(env()->NewAppendableFile(old_manifest, &file)); ASSERT_LEVELDB_OK(env()->NewAppendableFile(old_manifest, &file));
std::string zeroes(3 * 1048576 - static_cast<size_t>(len), 0); std::string zeroes(3 * 1048576 - static_cast<size_t>(len), 0);
ASSERT_OK(file->Append(zeroes)); ASSERT_LEVELDB_OK(file->Append(zeroes));
ASSERT_OK(file->Flush()); ASSERT_LEVELDB_OK(file->Flush());
delete file; delete file;
} }
@ -202,22 +207,23 @@ TEST(RecoveryTest, LargeManifestCompacted) {
ASSERT_EQ("bar", Get("foo")); ASSERT_EQ("bar", Get("foo"));
} }
TEST(RecoveryTest, NoLogFiles) { TEST_F(RecoveryTest, NoLogFiles) {
ASSERT_OK(Put("foo", "bar")); ASSERT_LEVELDB_OK(Put("foo", "bar"));
ASSERT_EQ(1, DeleteLogFiles()); ASSERT_EQ(1, RemoveLogFiles());
Open(); Open();
ASSERT_EQ("NOT_FOUND", Get("foo")); ASSERT_EQ("NOT_FOUND", Get("foo"));
Open(); Open();
ASSERT_EQ("NOT_FOUND", Get("foo")); ASSERT_EQ("NOT_FOUND", Get("foo"));
} }
TEST(RecoveryTest, LogFileReuse) { TEST_F(RecoveryTest, LogFileReuse) {
if (!CanAppend()) { if (!CanAppend()) {
fprintf(stderr, "skipping test because env does not support appending\n"); std::fprintf(stderr,
"skipping test because env does not support appending\n");
return; return;
} }
for (int i = 0; i < 2; i++) { for (int i = 0; i < 2; i++) {
ASSERT_OK(Put("foo", "bar")); ASSERT_LEVELDB_OK(Put("foo", "bar"));
if (i == 0) { if (i == 0) {
// Compact to ensure current log is empty // Compact to ensure current log is empty
CompactMemTable(); CompactMemTable();
@ -241,13 +247,13 @@ TEST(RecoveryTest, LogFileReuse) {
} }
} }
TEST(RecoveryTest, MultipleMemTables) { TEST_F(RecoveryTest, MultipleMemTables) {
// Make a large log. // Make a large log.
const int kNum = 1000; const int kNum = 1000;
for (int i = 0; i < kNum; i++) { for (int i = 0; i < kNum; i++) {
char buf[100]; char buf[100];
snprintf(buf, sizeof(buf), "%050d", i); std::snprintf(buf, sizeof(buf), "%050d", i);
ASSERT_OK(Put(buf, buf)); ASSERT_LEVELDB_OK(Put(buf, buf));
} }
ASSERT_EQ(0, NumTables()); ASSERT_EQ(0, NumTables());
Close(); Close();
@ -265,13 +271,13 @@ TEST(RecoveryTest, MultipleMemTables) {
ASSERT_NE(old_log_file, FirstLogFile()) << "must not reuse log"; ASSERT_NE(old_log_file, FirstLogFile()) << "must not reuse log";
for (int i = 0; i < kNum; i++) { for (int i = 0; i < kNum; i++) {
char buf[100]; char buf[100];
snprintf(buf, sizeof(buf), "%050d", i); std::snprintf(buf, sizeof(buf), "%050d", i);
ASSERT_EQ(buf, Get(buf)); ASSERT_EQ(buf, Get(buf));
} }
} }
TEST(RecoveryTest, MultipleLogFiles) { TEST_F(RecoveryTest, MultipleLogFiles) {
ASSERT_OK(Put("foo", "bar")); ASSERT_LEVELDB_OK(Put("foo", "bar"));
Close(); Close();
ASSERT_EQ(1, NumLogs()); ASSERT_EQ(1, NumLogs());
@ -316,15 +322,13 @@ TEST(RecoveryTest, MultipleLogFiles) {
ASSERT_EQ("there", Get("hi")); ASSERT_EQ("there", Get("hi"));
} }
TEST(RecoveryTest, ManifestMissing) { TEST_F(RecoveryTest, ManifestMissing) {
ASSERT_OK(Put("foo", "bar")); ASSERT_LEVELDB_OK(Put("foo", "bar"));
Close(); Close();
DeleteManifestFile(); RemoveManifestFile();
Status status = OpenWithStatus(); Status status = OpenWithStatus();
ASSERT_TRUE(status.IsCorruption()); ASSERT_TRUE(status.IsCorruption());
} }
} // namespace leveldb } // namespace leveldb
int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }

View File

@ -341,7 +341,7 @@ class Repairer {
} }
} }
if (!s.ok()) { if (!s.ok()) {
env_->DeleteFile(copy); env_->RemoveFile(copy);
} }
} }
@ -372,7 +372,8 @@ class Repairer {
t.meta.largest); t.meta.largest);
} }
// fprintf(stderr, "NewDescriptor:\n%s\n", edit_.DebugString().c_str()); // std::fprintf(stderr,
// "NewDescriptor:\n%s\n", edit_.DebugString().c_str());
{ {
log::Writer log(file); log::Writer log(file);
std::string record; std::string record;
@ -386,7 +387,7 @@ class Repairer {
file = nullptr; file = nullptr;
if (!status.ok()) { if (!status.ok()) {
env_->DeleteFile(tmp); env_->RemoveFile(tmp);
} else { } else {
// Discard older manifests // Discard older manifests
for (size_t i = 0; i < manifests_.size(); i++) { for (size_t i = 0; i < manifests_.size(); i++) {
@ -398,7 +399,7 @@ class Repairer {
if (status.ok()) { if (status.ok()) {
status = SetCurrentFile(env_, dbname_, 1); status = SetCurrentFile(env_, dbname_, 1);
} else { } else {
env_->DeleteFile(tmp); env_->RemoveFile(tmp);
} }
} }
return status; return status;

View File

@ -243,7 +243,7 @@ int SkipList<Key, Comparator>::RandomHeight() {
// Increase height with probability 1 in kBranching // Increase height with probability 1 in kBranching
static const unsigned int kBranching = 4; static const unsigned int kBranching = 4;
int height = 1; int height = 1;
while (height < kMaxHeight && ((rnd_.Next() % kBranching) == 0)) { while (height < kMaxHeight && rnd_.OneIn(kBranching)) {
height++; height++;
} }
assert(height > 0); assert(height > 0);
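
The skip-list change above swaps the explicit modulo test for Random::OneIn. Assuming the helper in util/random.h behaves as its name suggests, the two forms increase the height with the same 1-in-kBranching probability; a rough, self-contained sketch (toy generator, not the library's actual Random class):

#include <cstdint>

// Sketch only: a Random-like helper where OneIn(n) reduces to (Next() % n) == 0,
// so rnd_.OneIn(kBranching) keeps the original 1-in-kBranching branching odds.
class SketchRandom {
 public:
  explicit SketchRandom(uint32_t seed) : state_(seed) {}
  uint32_t Next() { return state_ = state_ * 1664525u + 1013904223u; }  // toy LCG step
  bool OneIn(uint32_t n) { return (Next() % n) == 0; }

 private:
  uint32_t state_;
};
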

View File

@ -7,13 +7,14 @@
#include <atomic> #include <atomic>
#include <set> #include <set>
#include "gtest/gtest.h"
#include "leveldb/env.h" #include "leveldb/env.h"
#include "port/port.h" #include "port/port.h"
#include "port/thread_annotations.h" #include "port/thread_annotations.h"
#include "util/arena.h" #include "util/arena.h"
#include "util/hash.h" #include "util/hash.h"
#include "util/random.h" #include "util/random.h"
#include "util/testharness.h" #include "util/testutil.h"
namespace leveldb { namespace leveldb {
@ -31,8 +32,6 @@ struct Comparator {
} }
}; };
class SkipTest {};
TEST(SkipTest, Empty) { TEST(SkipTest, Empty) {
Arena arena; Arena arena;
Comparator cmp; Comparator cmp;
@ -152,7 +151,7 @@ TEST(SkipTest, InsertAndLookup) {
// been concurrently added since the iterator started. // been concurrently added since the iterator started.
class ConcurrentTest { class ConcurrentTest {
private: private:
static const uint32_t K = 4; static constexpr uint32_t K = 4;
static uint64_t key(Key key) { return (key >> 40); } static uint64_t key(Key key) { return (key >> 40); }
static uint64_t gen(Key key) { return (key >> 8) & 0xffffffffu; } static uint64_t gen(Key key) { return (key >> 8) & 0xffffffffu; }
@ -281,7 +280,9 @@ class ConcurrentTest {
} }
} }
}; };
const uint32_t ConcurrentTest::K;
// Needed when building in C++11 mode.
constexpr uint32_t ConcurrentTest::K;
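
For context on the out-of-line definition kept above: before C++17, a static constexpr data member that is odr-used (for example, bound to a const reference) still needs exactly one out-of-line definition; from C++17 on such members are implicitly inline and the definition is redundant but harmless. A small stand-alone sketch of the rule, using a hypothetical Config class rather than the test's:

#include <algorithm>
#include <cstdint>

struct Config {
  static constexpr uint32_t kMax = 4;  // in-class declaration with initializer
};

// Needed under C++11/14 because std::min below odr-uses kMax via const reference;
// redundant from C++17 onward, where static constexpr members are implicitly inline.
constexpr uint32_t Config::kMax;

uint32_t Clamp(uint32_t value) {
  return std::min(value, Config::kMax);  // odr-uses Config::kMax
}
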
// Simple test that does single-threaded testing of the ConcurrentTest // Simple test that does single-threaded testing of the ConcurrentTest
// scaffolding. // scaffolding.
@ -345,7 +346,7 @@ static void RunConcurrent(int run) {
const int kSize = 1000; const int kSize = 1000;
for (int i = 0; i < N; i++) { for (int i = 0; i < N; i++) {
if ((i % 100) == 0) { if ((i % 100) == 0) {
fprintf(stderr, "Run %d of %d\n", i, N); std::fprintf(stderr, "Run %d of %d\n", i, N);
} }
TestState state(seed + 1); TestState state(seed + 1);
Env::Default()->Schedule(ConcurrentReader, &state); Env::Default()->Schedule(ConcurrentReader, &state);
@ -365,5 +366,3 @@ TEST(SkipTest, Concurrent4) { RunConcurrent(4); }
TEST(SkipTest, Concurrent5) { RunConcurrent(5); } TEST(SkipTest, Concurrent5) { RunConcurrent(5); }
} // namespace leveldb } // namespace leveldb
int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }

View File

@ -25,7 +25,7 @@ class SnapshotImpl : public Snapshot {
friend class SnapshotList; friend class SnapshotList;
// SnapshotImpl is kept in a doubly-linked circular list. The SnapshotList // SnapshotImpl is kept in a doubly-linked circular list. The SnapshotList
// implementation operates on the next/previous fields direcly. // implementation operates on the next/previous fields directly.
SnapshotImpl* prev_; SnapshotImpl* prev_;
SnapshotImpl* next_; SnapshotImpl* next_;

View File

@ -7,8 +7,7 @@
#ifndef STORAGE_LEVELDB_DB_TABLE_CACHE_H_ #ifndef STORAGE_LEVELDB_DB_TABLE_CACHE_H_
#define STORAGE_LEVELDB_DB_TABLE_CACHE_H_ #define STORAGE_LEVELDB_DB_TABLE_CACHE_H_
#include <stdint.h> #include <cstdint>
#include <string> #include <string>
#include "db/dbformat.h" #include "db/dbformat.h"
@ -23,6 +22,10 @@ class Env;
class TableCache { class TableCache {
public: public:
TableCache(const std::string& dbname, const Options& options, int entries); TableCache(const std::string& dbname, const Options& options, int entries);
TableCache(const TableCache&) = delete;
TableCache& operator=(const TableCache&) = delete;
~TableCache(); ~TableCache();
// Return an iterator for the specified file number (the corresponding // Return an iterator for the specified file number (the corresponding

View File

@ -232,7 +232,7 @@ std::string VersionEdit::DebugString() const {
r.append(compact_pointers_[i].second.DebugString()); r.append(compact_pointers_[i].second.DebugString());
} }
for (const auto& deleted_files_kvp : deleted_files_) { for (const auto& deleted_files_kvp : deleted_files_) {
r.append("\n DeleteFile: "); r.append("\n RemoveFile: ");
AppendNumberTo(&r, deleted_files_kvp.first); AppendNumberTo(&r, deleted_files_kvp.first);
r.append(" "); r.append(" ");
AppendNumberTo(&r, deleted_files_kvp.second); AppendNumberTo(&r, deleted_files_kvp.second);

View File

@ -71,7 +71,7 @@ class VersionEdit {
} }
// Delete the specified "file" from the specified "level". // Delete the specified "file" from the specified "level".
void DeleteFile(int level, uint64_t file) { void RemoveFile(int level, uint64_t file) {
deleted_files_.insert(std::make_pair(level, file)); deleted_files_.insert(std::make_pair(level, file));
} }

View File

@ -3,7 +3,8 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors. // found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "db/version_edit.h" #include "db/version_edit.h"
#include "util/testharness.h"
#include "gtest/gtest.h"
namespace leveldb { namespace leveldb {
@ -17,8 +18,6 @@ static void TestEncodeDecode(const VersionEdit& edit) {
ASSERT_EQ(encoded, encoded2); ASSERT_EQ(encoded, encoded2);
} }
class VersionEditTest {};
TEST(VersionEditTest, EncodeDecode) { TEST(VersionEditTest, EncodeDecode) {
static const uint64_t kBig = 1ull << 50; static const uint64_t kBig = 1ull << 50;
@ -28,7 +27,7 @@ TEST(VersionEditTest, EncodeDecode) {
edit.AddFile(3, kBig + 300 + i, kBig + 400 + i, edit.AddFile(3, kBig + 300 + i, kBig + 400 + i,
InternalKey("foo", kBig + 500 + i, kTypeValue), InternalKey("foo", kBig + 500 + i, kTypeValue),
InternalKey("zoo", kBig + 600 + i, kTypeDeletion)); InternalKey("zoo", kBig + 600 + i, kTypeDeletion));
edit.DeleteFile(4, kBig + 700 + i); edit.RemoveFile(4, kBig + 700 + i);
edit.SetCompactPointer(i, InternalKey("x", kBig + 900 + i, kTypeValue)); edit.SetCompactPointer(i, InternalKey("x", kBig + 900 + i, kTypeValue));
} }
@ -40,5 +39,3 @@ TEST(VersionEditTest, EncodeDecode) {
} }
} // namespace leveldb } // namespace leveldb
int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }

View File

@ -4,9 +4,8 @@
#include "db/version_set.h" #include "db/version_set.h"
#include <stdio.h>
#include <algorithm> #include <algorithm>
#include <cstdio>
#include "db/filename.h" #include "db/filename.h"
#include "db/log_reader.h" #include "db/log_reader.h"
@ -373,6 +372,10 @@ Status Version::Get(const ReadOptions& options, const LookupKey& k,
state->found = true; state->found = true;
return false; return false;
} }
// Not reached. Added to avoid false compilation warnings of
// "control reaches end of non-void function".
return false;
} }
}; };
@ -623,7 +626,7 @@ class VersionSet::Builder {
} }
// Apply all of the edits in *edit to the current state. // Apply all of the edits in *edit to the current state.
void Apply(VersionEdit* edit) { void Apply(const VersionEdit* edit) {
// Update compaction pointers // Update compaction pointers
for (size_t i = 0; i < edit->compact_pointers_.size(); i++) { for (size_t i = 0; i < edit->compact_pointers_.size(); i++) {
const int level = edit->compact_pointers_[i].first; const int level = edit->compact_pointers_[i].first;
@ -700,10 +703,10 @@ class VersionSet::Builder {
const InternalKey& prev_end = v->files_[level][i - 1]->largest; const InternalKey& prev_end = v->files_[level][i - 1]->largest;
const InternalKey& this_begin = v->files_[level][i]->smallest; const InternalKey& this_begin = v->files_[level][i]->smallest;
if (vset_->icmp_.Compare(prev_end, this_begin) >= 0) { if (vset_->icmp_.Compare(prev_end, this_begin) >= 0) {
fprintf(stderr, "overlapping ranges in same level %s vs. %s\n", std::fprintf(stderr, "overlapping ranges in same level %s vs. %s\n",
prev_end.DebugString().c_str(), prev_end.DebugString().c_str(),
this_begin.DebugString().c_str()); this_begin.DebugString().c_str());
abort(); std::abort();
} }
} }
} }
@ -803,7 +806,6 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) {
// first call to LogAndApply (when opening the database). // first call to LogAndApply (when opening the database).
assert(descriptor_file_ == nullptr); assert(descriptor_file_ == nullptr);
new_manifest_file = DescriptorFileName(dbname_, manifest_file_number_); new_manifest_file = DescriptorFileName(dbname_, manifest_file_number_);
edit->SetNextFile(next_file_number_);
s = env_->NewWritableFile(new_manifest_file, &descriptor_file_); s = env_->NewWritableFile(new_manifest_file, &descriptor_file_);
if (s.ok()) { if (s.ok()) {
descriptor_log_ = new log::Writer(descriptor_file_); descriptor_log_ = new log::Writer(descriptor_file_);
@ -849,7 +851,7 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) {
delete descriptor_file_; delete descriptor_file_;
descriptor_log_ = nullptr; descriptor_log_ = nullptr;
descriptor_file_ = nullptr; descriptor_file_ = nullptr;
env_->DeleteFile(new_manifest_file); env_->RemoveFile(new_manifest_file);
} }
} }
@ -895,6 +897,7 @@ Status VersionSet::Recover(bool* save_manifest) {
uint64_t log_number = 0; uint64_t log_number = 0;
uint64_t prev_log_number = 0; uint64_t prev_log_number = 0;
Builder builder(this, current_); Builder builder(this, current_);
int read_records = 0;
{ {
LogReporter reporter; LogReporter reporter;
@ -904,6 +907,7 @@ Status VersionSet::Recover(bool* save_manifest) {
Slice record; Slice record;
std::string scratch; std::string scratch;
while (reader.ReadRecord(&record, &scratch) && s.ok()) { while (reader.ReadRecord(&record, &scratch) && s.ok()) {
++read_records;
VersionEdit edit; VersionEdit edit;
s = edit.DecodeFrom(record); s = edit.DecodeFrom(record);
if (s.ok()) { if (s.ok()) {
@ -978,6 +982,10 @@ Status VersionSet::Recover(bool* save_manifest) {
} else { } else {
*save_manifest = true; *save_manifest = true;
} }
} else {
std::string error = s.ToString();
Log(options_->info_log, "Error recovering version set with %d records: %s",
read_records, error.c_str());
} }
return s; return s;
@ -1097,11 +1105,12 @@ int VersionSet::NumLevelFiles(int level) const {
const char* VersionSet::LevelSummary(LevelSummaryStorage* scratch) const { const char* VersionSet::LevelSummary(LevelSummaryStorage* scratch) const {
// Update code if kNumLevels changes // Update code if kNumLevels changes
static_assert(config::kNumLevels == 7, ""); static_assert(config::kNumLevels == 7, "");
snprintf(scratch->buffer, sizeof(scratch->buffer), std::snprintf(
"files[ %d %d %d %d %d %d %d ]", int(current_->files_[0].size()), scratch->buffer, sizeof(scratch->buffer), "files[ %d %d %d %d %d %d %d ]",
int(current_->files_[1].size()), int(current_->files_[2].size()), int(current_->files_[0].size()), int(current_->files_[1].size()),
int(current_->files_[3].size()), int(current_->files_[4].size()), int(current_->files_[2].size()), int(current_->files_[3].size()),
int(current_->files_[5].size()), int(current_->files_[6].size())); int(current_->files_[4].size()), int(current_->files_[5].size()),
int(current_->files_[6].size()));
return scratch->buffer; return scratch->buffer;
} }
@ -1294,7 +1303,7 @@ Compaction* VersionSet::PickCompaction() {
return c; return c;
} }
// Finds the largest key in a vector of files. Returns true if files it not // Finds the largest key in a vector of files. Returns true if files is not
// empty. // empty.
bool FindLargestKey(const InternalKeyComparator& icmp, bool FindLargestKey(const InternalKeyComparator& icmp,
const std::vector<FileMetaData*>& files, const std::vector<FileMetaData*>& files,
@ -1382,6 +1391,7 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
current_->GetOverlappingInputs(level + 1, &smallest, &largest, current_->GetOverlappingInputs(level + 1, &smallest, &largest,
&c->inputs_[1]); &c->inputs_[1]);
AddBoundaryInputs(icmp_, current_->files_[level + 1], &c->inputs_[1]);
// Get entire range covered by compaction // Get entire range covered by compaction
InternalKey all_start, all_limit; InternalKey all_start, all_limit;
@ -1404,6 +1414,7 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
std::vector<FileMetaData*> expanded1; std::vector<FileMetaData*> expanded1;
current_->GetOverlappingInputs(level + 1, &new_start, &new_limit, current_->GetOverlappingInputs(level + 1, &new_start, &new_limit,
&expanded1); &expanded1);
AddBoundaryInputs(icmp_, current_->files_[level + 1], &expanded1);
if (expanded1.size() == c->inputs_[1].size()) { if (expanded1.size() == c->inputs_[1].size()) {
Log(options_->info_log, Log(options_->info_log,
"Expanding@%d %d+%d (%ld+%ld bytes) to %d+%d (%ld+%ld bytes)\n", "Expanding@%d %d+%d (%ld+%ld bytes) to %d+%d (%ld+%ld bytes)\n",
@ -1498,7 +1509,7 @@ bool Compaction::IsTrivialMove() const {
void Compaction::AddInputDeletions(VersionEdit* edit) { void Compaction::AddInputDeletions(VersionEdit* edit) {
for (int which = 0; which < 2; which++) { for (int which = 0; which < 2; which++) {
for (size_t i = 0; i < inputs_[which].size(); i++) { for (size_t i = 0; i < inputs_[which].size(); i++) {
edit->DeleteFile(level_ + which, inputs_[which][i]->number); edit->RemoveFile(level_ + which, inputs_[which][i]->number);
} }
} }
} }

View File

@ -59,9 +59,6 @@ bool SomeFileOverlapsRange(const InternalKeyComparator& icmp,
class Version { class Version {
public: public:
// Lookup the value for key. If found, store it in *val and
// return OK. Else return a non-OK status. Fills *stats.
// REQUIRES: lock is not held
struct GetStats { struct GetStats {
FileMetaData* seek_file; FileMetaData* seek_file;
int seek_file_level; int seek_file_level;
@ -72,6 +69,9 @@ class Version {
// REQUIRES: This version has been saved (see VersionSet::SaveTo) // REQUIRES: This version has been saved (see VersionSet::SaveTo)
void AddIterators(const ReadOptions&, std::vector<Iterator*>* iters); void AddIterators(const ReadOptions&, std::vector<Iterator*>* iters);
// Lookup the value for key. If found, store it in *val and
// return OK. Else return a non-OK status. Fills *stats.
// REQUIRES: lock is not held
Status Get(const ReadOptions&, const LookupKey& key, std::string* val, Status Get(const ReadOptions&, const LookupKey& key, std::string* val,
GetStats* stats); GetStats* stats);

View File

@ -3,13 +3,14 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors. // found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "db/version_set.h" #include "db/version_set.h"
#include "gtest/gtest.h"
#include "util/logging.h" #include "util/logging.h"
#include "util/testharness.h"
#include "util/testutil.h" #include "util/testutil.h"
namespace leveldb { namespace leveldb {
class FindFileTest { class FindFileTest : public testing::Test {
public: public:
FindFileTest() : disjoint_sorted_files_(true) {} FindFileTest() : disjoint_sorted_files_(true) {}
@ -50,7 +51,7 @@ class FindFileTest {
std::vector<FileMetaData*> files_; std::vector<FileMetaData*> files_;
}; };
TEST(FindFileTest, Empty) { TEST_F(FindFileTest, Empty) {
ASSERT_EQ(0, Find("foo")); ASSERT_EQ(0, Find("foo"));
ASSERT_TRUE(!Overlaps("a", "z")); ASSERT_TRUE(!Overlaps("a", "z"));
ASSERT_TRUE(!Overlaps(nullptr, "z")); ASSERT_TRUE(!Overlaps(nullptr, "z"));
@ -58,7 +59,7 @@ TEST(FindFileTest, Empty) {
ASSERT_TRUE(!Overlaps(nullptr, nullptr)); ASSERT_TRUE(!Overlaps(nullptr, nullptr));
} }
TEST(FindFileTest, Single) { TEST_F(FindFileTest, Single) {
Add("p", "q"); Add("p", "q");
ASSERT_EQ(0, Find("a")); ASSERT_EQ(0, Find("a"));
ASSERT_EQ(0, Find("p")); ASSERT_EQ(0, Find("p"));
@ -88,7 +89,7 @@ TEST(FindFileTest, Single) {
ASSERT_TRUE(Overlaps(nullptr, nullptr)); ASSERT_TRUE(Overlaps(nullptr, nullptr));
} }
TEST(FindFileTest, Multiple) { TEST_F(FindFileTest, Multiple) {
Add("150", "200"); Add("150", "200");
Add("200", "250"); Add("200", "250");
Add("300", "350"); Add("300", "350");
@ -126,7 +127,7 @@ TEST(FindFileTest, Multiple) {
ASSERT_TRUE(Overlaps("450", "500")); ASSERT_TRUE(Overlaps("450", "500"));
} }
TEST(FindFileTest, MultipleNullBoundaries) { TEST_F(FindFileTest, MultipleNullBoundaries) {
Add("150", "200"); Add("150", "200");
Add("200", "250"); Add("200", "250");
Add("300", "350"); Add("300", "350");
@ -146,7 +147,7 @@ TEST(FindFileTest, MultipleNullBoundaries) {
ASSERT_TRUE(Overlaps("450", nullptr)); ASSERT_TRUE(Overlaps("450", nullptr));
} }
TEST(FindFileTest, OverlapSequenceChecks) { TEST_F(FindFileTest, OverlapSequenceChecks) {
Add("200", "200", 5000, 3000); Add("200", "200", 5000, 3000);
ASSERT_TRUE(!Overlaps("199", "199")); ASSERT_TRUE(!Overlaps("199", "199"));
ASSERT_TRUE(!Overlaps("201", "300")); ASSERT_TRUE(!Overlaps("201", "300"));
@ -155,7 +156,7 @@ TEST(FindFileTest, OverlapSequenceChecks) {
ASSERT_TRUE(Overlaps("200", "210")); ASSERT_TRUE(Overlaps("200", "210"));
} }
TEST(FindFileTest, OverlappingFiles) { TEST_F(FindFileTest, OverlappingFiles) {
Add("150", "600"); Add("150", "600");
Add("400", "500"); Add("400", "500");
disjoint_sorted_files_ = false; disjoint_sorted_files_ = false;
@ -177,7 +178,7 @@ void AddBoundaryInputs(const InternalKeyComparator& icmp,
const std::vector<FileMetaData*>& level_files, const std::vector<FileMetaData*>& level_files,
std::vector<FileMetaData*>* compaction_files); std::vector<FileMetaData*>* compaction_files);
class AddBoundaryInputsTest { class AddBoundaryInputsTest : public testing::Test {
public: public:
std::vector<FileMetaData*> level_files_; std::vector<FileMetaData*> level_files_;
std::vector<FileMetaData*> compaction_files_; std::vector<FileMetaData*> compaction_files_;
@ -204,13 +205,13 @@ class AddBoundaryInputsTest {
} }
}; };
TEST(AddBoundaryInputsTest, TestEmptyFileSets) { TEST_F(AddBoundaryInputsTest, TestEmptyFileSets) {
AddBoundaryInputs(icmp_, level_files_, &compaction_files_); AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
ASSERT_TRUE(compaction_files_.empty()); ASSERT_TRUE(compaction_files_.empty());
ASSERT_TRUE(level_files_.empty()); ASSERT_TRUE(level_files_.empty());
} }
TEST(AddBoundaryInputsTest, TestEmptyLevelFiles) { TEST_F(AddBoundaryInputsTest, TestEmptyLevelFiles) {
FileMetaData* f1 = FileMetaData* f1 =
CreateFileMetaData(1, InternalKey("100", 2, kTypeValue), CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
InternalKey(InternalKey("100", 1, kTypeValue))); InternalKey(InternalKey("100", 1, kTypeValue)));
@ -222,7 +223,7 @@ TEST(AddBoundaryInputsTest, TestEmptyLevelFiles) {
ASSERT_TRUE(level_files_.empty()); ASSERT_TRUE(level_files_.empty());
} }
TEST(AddBoundaryInputsTest, TestEmptyCompactionFiles) { TEST_F(AddBoundaryInputsTest, TestEmptyCompactionFiles) {
FileMetaData* f1 = FileMetaData* f1 =
CreateFileMetaData(1, InternalKey("100", 2, kTypeValue), CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
InternalKey(InternalKey("100", 1, kTypeValue))); InternalKey(InternalKey("100", 1, kTypeValue)));
@ -234,7 +235,7 @@ TEST(AddBoundaryInputsTest, TestEmptyCompactionFiles) {
ASSERT_EQ(f1, level_files_[0]); ASSERT_EQ(f1, level_files_[0]);
} }
TEST(AddBoundaryInputsTest, TestNoBoundaryFiles) { TEST_F(AddBoundaryInputsTest, TestNoBoundaryFiles) {
FileMetaData* f1 = FileMetaData* f1 =
CreateFileMetaData(1, InternalKey("100", 2, kTypeValue), CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
InternalKey(InternalKey("100", 1, kTypeValue))); InternalKey(InternalKey("100", 1, kTypeValue)));
@ -255,7 +256,7 @@ TEST(AddBoundaryInputsTest, TestNoBoundaryFiles) {
ASSERT_EQ(2, compaction_files_.size()); ASSERT_EQ(2, compaction_files_.size());
} }
TEST(AddBoundaryInputsTest, TestOneBoundaryFiles) { TEST_F(AddBoundaryInputsTest, TestOneBoundaryFiles) {
FileMetaData* f1 = FileMetaData* f1 =
CreateFileMetaData(1, InternalKey("100", 3, kTypeValue), CreateFileMetaData(1, InternalKey("100", 3, kTypeValue),
InternalKey(InternalKey("100", 2, kTypeValue))); InternalKey(InternalKey("100", 2, kTypeValue)));
@ -277,7 +278,7 @@ TEST(AddBoundaryInputsTest, TestOneBoundaryFiles) {
ASSERT_EQ(f2, compaction_files_[1]); ASSERT_EQ(f2, compaction_files_[1]);
} }
TEST(AddBoundaryInputsTest, TestTwoBoundaryFiles) { TEST_F(AddBoundaryInputsTest, TestTwoBoundaryFiles) {
FileMetaData* f1 = FileMetaData* f1 =
CreateFileMetaData(1, InternalKey("100", 6, kTypeValue), CreateFileMetaData(1, InternalKey("100", 6, kTypeValue),
InternalKey(InternalKey("100", 5, kTypeValue))); InternalKey(InternalKey("100", 5, kTypeValue)));
@ -300,7 +301,7 @@ TEST(AddBoundaryInputsTest, TestTwoBoundaryFiles) {
ASSERT_EQ(f2, compaction_files_[2]); ASSERT_EQ(f2, compaction_files_[2]);
} }
TEST(AddBoundaryInputsTest, TestDisjoinFilePointers) { TEST_F(AddBoundaryInputsTest, TestDisjoinFilePointers) {
FileMetaData* f1 = FileMetaData* f1 =
CreateFileMetaData(1, InternalKey("100", 6, kTypeValue), CreateFileMetaData(1, InternalKey("100", 6, kTypeValue),
InternalKey(InternalKey("100", 5, kTypeValue))); InternalKey(InternalKey("100", 5, kTypeValue)));
@ -328,5 +329,3 @@ TEST(AddBoundaryInputsTest, TestDisjoinFilePointers) {
} }
} // namespace leveldb } // namespace leveldb
int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }

View File

@ -2,13 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors. // found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "leveldb/db.h" #include "gtest/gtest.h"
#include "db/memtable.h" #include "db/memtable.h"
#include "db/write_batch_internal.h" #include "db/write_batch_internal.h"
#include "leveldb/db.h"
#include "leveldb/env.h" #include "leveldb/env.h"
#include "util/logging.h" #include "util/logging.h"
#include "util/testharness.h"
namespace leveldb { namespace leveldb {
@ -22,7 +21,7 @@ static std::string PrintContents(WriteBatch* b) {
Iterator* iter = mem->NewIterator(); Iterator* iter = mem->NewIterator();
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
ParsedInternalKey ikey; ParsedInternalKey ikey;
ASSERT_TRUE(ParseInternalKey(iter->key(), &ikey)); EXPECT_TRUE(ParseInternalKey(iter->key(), &ikey));
switch (ikey.type) { switch (ikey.type) {
case kTypeValue: case kTypeValue:
state.append("Put("); state.append("Put(");
@ -52,8 +51,6 @@ static std::string PrintContents(WriteBatch* b) {
return state; return state;
} }
class WriteBatchTest {};
TEST(WriteBatchTest, Empty) { TEST(WriteBatchTest, Empty) {
WriteBatch batch; WriteBatch batch;
ASSERT_EQ("", PrintContents(&batch)); ASSERT_EQ("", PrintContents(&batch));
@ -133,5 +130,3 @@ TEST(WriteBatchTest, ApproximateSize) {
} }
} // namespace leveldb } // namespace leveldb
int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }

View File

@ -83,7 +83,7 @@ div.bsql {
<p>Google, July 2011</p> <p>Google, July 2011</p>
<hr> <hr>
<p>In order to test LevelDB's performance, we benchmark it against other well-established database implementations. We compare LevelDB (revision 39) against <a href="http://www.sqlite.org/">SQLite3</a> (version 3.7.6.3) and <a href="http://fallabs.com/kyotocabinet/spex.html">Kyoto Cabinet's</a> (version 1.2.67) TreeDB (a B+Tree based key-value store). We would like to acknowledge Scott Hess and Mikio Hirabayashi for their suggestions and contributions to the SQLite3 and Kyoto Cabinet benchmarks, respectively.</p> <p>In order to test LevelDB's performance, we benchmark it against other well-established database implementations. We compare LevelDB (revision 39) against <a href="https://www.sqlite.org/">SQLite3</a> (version 3.7.6.3) and <a href="https://dbmx.net/kyotocabinet/spex.html">Kyoto Cabinet's</a> (version 1.2.67) TreeDB (a B+Tree based key-value store). We would like to acknowledge Scott Hess and Mikio Hirabayashi for their suggestions and contributions to the SQLite3 and Kyoto Cabinet benchmarks, respectively.</p>
<p>Benchmarks were all performed on a six-core Intel(R) Xeon(R) CPU X5650 @ 2.67GHz, with 12288 KB of total L3 cache and 12 GB of DDR3 RAM at 1333 MHz. (Note that LevelDB uses at most two CPUs since the benchmarks are single threaded: one to run the benchmark, and one for background compactions.) We ran the benchmarks on two machines (with identical processors), one with an Ext3 file system and one with an Ext4 file system. The machine with the Ext3 file system has a SATA Hitachi HDS721050CLA362 hard drive. The machine with the Ext4 file system has a SATA Samsung HD502HJ hard drive. Both hard drives spin at 7200 RPM and have hard drive write-caching enabled (using `hdparm -W 1 [device]`). The numbers reported below are the median of three measurements.</p> <p>Benchmarks were all performed on a six-core Intel(R) Xeon(R) CPU X5650 @ 2.67GHz, with 12288 KB of total L3 cache and 12 GB of DDR3 RAM at 1333 MHz. (Note that LevelDB uses at most two CPUs since the benchmarks are single threaded: one to run the benchmark, and one for background compactions.) We ran the benchmarks on two machines (with identical processors), one with an Ext3 file system and one with an Ext4 file system. The machine with the Ext3 file system has a SATA Hitachi HDS721050CLA362 hard drive. The machine with the Ext4 file system has a SATA Samsung HD502HJ hard drive. Both hard drives spin at 7200 RPM and have hard drive write-caching enabled (using `hdparm -W 1 [device]`). The numbers reported below are the median of three measurements.</p>
@ -97,9 +97,9 @@ div.bsql {
<h4>Custom Build Specifications</h4> <h4>Custom Build Specifications</h4>
<ul> <ul>
<li>LevelDB: LevelDB was compiled with the <a href="http://code.google.com/p/google-perftools">tcmalloc</a> library and the <a href="http://code.google.com/p/snappy/">Snappy</a> compression library (revision 33). Assertions were disabled.</li> <li>LevelDB: LevelDB was compiled with the <a href="https://github.com/gperftools/gperftools">tcmalloc</a> library and the <a href="https://github.com/google/snappy">Snappy</a> compression library (revision 33). Assertions were disabled.</li>
<li>TreeDB: TreeDB was compiled using the <a href="http://www.oberhumer.com/opensource/lzo/">LZO</a> compression library (version 2.03). Furthermore, we enabled the TSMALL and TLINEAR options when opening the database in order to reduce the footprint of each record.</li> <li>TreeDB: TreeDB was compiled using the <a href="https://www.oberhumer.com/opensource/lzo/">LZO</a> compression library (version 2.03). Furthermore, we enabled the TSMALL and TLINEAR options when opening the database in order to reduce the footprint of each record.</li>
<li>SQLite: We tuned SQLite's performance, by setting its locking mode to exclusive. We also enabled SQLite's <a href="http://www.sqlite.org/draft/wal.html">write-ahead logging</a>.</li> <li>SQLite: We tuned SQLite's performance, by setting its locking mode to exclusive. We also enabled SQLite's <a href="https://www.sqlite.org/draft/wal.html">write-ahead logging</a>.</li>
</ul> </ul>
<h2>1. Baseline Performance</h2> <h2>1. Baseline Performance</h2>
@ -451,7 +451,7 @@ performance may very well be better with compression if it allows more
of the working set to fit in memory.</p> of the working set to fit in memory.</p>
<h2>Note about Ext4 Filesystems</h2> <h2>Note about Ext4 Filesystems</h2>
<p>The preceding numbers are for an ext3 file system. Synchronous writes are much slower under <a href="http://en.wikipedia.org/wiki/Ext4">ext4</a> (LevelDB drops to ~31 writes / second and TreeDB drops to ~5 writes / second; SQLite3's synchronous writes do not noticeably drop) due to ext4's different handling of <span class="code">fsync</span> / <span class="code">msync</span> calls. Even LevelDB's asynchronous write performance drops somewhat since it spreads its storage across multiple files and issues <span class="code">fsync</span> calls when switching to a new file.</p> <p>The preceding numbers are for an ext3 file system. Synchronous writes are much slower under <a href="https://en.wikipedia.org/wiki/Ext4">ext4</a> (LevelDB drops to ~31 writes / second and TreeDB drops to ~5 writes / second; SQLite3's synchronous writes do not noticeably drop) due to ext4's different handling of <span class="code">fsync</span> / <span class="code">msync</span> calls. Even LevelDB's asynchronous write performance drops somewhat since it spreads its storage across multiple files and issues <span class="code">fsync</span> calls when switching to a new file.</p>
<h2>Acknowledgements</h2> <h2>Acknowledgements</h2>
<p>Jeff Dean and Sanjay Ghemawat wrote LevelDB. Kevin Tseng wrote and compiled these benchmarks. Mikio Hirabayashi, Scott Hess, and Gabor Cselle provided help and advice.</p> <p>Jeff Dean and Sanjay Ghemawat wrote LevelDB. Kevin Tseng wrote and compiled these benchmarks. Mikio Hirabayashi, Scott Hess, and Gabor Cselle provided help and advice.</p>

View File

@ -1,7 +1,7 @@
## Files ## Files
The implementation of leveldb is similar in spirit to the representation of a The implementation of leveldb is similar in spirit to the representation of a
single [Bigtable tablet (section 5.3)](http://research.google.com/archive/bigtable.html). single [Bigtable tablet (section 5.3)](https://research.google/pubs/pub27898/).
However the organization of the files that make up the representation is However the organization of the files that make up the representation is
somewhat different and is explained below. somewhat different and is explained below.
@ -166,7 +166,7 @@ So maybe even the sharding is not necessary on modern filesystems?
## Garbage collection of files ## Garbage collection of files
`DeleteObsoleteFiles()` is called at the end of every compaction and at the end `RemoveObsoleteFiles()` is called at the end of every compaction and at the end
of recovery. It finds the names of all files in the database. It deletes all log of recovery. It finds the names of all files in the database. It deletes all log
files that are not the current log file. It deletes all table files that are not files that are not the current log file. It deletes all table files that are not
referenced from some level and are not the output of an active compaction. referenced from some level and are not the output of an active compaction.
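
The following is a minimal, standalone sketch of the garbage-collection policy described above, not the actual `RemoveObsoleteFiles()` implementation (which walks `VersionSet` state). The file names, `kCurrentLog`, and `kLiveTables` are hypothetical stand-ins used purely for illustration.

```c++
// Sketch: keep the current log file and tables referenced by some level;
// report everything else as removable. Manifests and active-compaction
// outputs are ignored here to keep the example short.
#include <iostream>
#include <set>
#include <string>
#include <vector>

int main() {
  // Hypothetical contents of the database directory.
  const std::vector<std::string> children = {
      "000005.log", "000007.log", "000004.ldb", "000006.ldb", "MANIFEST-000002"};
  const std::string kCurrentLog = "000007.log";              // current log file
  const std::set<std::string> kLiveTables = {"000006.ldb"};  // referenced by some level

  for (const std::string& name : children) {
    bool keep = true;
    if (name.size() > 4 && name.compare(name.size() - 4, 4, ".log") == 0) {
      keep = (name == kCurrentLog);           // drop all logs except the current one
    } else if (name.size() > 4 && name.compare(name.size() - 4, 4, ".ldb") == 0) {
      keep = (kLiveTables.count(name) != 0);  // drop tables not referenced by any level
    }
    if (!keep) std::cout << "remove " << name << "\n";
  }
}
```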

View File

@ -369,6 +369,7 @@ leveldb::Iterator* it = db->NewIterator(options);
for (it->SeekToFirst(); it->Valid(); it->Next()) { for (it->SeekToFirst(); it->Valid(); it->Next()) {
... ...
} }
delete it;
``` ```
### Key Layout ### Key Layout
@ -424,21 +425,21 @@ spaces. For example:
```c++ ```c++
class CustomFilterPolicy : public leveldb::FilterPolicy { class CustomFilterPolicy : public leveldb::FilterPolicy {
private: private:
FilterPolicy* builtin_policy_; leveldb::FilterPolicy* builtin_policy_;
public: public:
CustomFilterPolicy() : builtin_policy_(NewBloomFilterPolicy(10)) {} CustomFilterPolicy() : builtin_policy_(leveldb::NewBloomFilterPolicy(10)) {}
~CustomFilterPolicy() { delete builtin_policy_; } ~CustomFilterPolicy() { delete builtin_policy_; }
const char* Name() const { return "IgnoreTrailingSpacesFilter"; } const char* Name() const { return "IgnoreTrailingSpacesFilter"; }
void CreateFilter(const Slice* keys, int n, std::string* dst) const { void CreateFilter(const leveldb::Slice* keys, int n, std::string* dst) const {
// Use builtin bloom filter code after removing trailing spaces // Use builtin bloom filter code after removing trailing spaces
std::vector<Slice> trimmed(n); std::vector<leveldb::Slice> trimmed(n);
for (int i = 0; i < n; i++) { for (int i = 0; i < n; i++) {
trimmed[i] = RemoveTrailingSpaces(keys[i]); trimmed[i] = RemoveTrailingSpaces(keys[i]);
} }
return builtin_policy_->CreateFilter(&trimmed[i], n, dst); builtin_policy_->CreateFilter(trimmed.data(), n, dst);
} }
}; };
``` ```
@ -478,7 +479,7 @@ leveldb::Range ranges[2];
ranges[0] = leveldb::Range("a", "c"); ranges[0] = leveldb::Range("a", "c");
ranges[1] = leveldb::Range("x", "z"); ranges[1] = leveldb::Range("x", "z");
uint64_t sizes[2]; uint64_t sizes[2];
leveldb::Status s = db->GetApproximateSizes(ranges, 2, sizes); db->GetApproximateSizes(ranges, 2, sizes);
``` ```
The preceding call will set `sizes[0]` to the approximate number of bytes of The preceding call will set `sizes[0]` to the approximate number of bytes of

View File

@ -4,8 +4,7 @@
#include "helpers/memenv/memenv.h" #include "helpers/memenv/memenv.h"
#include <string.h> #include <cstring>
#include <limits> #include <limits>
#include <map> #include <map>
#include <string> #include <string>
@ -94,7 +93,7 @@ class FileState {
if (avail > bytes_to_copy) { if (avail > bytes_to_copy) {
avail = bytes_to_copy; avail = bytes_to_copy;
} }
memcpy(dst, blocks_[block] + block_offset, avail); std::memcpy(dst, blocks_[block] + block_offset, avail);
bytes_to_copy -= avail; bytes_to_copy -= avail;
dst += avail; dst += avail;
@ -127,7 +126,7 @@ class FileState {
if (avail > src_len) { if (avail > src_len) {
avail = src_len; avail = src_len;
} }
memcpy(blocks_.back() + offset, src, avail); std::memcpy(blocks_.back() + offset, src, avail);
src_len -= avail; src_len -= avail;
src += avail; src += avail;
size_ += avail; size_ += avail;
@ -216,7 +215,7 @@ class WritableFileImpl : public WritableFile {
class NoOpLogger : public Logger { class NoOpLogger : public Logger {
public: public:
void Logv(const char* format, va_list ap) override {} void Logv(const char* format, std::va_list ap) override {}
}; };
class InMemoryEnv : public EnvWrapper { class InMemoryEnv : public EnvWrapper {
@ -309,7 +308,7 @@ class InMemoryEnv : public EnvWrapper {
return Status::OK(); return Status::OK();
} }
void DeleteFileInternal(const std::string& fname) void RemoveFileInternal(const std::string& fname)
EXCLUSIVE_LOCKS_REQUIRED(mutex_) { EXCLUSIVE_LOCKS_REQUIRED(mutex_) {
if (file_map_.find(fname) == file_map_.end()) { if (file_map_.find(fname) == file_map_.end()) {
return; return;
@ -319,19 +318,19 @@ class InMemoryEnv : public EnvWrapper {
file_map_.erase(fname); file_map_.erase(fname);
} }
Status DeleteFile(const std::string& fname) override { Status RemoveFile(const std::string& fname) override {
MutexLock lock(&mutex_); MutexLock lock(&mutex_);
if (file_map_.find(fname) == file_map_.end()) { if (file_map_.find(fname) == file_map_.end()) {
return Status::IOError(fname, "File not found"); return Status::IOError(fname, "File not found");
} }
DeleteFileInternal(fname); RemoveFileInternal(fname);
return Status::OK(); return Status::OK();
} }
Status CreateDir(const std::string& dirname) override { return Status::OK(); } Status CreateDir(const std::string& dirname) override { return Status::OK(); }
Status DeleteDir(const std::string& dirname) override { return Status::OK(); } Status RemoveDir(const std::string& dirname) override { return Status::OK(); }
Status GetFileSize(const std::string& fname, uint64_t* file_size) override { Status GetFileSize(const std::string& fname, uint64_t* file_size) override {
MutexLock lock(&mutex_); MutexLock lock(&mutex_);
@ -350,7 +349,7 @@ class InMemoryEnv : public EnvWrapper {
return Status::IOError(src, "File not found"); return Status::IOError(src, "File not found");
} }
DeleteFileInternal(target); RemoveFileInternal(target);
file_map_[target] = file_map_[src]; file_map_[target] = file_map_[src];
file_map_.erase(src); file_map_.erase(src);
return Status::OK(); return Status::OK();

View File

@ -7,14 +7,15 @@
#include <string> #include <string>
#include <vector> #include <vector>
#include "gtest/gtest.h"
#include "db/db_impl.h" #include "db/db_impl.h"
#include "leveldb/db.h" #include "leveldb/db.h"
#include "leveldb/env.h" #include "leveldb/env.h"
#include "util/testharness.h" #include "util/testutil.h"
namespace leveldb { namespace leveldb {
class MemEnvTest { class MemEnvTest : public testing::Test {
public: public:
MemEnvTest() : env_(NewMemEnv(Env::Default())) {} MemEnvTest() : env_(NewMemEnv(Env::Default())) {}
~MemEnvTest() { delete env_; } ~MemEnvTest() { delete env_; }
@ -22,55 +23,55 @@ class MemEnvTest {
Env* env_; Env* env_;
}; };
TEST(MemEnvTest, Basics) { TEST_F(MemEnvTest, Basics) {
uint64_t file_size; uint64_t file_size;
WritableFile* writable_file; WritableFile* writable_file;
std::vector<std::string> children; std::vector<std::string> children;
ASSERT_OK(env_->CreateDir("/dir")); ASSERT_LEVELDB_OK(env_->CreateDir("/dir"));
// Check that the directory is empty. // Check that the directory is empty.
ASSERT_TRUE(!env_->FileExists("/dir/non_existent")); ASSERT_TRUE(!env_->FileExists("/dir/non_existent"));
ASSERT_TRUE(!env_->GetFileSize("/dir/non_existent", &file_size).ok()); ASSERT_TRUE(!env_->GetFileSize("/dir/non_existent", &file_size).ok());
ASSERT_OK(env_->GetChildren("/dir", &children)); ASSERT_LEVELDB_OK(env_->GetChildren("/dir", &children));
ASSERT_EQ(0, children.size()); ASSERT_EQ(0, children.size());
// Create a file. // Create a file.
ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file)); ASSERT_LEVELDB_OK(env_->NewWritableFile("/dir/f", &writable_file));
ASSERT_OK(env_->GetFileSize("/dir/f", &file_size)); ASSERT_LEVELDB_OK(env_->GetFileSize("/dir/f", &file_size));
ASSERT_EQ(0, file_size); ASSERT_EQ(0, file_size);
delete writable_file; delete writable_file;
// Check that the file exists. // Check that the file exists.
ASSERT_TRUE(env_->FileExists("/dir/f")); ASSERT_TRUE(env_->FileExists("/dir/f"));
ASSERT_OK(env_->GetFileSize("/dir/f", &file_size)); ASSERT_LEVELDB_OK(env_->GetFileSize("/dir/f", &file_size));
ASSERT_EQ(0, file_size); ASSERT_EQ(0, file_size);
ASSERT_OK(env_->GetChildren("/dir", &children)); ASSERT_LEVELDB_OK(env_->GetChildren("/dir", &children));
ASSERT_EQ(1, children.size()); ASSERT_EQ(1, children.size());
ASSERT_EQ("f", children[0]); ASSERT_EQ("f", children[0]);
// Write to the file. // Write to the file.
ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file)); ASSERT_LEVELDB_OK(env_->NewWritableFile("/dir/f", &writable_file));
ASSERT_OK(writable_file->Append("abc")); ASSERT_LEVELDB_OK(writable_file->Append("abc"));
delete writable_file; delete writable_file;
// Check that append works. // Check that append works.
ASSERT_OK(env_->NewAppendableFile("/dir/f", &writable_file)); ASSERT_LEVELDB_OK(env_->NewAppendableFile("/dir/f", &writable_file));
ASSERT_OK(env_->GetFileSize("/dir/f", &file_size)); ASSERT_LEVELDB_OK(env_->GetFileSize("/dir/f", &file_size));
ASSERT_EQ(3, file_size); ASSERT_EQ(3, file_size);
ASSERT_OK(writable_file->Append("hello")); ASSERT_LEVELDB_OK(writable_file->Append("hello"));
delete writable_file; delete writable_file;
// Check for expected size. // Check for expected size.
ASSERT_OK(env_->GetFileSize("/dir/f", &file_size)); ASSERT_LEVELDB_OK(env_->GetFileSize("/dir/f", &file_size));
ASSERT_EQ(8, file_size); ASSERT_EQ(8, file_size);
// Check that renaming works. // Check that renaming works.
ASSERT_TRUE(!env_->RenameFile("/dir/non_existent", "/dir/g").ok()); ASSERT_TRUE(!env_->RenameFile("/dir/non_existent", "/dir/g").ok());
ASSERT_OK(env_->RenameFile("/dir/f", "/dir/g")); ASSERT_LEVELDB_OK(env_->RenameFile("/dir/f", "/dir/g"));
ASSERT_TRUE(!env_->FileExists("/dir/f")); ASSERT_TRUE(!env_->FileExists("/dir/f"));
ASSERT_TRUE(env_->FileExists("/dir/g")); ASSERT_TRUE(env_->FileExists("/dir/g"));
ASSERT_OK(env_->GetFileSize("/dir/g", &file_size)); ASSERT_LEVELDB_OK(env_->GetFileSize("/dir/g", &file_size));
ASSERT_EQ(8, file_size); ASSERT_EQ(8, file_size);
// Check that opening non-existent file fails. // Check that opening non-existent file fails.
@ -82,49 +83,50 @@ TEST(MemEnvTest, Basics) {
ASSERT_TRUE(!rand_file); ASSERT_TRUE(!rand_file);
// Check that deleting works. // Check that deleting works.
ASSERT_TRUE(!env_->DeleteFile("/dir/non_existent").ok()); ASSERT_TRUE(!env_->RemoveFile("/dir/non_existent").ok());
ASSERT_OK(env_->DeleteFile("/dir/g")); ASSERT_LEVELDB_OK(env_->RemoveFile("/dir/g"));
ASSERT_TRUE(!env_->FileExists("/dir/g")); ASSERT_TRUE(!env_->FileExists("/dir/g"));
ASSERT_OK(env_->GetChildren("/dir", &children)); ASSERT_LEVELDB_OK(env_->GetChildren("/dir", &children));
ASSERT_EQ(0, children.size()); ASSERT_EQ(0, children.size());
ASSERT_OK(env_->DeleteDir("/dir")); ASSERT_LEVELDB_OK(env_->RemoveDir("/dir"));
} }
TEST(MemEnvTest, ReadWrite) { TEST_F(MemEnvTest, ReadWrite) {
WritableFile* writable_file; WritableFile* writable_file;
SequentialFile* seq_file; SequentialFile* seq_file;
RandomAccessFile* rand_file; RandomAccessFile* rand_file;
Slice result; Slice result;
char scratch[100]; char scratch[100];
ASSERT_OK(env_->CreateDir("/dir")); ASSERT_LEVELDB_OK(env_->CreateDir("/dir"));
ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file)); ASSERT_LEVELDB_OK(env_->NewWritableFile("/dir/f", &writable_file));
ASSERT_OK(writable_file->Append("hello ")); ASSERT_LEVELDB_OK(writable_file->Append("hello "));
ASSERT_OK(writable_file->Append("world")); ASSERT_LEVELDB_OK(writable_file->Append("world"));
delete writable_file; delete writable_file;
// Read sequentially. // Read sequentially.
ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file)); ASSERT_LEVELDB_OK(env_->NewSequentialFile("/dir/f", &seq_file));
ASSERT_OK(seq_file->Read(5, &result, scratch)); // Read "hello". ASSERT_LEVELDB_OK(seq_file->Read(5, &result, scratch)); // Read "hello".
ASSERT_EQ(0, result.compare("hello")); ASSERT_EQ(0, result.compare("hello"));
ASSERT_OK(seq_file->Skip(1)); ASSERT_LEVELDB_OK(seq_file->Skip(1));
ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Read "world". ASSERT_LEVELDB_OK(seq_file->Read(1000, &result, scratch)); // Read "world".
ASSERT_EQ(0, result.compare("world")); ASSERT_EQ(0, result.compare("world"));
ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Try reading past EOF. ASSERT_LEVELDB_OK(
seq_file->Read(1000, &result, scratch)); // Try reading past EOF.
ASSERT_EQ(0, result.size()); ASSERT_EQ(0, result.size());
ASSERT_OK(seq_file->Skip(100)); // Try to skip past end of file. ASSERT_LEVELDB_OK(seq_file->Skip(100)); // Try to skip past end of file.
ASSERT_OK(seq_file->Read(1000, &result, scratch)); ASSERT_LEVELDB_OK(seq_file->Read(1000, &result, scratch));
ASSERT_EQ(0, result.size()); ASSERT_EQ(0, result.size());
delete seq_file; delete seq_file;
// Random reads. // Random reads.
ASSERT_OK(env_->NewRandomAccessFile("/dir/f", &rand_file)); ASSERT_LEVELDB_OK(env_->NewRandomAccessFile("/dir/f", &rand_file));
ASSERT_OK(rand_file->Read(6, 5, &result, scratch)); // Read "world". ASSERT_LEVELDB_OK(rand_file->Read(6, 5, &result, scratch)); // Read "world".
ASSERT_EQ(0, result.compare("world")); ASSERT_EQ(0, result.compare("world"));
ASSERT_OK(rand_file->Read(0, 5, &result, scratch)); // Read "hello". ASSERT_LEVELDB_OK(rand_file->Read(0, 5, &result, scratch)); // Read "hello".
ASSERT_EQ(0, result.compare("hello")); ASSERT_EQ(0, result.compare("hello"));
ASSERT_OK(rand_file->Read(10, 100, &result, scratch)); // Read "d". ASSERT_LEVELDB_OK(rand_file->Read(10, 100, &result, scratch)); // Read "d".
ASSERT_EQ(0, result.compare("d")); ASSERT_EQ(0, result.compare("d"));
// Too high offset. // Too high offset.
@ -132,30 +134,30 @@ TEST(MemEnvTest, ReadWrite) {
delete rand_file; delete rand_file;
} }
TEST(MemEnvTest, Locks) { TEST_F(MemEnvTest, Locks) {
FileLock* lock; FileLock* lock;
// These are no-ops, but we test they return success. // These are no-ops, but we test they return success.
ASSERT_OK(env_->LockFile("some file", &lock)); ASSERT_LEVELDB_OK(env_->LockFile("some file", &lock));
ASSERT_OK(env_->UnlockFile(lock)); ASSERT_LEVELDB_OK(env_->UnlockFile(lock));
} }
TEST(MemEnvTest, Misc) { TEST_F(MemEnvTest, Misc) {
std::string test_dir; std::string test_dir;
ASSERT_OK(env_->GetTestDirectory(&test_dir)); ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
ASSERT_TRUE(!test_dir.empty()); ASSERT_TRUE(!test_dir.empty());
WritableFile* writable_file; WritableFile* writable_file;
ASSERT_OK(env_->NewWritableFile("/a/b", &writable_file)); ASSERT_LEVELDB_OK(env_->NewWritableFile("/a/b", &writable_file));
// These are no-ops, but we test they return success. // These are no-ops, but we test they return success.
ASSERT_OK(writable_file->Sync()); ASSERT_LEVELDB_OK(writable_file->Sync());
ASSERT_OK(writable_file->Flush()); ASSERT_LEVELDB_OK(writable_file->Flush());
ASSERT_OK(writable_file->Close()); ASSERT_LEVELDB_OK(writable_file->Close());
delete writable_file; delete writable_file;
} }
TEST(MemEnvTest, LargeWrite) { TEST_F(MemEnvTest, LargeWrite) {
const size_t kWriteSize = 300 * 1024; const size_t kWriteSize = 300 * 1024;
char* scratch = new char[kWriteSize * 2]; char* scratch = new char[kWriteSize * 2];
@ -165,21 +167,21 @@ TEST(MemEnvTest, LargeWrite) {
} }
WritableFile* writable_file; WritableFile* writable_file;
ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file)); ASSERT_LEVELDB_OK(env_->NewWritableFile("/dir/f", &writable_file));
ASSERT_OK(writable_file->Append("foo")); ASSERT_LEVELDB_OK(writable_file->Append("foo"));
ASSERT_OK(writable_file->Append(write_data)); ASSERT_LEVELDB_OK(writable_file->Append(write_data));
delete writable_file; delete writable_file;
SequentialFile* seq_file; SequentialFile* seq_file;
Slice result; Slice result;
ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file)); ASSERT_LEVELDB_OK(env_->NewSequentialFile("/dir/f", &seq_file));
ASSERT_OK(seq_file->Read(3, &result, scratch)); // Read "foo". ASSERT_LEVELDB_OK(seq_file->Read(3, &result, scratch)); // Read "foo".
ASSERT_EQ(0, result.compare("foo")); ASSERT_EQ(0, result.compare("foo"));
size_t read = 0; size_t read = 0;
std::string read_data; std::string read_data;
while (read < kWriteSize) { while (read < kWriteSize) {
ASSERT_OK(seq_file->Read(kWriteSize - read, &result, scratch)); ASSERT_LEVELDB_OK(seq_file->Read(kWriteSize - read, &result, scratch));
read_data.append(result.data(), result.size()); read_data.append(result.data(), result.size());
read += result.size(); read += result.size();
} }
@ -188,30 +190,30 @@ TEST(MemEnvTest, LargeWrite) {
delete[] scratch; delete[] scratch;
} }
TEST(MemEnvTest, OverwriteOpenFile) { TEST_F(MemEnvTest, OverwriteOpenFile) {
const char kWrite1Data[] = "Write #1 data"; const char kWrite1Data[] = "Write #1 data";
const size_t kFileDataLen = sizeof(kWrite1Data) - 1; const size_t kFileDataLen = sizeof(kWrite1Data) - 1;
const std::string kTestFileName = test::TmpDir() + "/leveldb-TestFile.dat"; const std::string kTestFileName = testing::TempDir() + "leveldb-TestFile.dat";
ASSERT_OK(WriteStringToFile(env_, kWrite1Data, kTestFileName)); ASSERT_LEVELDB_OK(WriteStringToFile(env_, kWrite1Data, kTestFileName));
RandomAccessFile* rand_file; RandomAccessFile* rand_file;
ASSERT_OK(env_->NewRandomAccessFile(kTestFileName, &rand_file)); ASSERT_LEVELDB_OK(env_->NewRandomAccessFile(kTestFileName, &rand_file));
const char kWrite2Data[] = "Write #2 data"; const char kWrite2Data[] = "Write #2 data";
ASSERT_OK(WriteStringToFile(env_, kWrite2Data, kTestFileName)); ASSERT_LEVELDB_OK(WriteStringToFile(env_, kWrite2Data, kTestFileName));
// Verify that overwriting an open file will result in the new file data // Verify that overwriting an open file will result in the new file data
// being read from files opened before the write. // being read from files opened before the write.
Slice result; Slice result;
char scratch[kFileDataLen]; char scratch[kFileDataLen];
ASSERT_OK(rand_file->Read(0, kFileDataLen, &result, scratch)); ASSERT_LEVELDB_OK(rand_file->Read(0, kFileDataLen, &result, scratch));
ASSERT_EQ(0, result.compare(kWrite2Data)); ASSERT_EQ(0, result.compare(kWrite2Data));
delete rand_file; delete rand_file;
} }
TEST(MemEnvTest, DBTest) { TEST_F(MemEnvTest, DBTest) {
Options options; Options options;
options.create_if_missing = true; options.create_if_missing = true;
options.env = env_; options.env = env_;
@ -220,14 +222,14 @@ TEST(MemEnvTest, DBTest) {
const Slice keys[] = {Slice("aaa"), Slice("bbb"), Slice("ccc")}; const Slice keys[] = {Slice("aaa"), Slice("bbb"), Slice("ccc")};
const Slice vals[] = {Slice("foo"), Slice("bar"), Slice("baz")}; const Slice vals[] = {Slice("foo"), Slice("bar"), Slice("baz")};
ASSERT_OK(DB::Open(options, "/dir/db", &db)); ASSERT_LEVELDB_OK(DB::Open(options, "/dir/db", &db));
for (size_t i = 0; i < 3; ++i) { for (size_t i = 0; i < 3; ++i) {
ASSERT_OK(db->Put(WriteOptions(), keys[i], vals[i])); ASSERT_LEVELDB_OK(db->Put(WriteOptions(), keys[i], vals[i]));
} }
for (size_t i = 0; i < 3; ++i) { for (size_t i = 0; i < 3; ++i) {
std::string res; std::string res;
ASSERT_OK(db->Get(ReadOptions(), keys[i], &res)); ASSERT_LEVELDB_OK(db->Get(ReadOptions(), keys[i], &res));
ASSERT_TRUE(res == vals[i]); ASSERT_TRUE(res == vals[i]);
} }
@ -243,11 +245,11 @@ TEST(MemEnvTest, DBTest) {
delete iterator; delete iterator;
DBImpl* dbi = reinterpret_cast<DBImpl*>(db); DBImpl* dbi = reinterpret_cast<DBImpl*>(db);
ASSERT_OK(dbi->TEST_CompactMemTable()); ASSERT_LEVELDB_OK(dbi->TEST_CompactMemTable());
for (size_t i = 0; i < 3; ++i) { for (size_t i = 0; i < 3; ++i) {
std::string res; std::string res;
ASSERT_OK(db->Get(ReadOptions(), keys[i], &res)); ASSERT_LEVELDB_OK(db->Get(ReadOptions(), keys[i], &res));
ASSERT_TRUE(res == vals[i]); ASSERT_TRUE(res == vals[i]);
} }
@ -255,5 +257,3 @@ TEST(MemEnvTest, DBTest) {
} }
} // namespace leveldb } // namespace leveldb
int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }

View File

@ -40,16 +40,16 @@
#ifndef STORAGE_LEVELDB_INCLUDE_C_H_ #ifndef STORAGE_LEVELDB_INCLUDE_C_H_
#define STORAGE_LEVELDB_INCLUDE_C_H_ #define STORAGE_LEVELDB_INCLUDE_C_H_
#ifdef __cplusplus
extern "C" {
#endif
#include <stdarg.h> #include <stdarg.h>
#include <stddef.h> #include <stddef.h>
#include <stdint.h> #include <stdint.h>
#include "leveldb/export.h" #include "leveldb/export.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Exported types */ /* Exported types */
typedef struct leveldb_t leveldb_t; typedef struct leveldb_t leveldb_t;

View File

@ -18,7 +18,7 @@
#ifndef STORAGE_LEVELDB_INCLUDE_CACHE_H_ #ifndef STORAGE_LEVELDB_INCLUDE_CACHE_H_
#define STORAGE_LEVELDB_INCLUDE_CACHE_H_ #define STORAGE_LEVELDB_INCLUDE_CACHE_H_
#include <stdint.h> #include <cstdint>
#include "leveldb/export.h" #include "leveldb/export.h"
#include "leveldb/slice.h" #include "leveldb/slice.h"
@ -96,14 +96,6 @@ class LEVELDB_EXPORT Cache {
// Return an estimate of the combined charges of all elements stored in the // Return an estimate of the combined charges of all elements stored in the
// cache. // cache.
virtual size_t TotalCharge() const = 0; virtual size_t TotalCharge() const = 0;
private:
void LRU_Remove(Handle* e);
void LRU_Append(Handle* e);
void Unref(Handle* e);
struct Rep;
Rep* rep_;
}; };
} // namespace leveldb } // namespace leveldb

View File

@ -5,8 +5,8 @@
#ifndef STORAGE_LEVELDB_INCLUDE_DB_H_ #ifndef STORAGE_LEVELDB_INCLUDE_DB_H_
#define STORAGE_LEVELDB_INCLUDE_DB_H_ #define STORAGE_LEVELDB_INCLUDE_DB_H_
#include <stdint.h> #include <cstdint>
#include <stdio.h> #include <cstdio>
#include "leveldb/export.h" #include "leveldb/export.h"
#include "leveldb/iterator.h" #include "leveldb/iterator.h"
@ -16,7 +16,7 @@ namespace leveldb {
// Update CMakeLists.txt if you change these // Update CMakeLists.txt if you change these
static const int kMajorVersion = 1; static const int kMajorVersion = 1;
static const int kMinorVersion = 22; static const int kMinorVersion = 23;
struct Options; struct Options;
struct ReadOptions; struct ReadOptions;

View File

@ -13,30 +13,26 @@
#ifndef STORAGE_LEVELDB_INCLUDE_ENV_H_ #ifndef STORAGE_LEVELDB_INCLUDE_ENV_H_
#define STORAGE_LEVELDB_INCLUDE_ENV_H_ #define STORAGE_LEVELDB_INCLUDE_ENV_H_
#include <stdarg.h> #include <cstdarg>
#include <stdint.h> #include <cstdint>
#include <string> #include <string>
#include <vector> #include <vector>
#include "leveldb/export.h" #include "leveldb/export.h"
#include "leveldb/status.h" #include "leveldb/status.h"
// This workaround can be removed when leveldb::Env::DeleteFile is removed.
#if defined(_WIN32) #if defined(_WIN32)
// The leveldb::Env class below contains a DeleteFile method. // On Windows, the method name DeleteFile (below) introduces the risk of
// At the same time, <windows.h>, a fairly popular header // triggering undefined behavior by exposing the compiler to different
// file for Windows applications, defines a DeleteFile macro. // declarations of the Env class in different translation units.
// //
// Without any intervention on our part, the result of this // This is because <windows.h>, a fairly popular header file for Windows
// unfortunate coincidence is that the name of the // applications, defines a DeleteFile macro. So, files that include the Windows
// leveldb::Env::DeleteFile method seen by the compiler depends on // header before this header will contain an altered Env declaration.
// whether <windows.h> was included before or after the LevelDB
// headers.
// //
// To avoid headaches, we undefined DeleteFile (if defined) and // This workaround ensures that the compiler sees the same Env declaration,
// redefine it at the bottom of this file. This way <windows.h> // independently of whether <windows.h> was included.
// can be included before this file (or not at all) and the
// exported method will always be leveldb::Env::DeleteFile.
#if defined(DeleteFile) #if defined(DeleteFile)
#undef DeleteFile #undef DeleteFile
#define LEVELDB_DELETEFILE_UNDEFINED #define LEVELDB_DELETEFILE_UNDEFINED
@ -54,7 +50,7 @@ class WritableFile;
class LEVELDB_EXPORT Env { class LEVELDB_EXPORT Env {
public: public:
Env() = default; Env();
Env(const Env&) = delete; Env(const Env&) = delete;
Env& operator=(const Env&) = delete; Env& operator=(const Env&) = delete;
@ -122,15 +118,48 @@ class LEVELDB_EXPORT Env {
// Original contents of *results are dropped. // Original contents of *results are dropped.
virtual Status GetChildren(const std::string& dir, virtual Status GetChildren(const std::string& dir,
std::vector<std::string>* result) = 0; std::vector<std::string>* result) = 0;
// Delete the named file. // Delete the named file.
virtual Status DeleteFile(const std::string& fname) = 0; //
// The default implementation calls DeleteFile, to support legacy Env
// implementations. Updated Env implementations must override RemoveFile and
// ignore the existence of DeleteFile. Updated code calling into the Env API
// must call RemoveFile instead of DeleteFile.
//
// A future release will remove DeleteDir and the default implementation of
// RemoveDir.
virtual Status RemoveFile(const std::string& fname);
// DEPRECATED: Modern Env implementations should override RemoveFile instead.
//
// The default implementation calls RemoveFile, to support legacy Env user
// code that calls this method on modern Env implementations. Modern Env user
// code should call RemoveFile.
//
// A future release will remove this method.
virtual Status DeleteFile(const std::string& fname);
// Create the specified directory. // Create the specified directory.
virtual Status CreateDir(const std::string& dirname) = 0; virtual Status CreateDir(const std::string& dirname) = 0;
// Delete the specified directory. // Delete the specified directory.
virtual Status DeleteDir(const std::string& dirname) = 0; //
// The default implementation calls DeleteDir, to support legacy Env
// implementations. Updated Env implementations must override RemoveDir and
// ignore the existence of DeleteDir. Modern code calling into the Env API
// must call RemoveDir instead of DeleteDir.
//
// A future release will remove DeleteDir and the default implementation of
// RemoveDir.
virtual Status RemoveDir(const std::string& dirname);
// DEPRECATED: Modern Env implementations should override RemoveDir instead.
//
// The default implementation calls RemoveDir, to support legacy Env user
// code that calls this method on modern Env implementations. Modern Env user
// code should call RemoveDir.
//
// A future release will remove this method.
virtual Status DeleteDir(const std::string& dirname);
// Store the size of fname in *file_size. // Store the size of fname in *file_size.
virtual Status GetFileSize(const std::string& fname, uint64_t* file_size) = 0; virtual Status GetFileSize(const std::string& fname, uint64_t* file_size) = 0;
@ -271,7 +300,7 @@ class LEVELDB_EXPORT Logger {
virtual ~Logger(); virtual ~Logger();
// Write an entry to the log file with the specified format. // Write an entry to the log file with the specified format.
virtual void Logv(const char* format, va_list ap) = 0; virtual void Logv(const char* format, std::va_list ap) = 0;
}; };
// Identifies a locked file. // Identifies a locked file.
@ -333,14 +362,14 @@ class LEVELDB_EXPORT EnvWrapper : public Env {
std::vector<std::string>* r) override { std::vector<std::string>* r) override {
return target_->GetChildren(dir, r); return target_->GetChildren(dir, r);
} }
Status DeleteFile(const std::string& f) override { Status RemoveFile(const std::string& f) override {
return target_->DeleteFile(f); return target_->RemoveFile(f);
} }
Status CreateDir(const std::string& d) override { Status CreateDir(const std::string& d) override {
return target_->CreateDir(d); return target_->CreateDir(d);
} }
Status DeleteDir(const std::string& d) override { Status RemoveDir(const std::string& d) override {
return target_->DeleteDir(d); return target_->RemoveDir(d);
} }
Status GetFileSize(const std::string& f, uint64_t* s) override { Status GetFileSize(const std::string& f, uint64_t* s) override {
return target_->GetFileSize(f, s); return target_->GetFileSize(f, s);
@ -375,7 +404,8 @@ class LEVELDB_EXPORT EnvWrapper : public Env {
} // namespace leveldb } // namespace leveldb
// Redefine DeleteFile if necessary. // This workaround can be removed when leveldb::Env::DeleteFile is removed.
// Redefine DeleteFile if it was undefined earlier.
#if defined(_WIN32) && defined(LEVELDB_DELETEFILE_UNDEFINED) #if defined(_WIN32) && defined(LEVELDB_DELETEFILE_UNDEFINED)
#if defined(UNICODE) #if defined(UNICODE)
#define DeleteFile DeleteFileW #define DeleteFile DeleteFileW

View File

@ -5,7 +5,7 @@
#ifndef STORAGE_LEVELDB_INCLUDE_OPTIONS_H_ #ifndef STORAGE_LEVELDB_INCLUDE_OPTIONS_H_
#define STORAGE_LEVELDB_INCLUDE_OPTIONS_H_ #define STORAGE_LEVELDB_INCLUDE_OPTIONS_H_
#include <stddef.h> #include <cstddef>
#include "leveldb/export.h" #include "leveldb/export.h"

View File

@ -15,10 +15,9 @@
#ifndef STORAGE_LEVELDB_INCLUDE_SLICE_H_ #ifndef STORAGE_LEVELDB_INCLUDE_SLICE_H_
#define STORAGE_LEVELDB_INCLUDE_SLICE_H_ #define STORAGE_LEVELDB_INCLUDE_SLICE_H_
#include <assert.h> #include <cassert>
#include <stddef.h> #include <cstddef>
#include <string.h> #include <cstring>
#include <string> #include <string>
#include "leveldb/export.h" #include "leveldb/export.h"

View File

@ -5,7 +5,7 @@
#ifndef STORAGE_LEVELDB_INCLUDE_TABLE_H_ #ifndef STORAGE_LEVELDB_INCLUDE_TABLE_H_
#define STORAGE_LEVELDB_INCLUDE_TABLE_H_ #define STORAGE_LEVELDB_INCLUDE_TABLE_H_
#include <stdint.h> #include <cstdint>
#include "leveldb/export.h" #include "leveldb/export.h"
#include "leveldb/iterator.h" #include "leveldb/iterator.h"

View File

@ -13,7 +13,7 @@
#ifndef STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_ #ifndef STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_
#define STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_ #define STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_
#include <stdint.h> #include <cstdint>
#include "leveldb/export.h" #include "leveldb/export.h"
#include "leveldb/options.h" #include "leveldb/options.h"

View File

@ -7,9 +7,10 @@
#include <iostream> #include <iostream>
#include <sstream> #include <sstream>
#include "gtest/gtest.h"
#include "leveldb/db.h" #include "leveldb/db.h"
#include "leveldb/write_batch.h" #include "leveldb/write_batch.h"
#include "util/testharness.h" #include "util/testutil.h"
namespace { namespace {
@ -17,17 +18,15 @@ const int kNumKeys = 1100000;
std::string Key1(int i) { std::string Key1(int i) {
char buf[100]; char buf[100];
snprintf(buf, sizeof(buf), "my_key_%d", i); std::snprintf(buf, sizeof(buf), "my_key_%d", i);
return buf; return buf;
} }
std::string Key2(int i) { return Key1(i) + "_xxx"; } std::string Key2(int i) { return Key1(i) + "_xxx"; }
class Issue178 {};
TEST(Issue178, Test) { TEST(Issue178, Test) {
// Get rid of any state from an old run. // Get rid of any state from an old run.
std::string dbpath = leveldb::test::TmpDir() + "/leveldb_cbug_test"; std::string dbpath = testing::TempDir() + "leveldb_cbug_test";
DestroyDB(dbpath, leveldb::Options()); DestroyDB(dbpath, leveldb::Options());
// Open database. Disable compression since it affects the creation // Open database. Disable compression since it affects the creation
@ -37,28 +36,28 @@ TEST(Issue178, Test) {
leveldb::Options db_options; leveldb::Options db_options;
db_options.create_if_missing = true; db_options.create_if_missing = true;
db_options.compression = leveldb::kNoCompression; db_options.compression = leveldb::kNoCompression;
ASSERT_OK(leveldb::DB::Open(db_options, dbpath, &db)); ASSERT_LEVELDB_OK(leveldb::DB::Open(db_options, dbpath, &db));
// create first key range // create first key range
leveldb::WriteBatch batch; leveldb::WriteBatch batch;
for (size_t i = 0; i < kNumKeys; i++) { for (size_t i = 0; i < kNumKeys; i++) {
batch.Put(Key1(i), "value for range 1 key"); batch.Put(Key1(i), "value for range 1 key");
} }
ASSERT_OK(db->Write(leveldb::WriteOptions(), &batch)); ASSERT_LEVELDB_OK(db->Write(leveldb::WriteOptions(), &batch));
// create second key range // create second key range
batch.Clear(); batch.Clear();
for (size_t i = 0; i < kNumKeys; i++) { for (size_t i = 0; i < kNumKeys; i++) {
batch.Put(Key2(i), "value for range 2 key"); batch.Put(Key2(i), "value for range 2 key");
} }
ASSERT_OK(db->Write(leveldb::WriteOptions(), &batch)); ASSERT_LEVELDB_OK(db->Write(leveldb::WriteOptions(), &batch));
// delete second key range // delete second key range
batch.Clear(); batch.Clear();
for (size_t i = 0; i < kNumKeys; i++) { for (size_t i = 0; i < kNumKeys; i++) {
batch.Delete(Key2(i)); batch.Delete(Key2(i));
} }
ASSERT_OK(db->Write(leveldb::WriteOptions(), &batch)); ASSERT_LEVELDB_OK(db->Write(leveldb::WriteOptions(), &batch));
// compact database // compact database
std::string start_key = Key1(0); std::string start_key = Key1(0);
@ -84,5 +83,3 @@ TEST(Issue178, Test) {
} }
} // anonymous namespace } // anonymous namespace
int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }

View File

@ -6,35 +6,34 @@
// to forward, the current key can be yielded unexpectedly if a new // to forward, the current key can be yielded unexpectedly if a new
// mutation has been added just before the current key. // mutation has been added just before the current key.
#include "gtest/gtest.h"
#include "leveldb/db.h" #include "leveldb/db.h"
#include "util/testharness.h" #include "util/testutil.h"
namespace leveldb { namespace leveldb {
class Issue200 {};
TEST(Issue200, Test) { TEST(Issue200, Test) {
// Get rid of any state from an old run. // Get rid of any state from an old run.
std::string dbpath = test::TmpDir() + "/leveldb_issue200_test"; std::string dbpath = testing::TempDir() + "leveldb_issue200_test";
DestroyDB(dbpath, Options()); DestroyDB(dbpath, Options());
DB* db; DB* db;
Options options; Options options;
options.create_if_missing = true; options.create_if_missing = true;
ASSERT_OK(DB::Open(options, dbpath, &db)); ASSERT_LEVELDB_OK(DB::Open(options, dbpath, &db));
WriteOptions write_options; WriteOptions write_options;
ASSERT_OK(db->Put(write_options, "1", "b")); ASSERT_LEVELDB_OK(db->Put(write_options, "1", "b"));
ASSERT_OK(db->Put(write_options, "2", "c")); ASSERT_LEVELDB_OK(db->Put(write_options, "2", "c"));
ASSERT_OK(db->Put(write_options, "3", "d")); ASSERT_LEVELDB_OK(db->Put(write_options, "3", "d"));
ASSERT_OK(db->Put(write_options, "4", "e")); ASSERT_LEVELDB_OK(db->Put(write_options, "4", "e"));
ASSERT_OK(db->Put(write_options, "5", "f")); ASSERT_LEVELDB_OK(db->Put(write_options, "5", "f"));
ReadOptions read_options; ReadOptions read_options;
Iterator* iter = db->NewIterator(read_options); Iterator* iter = db->NewIterator(read_options);
// Add an element that should not be reflected in the iterator. // Add an element that should not be reflected in the iterator.
ASSERT_OK(db->Put(write_options, "25", "cd")); ASSERT_LEVELDB_OK(db->Put(write_options, "25", "cd"));
iter->Seek("5"); iter->Seek("5");
ASSERT_EQ(iter->key().ToString(), "5"); ASSERT_EQ(iter->key().ToString(), "5");
@ -53,5 +52,3 @@ TEST(Issue200, Test) {
} }
} // namespace leveldb } // namespace leveldb
int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }

View File

@ -9,9 +9,10 @@
#include <string> #include <string>
#include <vector> #include <vector>
#include "gtest/gtest.h"
#include "leveldb/db.h" #include "leveldb/db.h"
#include "leveldb/write_batch.h" #include "leveldb/write_batch.h"
#include "util/testharness.h" #include "util/testutil.h"
namespace leveldb { namespace leveldb {
@ -37,8 +38,6 @@ std::string CreateRandomString(int32_t index) {
} // namespace } // namespace
class Issue320 {};
TEST(Issue320, Test) { TEST(Issue320, Test) {
std::srand(0); std::srand(0);
@ -53,8 +52,8 @@ TEST(Issue320, Test) {
Options options; Options options;
options.create_if_missing = true; options.create_if_missing = true;
std::string dbpath = test::TmpDir() + "/leveldb_issue320_test"; std::string dbpath = testing::TempDir() + "leveldb_issue320_test";
ASSERT_OK(DB::Open(options, dbpath, &db)); ASSERT_LEVELDB_OK(DB::Open(options, dbpath, &db));
uint32_t target_size = 10000; uint32_t target_size = 10000;
uint32_t num_items = 0; uint32_t num_items = 0;
@ -78,7 +77,8 @@ TEST(Issue320, Test) {
CreateRandomString(index), CreateRandomString(index))); CreateRandomString(index), CreateRandomString(index)));
batch.Put(test_map[index]->first, test_map[index]->second); batch.Put(test_map[index]->first, test_map[index]->second);
} else { } else {
ASSERT_OK(db->Get(readOptions, test_map[index]->first, &old_value)); ASSERT_LEVELDB_OK(
db->Get(readOptions, test_map[index]->first, &old_value));
if (old_value != test_map[index]->second) { if (old_value != test_map[index]->second) {
std::cout << "ERROR incorrect value returned by Get" << std::endl; std::cout << "ERROR incorrect value returned by Get" << std::endl;
std::cout << " count=" << count << std::endl; std::cout << " count=" << count << std::endl;
@ -102,7 +102,7 @@ TEST(Issue320, Test) {
} }
} }
ASSERT_OK(db->Write(writeOptions, &batch)); ASSERT_LEVELDB_OK(db->Write(writeOptions, &batch));
if (keep_snapshots && GenerateRandomNumber(10) == 0) { if (keep_snapshots && GenerateRandomNumber(10) == 0) {
int i = GenerateRandomNumber(snapshots.size()); int i = GenerateRandomNumber(snapshots.size());
@ -124,5 +124,3 @@ TEST(Issue320, Test) {
} }
} // namespace leveldb } // namespace leveldb
int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }

View File

@ -30,10 +30,4 @@
#cmakedefine01 HAVE_SNAPPY #cmakedefine01 HAVE_SNAPPY
#endif // !defined(HAVE_SNAPPY) #endif // !defined(HAVE_SNAPPY)
// Define to 1 if your processor stores words with the most significant byte
// first (like Motorola and SPARC, unlike Intel and VAX).
#if !defined(LEVELDB_IS_BIG_ENDIAN)
#cmakedefine01 LEVELDB_IS_BIG_ENDIAN
#endif // !defined(LEVELDB_IS_BIG_ENDIAN)
#endif // STORAGE_LEVELDB_PORT_PORT_CONFIG_H_ #endif // STORAGE_LEVELDB_PORT_PORT_CONFIG_H_

View File

@ -18,10 +18,6 @@ namespace port {
// TODO(jorlow): Many of these belong more in the environment class rather than // TODO(jorlow): Many of these belong more in the environment class rather than
// here. We should try moving them and see if it affects perf. // here. We should try moving them and see if it affects perf.
// The following boolean constant must be true on a little-endian machine
// and false otherwise.
static const bool kLittleEndian = true /* or some other expression */;
// ------------------ Threading ------------------- // ------------------ Threading -------------------
// A Mutex represents an exclusive lock. // A Mutex represents an exclusive lock.

View File

@ -41,8 +41,6 @@
namespace leveldb { namespace leveldb {
namespace port { namespace port {
static const bool kLittleEndian = !LEVELDB_IS_BIG_ENDIAN;
class CondVar; class CondVar;
// Thinly wraps std::mutex. // Thinly wraps std::mutex.

View File

@ -166,6 +166,24 @@ class Block::Iter : public Iterator {
// with a key < target // with a key < target
uint32_t left = 0; uint32_t left = 0;
uint32_t right = num_restarts_ - 1; uint32_t right = num_restarts_ - 1;
int current_key_compare = 0;
if (Valid()) {
// If we're already scanning, use the current position as a starting
// point. This is beneficial if the key we're seeking to is ahead of the
// current position.
current_key_compare = Compare(key_, target);
if (current_key_compare < 0) {
// key_ is smaller than target
left = restart_index_;
} else if (current_key_compare > 0) {
right = restart_index_;
} else {
// We're seeking to the key we're already at.
return;
}
}
while (left < right) { while (left < right) {
uint32_t mid = (left + right + 1) / 2; uint32_t mid = (left + right + 1) / 2;
uint32_t region_offset = GetRestartPoint(mid); uint32_t region_offset = GetRestartPoint(mid);
@ -189,8 +207,15 @@ class Block::Iter : public Iterator {
} }
} }
// We might be able to use our current position within the restart block.
// This is true if we determined the key we desire is in the current block
// and is after than the current key.
assert(current_key_compare == 0 || Valid());
bool skip_seek = left == restart_index_ && current_key_compare < 0;
if (!skip_seek) {
SeekToRestartPoint(left);
}
// Linear search (within restart block) for first key >= target // Linear search (within restart block) for first key >= target
SeekToRestartPoint(left);
while (true) { while (true) {
if (!ParseNextKey()) { if (!ParseNextKey()) {
return; return;
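
The idea behind the change above, shown as a self-contained sketch rather than LevelDB code: when the iterator is already positioned, its current restart index can bound the binary search for the next `Seek()` target instead of always searching the full restart array. `SeekFromCurrent` and the integer keys are illustrative stand-ins only.

```c++
#include <iostream>
#include <vector>

// Returns the index of the last restart point whose key is < target,
// searching only within the bounds implied by the current position.
int SeekFromCurrent(const std::vector<int>& restart_keys, int current_index,
                    int current_key, int target) {
  int left = 0;
  int right = static_cast<int>(restart_keys.size()) - 1;
  if (current_key < target) {
    left = current_index;   // target is ahead: no need to look before here
  } else if (current_key > target) {
    right = current_index;  // target is behind: no need to look after here
  } else {
    return current_index;   // already at the target
  }
  while (left < right) {
    int mid = (left + right + 1) / 2;
    if (restart_keys[mid] < target) {
      left = mid;            // restart key at mid is still before the target
    } else {
      right = mid - 1;       // restart key at mid is >= target
    }
  }
  return left;
}

int main() {
  const std::vector<int> restart_keys = {10, 20, 30, 40, 50};
  // Iterator currently sits in the restart region starting at index 1 (key 25);
  // seeking forward to 45 only needs to search indices [1, 4].
  std::cout << SeekFromCurrent(restart_keys, 1, 25, 45) << "\n";  // prints 3
}
```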

View File

@ -5,8 +5,8 @@
#ifndef STORAGE_LEVELDB_TABLE_BLOCK_H_ #ifndef STORAGE_LEVELDB_TABLE_BLOCK_H_
#define STORAGE_LEVELDB_TABLE_BLOCK_H_ #define STORAGE_LEVELDB_TABLE_BLOCK_H_
#include <stddef.h> #include <cstddef>
#include <stdint.h> #include <cstdint>
#include "leveldb/iterator.h" #include "leveldb/iterator.h"

View File

@ -28,9 +28,8 @@
#include "table/block_builder.h" #include "table/block_builder.h"
#include <assert.h>
#include <algorithm> #include <algorithm>
#include <cassert>
#include "leveldb/comparator.h" #include "leveldb/comparator.h"
#include "leveldb/options.h" #include "leveldb/options.h"

View File

@ -5,8 +5,7 @@
#ifndef STORAGE_LEVELDB_TABLE_BLOCK_BUILDER_H_ #ifndef STORAGE_LEVELDB_TABLE_BLOCK_BUILDER_H_
#define STORAGE_LEVELDB_TABLE_BLOCK_BUILDER_H_ #define STORAGE_LEVELDB_TABLE_BLOCK_BUILDER_H_
#include <stdint.h> #include <cstdint>
#include <vector> #include <vector>
#include "leveldb/slice.h" #include "leveldb/slice.h"

View File

@ -9,9 +9,8 @@
#ifndef STORAGE_LEVELDB_TABLE_FILTER_BLOCK_H_ #ifndef STORAGE_LEVELDB_TABLE_FILTER_BLOCK_H_
#define STORAGE_LEVELDB_TABLE_FILTER_BLOCK_H_ #define STORAGE_LEVELDB_TABLE_FILTER_BLOCK_H_
#include <stddef.h> #include <cstddef>
#include <stdint.h> #include <cstdint>
#include <string> #include <string>
#include <vector> #include <vector>

View File

@ -4,11 +4,11 @@
#include "table/filter_block.h" #include "table/filter_block.h"
#include "gtest/gtest.h"
#include "leveldb/filter_policy.h" #include "leveldb/filter_policy.h"
#include "util/coding.h" #include "util/coding.h"
#include "util/hash.h" #include "util/hash.h"
#include "util/logging.h" #include "util/logging.h"
#include "util/testharness.h"
#include "util/testutil.h" #include "util/testutil.h"
namespace leveldb { namespace leveldb {
@ -36,12 +36,12 @@ class TestHashFilter : public FilterPolicy {
} }
}; };
class FilterBlockTest { class FilterBlockTest : public testing::Test {
public: public:
TestHashFilter policy_; TestHashFilter policy_;
}; };
TEST(FilterBlockTest, EmptyBuilder) { TEST_F(FilterBlockTest, EmptyBuilder) {
FilterBlockBuilder builder(&policy_); FilterBlockBuilder builder(&policy_);
Slice block = builder.Finish(); Slice block = builder.Finish();
ASSERT_EQ("\\x00\\x00\\x00\\x00\\x0b", EscapeString(block)); ASSERT_EQ("\\x00\\x00\\x00\\x00\\x0b", EscapeString(block));
@ -50,7 +50,7 @@ TEST(FilterBlockTest, EmptyBuilder) {
ASSERT_TRUE(reader.KeyMayMatch(100000, "foo")); ASSERT_TRUE(reader.KeyMayMatch(100000, "foo"));
} }
TEST(FilterBlockTest, SingleChunk) { TEST_F(FilterBlockTest, SingleChunk) {
FilterBlockBuilder builder(&policy_); FilterBlockBuilder builder(&policy_);
builder.StartBlock(100); builder.StartBlock(100);
builder.AddKey("foo"); builder.AddKey("foo");
@ -71,7 +71,7 @@ TEST(FilterBlockTest, SingleChunk) {
ASSERT_TRUE(!reader.KeyMayMatch(100, "other")); ASSERT_TRUE(!reader.KeyMayMatch(100, "other"));
} }
TEST(FilterBlockTest, MultiChunk) { TEST_F(FilterBlockTest, MultiChunk) {
FilterBlockBuilder builder(&policy_); FilterBlockBuilder builder(&policy_);
// First filter // First filter
@ -120,5 +120,3 @@ TEST(FilterBlockTest, MultiChunk) {
} }
} // namespace leveldb } // namespace leveldb
int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }

View File

@ -5,8 +5,7 @@
#ifndef STORAGE_LEVELDB_TABLE_FORMAT_H_ #ifndef STORAGE_LEVELDB_TABLE_FORMAT_H_
#define STORAGE_LEVELDB_TABLE_FORMAT_H_ #define STORAGE_LEVELDB_TABLE_FORMAT_H_
#include <stdint.h> #include <cstdint>
#include <string> #include <string>
#include "leveldb/slice.h" #include "leveldb/slice.h"

View File

@ -54,13 +54,11 @@ Status Table::Open(const Options& options, RandomAccessFile* file,
// Read the index block // Read the index block
BlockContents index_block_contents; BlockContents index_block_contents;
if (s.ok()) { ReadOptions opt;
ReadOptions opt; if (options.paranoid_checks) {
if (options.paranoid_checks) { opt.verify_checksums = true;
opt.verify_checksums = true;
}
s = ReadBlock(file, opt, footer.index_handle(), &index_block_contents);
} }
s = ReadBlock(file, opt, footer.index_handle(), &index_block_contents);
if (s.ok()) { if (s.ok()) {
// We've successfully read the footer and the index block: we're // We've successfully read the footer and the index block: we're

View File

@ -4,7 +4,7 @@
#include "leveldb/table_builder.h" #include "leveldb/table_builder.h"
#include <assert.h> #include <cassert>
#include "leveldb/comparator.h" #include "leveldb/comparator.h"
#include "leveldb/env.h" #include "leveldb/env.h"

View File

@ -7,6 +7,7 @@
#include <map> #include <map>
#include <string> #include <string>
#include "gtest/gtest.h"
#include "db/dbformat.h" #include "db/dbformat.h"
#include "db/memtable.h" #include "db/memtable.h"
#include "db/write_batch_internal.h" #include "db/write_batch_internal.h"
@ -18,7 +19,6 @@
#include "table/block_builder.h" #include "table/block_builder.h"
#include "table/format.h" #include "table/format.h"
#include "util/random.h" #include "util/random.h"
#include "util/testharness.h"
#include "util/testutil.h" #include "util/testutil.h"
namespace leveldb { namespace leveldb {
@ -123,7 +123,7 @@ class StringSource : public RandomAccessFile {
if (offset + n > contents_.size()) { if (offset + n > contents_.size()) {
n = contents_.size() - offset; n = contents_.size() - offset;
} }
memcpy(scratch, &contents_[offset], n); std::memcpy(scratch, &contents_[offset], n);
*result = Slice(scratch, n); *result = Slice(scratch, n);
return Status::OK(); return Status::OK();
} }
@ -219,12 +219,12 @@ class TableConstructor : public Constructor {
for (const auto& kvp : data) { for (const auto& kvp : data) {
builder.Add(kvp.first, kvp.second); builder.Add(kvp.first, kvp.second);
ASSERT_TRUE(builder.status().ok()); EXPECT_LEVELDB_OK(builder.status());
} }
Status s = builder.Finish(); Status s = builder.Finish();
ASSERT_TRUE(s.ok()) << s.ToString(); EXPECT_LEVELDB_OK(s);
ASSERT_EQ(sink.contents().size(), builder.FileSize()); EXPECT_EQ(sink.contents().size(), builder.FileSize());
// Open the table // Open the table
source_ = new StringSource(sink.contents()); source_ = new StringSource(sink.contents());
@ -340,7 +340,7 @@ class DBConstructor : public Constructor {
for (const auto& kvp : data) { for (const auto& kvp : data) {
WriteBatch batch; WriteBatch batch;
batch.Put(kvp.first, kvp.second); batch.Put(kvp.first, kvp.second);
ASSERT_TRUE(db_->Write(WriteOptions(), &batch).ok()); EXPECT_TRUE(db_->Write(WriteOptions(), &batch).ok());
} }
return Status::OK(); return Status::OK();
} }
@ -352,7 +352,7 @@ class DBConstructor : public Constructor {
private: private:
void NewDB() { void NewDB() {
std::string name = test::TmpDir() + "/table_testdb"; std::string name = testing::TempDir() + "table_testdb";
Options options; Options options;
options.comparator = comparator_; options.comparator = comparator_;
@ -403,7 +403,7 @@ static const TestArgs kTestArgList[] = {
}; };
static const int kNumTestArgs = sizeof(kTestArgList) / sizeof(kTestArgList[0]); static const int kNumTestArgs = sizeof(kTestArgList) / sizeof(kTestArgList[0]);
class Harness { class Harness : public testing::Test {
public: public:
Harness() : constructor_(nullptr) {} Harness() : constructor_(nullptr) {}
@ -485,13 +485,13 @@ class Harness {
Iterator* iter = constructor_->NewIterator(); Iterator* iter = constructor_->NewIterator();
ASSERT_TRUE(!iter->Valid()); ASSERT_TRUE(!iter->Valid());
KVMap::const_iterator model_iter = data.begin(); KVMap::const_iterator model_iter = data.begin();
if (kVerbose) fprintf(stderr, "---\n"); if (kVerbose) std::fprintf(stderr, "---\n");
for (int i = 0; i < 200; i++) { for (int i = 0; i < 200; i++) {
const int toss = rnd->Uniform(5); const int toss = rnd->Uniform(5);
switch (toss) { switch (toss) {
case 0: { case 0: {
if (iter->Valid()) { if (iter->Valid()) {
if (kVerbose) fprintf(stderr, "Next\n"); if (kVerbose) std::fprintf(stderr, "Next\n");
iter->Next(); iter->Next();
++model_iter; ++model_iter;
ASSERT_EQ(ToString(data, model_iter), ToString(iter)); ASSERT_EQ(ToString(data, model_iter), ToString(iter));
@ -500,7 +500,7 @@ class Harness {
} }
case 1: { case 1: {
if (kVerbose) fprintf(stderr, "SeekToFirst\n"); if (kVerbose) std::fprintf(stderr, "SeekToFirst\n");
iter->SeekToFirst(); iter->SeekToFirst();
model_iter = data.begin(); model_iter = data.begin();
ASSERT_EQ(ToString(data, model_iter), ToString(iter)); ASSERT_EQ(ToString(data, model_iter), ToString(iter));
@ -511,7 +511,7 @@ class Harness {
std::string key = PickRandomKey(rnd, keys); std::string key = PickRandomKey(rnd, keys);
model_iter = data.lower_bound(key); model_iter = data.lower_bound(key);
if (kVerbose) if (kVerbose)
fprintf(stderr, "Seek '%s'\n", EscapeString(key).c_str()); std::fprintf(stderr, "Seek '%s'\n", EscapeString(key).c_str());
iter->Seek(Slice(key)); iter->Seek(Slice(key));
ASSERT_EQ(ToString(data, model_iter), ToString(iter)); ASSERT_EQ(ToString(data, model_iter), ToString(iter));
break; break;
@ -519,7 +519,7 @@ class Harness {
case 3: { case 3: {
if (iter->Valid()) { if (iter->Valid()) {
if (kVerbose) fprintf(stderr, "Prev\n"); if (kVerbose) std::fprintf(stderr, "Prev\n");
iter->Prev(); iter->Prev();
if (model_iter == data.begin()) { if (model_iter == data.begin()) {
model_iter = data.end(); // Wrap around to invalid value model_iter = data.end(); // Wrap around to invalid value
@ -532,7 +532,7 @@ class Harness {
} }
case 4: { case 4: {
if (kVerbose) fprintf(stderr, "SeekToLast\n"); if (kVerbose) std::fprintf(stderr, "SeekToLast\n");
iter->SeekToLast(); iter->SeekToLast();
if (keys.empty()) { if (keys.empty()) {
model_iter = data.end(); model_iter = data.end();
@ -609,7 +609,7 @@ class Harness {
}; };
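The random walk in Harness is a model-based check: the table/DB iterator is stepped in lock-step with an iterator over an in-memory map, and the two are compared after every operation. A condensed sketch of that pattern, independent of the harness types (all names are illustrative):

#include <cassert>
#include <map>
#include <string>

#include "leveldb/iterator.h"

// Walks a leveldb iterator in lock-step with a trusted std::map model.
void CheckAgainstModel(const std::map<std::string, std::string>& model,
                       leveldb::Iterator* iter) {
  auto model_iter = model.begin();
  iter->SeekToFirst();
  while (model_iter != model.end()) {
    assert(iter->Valid());
    assert(iter->key().ToString() == model_iter->first);
    assert(iter->value().ToString() == model_iter->second);
    iter->Next();
    ++model_iter;
  }
  assert(!iter->Valid());  // Model and iterator must run out together.
}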
// Test empty table/block. // Test empty table/block.
TEST(Harness, Empty) { TEST_F(Harness, Empty) {
for (int i = 0; i < kNumTestArgs; i++) { for (int i = 0; i < kNumTestArgs; i++) {
Init(kTestArgList[i]); Init(kTestArgList[i]);
Random rnd(test::RandomSeed() + 1); Random rnd(test::RandomSeed() + 1);
@ -620,7 +620,7 @@ TEST(Harness, Empty) {
// Special test for a block with no restart entries. The C++ leveldb // Special test for a block with no restart entries. The C++ leveldb
// code never generates such blocks, but the Java version of leveldb // code never generates such blocks, but the Java version of leveldb
// seems to. // seems to.
TEST(Harness, ZeroRestartPointsInBlock) { TEST_F(Harness, ZeroRestartPointsInBlock) {
char data[sizeof(uint32_t)]; char data[sizeof(uint32_t)];
memset(data, 0, sizeof(data)); memset(data, 0, sizeof(data));
BlockContents contents; BlockContents contents;
@ -639,7 +639,7 @@ TEST(Harness, ZeroRestartPointsInBlock) {
} }
// Test the empty key // Test the empty key
TEST(Harness, SimpleEmptyKey) { TEST_F(Harness, SimpleEmptyKey) {
for (int i = 0; i < kNumTestArgs; i++) { for (int i = 0; i < kNumTestArgs; i++) {
Init(kTestArgList[i]); Init(kTestArgList[i]);
Random rnd(test::RandomSeed() + 1); Random rnd(test::RandomSeed() + 1);
@ -648,7 +648,7 @@ TEST(Harness, SimpleEmptyKey) {
} }
} }
TEST(Harness, SimpleSingle) { TEST_F(Harness, SimpleSingle) {
for (int i = 0; i < kNumTestArgs; i++) { for (int i = 0; i < kNumTestArgs; i++) {
Init(kTestArgList[i]); Init(kTestArgList[i]);
Random rnd(test::RandomSeed() + 2); Random rnd(test::RandomSeed() + 2);
@ -657,7 +657,7 @@ TEST(Harness, SimpleSingle) {
} }
} }
TEST(Harness, SimpleMulti) { TEST_F(Harness, SimpleMulti) {
for (int i = 0; i < kNumTestArgs; i++) { for (int i = 0; i < kNumTestArgs; i++) {
Init(kTestArgList[i]); Init(kTestArgList[i]);
Random rnd(test::RandomSeed() + 3); Random rnd(test::RandomSeed() + 3);
@ -668,7 +668,7 @@ TEST(Harness, SimpleMulti) {
} }
} }
TEST(Harness, SimpleSpecialKey) { TEST_F(Harness, SimpleSpecialKey) {
for (int i = 0; i < kNumTestArgs; i++) { for (int i = 0; i < kNumTestArgs; i++) {
Init(kTestArgList[i]); Init(kTestArgList[i]);
Random rnd(test::RandomSeed() + 4); Random rnd(test::RandomSeed() + 4);
@ -677,15 +677,15 @@ TEST(Harness, SimpleSpecialKey) {
} }
} }
TEST(Harness, Randomized) { TEST_F(Harness, Randomized) {
for (int i = 0; i < kNumTestArgs; i++) { for (int i = 0; i < kNumTestArgs; i++) {
Init(kTestArgList[i]); Init(kTestArgList[i]);
Random rnd(test::RandomSeed() + 5); Random rnd(test::RandomSeed() + 5);
for (int num_entries = 0; num_entries < 2000; for (int num_entries = 0; num_entries < 2000;
num_entries += (num_entries < 50 ? 1 : 200)) { num_entries += (num_entries < 50 ? 1 : 200)) {
if ((num_entries % 10) == 0) { if ((num_entries % 10) == 0) {
fprintf(stderr, "case %d of %d: num_entries = %d\n", (i + 1), std::fprintf(stderr, "case %d of %d: num_entries = %d\n", (i + 1),
int(kNumTestArgs), num_entries); int(kNumTestArgs), num_entries);
} }
for (int e = 0; e < num_entries; e++) { for (int e = 0; e < num_entries; e++) {
std::string v; std::string v;
@ -697,7 +697,7 @@ TEST(Harness, Randomized) {
} }
} }
TEST(Harness, RandomizedLongDB) { TEST_F(Harness, RandomizedLongDB) {
Random rnd(test::RandomSeed()); Random rnd(test::RandomSeed());
TestArgs args = {DB_TEST, false, 16}; TestArgs args = {DB_TEST, false, 16};
Init(args); Init(args);
@ -714,15 +714,13 @@ TEST(Harness, RandomizedLongDB) {
for (int level = 0; level < config::kNumLevels; level++) { for (int level = 0; level < config::kNumLevels; level++) {
std::string value; std::string value;
char name[100]; char name[100];
snprintf(name, sizeof(name), "leveldb.num-files-at-level%d", level); std::snprintf(name, sizeof(name), "leveldb.num-files-at-level%d", level);
ASSERT_TRUE(db()->GetProperty(name, &value)); ASSERT_TRUE(db()->GetProperty(name, &value));
files += atoi(value.c_str()); files += atoi(value.c_str());
} }
ASSERT_GT(files, 0); ASSERT_GT(files, 0);
} }
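DB::GetProperty is how RandomizedLongDB inspects per-level file counts; the property-name format comes straight from the loop above. A short sketch of the same counting logic as a helper (the function name is illustrative):

#include <cstdio>
#include <cstdlib>
#include <string>

#include "leveldb/db.h"

// Sums "leveldb.num-files-at-level<N>" across the levels of an open database.
int TotalTableFiles(leveldb::DB* db, int num_levels) {
  int files = 0;
  for (int level = 0; level < num_levels; level++) {
    char name[100];
    std::snprintf(name, sizeof(name), "leveldb.num-files-at-level%d", level);
    std::string value;
    if (db->GetProperty(name, &value)) {
      files += std::atoi(value.c_str());
    }
  }
  return files;
}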
class MemTableTest {};
TEST(MemTableTest, Simple) { TEST(MemTableTest, Simple) {
InternalKeyComparator cmp(BytewiseComparator()); InternalKeyComparator cmp(BytewiseComparator());
MemTable* memtable = new MemTable(cmp); MemTable* memtable = new MemTable(cmp);
@ -738,8 +736,8 @@ TEST(MemTableTest, Simple) {
Iterator* iter = memtable->NewIterator(); Iterator* iter = memtable->NewIterator();
iter->SeekToFirst(); iter->SeekToFirst();
while (iter->Valid()) { while (iter->Valid()) {
fprintf(stderr, "key: '%s' -> '%s'\n", iter->key().ToString().c_str(), std::fprintf(stderr, "key: '%s' -> '%s'\n", iter->key().ToString().c_str(),
iter->value().ToString().c_str()); iter->value().ToString().c_str());
iter->Next(); iter->Next();
} }
@ -750,15 +748,13 @@ TEST(MemTableTest, Simple) {
static bool Between(uint64_t val, uint64_t low, uint64_t high) { static bool Between(uint64_t val, uint64_t low, uint64_t high) {
bool result = (val >= low) && (val <= high); bool result = (val >= low) && (val <= high);
if (!result) { if (!result) {
fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n", std::fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
(unsigned long long)(val), (unsigned long long)(low), (unsigned long long)(val), (unsigned long long)(low),
(unsigned long long)(high)); (unsigned long long)(high));
} }
return result; return result;
} }
class TableTest {};
TEST(TableTest, ApproximateOffsetOfPlain) { TEST(TableTest, ApproximateOffsetOfPlain) {
TableConstructor c(BytewiseComparator()); TableConstructor c(BytewiseComparator());
c.Add("k01", "hello"); c.Add("k01", "hello");
@ -796,7 +792,7 @@ static bool SnappyCompressionSupported() {
TEST(TableTest, ApproximateOffsetOfCompressed) { TEST(TableTest, ApproximateOffsetOfCompressed) {
if (!SnappyCompressionSupported()) { if (!SnappyCompressionSupported()) {
fprintf(stderr, "skipping compression tests\n"); std::fprintf(stderr, "skipping compression tests\n");
return; return;
} }
@ -831,5 +827,3 @@ TEST(TableTest, ApproximateOffsetOfCompressed) {
} }
} // namespace leveldb } // namespace leveldb
int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }

third_party/benchmark vendored Submodule

@ -0,0 +1 @@
Subproject commit 7d0d9061d83b663ce05d9de5da3d5865a3845b79

third_party/googletest vendored Submodule

@ -0,0 +1 @@
Subproject commit 662fe38e44900c007eccb65a5d2ea19df7bd520e


@ -4,13 +4,11 @@
#include "util/arena.h" #include "util/arena.h"
#include "gtest/gtest.h"
#include "util/random.h" #include "util/random.h"
#include "util/testharness.h"
namespace leveldb { namespace leveldb {
class ArenaTest {};
TEST(ArenaTest, Empty) { Arena arena; } TEST(ArenaTest, Empty) { Arena arena; }
TEST(ArenaTest, Simple) { TEST(ArenaTest, Simple) {
@ -61,5 +59,3 @@ TEST(ArenaTest, Simple) {
} }
} // namespace leveldb } // namespace leveldb
int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }


@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors. // found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "gtest/gtest.h"
#include "leveldb/filter_policy.h" #include "leveldb/filter_policy.h"
#include "util/coding.h" #include "util/coding.h"
#include "util/logging.h" #include "util/logging.h"
#include "util/testharness.h"
#include "util/testutil.h" #include "util/testutil.h"
namespace leveldb { namespace leveldb {
@ -18,7 +17,7 @@ static Slice Key(int i, char* buffer) {
return Slice(buffer, sizeof(uint32_t)); return Slice(buffer, sizeof(uint32_t));
} }
class BloomTest { class BloomTest : public testing::Test {
public: public:
BloomTest() : policy_(NewBloomFilterPolicy(10)) {} BloomTest() : policy_(NewBloomFilterPolicy(10)) {}
@ -46,14 +45,14 @@ class BloomTest {
size_t FilterSize() const { return filter_.size(); } size_t FilterSize() const { return filter_.size(); }
void DumpFilter() { void DumpFilter() {
fprintf(stderr, "F("); std::fprintf(stderr, "F(");
for (size_t i = 0; i + 1 < filter_.size(); i++) { for (size_t i = 0; i + 1 < filter_.size(); i++) {
const unsigned int c = static_cast<unsigned int>(filter_[i]); const unsigned int c = static_cast<unsigned int>(filter_[i]);
for (int j = 0; j < 8; j++) { for (int j = 0; j < 8; j++) {
fprintf(stderr, "%c", (c & (1 << j)) ? '1' : '.'); std::fprintf(stderr, "%c", (c & (1 << j)) ? '1' : '.');
} }
} }
fprintf(stderr, ")\n"); std::fprintf(stderr, ")\n");
} }
bool Matches(const Slice& s) { bool Matches(const Slice& s) {
@ -80,12 +79,12 @@ class BloomTest {
std::vector<std::string> keys_; std::vector<std::string> keys_;
}; };
TEST(BloomTest, EmptyFilter) { TEST_F(BloomTest, EmptyFilter) {
ASSERT_TRUE(!Matches("hello")); ASSERT_TRUE(!Matches("hello"));
ASSERT_TRUE(!Matches("world")); ASSERT_TRUE(!Matches("world"));
} }
TEST(BloomTest, Small) { TEST_F(BloomTest, Small) {
Add("hello"); Add("hello");
Add("world"); Add("world");
ASSERT_TRUE(Matches("hello")); ASSERT_TRUE(Matches("hello"));
@ -107,7 +106,7 @@ static int NextLength(int length) {
return length; return length;
} }
TEST(BloomTest, VaryingLengths) { TEST_F(BloomTest, VaryingLengths) {
char buffer[sizeof(int)]; char buffer[sizeof(int)];
// Count number of filters that significantly exceed the false positive rate // Count number of filters that significantly exceed the false positive rate
@ -133,8 +132,9 @@ TEST(BloomTest, VaryingLengths) {
// Check false positive rate // Check false positive rate
double rate = FalsePositiveRate(); double rate = FalsePositiveRate();
if (kVerbose >= 1) { if (kVerbose >= 1) {
fprintf(stderr, "False positives: %5.2f%% @ length = %6d ; bytes = %6d\n", std::fprintf(stderr,
rate * 100.0, length, static_cast<int>(FilterSize())); "False positives: %5.2f%% @ length = %6d ; bytes = %6d\n",
rate * 100.0, length, static_cast<int>(FilterSize()));
} }
ASSERT_LE(rate, 0.02); // Must not be over 2% ASSERT_LE(rate, 0.02); // Must not be over 2%
if (rate > 0.0125) if (rate > 0.0125)
@ -143,8 +143,8 @@ TEST(BloomTest, VaryingLengths) {
good_filters++; good_filters++;
} }
if (kVerbose >= 1) { if (kVerbose >= 1) {
fprintf(stderr, "Filters: %d good, %d mediocre\n", good_filters, std::fprintf(stderr, "Filters: %d good, %d mediocre\n", good_filters,
mediocre_filters); mediocre_filters);
} }
ASSERT_LE(mediocre_filters, good_filters / 5); ASSERT_LE(mediocre_filters, good_filters / 5);
} }
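For reference when reading the 2% bound in VaryingLengths: with bits_per_key = 10 the policy ends up with roughly 10 * ln 2 ~= 6 probes, and the textbook false-positive estimate (1 - e^(-k*n/m))^k comes out near 0.8%, so the assertion leaves headroom. A sketch of that arithmetic using the standard approximation (this formula is background, not code from the diff):

#include <cmath>
#include <cstdio>

// Standard Bloom filter estimate: p = (1 - e^(-k*n/m))^k, with m/n bits per key.
double BloomFalsePositiveRate(double bits_per_key, int num_probes) {
  return std::pow(1.0 - std::exp(-num_probes / bits_per_key), num_probes);
}

int main() {
  // NewBloomFilterPolicy(10) uses about 10 * ln 2 ~= 6 probes.
  std::printf("expected FPR ~= %.4f\n", BloomFalsePositiveRate(10.0, 6));  // ~0.008
  return 0;
}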
@ -152,5 +152,3 @@ TEST(BloomTest, VaryingLengths) {
// Different bits-per-byte // Different bits-per-byte
} // namespace leveldb } // namespace leveldb
int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }


@ -2,11 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors. // found in the LICENSE file. See the AUTHORS file for names of contributors.
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include "leveldb/cache.h" #include "leveldb/cache.h"
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include "port/port.h" #include "port/port.h"
#include "port/thread_annotations.h" #include "port/thread_annotations.h"
#include "util/hash.h" #include "util/hash.h"
@ -278,7 +279,7 @@ Cache::Handle* LRUCache::Insert(const Slice& key, uint32_t hash, void* value,
e->hash = hash; e->hash = hash;
e->in_cache = false; e->in_cache = false;
e->refs = 1; // for the returned handle. e->refs = 1; // for the returned handle.
memcpy(e->key_data, key.data(), key.size()); std::memcpy(e->key_data, key.data(), key.size());
if (capacity_ > 0) { if (capacity_ > 0) {
e->refs++; // for the cache's reference. e->refs++; // for the cache's reference.


@ -5,8 +5,9 @@
#include "leveldb/cache.h" #include "leveldb/cache.h"
#include <vector> #include <vector>
#include "gtest/gtest.h"
#include "util/coding.h" #include "util/coding.h"
#include "util/testharness.h"
namespace leveldb { namespace leveldb {
@ -23,14 +24,14 @@ static int DecodeKey(const Slice& k) {
static void* EncodeValue(uintptr_t v) { return reinterpret_cast<void*>(v); } static void* EncodeValue(uintptr_t v) { return reinterpret_cast<void*>(v); }
static int DecodeValue(void* v) { return reinterpret_cast<uintptr_t>(v); } static int DecodeValue(void* v) { return reinterpret_cast<uintptr_t>(v); }
class CacheTest { class CacheTest : public testing::Test {
public: public:
static void Deleter(const Slice& key, void* v) { static void Deleter(const Slice& key, void* v) {
current_->deleted_keys_.push_back(DecodeKey(key)); current_->deleted_keys_.push_back(DecodeKey(key));
current_->deleted_values_.push_back(DecodeValue(v)); current_->deleted_values_.push_back(DecodeValue(v));
} }
static const int kCacheSize = 1000; static constexpr int kCacheSize = 1000;
std::vector<int> deleted_keys_; std::vector<int> deleted_keys_;
std::vector<int> deleted_values_; std::vector<int> deleted_values_;
Cache* cache_; Cache* cache_;
@ -59,12 +60,11 @@ class CacheTest {
} }
void Erase(int key) { cache_->Erase(EncodeKey(key)); } void Erase(int key) { cache_->Erase(EncodeKey(key)); }
static CacheTest* current_; static CacheTest* current_;
}; };
CacheTest* CacheTest::current_; CacheTest* CacheTest::current_;
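For orientation, the fixture above exercises LevelDB's public Cache interface. A minimal standalone usage sketch of that API (the key, value, and charge here are illustrative):

#include <cstdio>

#include "leveldb/cache.h"
#include "leveldb/slice.h"

// Every Insert() supplies a deleter that runs when the entry is evicted or erased.
static void DeleteValue(const leveldb::Slice& /*key*/, void* value) {
  delete static_cast<int*>(value);
}

int main() {
  leveldb::Cache* cache = leveldb::NewLRUCache(1024);  // Capacity in charge units.
  leveldb::Cache::Handle* handle =
      cache->Insert("answer", new int(42), 1 /*charge*/, &DeleteValue);
  std::printf("%d\n", *static_cast<int*>(cache->Value(handle)));
  cache->Release(handle);  // The entry stays cached until evicted or erased.
  handle = cache->Lookup("answer");
  if (handle != nullptr) cache->Release(handle);
  delete cache;  // Runs the deleter for anything still cached.
  return 0;
}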
TEST(CacheTest, HitAndMiss) { TEST_F(CacheTest, HitAndMiss) {
ASSERT_EQ(-1, Lookup(100)); ASSERT_EQ(-1, Lookup(100));
Insert(100, 101); Insert(100, 101);
@ -87,7 +87,7 @@ TEST(CacheTest, HitAndMiss) {
ASSERT_EQ(101, deleted_values_[0]); ASSERT_EQ(101, deleted_values_[0]);
} }
TEST(CacheTest, Erase) { TEST_F(CacheTest, Erase) {
Erase(200); Erase(200);
ASSERT_EQ(0, deleted_keys_.size()); ASSERT_EQ(0, deleted_keys_.size());
@ -106,7 +106,7 @@ TEST(CacheTest, Erase) {
ASSERT_EQ(1, deleted_keys_.size()); ASSERT_EQ(1, deleted_keys_.size());
} }
TEST(CacheTest, EntriesArePinned) { TEST_F(CacheTest, EntriesArePinned) {
Insert(100, 101); Insert(100, 101);
Cache::Handle* h1 = cache_->Lookup(EncodeKey(100)); Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
ASSERT_EQ(101, DecodeValue(cache_->Value(h1))); ASSERT_EQ(101, DecodeValue(cache_->Value(h1)));
@ -131,7 +131,7 @@ TEST(CacheTest, EntriesArePinned) {
ASSERT_EQ(102, deleted_values_[1]); ASSERT_EQ(102, deleted_values_[1]);
} }
TEST(CacheTest, EvictionPolicy) { TEST_F(CacheTest, EvictionPolicy) {
Insert(100, 101); Insert(100, 101);
Insert(200, 201); Insert(200, 201);
Insert(300, 301); Insert(300, 301);
@ -150,7 +150,7 @@ TEST(CacheTest, EvictionPolicy) {
cache_->Release(h); cache_->Release(h);
} }
TEST(CacheTest, UseExceedsCacheSize) { TEST_F(CacheTest, UseExceedsCacheSize) {
// Overfill the cache, keeping handles on all inserted entries. // Overfill the cache, keeping handles on all inserted entries.
std::vector<Cache::Handle*> h; std::vector<Cache::Handle*> h;
for (int i = 0; i < kCacheSize + 100; i++) { for (int i = 0; i < kCacheSize + 100; i++) {
@ -167,7 +167,7 @@ TEST(CacheTest, UseExceedsCacheSize) {
} }
} }
TEST(CacheTest, HeavyEntries) { TEST_F(CacheTest, HeavyEntries) {
// Add a bunch of light and heavy entries and then count the combined // Add a bunch of light and heavy entries and then count the combined
// size of items still in the cache, which must be approximately the // size of items still in the cache, which must be approximately the
// same as the total capacity. // same as the total capacity.
@ -194,13 +194,13 @@ TEST(CacheTest, HeavyEntries) {
ASSERT_LE(cached_weight, kCacheSize + kCacheSize / 10); ASSERT_LE(cached_weight, kCacheSize + kCacheSize / 10);
} }
TEST(CacheTest, NewId) { TEST_F(CacheTest, NewId) {
uint64_t a = cache_->NewId(); uint64_t a = cache_->NewId();
uint64_t b = cache_->NewId(); uint64_t b = cache_->NewId();
ASSERT_NE(a, b); ASSERT_NE(a, b);
} }
TEST(CacheTest, Prune) { TEST_F(CacheTest, Prune) {
Insert(1, 100); Insert(1, 100);
Insert(2, 200); Insert(2, 200);
@ -213,7 +213,7 @@ TEST(CacheTest, Prune) {
ASSERT_EQ(-1, Lookup(2)); ASSERT_EQ(-1, Lookup(2));
} }
TEST(CacheTest, ZeroSizeCache) { TEST_F(CacheTest, ZeroSizeCache) {
delete cache_; delete cache_;
cache_ = NewLRUCache(0); cache_ = NewLRUCache(0);
@ -222,5 +222,3 @@ TEST(CacheTest, ZeroSizeCache) {
} }
} // namespace leveldb } // namespace leveldb
int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }


@ -48,29 +48,13 @@ int VarintLength(uint64_t v);
char* EncodeVarint32(char* dst, uint32_t value); char* EncodeVarint32(char* dst, uint32_t value);
char* EncodeVarint64(char* dst, uint64_t value); char* EncodeVarint64(char* dst, uint64_t value);
// TODO(costan): Remove port::kLittleEndian and the fast paths based on
// std::memcpy when clang learns to optimize the generic code, as
// described in https://bugs.llvm.org/show_bug.cgi?id=41761
//
// The platform-independent code in DecodeFixed{32,64}() gets optimized to mov
// on x86 and ldr on ARM64, by both clang and gcc. However, only gcc optimizes
// the platform-independent code in EncodeFixed{32,64}() to mov / str.
// Lower-level versions of Put... that write directly into a character buffer // Lower-level versions of Put... that write directly into a character buffer
// REQUIRES: dst has enough space for the value being written // REQUIRES: dst has enough space for the value being written
inline void EncodeFixed32(char* dst, uint32_t value) { inline void EncodeFixed32(char* dst, uint32_t value) {
uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst); uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
if (port::kLittleEndian) { // Recent clang and gcc optimize this to a single mov / str instruction.
// Fast path for little-endian CPUs. All major compilers optimize this to a
// single mov (x86_64) / str (ARM) instruction.
std::memcpy(buffer, &value, sizeof(uint32_t));
return;
}
// Platform-independent code.
// Currently, only gcc optimizes this to a single mov / str instruction.
buffer[0] = static_cast<uint8_t>(value); buffer[0] = static_cast<uint8_t>(value);
buffer[1] = static_cast<uint8_t>(value >> 8); buffer[1] = static_cast<uint8_t>(value >> 8);
buffer[2] = static_cast<uint8_t>(value >> 16); buffer[2] = static_cast<uint8_t>(value >> 16);
@ -80,15 +64,7 @@ inline void EncodeFixed32(char* dst, uint32_t value) {
inline void EncodeFixed64(char* dst, uint64_t value) { inline void EncodeFixed64(char* dst, uint64_t value) {
uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst); uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
if (port::kLittleEndian) { // Recent clang and gcc optimize this to a single mov / str instruction.
// Fast path for little-endian CPUs. All major compilers optimize this to a
// single mov (x86_64) / str (ARM) instruction.
std::memcpy(buffer, &value, sizeof(uint64_t));
return;
}
// Platform-independent code.
// Currently, only gcc optimizes this to a single mov / str instruction.
buffer[0] = static_cast<uint8_t>(value); buffer[0] = static_cast<uint8_t>(value);
buffer[1] = static_cast<uint8_t>(value >> 8); buffer[1] = static_cast<uint8_t>(value >> 8);
buffer[2] = static_cast<uint8_t>(value >> 16); buffer[2] = static_cast<uint8_t>(value >> 16);
@ -105,16 +81,7 @@ inline void EncodeFixed64(char* dst, uint64_t value) {
inline uint32_t DecodeFixed32(const char* ptr) { inline uint32_t DecodeFixed32(const char* ptr) {
const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr); const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
if (port::kLittleEndian) { // Recent clang and gcc optimize this to a single mov / ldr instruction.
// Fast path for little-endian CPUs. All major compilers optimize this to a
// single mov (x86_64) / ldr (ARM) instruction.
uint32_t result;
std::memcpy(&result, buffer, sizeof(uint32_t));
return result;
}
// Platform-independent code.
// Clang and gcc optimize this to a single mov / ldr instruction.
return (static_cast<uint32_t>(buffer[0])) | return (static_cast<uint32_t>(buffer[0])) |
(static_cast<uint32_t>(buffer[1]) << 8) | (static_cast<uint32_t>(buffer[1]) << 8) |
(static_cast<uint32_t>(buffer[2]) << 16) | (static_cast<uint32_t>(buffer[2]) << 16) |
@ -124,16 +91,7 @@ inline uint32_t DecodeFixed32(const char* ptr) {
inline uint64_t DecodeFixed64(const char* ptr) { inline uint64_t DecodeFixed64(const char* ptr) {
const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr); const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
if (port::kLittleEndian) { // Recent clang and gcc optimize this to a single mov / ldr instruction.
// Fast path for little-endian CPUs. All major compilers optimize this to a
// single mov (x86_64) / ldr (ARM) instruction.
uint64_t result;
std::memcpy(&result, buffer, sizeof(uint64_t));
return result;
}
// Platform-independent code.
// Clang and gcc optimize this to a single mov / ldr instruction.
return (static_cast<uint64_t>(buffer[0])) | return (static_cast<uint64_t>(buffer[0])) |
(static_cast<uint64_t>(buffer[1]) << 8) | (static_cast<uint64_t>(buffer[1]) << 8) |
(static_cast<uint64_t>(buffer[2]) << 16) | (static_cast<uint64_t>(buffer[2]) << 16) |

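The removed fast paths above depended on port::kLittleEndian plus std::memcpy; what remains is the byte-by-byte form, which current clang and gcc compile to the same single mov/ldr/str. A condensed sketch of the portable 32-bit pair in that style (function names are illustrative, not the header's own):

#include <cstdint>

// Portable little-endian encode/decode; recent compilers reduce each to a
// single store/load on little-endian targets, which is why the memcpy fast
// path could be dropped.
inline void StoreFixed32(char* dst, uint32_t value) {
  uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
  buffer[0] = static_cast<uint8_t>(value);
  buffer[1] = static_cast<uint8_t>(value >> 8);
  buffer[2] = static_cast<uint8_t>(value >> 16);
  buffer[3] = static_cast<uint8_t>(value >> 24);
}

inline uint32_t LoadFixed32(const char* ptr) {
  const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
  return (static_cast<uint32_t>(buffer[0])) |
         (static_cast<uint32_t>(buffer[1]) << 8) |
         (static_cast<uint32_t>(buffer[2]) << 16) |
         (static_cast<uint32_t>(buffer[3]) << 24);
}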

@ -2,15 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors. // found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "util/coding.h"
#include <vector> #include <vector>
#include "util/coding.h" #include "gtest/gtest.h"
#include "util/testharness.h"
namespace leveldb { namespace leveldb {
class Coding {};
TEST(Coding, Fixed32) { TEST(Coding, Fixed32) {
std::string s; std::string s;
for (uint32_t v = 0; v < 100000; v++) { for (uint32_t v = 0; v < 100000; v++) {
@ -192,5 +191,3 @@ TEST(Coding, Strings) {
} }
} // namespace leveldb } // namespace leveldb
int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }


@ -6,8 +6,8 @@
#include "util/crc32c.h" #include "util/crc32c.h"
#include <stddef.h> #include <cstddef>
#include <stdint.h> #include <cstdint>
#include "port/port.h" #include "port/port.h"
#include "util/coding.h" #include "util/coding.h"


@ -5,8 +5,8 @@
#ifndef STORAGE_LEVELDB_UTIL_CRC32C_H_ #ifndef STORAGE_LEVELDB_UTIL_CRC32C_H_
#define STORAGE_LEVELDB_UTIL_CRC32C_H_ #define STORAGE_LEVELDB_UTIL_CRC32C_H_
#include <stddef.h> #include <cstddef>
#include <stdint.h> #include <cstdint>
namespace leveldb { namespace leveldb {
namespace crc32c { namespace crc32c {


@ -3,13 +3,12 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors. // found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "util/crc32c.h" #include "util/crc32c.h"
#include "util/testharness.h"
#include "gtest/gtest.h"
namespace leveldb { namespace leveldb {
namespace crc32c { namespace crc32c {
class CRC {};
TEST(CRC, StandardResults) { TEST(CRC, StandardResults) {
// From rfc3720 section B.4. // From rfc3720 section B.4.
char buf[32]; char buf[32];
@ -55,5 +54,3 @@ TEST(CRC, Mask) {
} // namespace crc32c } // namespace crc32c
} // namespace leveldb } // namespace leveldb
int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }


@ -4,14 +4,30 @@
#include "leveldb/env.h" #include "leveldb/env.h"
#include <cstdarg>
// This workaround can be removed when leveldb::Env::DeleteFile is removed.
// See env.h for justification.
#if defined(_WIN32) && defined(LEVELDB_DELETEFILE_UNDEFINED)
#undef DeleteFile
#endif
namespace leveldb { namespace leveldb {
Env::Env() = default;
Env::~Env() = default; Env::~Env() = default;
Status Env::NewAppendableFile(const std::string& fname, WritableFile** result) { Status Env::NewAppendableFile(const std::string& fname, WritableFile** result) {
return Status::NotSupported("NewAppendableFile", fname); return Status::NotSupported("NewAppendableFile", fname);
} }
Status Env::RemoveDir(const std::string& dirname) { return DeleteDir(dirname); }
Status Env::DeleteDir(const std::string& dirname) { return RemoveDir(dirname); }
Status Env::RemoveFile(const std::string& fname) { return DeleteFile(fname); }
Status Env::DeleteFile(const std::string& fname) { return RemoveFile(fname); }
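RemoveFile/RemoveDir are the new names, and the legacy DeleteFile/DeleteDir virtuals now default to forwarding to them (and vice versa), so an Env subclass that overrides either spelling keeps working. A stripped-down sketch of that mutual-default migration trick using a hypothetical interface, not the real leveldb::Env declaration:

#include <string>

// Hypothetical interface showing the rename-with-mutual-defaults trick.
class FileSystem {
 public:
  virtual ~FileSystem() = default;

  // New name: the default forwards to the legacy name.
  virtual bool RemoveFile(const std::string& path) { return DeleteFile(path); }
  // Legacy name: the default forwards to the new name.
  virtual bool DeleteFile(const std::string& path) { return RemoveFile(path); }
};

// A pre-rename subclass that only overrides DeleteFile() keeps working through
// either entry point; new code overrides RemoveFile() instead. Overriding
// neither would recurse forever, so the double default only makes sense as a
// migration aid.
class LegacyFileSystem : public FileSystem {
 public:
  bool DeleteFile(const std::string& path) override { return !path.empty(); }
};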
SequentialFile::~SequentialFile() = default; SequentialFile::~SequentialFile() = default;
RandomAccessFile::~RandomAccessFile() = default; RandomAccessFile::~RandomAccessFile() = default;
@ -24,7 +40,7 @@ FileLock::~FileLock() = default;
void Log(Logger* info_log, const char* format, ...) { void Log(Logger* info_log, const char* format, ...) {
if (info_log != nullptr) { if (info_log != nullptr) {
va_list ap; std::va_list ap;
va_start(ap, format); va_start(ap, format);
info_log->Logv(format, ap); info_log->Logv(format, ap);
va_end(ap); va_end(ap);
@ -47,7 +63,7 @@ static Status DoWriteStringToFile(Env* env, const Slice& data,
} }
delete file; // Will auto-close if we did not close above delete file; // Will auto-close if we did not close above
if (!s.ok()) { if (!s.ok()) {
env->DeleteFile(fname); env->RemoveFile(fname);
} }
return s; return s;
} }


@ -4,9 +4,10 @@
#include <dirent.h> #include <dirent.h>
#include <fcntl.h> #include <fcntl.h>
#include <pthread.h>
#include <sys/mman.h> #include <sys/mman.h>
#ifndef __Fuchsia__
#include <sys/resource.h> #include <sys/resource.h>
#endif
#include <sys/stat.h> #include <sys/stat.h>
#include <sys/time.h> #include <sys/time.h>
#include <sys/types.h> #include <sys/types.h>
@ -72,7 +73,14 @@ Status PosixError(const std::string& context, int error_number) {
class Limiter { class Limiter {
public: public:
// Limit maximum number of resources to |max_acquires|. // Limit maximum number of resources to |max_acquires|.
Limiter(int max_acquires) : acquires_allowed_(max_acquires) {} Limiter(int max_acquires)
:
#if !defined(NDEBUG)
max_acquires_(max_acquires),
#endif // !defined(NDEBUG)
acquires_allowed_(max_acquires) {
assert(max_acquires >= 0);
}
Limiter(const Limiter&) = delete; Limiter(const Limiter&) = delete;
Limiter operator=(const Limiter&) = delete; Limiter operator=(const Limiter&) = delete;
@ -85,15 +93,35 @@ class Limiter {
if (old_acquires_allowed > 0) return true; if (old_acquires_allowed > 0) return true;
acquires_allowed_.fetch_add(1, std::memory_order_relaxed); int pre_increment_acquires_allowed =
acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
// Silence compiler warnings about unused arguments when NDEBUG is defined.
(void)pre_increment_acquires_allowed;
// If the check below fails, Release() was called more times than acquire.
assert(pre_increment_acquires_allowed < max_acquires_);
return false; return false;
} }
// Release a resource acquired by a previous call to Acquire() that returned // Release a resource acquired by a previous call to Acquire() that returned
// true. // true.
void Release() { acquires_allowed_.fetch_add(1, std::memory_order_relaxed); } void Release() {
int old_acquires_allowed =
acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
// Silence compiler warnings about unused arguments when NDEBUG is defined.
(void)old_acquires_allowed;
// If the check below fails, Release() was called more times than acquire.
assert(old_acquires_allowed < max_acquires_);
}
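Condensing the Acquire()/Release() changes above: the counter is still a relaxed atomic, and the only addition is a max_acquires_ bound (debug-only in the real code) used to assert that Release() never runs more often than a successful Acquire(). A trimmed sketch of the resulting class, with the NDEBUG guards dropped for readability:

#include <atomic>
#include <cassert>

// Caps concurrent use of a resource such as mmap regions or read-only fds.
class Limiter {
 public:
  explicit Limiter(int max_acquires)
      : max_acquires_(max_acquires), acquires_allowed_(max_acquires) {
    assert(max_acquires >= 0);
  }

  // Returns true if a slot was taken; the caller must later call Release().
  bool Acquire() {
    int old_allowed = acquires_allowed_.fetch_sub(1, std::memory_order_relaxed);
    if (old_allowed > 0) return true;
    int restored = acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
    assert(restored < max_acquires_);  // More Release() calls than Acquire()?
    (void)restored;
    return false;
  }

  void Release() {
    int old_allowed = acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
    assert(old_allowed < max_acquires_);  // Catches over-release.
    (void)old_allowed;
  }

 private:
  const int max_acquires_;            // Debug-only in the real code; kept plain here.
  std::atomic<int> acquires_allowed_;
};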
private: private:
#if !defined(NDEBUG)
// Catches an excessive number of Release() calls.
const int max_acquires_;
#endif // !defined(NDEBUG)
// The number of available resources. // The number of available resources.
// //
// This is a counter and is not tied to the invariants of any other class, so // This is a counter and is not tied to the invariants of any other class, so
@ -108,7 +136,7 @@ class Limiter {
class PosixSequentialFile final : public SequentialFile { class PosixSequentialFile final : public SequentialFile {
public: public:
PosixSequentialFile(std::string filename, int fd) PosixSequentialFile(std::string filename, int fd)
: fd_(fd), filename_(filename) {} : fd_(fd), filename_(std::move(filename)) {}
~PosixSequentialFile() override { close(fd_); } ~PosixSequentialFile() override { close(fd_); }
Status Read(size_t n, Slice* result, char* scratch) override { Status Read(size_t n, Slice* result, char* scratch) override {
@ -214,7 +242,7 @@ class PosixMmapReadableFile final : public RandomAccessFile {
// over the ownership of the region. // over the ownership of the region.
// //
// |mmap_limiter| must outlive this instance. The caller must have already // |mmap_limiter| must outlive this instance. The caller must have already
// aquired the right to use one mmap region, which will be released when this // acquired the right to use one mmap region, which will be released when this
// instance is destroyed. // instance is destroyed.
PosixMmapReadableFile(std::string filename, char* mmap_base, size_t length, PosixMmapReadableFile(std::string filename, char* mmap_base, size_t length,
Limiter* mmap_limiter) Limiter* mmap_limiter)
@ -587,7 +615,7 @@ class PosixEnv : public Env {
return Status::OK(); return Status::OK();
} }
Status DeleteFile(const std::string& filename) override { Status RemoveFile(const std::string& filename) override {
if (::unlink(filename.c_str()) != 0) { if (::unlink(filename.c_str()) != 0) {
return PosixError(filename, errno); return PosixError(filename, errno);
} }
@ -601,7 +629,7 @@ class PosixEnv : public Env {
return Status::OK(); return Status::OK();
} }
Status DeleteDir(const std::string& dirname) override { Status RemoveDir(const std::string& dirname) override {
if (::rmdir(dirname.c_str()) != 0) { if (::rmdir(dirname.c_str()) != 0) {
return PosixError(dirname, errno); return PosixError(dirname, errno);
} }
@ -728,7 +756,7 @@ class PosixEnv : public Env {
// Instances are constructed on the thread calling Schedule() and used on the // Instances are constructed on the thread calling Schedule() and used on the
// background thread. // background thread.
// //
// This structure is thread-safe beacuse it is immutable. // This structure is thread-safe because it is immutable.
struct BackgroundWorkItem { struct BackgroundWorkItem {
explicit BackgroundWorkItem(void (*function)(void* arg), void* arg) explicit BackgroundWorkItem(void (*function)(void* arg), void* arg)
: function(function), arg(arg) {} : function(function), arg(arg) {}
@ -757,6 +785,10 @@ int MaxOpenFiles() {
if (g_open_read_only_file_limit >= 0) { if (g_open_read_only_file_limit >= 0) {
return g_open_read_only_file_limit; return g_open_read_only_file_limit;
} }
#ifdef __Fuchsia__
// Fuchsia doesn't implement getrlimit.
g_open_read_only_file_limit = 50;
#else
struct ::rlimit rlim; struct ::rlimit rlim;
if (::getrlimit(RLIMIT_NOFILE, &rlim)) { if (::getrlimit(RLIMIT_NOFILE, &rlim)) {
// getrlimit failed, fallback to hard-coded default. // getrlimit failed, fallback to hard-coded default.
@ -767,6 +799,7 @@ int MaxOpenFiles() {
// Allow use of 20% of available file descriptors for read-only files. // Allow use of 20% of available file descriptors for read-only files.
g_open_read_only_file_limit = rlim.rlim_cur / 5; g_open_read_only_file_limit = rlim.rlim_cur / 5;
} }
#endif
return g_open_read_only_file_limit; return g_open_read_only_file_limit;
} }
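The read-only file budget is derived from the process descriptor limit, with Fuchsia taking a fixed value because it lacks getrlimit(). A minimal sketch of that computation with the same 20% share; the fallback constants are illustrative rather than copied from the source:

#if !defined(__Fuchsia__)
#include <sys/resource.h>
#endif

// Derives a read-only file-descriptor budget from the process limit.
int ReadOnlyFdBudget() {
#if defined(__Fuchsia__)
  return 50;  // Fuchsia has no getrlimit(); use a small fixed budget.
#else
  struct ::rlimit rlim;
  if (::getrlimit(RLIMIT_NOFILE, &rlim) != 0) {
    return 50;  // getrlimit() failed; fall back to a conservative default.
  }
  if (rlim.rlim_cur == RLIM_INFINITY) {
    return 1 << 20;  // Effectively unlimited (placeholder cap).
  }
  // Spend 20% of the allowed descriptors on read-only files.
  return static_cast<int>(rlim.rlim_cur / 5);
#endif
}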
@ -837,7 +870,7 @@ class SingletonEnv {
public: public:
SingletonEnv() { SingletonEnv() {
#if !defined(NDEBUG) #if !defined(NDEBUG)
env_initialized_.store(true, std::memory_order::memory_order_relaxed); env_initialized_.store(true, std::memory_order_relaxed);
#endif // !defined(NDEBUG) #endif // !defined(NDEBUG)
static_assert(sizeof(env_storage_) >= sizeof(EnvType), static_assert(sizeof(env_storage_) >= sizeof(EnvType),
"env_storage_ will not fit the Env"); "env_storage_ will not fit the Env");
@ -854,7 +887,7 @@ class SingletonEnv {
static void AssertEnvNotInitialized() { static void AssertEnvNotInitialized() {
#if !defined(NDEBUG) #if !defined(NDEBUG)
assert(!env_initialized_.load(std::memory_order::memory_order_relaxed)); assert(!env_initialized_.load(std::memory_order_relaxed));
#endif // !defined(NDEBUG) #endif // !defined(NDEBUG)
} }


@ -13,10 +13,11 @@
#include <unordered_set> #include <unordered_set>
#include <vector> #include <vector>
#include "gtest/gtest.h"
#include "leveldb/env.h" #include "leveldb/env.h"
#include "port/port.h" #include "port/port.h"
#include "util/env_posix_test_helper.h" #include "util/env_posix_test_helper.h"
#include "util/testharness.h" #include "util/testutil.h"
#if HAVE_O_CLOEXEC #if HAVE_O_CLOEXEC
@ -168,7 +169,7 @@ namespace leveldb {
static const int kReadOnlyFileLimit = 4; static const int kReadOnlyFileLimit = 4;
static const int kMMapLimit = 4; static const int kMMapLimit = 4;
class EnvPosixTest { class EnvPosixTest : public testing::Test {
public: public:
static void SetFileLimits(int read_only_file_limit, int mmap_limit) { static void SetFileLimits(int read_only_file_limit, int mmap_limit) {
EnvPosixTestHelper::SetReadOnlyFDLimit(read_only_file_limit); EnvPosixTestHelper::SetReadOnlyFDLimit(read_only_file_limit);
@ -180,150 +181,150 @@ class EnvPosixTest {
Env* env_; Env* env_;
}; };
TEST(EnvPosixTest, TestOpenOnRead) { TEST_F(EnvPosixTest, TestOpenOnRead) {
// Write some test data to a single file that will be opened |n| times. // Write some test data to a single file that will be opened |n| times.
std::string test_dir; std::string test_dir;
ASSERT_OK(env_->GetTestDirectory(&test_dir)); ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string test_file = test_dir + "/open_on_read.txt"; std::string test_file = test_dir + "/open_on_read.txt";
FILE* f = fopen(test_file.c_str(), "we"); FILE* f = std::fopen(test_file.c_str(), "we");
ASSERT_TRUE(f != nullptr); ASSERT_TRUE(f != nullptr);
const char kFileData[] = "abcdefghijklmnopqrstuvwxyz"; const char kFileData[] = "abcdefghijklmnopqrstuvwxyz";
fputs(kFileData, f); fputs(kFileData, f);
fclose(f); std::fclose(f);
// Open test file some number above the sum of the two limits to force // Open test file some number above the sum of the two limits to force
// open-on-read behavior of POSIX Env leveldb::RandomAccessFile. // open-on-read behavior of POSIX Env leveldb::RandomAccessFile.
const int kNumFiles = kReadOnlyFileLimit + kMMapLimit + 5; const int kNumFiles = kReadOnlyFileLimit + kMMapLimit + 5;
leveldb::RandomAccessFile* files[kNumFiles] = {0}; leveldb::RandomAccessFile* files[kNumFiles] = {0};
for (int i = 0; i < kNumFiles; i++) { for (int i = 0; i < kNumFiles; i++) {
ASSERT_OK(env_->NewRandomAccessFile(test_file, &files[i])); ASSERT_LEVELDB_OK(env_->NewRandomAccessFile(test_file, &files[i]));
} }
char scratch; char scratch;
Slice read_result; Slice read_result;
for (int i = 0; i < kNumFiles; i++) { for (int i = 0; i < kNumFiles; i++) {
ASSERT_OK(files[i]->Read(i, 1, &read_result, &scratch)); ASSERT_LEVELDB_OK(files[i]->Read(i, 1, &read_result, &scratch));
ASSERT_EQ(kFileData[i], read_result[0]); ASSERT_EQ(kFileData[i], read_result[0]);
} }
for (int i = 0; i < kNumFiles; i++) { for (int i = 0; i < kNumFiles; i++) {
delete files[i]; delete files[i];
} }
ASSERT_OK(env_->DeleteFile(test_file)); ASSERT_LEVELDB_OK(env_->RemoveFile(test_file));
} }
#if HAVE_O_CLOEXEC #if HAVE_O_CLOEXEC
TEST(EnvPosixTest, TestCloseOnExecSequentialFile) { TEST_F(EnvPosixTest, TestCloseOnExecSequentialFile) {
std::unordered_set<int> open_fds; std::unordered_set<int> open_fds;
GetOpenFileDescriptors(&open_fds); GetOpenFileDescriptors(&open_fds);
std::string test_dir; std::string test_dir;
ASSERT_OK(env_->GetTestDirectory(&test_dir)); ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string file_path = test_dir + "/close_on_exec_sequential.txt"; std::string file_path = test_dir + "/close_on_exec_sequential.txt";
ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path)); ASSERT_LEVELDB_OK(WriteStringToFile(env_, "0123456789", file_path));
leveldb::SequentialFile* file = nullptr; leveldb::SequentialFile* file = nullptr;
ASSERT_OK(env_->NewSequentialFile(file_path, &file)); ASSERT_LEVELDB_OK(env_->NewSequentialFile(file_path, &file));
CheckCloseOnExecDoesNotLeakFDs(open_fds); CheckCloseOnExecDoesNotLeakFDs(open_fds);
delete file; delete file;
ASSERT_OK(env_->DeleteFile(file_path)); ASSERT_LEVELDB_OK(env_->RemoveFile(file_path));
} }
TEST(EnvPosixTest, TestCloseOnExecRandomAccessFile) { TEST_F(EnvPosixTest, TestCloseOnExecRandomAccessFile) {
std::unordered_set<int> open_fds; std::unordered_set<int> open_fds;
GetOpenFileDescriptors(&open_fds); GetOpenFileDescriptors(&open_fds);
std::string test_dir; std::string test_dir;
ASSERT_OK(env_->GetTestDirectory(&test_dir)); ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string file_path = test_dir + "/close_on_exec_random_access.txt"; std::string file_path = test_dir + "/close_on_exec_random_access.txt";
ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path)); ASSERT_LEVELDB_OK(WriteStringToFile(env_, "0123456789", file_path));
// Exhaust the RandomAccessFile mmap limit. This way, the test // Exhaust the RandomAccessFile mmap limit. This way, the test
// RandomAccessFile instance below is backed by a file descriptor, not by an // RandomAccessFile instance below is backed by a file descriptor, not by an
// mmap region. // mmap region.
leveldb::RandomAccessFile* mmapped_files[kReadOnlyFileLimit] = {nullptr}; leveldb::RandomAccessFile* mmapped_files[kMMapLimit];
for (int i = 0; i < kReadOnlyFileLimit; i++) { for (int i = 0; i < kMMapLimit; i++) {
ASSERT_OK(env_->NewRandomAccessFile(file_path, &mmapped_files[i])); ASSERT_LEVELDB_OK(env_->NewRandomAccessFile(file_path, &mmapped_files[i]));
} }
leveldb::RandomAccessFile* file = nullptr; leveldb::RandomAccessFile* file = nullptr;
ASSERT_OK(env_->NewRandomAccessFile(file_path, &file)); ASSERT_LEVELDB_OK(env_->NewRandomAccessFile(file_path, &file));
CheckCloseOnExecDoesNotLeakFDs(open_fds); CheckCloseOnExecDoesNotLeakFDs(open_fds);
delete file; delete file;
for (int i = 0; i < kReadOnlyFileLimit; i++) { for (int i = 0; i < kMMapLimit; i++) {
delete mmapped_files[i]; delete mmapped_files[i];
} }
ASSERT_OK(env_->DeleteFile(file_path)); ASSERT_LEVELDB_OK(env_->RemoveFile(file_path));
} }
TEST(EnvPosixTest, TestCloseOnExecWritableFile) { TEST_F(EnvPosixTest, TestCloseOnExecWritableFile) {
std::unordered_set<int> open_fds; std::unordered_set<int> open_fds;
GetOpenFileDescriptors(&open_fds); GetOpenFileDescriptors(&open_fds);
std::string test_dir; std::string test_dir;
ASSERT_OK(env_->GetTestDirectory(&test_dir)); ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string file_path = test_dir + "/close_on_exec_writable.txt"; std::string file_path = test_dir + "/close_on_exec_writable.txt";
ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path)); ASSERT_LEVELDB_OK(WriteStringToFile(env_, "0123456789", file_path));
leveldb::WritableFile* file = nullptr; leveldb::WritableFile* file = nullptr;
ASSERT_OK(env_->NewWritableFile(file_path, &file)); ASSERT_LEVELDB_OK(env_->NewWritableFile(file_path, &file));
CheckCloseOnExecDoesNotLeakFDs(open_fds); CheckCloseOnExecDoesNotLeakFDs(open_fds);
delete file; delete file;
ASSERT_OK(env_->DeleteFile(file_path)); ASSERT_LEVELDB_OK(env_->RemoveFile(file_path));
} }
TEST(EnvPosixTest, TestCloseOnExecAppendableFile) { TEST_F(EnvPosixTest, TestCloseOnExecAppendableFile) {
std::unordered_set<int> open_fds; std::unordered_set<int> open_fds;
GetOpenFileDescriptors(&open_fds); GetOpenFileDescriptors(&open_fds);
std::string test_dir; std::string test_dir;
ASSERT_OK(env_->GetTestDirectory(&test_dir)); ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string file_path = test_dir + "/close_on_exec_appendable.txt"; std::string file_path = test_dir + "/close_on_exec_appendable.txt";
ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path)); ASSERT_LEVELDB_OK(WriteStringToFile(env_, "0123456789", file_path));
leveldb::WritableFile* file = nullptr; leveldb::WritableFile* file = nullptr;
ASSERT_OK(env_->NewAppendableFile(file_path, &file)); ASSERT_LEVELDB_OK(env_->NewAppendableFile(file_path, &file));
CheckCloseOnExecDoesNotLeakFDs(open_fds); CheckCloseOnExecDoesNotLeakFDs(open_fds);
delete file; delete file;
ASSERT_OK(env_->DeleteFile(file_path)); ASSERT_LEVELDB_OK(env_->RemoveFile(file_path));
} }
TEST(EnvPosixTest, TestCloseOnExecLockFile) { TEST_F(EnvPosixTest, TestCloseOnExecLockFile) {
std::unordered_set<int> open_fds; std::unordered_set<int> open_fds;
GetOpenFileDescriptors(&open_fds); GetOpenFileDescriptors(&open_fds);
std::string test_dir; std::string test_dir;
ASSERT_OK(env_->GetTestDirectory(&test_dir)); ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string file_path = test_dir + "/close_on_exec_lock.txt"; std::string file_path = test_dir + "/close_on_exec_lock.txt";
ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path)); ASSERT_LEVELDB_OK(WriteStringToFile(env_, "0123456789", file_path));
leveldb::FileLock* lock = nullptr; leveldb::FileLock* lock = nullptr;
ASSERT_OK(env_->LockFile(file_path, &lock)); ASSERT_LEVELDB_OK(env_->LockFile(file_path, &lock));
CheckCloseOnExecDoesNotLeakFDs(open_fds); CheckCloseOnExecDoesNotLeakFDs(open_fds);
ASSERT_OK(env_->UnlockFile(lock)); ASSERT_LEVELDB_OK(env_->UnlockFile(lock));
ASSERT_OK(env_->DeleteFile(file_path)); ASSERT_LEVELDB_OK(env_->RemoveFile(file_path));
} }
TEST(EnvPosixTest, TestCloseOnExecLogger) { TEST_F(EnvPosixTest, TestCloseOnExecLogger) {
std::unordered_set<int> open_fds; std::unordered_set<int> open_fds;
GetOpenFileDescriptors(&open_fds); GetOpenFileDescriptors(&open_fds);
std::string test_dir; std::string test_dir;
ASSERT_OK(env_->GetTestDirectory(&test_dir)); ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string file_path = test_dir + "/close_on_exec_logger.txt"; std::string file_path = test_dir + "/close_on_exec_logger.txt";
ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path)); ASSERT_LEVELDB_OK(WriteStringToFile(env_, "0123456789", file_path));
leveldb::Logger* file = nullptr; leveldb::Logger* file = nullptr;
ASSERT_OK(env_->NewLogger(file_path, &file)); ASSERT_LEVELDB_OK(env_->NewLogger(file_path, &file));
CheckCloseOnExecDoesNotLeakFDs(open_fds); CheckCloseOnExecDoesNotLeakFDs(open_fds);
delete file; delete file;
ASSERT_OK(env_->DeleteFile(file_path)); ASSERT_LEVELDB_OK(env_->RemoveFile(file_path));
} }
#endif // HAVE_O_CLOEXEC #endif // HAVE_O_CLOEXEC
@ -346,5 +347,7 @@ int main(int argc, char** argv) {
// All tests currently run with the same read-only file limits. // All tests currently run with the same read-only file limits.
leveldb::EnvPosixTest::SetFileLimits(leveldb::kReadOnlyFileLimit, leveldb::EnvPosixTest::SetFileLimits(leveldb::kReadOnlyFileLimit,
leveldb::kMMapLimit); leveldb::kMMapLimit);
return leveldb::test::RunAllTests();
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
} }


@ -6,30 +6,30 @@
#include <algorithm> #include <algorithm>
#include "gtest/gtest.h"
#include "port/port.h" #include "port/port.h"
#include "port/thread_annotations.h" #include "port/thread_annotations.h"
#include "util/mutexlock.h" #include "util/mutexlock.h"
#include "util/testharness.h"
#include "util/testutil.h" #include "util/testutil.h"
namespace leveldb { namespace leveldb {
class EnvTest { class EnvTest : public testing::Test {
public: public:
EnvTest() : env_(Env::Default()) {} EnvTest() : env_(Env::Default()) {}
Env* env_; Env* env_;
}; };
TEST(EnvTest, ReadWrite) { TEST_F(EnvTest, ReadWrite) {
Random rnd(test::RandomSeed()); Random rnd(test::RandomSeed());
// Get file to use for testing. // Get file to use for testing.
std::string test_dir; std::string test_dir;
ASSERT_OK(env_->GetTestDirectory(&test_dir)); ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string test_file_name = test_dir + "/open_on_read.txt"; std::string test_file_name = test_dir + "/open_on_read.txt";
WritableFile* writable_file; WritableFile* writable_file;
ASSERT_OK(env_->NewWritableFile(test_file_name, &writable_file)); ASSERT_LEVELDB_OK(env_->NewWritableFile(test_file_name, &writable_file));
// Fill a file with data generated via a sequence of randomly sized writes. // Fill a file with data generated via a sequence of randomly sized writes.
static const size_t kDataSize = 10 * 1048576; static const size_t kDataSize = 10 * 1048576;
@ -38,26 +38,26 @@ TEST(EnvTest, ReadWrite) {
int len = rnd.Skewed(18); // Up to 2^18 - 1, but typically much smaller int len = rnd.Skewed(18); // Up to 2^18 - 1, but typically much smaller
std::string r; std::string r;
test::RandomString(&rnd, len, &r); test::RandomString(&rnd, len, &r);
ASSERT_OK(writable_file->Append(r)); ASSERT_LEVELDB_OK(writable_file->Append(r));
data += r; data += r;
if (rnd.OneIn(10)) { if (rnd.OneIn(10)) {
ASSERT_OK(writable_file->Flush()); ASSERT_LEVELDB_OK(writable_file->Flush());
} }
} }
ASSERT_OK(writable_file->Sync()); ASSERT_LEVELDB_OK(writable_file->Sync());
ASSERT_OK(writable_file->Close()); ASSERT_LEVELDB_OK(writable_file->Close());
delete writable_file; delete writable_file;
// Read all data using a sequence of randomly sized reads. // Read all data using a sequence of randomly sized reads.
SequentialFile* sequential_file; SequentialFile* sequential_file;
ASSERT_OK(env_->NewSequentialFile(test_file_name, &sequential_file)); ASSERT_LEVELDB_OK(env_->NewSequentialFile(test_file_name, &sequential_file));
std::string read_result; std::string read_result;
std::string scratch; std::string scratch;
while (read_result.size() < data.size()) { while (read_result.size() < data.size()) {
int len = std::min<int>(rnd.Skewed(18), data.size() - read_result.size()); int len = std::min<int>(rnd.Skewed(18), data.size() - read_result.size());
scratch.resize(std::max(len, 1)); // at least 1 so &scratch[0] is legal scratch.resize(std::max(len, 1)); // at least 1 so &scratch[0] is legal
Slice read; Slice read;
ASSERT_OK(sequential_file->Read(len, &read, &scratch[0])); ASSERT_LEVELDB_OK(sequential_file->Read(len, &read, &scratch[0]));
if (len > 0) { if (len > 0) {
ASSERT_GT(read.size(), 0); ASSERT_GT(read.size(), 0);
} }
@ -68,7 +68,7 @@ TEST(EnvTest, ReadWrite) {
delete sequential_file; delete sequential_file;
} }
TEST(EnvTest, RunImmediately) { TEST_F(EnvTest, RunImmediately) {
struct RunState { struct RunState {
port::Mutex mu; port::Mutex mu;
port::CondVar cvar{&mu}; port::CondVar cvar{&mu};
@ -92,7 +92,7 @@ TEST(EnvTest, RunImmediately) {
} }
} }
TEST(EnvTest, RunMany) { TEST_F(EnvTest, RunMany) {
struct RunState { struct RunState {
port::Mutex mu; port::Mutex mu;
port::CondVar cvar{&mu}; port::CondVar cvar{&mu};
@ -151,7 +151,7 @@ static void ThreadBody(void* arg) {
s->mu.Unlock(); s->mu.Unlock();
} }
TEST(EnvTest, StartThread) { TEST_F(EnvTest, StartThread) {
State state(0, 3); State state(0, 3);
for (int i = 0; i < 3; i++) { for (int i = 0; i < 3; i++) {
env_->StartThread(&ThreadBody, &state); env_->StartThread(&ThreadBody, &state);
@ -164,10 +164,10 @@ TEST(EnvTest, StartThread) {
ASSERT_EQ(state.val, 3); ASSERT_EQ(state.val, 3);
} }
TEST(EnvTest, TestOpenNonExistentFile) { TEST_F(EnvTest, TestOpenNonExistentFile) {
// Write some test data to a single file that will be opened |n| times. // Write some test data to a single file that will be opened |n| times.
std::string test_dir; std::string test_dir;
ASSERT_OK(env_->GetTestDirectory(&test_dir)); ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string non_existent_file = test_dir + "/non_existent_file"; std::string non_existent_file = test_dir + "/non_existent_file";
ASSERT_TRUE(!env_->FileExists(non_existent_file)); ASSERT_TRUE(!env_->FileExists(non_existent_file));
@ -182,54 +182,52 @@ TEST(EnvTest, TestOpenNonExistentFile) {
ASSERT_TRUE(status.IsNotFound()); ASSERT_TRUE(status.IsNotFound());
} }
TEST(EnvTest, ReopenWritableFile) { TEST_F(EnvTest, ReopenWritableFile) {
std::string test_dir; std::string test_dir;
ASSERT_OK(env_->GetTestDirectory(&test_dir)); ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string test_file_name = test_dir + "/reopen_writable_file.txt"; std::string test_file_name = test_dir + "/reopen_writable_file.txt";
env_->DeleteFile(test_file_name); env_->RemoveFile(test_file_name);
WritableFile* writable_file; WritableFile* writable_file;
ASSERT_OK(env_->NewWritableFile(test_file_name, &writable_file)); ASSERT_LEVELDB_OK(env_->NewWritableFile(test_file_name, &writable_file));
std::string data("hello world!"); std::string data("hello world!");
ASSERT_OK(writable_file->Append(data)); ASSERT_LEVELDB_OK(writable_file->Append(data));
ASSERT_OK(writable_file->Close()); ASSERT_LEVELDB_OK(writable_file->Close());
delete writable_file; delete writable_file;
ASSERT_OK(env_->NewWritableFile(test_file_name, &writable_file)); ASSERT_LEVELDB_OK(env_->NewWritableFile(test_file_name, &writable_file));
data = "42"; data = "42";
ASSERT_OK(writable_file->Append(data)); ASSERT_LEVELDB_OK(writable_file->Append(data));
ASSERT_OK(writable_file->Close()); ASSERT_LEVELDB_OK(writable_file->Close());
delete writable_file; delete writable_file;
ASSERT_OK(ReadFileToString(env_, test_file_name, &data)); ASSERT_LEVELDB_OK(ReadFileToString(env_, test_file_name, &data));
ASSERT_EQ(std::string("42"), data); ASSERT_EQ(std::string("42"), data);
env_->DeleteFile(test_file_name); env_->RemoveFile(test_file_name);
} }
TEST(EnvTest, ReopenAppendableFile) { TEST_F(EnvTest, ReopenAppendableFile) {
std::string test_dir; std::string test_dir;
ASSERT_OK(env_->GetTestDirectory(&test_dir)); ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string test_file_name = test_dir + "/reopen_appendable_file.txt"; std::string test_file_name = test_dir + "/reopen_appendable_file.txt";
env_->DeleteFile(test_file_name); env_->RemoveFile(test_file_name);
WritableFile* appendable_file; WritableFile* appendable_file;
ASSERT_OK(env_->NewAppendableFile(test_file_name, &appendable_file)); ASSERT_LEVELDB_OK(env_->NewAppendableFile(test_file_name, &appendable_file));
std::string data("hello world!"); std::string data("hello world!");
ASSERT_OK(appendable_file->Append(data)); ASSERT_LEVELDB_OK(appendable_file->Append(data));
ASSERT_OK(appendable_file->Close()); ASSERT_LEVELDB_OK(appendable_file->Close());
delete appendable_file; delete appendable_file;
ASSERT_OK(env_->NewAppendableFile(test_file_name, &appendable_file)); ASSERT_LEVELDB_OK(env_->NewAppendableFile(test_file_name, &appendable_file));
data = "42"; data = "42";
ASSERT_OK(appendable_file->Append(data)); ASSERT_LEVELDB_OK(appendable_file->Append(data));
ASSERT_OK(appendable_file->Close()); ASSERT_LEVELDB_OK(appendable_file->Close());
delete appendable_file; delete appendable_file;
ASSERT_OK(ReadFileToString(env_, test_file_name, &data)); ASSERT_LEVELDB_OK(ReadFileToString(env_, test_file_name, &data));
ASSERT_EQ(std::string("hello world!42"), data); ASSERT_EQ(std::string("hello world!42"), data);
env_->DeleteFile(test_file_name); env_->RemoveFile(test_file_name);
} }
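The two reopen tests pin down the contract their assertions rely on: NewWritableFile() truncates an existing file, while NewAppendableFile() keeps its contents and appends. A compact sketch of that difference (status checks elided; names illustrative):

#include <string>

#include "leveldb/env.h"

// After this runs, *truncated holds "42" and *appended holds "hello42".
void ReopenSemantics(leveldb::Env* env, const std::string& path,
                     std::string* truncated, std::string* appended) {
  leveldb::WritableFile* file;

  env->NewWritableFile(path, &file);    // Creates or truncates.
  file->Append("hello");
  file->Close();
  delete file;
  env->NewWritableFile(path, &file);    // Truncates again: "hello" is gone.
  file->Append("42");
  file->Close();
  delete file;
  leveldb::ReadFileToString(env, path, truncated);  // "42"

  env->RemoveFile(path);
  env->NewAppendableFile(path, &file);
  file->Append("hello");
  file->Close();
  delete file;
  env->NewAppendableFile(path, &file);  // Keeps the existing bytes.
  file->Append("42");
  file->Close();
  delete file;
  leveldb::ReadFileToString(env, path, appended);   // "hello42"
}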
} // namespace leveldb } // namespace leveldb
int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }


@ -33,10 +33,6 @@
#include "util/mutexlock.h" #include "util/mutexlock.h"
#include "util/windows_logger.h" #include "util/windows_logger.h"
#if defined(DeleteFile)
#undef DeleteFile
#endif // defined(DeleteFile)
namespace leveldb { namespace leveldb {
namespace { namespace {
@ -118,7 +114,14 @@ class ScopedHandle {
class Limiter { class Limiter {
public: public:
// Limit maximum number of resources to |max_acquires|. // Limit maximum number of resources to |max_acquires|.
Limiter(int max_acquires) : acquires_allowed_(max_acquires) {} Limiter(int max_acquires)
:
#if !defined(NDEBUG)
max_acquires_(max_acquires),
#endif // !defined(NDEBUG)
acquires_allowed_(max_acquires) {
assert(max_acquires >= 0);
}
Limiter(const Limiter&) = delete; Limiter(const Limiter&) = delete;
Limiter operator=(const Limiter&) = delete; Limiter operator=(const Limiter&) = delete;
@ -137,9 +140,22 @@ class Limiter {
// Release a resource acquired by a previous call to Acquire() that returned // Release a resource acquired by a previous call to Acquire() that returned
// true. // true.
void Release() { acquires_allowed_.fetch_add(1, std::memory_order_relaxed); } void Release() {
int old_acquires_allowed =
acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
// Silence compiler warnings about unused arguments when NDEBUG is defined.
(void)old_acquires_allowed;
// If the check below fails, Release() was called more times than acquire.
assert(old_acquires_allowed < max_acquires_);
}
private: private:
#if !defined(NDEBUG)
// Catches an excessive number of Release() calls.
const int max_acquires_;
#endif // !defined(NDEBUG)
// The number of available resources. // The number of available resources.
// //
// This is a counter and is not tied to the invariants of any other class, so // This is a counter and is not tied to the invariants of any other class, so
@ -505,7 +521,7 @@ class WindowsEnv : public Env {
return Status::OK(); return Status::OK();
} }
Status DeleteFile(const std::string& filename) override { Status RemoveFile(const std::string& filename) override {
if (!::DeleteFileA(filename.c_str())) { if (!::DeleteFileA(filename.c_str())) {
return WindowsError(filename, ::GetLastError()); return WindowsError(filename, ::GetLastError());
} }
@ -519,7 +535,7 @@ class WindowsEnv : public Env {
return Status::OK(); return Status::OK();
} }
Status DeleteDir(const std::string& dirname) override { Status RemoveDir(const std::string& dirname) override {
if (!::RemoveDirectoryA(dirname.c_str())) { if (!::RemoveDirectoryA(dirname.c_str())) {
return WindowsError(dirname, ::GetLastError()); return WindowsError(dirname, ::GetLastError());
} }
@ -626,7 +642,7 @@ class WindowsEnv : public Env {
} }
Status NewLogger(const std::string& filename, Logger** result) override { Status NewLogger(const std::string& filename, Logger** result) override {
std::FILE* fp = std::fopen(filename.c_str(), "w"); std::FILE* fp = std::fopen(filename.c_str(), "wN");
if (fp == nullptr) { if (fp == nullptr) {
*result = nullptr; *result = nullptr;
return WindowsError(filename, ::GetLastError()); return WindowsError(filename, ::GetLastError());
@ -665,7 +681,7 @@ class WindowsEnv : public Env {
// Instances are constructed on the thread calling Schedule() and used on the // Instances are constructed on the thread calling Schedule() and used on the
// background thread. // background thread.
// //
// This structure is thread-safe beacuse it is immutable. // This structure is thread-safe because it is immutable.
struct BackgroundWorkItem { struct BackgroundWorkItem {
explicit BackgroundWorkItem(void (*function)(void* arg), void* arg) explicit BackgroundWorkItem(void (*function)(void* arg), void* arg)
: function(function), arg(arg) {} : function(function), arg(arg) {}
@ -749,7 +765,7 @@ class SingletonEnv {
public: public:
SingletonEnv() { SingletonEnv() {
#if !defined(NDEBUG) #if !defined(NDEBUG)
env_initialized_.store(true, std::memory_order::memory_order_relaxed); env_initialized_.store(true, std::memory_order_relaxed);
#endif // !defined(NDEBUG) #endif // !defined(NDEBUG)
static_assert(sizeof(env_storage_) >= sizeof(EnvType), static_assert(sizeof(env_storage_) >= sizeof(EnvType),
"env_storage_ will not fit the Env"); "env_storage_ will not fit the Env");
@ -766,7 +782,7 @@ class SingletonEnv {
static void AssertEnvNotInitialized() { static void AssertEnvNotInitialized() {
#if !defined(NDEBUG) #if !defined(NDEBUG)
assert(!env_initialized_.load(std::memory_order::memory_order_relaxed)); assert(!env_initialized_.load(std::memory_order_relaxed));
#endif // !defined(NDEBUG) #endif // !defined(NDEBUG)
} }


@@ -2,17 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

+#include "gtest/gtest.h"
#include "leveldb/env.h"
#include "port/port.h"
#include "util/env_windows_test_helper.h"
-#include "util/testharness.h"
+#include "util/testutil.h"

namespace leveldb {

static const int kMMapLimit = 4;

-class EnvWindowsTest {
+class EnvWindowsTest : public testing::Test {
 public:
  static void SetFileLimits(int mmap_limit) {
    EnvWindowsTestHelper::SetReadOnlyMMapLimit(mmap_limit);
@@ -23,17 +23,17 @@ class EnvWindowsTest {
  Env* env_;
};

-TEST(EnvWindowsTest, TestOpenOnRead) {
+TEST_F(EnvWindowsTest, TestOpenOnRead) {
  // Write some test data to a single file that will be opened |n| times.
  std::string test_dir;
-  ASSERT_OK(env_->GetTestDirectory(&test_dir));
+  ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
  std::string test_file = test_dir + "/open_on_read.txt";

-  FILE* f = fopen(test_file.c_str(), "w");
+  FILE* f = std::fopen(test_file.c_str(), "w");
  ASSERT_TRUE(f != nullptr);
  const char kFileData[] = "abcdefghijklmnopqrstuvwxyz";
  fputs(kFileData, f);
-  fclose(f);
+  std::fclose(f);

  // Open test file some number above the sum of the two limits to force
  // leveldb::WindowsEnv to switch from mapping the file into memory
@@ -41,18 +41,18 @@ TEST(EnvWindowsTest, TestOpenOnRead) {
  const int kNumFiles = kMMapLimit + 5;
  leveldb::RandomAccessFile* files[kNumFiles] = {0};
  for (int i = 0; i < kNumFiles; i++) {
-    ASSERT_OK(env_->NewRandomAccessFile(test_file, &files[i]));
+    ASSERT_LEVELDB_OK(env_->NewRandomAccessFile(test_file, &files[i]));
  }

  char scratch;
  Slice read_result;
  for (int i = 0; i < kNumFiles; i++) {
-    ASSERT_OK(files[i]->Read(i, 1, &read_result, &scratch));
+    ASSERT_LEVELDB_OK(files[i]->Read(i, 1, &read_result, &scratch));
    ASSERT_EQ(kFileData[i], read_result[0]);
  }

  for (int i = 0; i < kNumFiles; i++) {
    delete files[i];
  }

-  ASSERT_OK(env_->DeleteFile(test_file));
+  ASSERT_LEVELDB_OK(env_->RemoveFile(test_file));
}

}  // namespace leveldb

@@ -60,5 +60,6 @@ TEST(EnvWindowsTest, TestOpenOnRead) {
int main(int argc, char** argv) {
  // All tests currently run with the same read-only file limits.
  leveldb::EnvWindowsTest::SetFileLimits(leveldb::kMMapLimit);
-  return leveldb::test::RunAllTests();
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
}
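This test file shows the googletest migration pattern applied across the change: the fixture class now derives from testing::Test, TEST becomes TEST_F so each case runs against a fresh fixture, the old ASSERT_OK becomes ASSERT_LEVELDB_OK from util/testutil.h, and main() initializes googletest before running the suite. A stripped-down sketch of the same structure; EchoTest and its members are illustrative only, not LevelDB code:

#include "gtest/gtest.h"

// Fixture: googletest constructs a fresh EchoTest for every TEST_F case.
class EchoTest : public testing::Test {
 protected:
  void SetUp() override { value_ = 42; }
  int value_ = 0;
};

// TEST_F binds the case to the fixture above, so value_ is visible here.
TEST_F(EchoTest, FixtureIsInitialized) { ASSERT_EQ(42, value_); }

int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);  // strips gtest flags out of argv
  return RUN_ALL_TESTS();
}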

View File

@@ -4,7 +4,7 @@
#include "util/hash.h"

-#include <string.h>
+#include <cstring>

#include "util/coding.h"

View File

@@ -7,8 +7,8 @@
#ifndef STORAGE_LEVELDB_UTIL_HASH_H_
#define STORAGE_LEVELDB_UTIL_HASH_H_

-#include <stddef.h>
-#include <stdint.h>
+#include <cstddef>
+#include <cstdint>

namespace leveldb {

View File

@@ -3,12 +3,11 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "util/hash.h"
-#include "util/testharness.h"
+#include "gtest/gtest.h"

namespace leveldb {

-class HASH {};

TEST(HASH, SignedUnsignedIssue) {
  const uint8_t data1[1] = {0x62};
  const uint8_t data2[2] = {0xc3, 0x97};
@@ -40,5 +39,3 @@ TEST(HASH, SignedUnsignedIssue) {
}

}  // namespace leveldb
-
-int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
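The hash test needs even less scaffolding after the migration: googletest's TEST macro generates its own test class, so the placeholder class HASH {}; required by the old harness can go, and the per-file main() disappears on the assumption that the test binary links a shared gtest main (an assumption about the build setup, not shown in this diff). A tiny illustrative case in the same style, using the real leveldb::Hash(data, n, seed) signature but an invented test name:

#include "gtest/gtest.h"

#include "util/hash.h"

namespace leveldb {

// No fixture or placeholder class is needed; TEST() creates one internally.
TEST(HASH, SameInputSameSeedHashesEqually) {
  const char* data = "";
  // Hashing the same bytes with the same seed must be deterministic.
  ASSERT_EQ(Hash(data, 0, 0xbc9f1d34), Hash(data, 0, 0xbc9f1d34));
}

}  // namespace leveldb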

View File

@@ -4,8 +4,8 @@
#include "util/histogram.h"

-#include <math.h>
-#include <stdio.h>
+#include <cmath>
+#include <cstdio>

#include "port/port.h"
@@ -241,11 +241,11 @@ double Histogram::StandardDeviation() const {
std::string Histogram::ToString() const {
  std::string r;
  char buf[200];
-  snprintf(buf, sizeof(buf), "Count: %.0f Average: %.4f StdDev: %.2f\n", num_,
-           Average(), StandardDeviation());
+  std::snprintf(buf, sizeof(buf), "Count: %.0f Average: %.4f StdDev: %.2f\n",
+                num_, Average(), StandardDeviation());
  r.append(buf);
-  snprintf(buf, sizeof(buf), "Min: %.4f Median: %.4f Max: %.4f\n",
+  std::snprintf(buf, sizeof(buf), "Min: %.4f Median: %.4f Max: %.4f\n",
           (num_ == 0.0 ? 0.0 : min_), Median(), max_);
  r.append(buf);
  r.append("------------------------------------------------------\n");
  const double mult = 100.0 / num_;
@@ -253,12 +253,12 @@ std::string Histogram::ToString() const {
  for (int b = 0; b < kNumBuckets; b++) {
    if (buckets_[b] <= 0.0) continue;
    sum += buckets_[b];
-    snprintf(buf, sizeof(buf), "[ %7.0f, %7.0f ) %7.0f %7.3f%% %7.3f%% ",
+    std::snprintf(buf, sizeof(buf), "[ %7.0f, %7.0f ) %7.0f %7.3f%% %7.3f%% ",
             ((b == 0) ? 0.0 : kBucketLimit[b - 1]),  // left
             kBucketLimit[b],                          // right
             buckets_[b],                              // count
             mult * buckets_[b],                       // percentage
             mult * sum);                              // cumulative percentage
    r.append(buf);

    // Add hash marks based on percentage; 20 marks for 100%.

View File

@@ -4,11 +4,9 @@
#include "util/logging.h"

-#include <errno.h>
-#include <stdarg.h>
-#include <stdio.h>
-#include <stdlib.h>
+#include <cstdarg>
+#include <cstdio>
+#include <cstdlib>

#include <limits>

#include "leveldb/env.h"
@@ -18,7 +16,7 @@ namespace leveldb {
void AppendNumberTo(std::string* str, uint64_t num) {
  char buf[30];
-  snprintf(buf, sizeof(buf), "%llu", (unsigned long long)num);
+  std::snprintf(buf, sizeof(buf), "%llu", static_cast<unsigned long long>(num));
  str->append(buf);
}
@@ -29,8 +27,8 @@ void AppendEscapedStringTo(std::string* str, const Slice& value) {
      str->push_back(c);
    } else {
      char buf[10];
-      snprintf(buf, sizeof(buf), "\\x%02x",
+      std::snprintf(buf, sizeof(buf), "\\x%02x",
               static_cast<unsigned int>(c) & 0xff);
      str->append(buf);
    }
  }
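This last file repeats the modernization pattern used throughout the change: C headers such as <stdio.h> and <stdlib.h> become their C++ counterparts (<cstdio>, <cstdlib>), calls are spelled with the std:: prefix, and C-style casts become static_cast. A small self-contained example of the resulting style; AppendDecimal is an invented name, not the function from this diff:

#include <cstdint>
#include <cstdio>
#include <string>

// Bounded formatting in the post-migration style: <cstdio> plus std::snprintf,
// with an explicit static_cast instead of a C-style cast for the vararg.
void AppendDecimal(std::string* str, uint64_t num) {
  char buf[30];
  std::snprintf(buf, sizeof(buf), "%llu", static_cast<unsigned long long>(num));
  str->append(buf);
}

int main() {
  std::string out = "value=";
  AppendDecimal(&out, 12345);
  std::printf("%s\n", out.c_str());  // prints: value=12345
}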

Some files were not shown because too many files have changed in this diff.