posix: Allow ScopedMmap::ResetAddrLen() to deal with overlap

It should be possible to shrink a region already supervised by
ScopedMmap, or, in rare cases where ScopedMmap is supervising only a
smaller portion of an overall larger region, to increase the size of
the region it supervises. This is now equivalent to the operation of
base::mac::ScopedMachVM::reset().

The Reset() and ResetAddrLen() methods are upgraded from returning
void to returning bool so that they can report success or failure.
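
As a rough usage sketch of the new behavior (hypothetical caller code, not
part of this change; the function name and the three-page setup are
illustrative assumptions), a caller can now map a multi-page region and later
trim supervision down to a single page:

#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

#include "util/posix/scoped_mmap.h"

// Hypothetical example: map three pages, then keep only the middle one.
void ShrinkToMiddlePage() {
  const size_t kPageSize = static_cast<size_t>(getpagesize());

  crashpad::ScopedMmap mapping;
  if (!mapping.ResetMmap(nullptr,
                         3 * kPageSize,
                         PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANON,
                         -1,
                         0)) {
    return;
  }

  // The pages below and above the new region are munmap()ed immediately; the
  // middle page stays mapped until |mapping| is destroyed or Reset().
  void* const middle =
      reinterpret_cast<void*>(mapping.addr_as<uintptr_t>() + kPageSize);
  if (!mapping.ResetAddrLen(middle, kPageSize)) {
    return;  // The bool return now reports whether the partial munmap() worked.
  }
}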

Bug: crashpad:30
Test: crashpad_util_test ScopedMmap*.ResetAddrLen_*
Change-Id: I564e154cd2387e8df3f83b416ecc1c83c9bcf71d
Reviewed-on: https://chromium-review.googlesource.com/464286
Commit-Queue: Mark Mentovai <mark@chromium.org>
Reviewed-by: Joshua Peraza <jperaza@chromium.org>
Mark Mentovai authored 2017-03-30 22:39:39 -04:00, committed by Commit Bot
parent 1a6ae8ce0b
commit aa2bc55777
3 changed files with 213 additions and 25 deletions

util/posix/scoped_mmap.cc

@@ -14,27 +14,67 @@
 #include "util/posix/scoped_mmap.h"
 
+#include <unistd.h>
+
+#include <algorithm>
+
 #include "base/logging.h"
 
+namespace {
+
+bool Munmap(uintptr_t addr, size_t len) {
+  if (munmap(reinterpret_cast<void*>(addr), len) != 0) {
+    PLOG(ERROR) << "munmap";
+    return false;
+  }
+
+  return true;
+}
+
+}  // namespace
+
 namespace crashpad {
 
 ScopedMmap::ScopedMmap() {}
 
 ScopedMmap::~ScopedMmap() {
-  Reset();
+  if (is_valid()) {
+    Munmap(reinterpret_cast<uintptr_t>(addr_), len_);
+  }
 }
 
-void ScopedMmap::Reset() {
-  ResetAddrLen(MAP_FAILED, 0);
+bool ScopedMmap::Reset() {
+  return ResetAddrLen(MAP_FAILED, 0);
 }
 
-void ScopedMmap::ResetAddrLen(void* addr, size_t len) {
-  if (is_valid() && munmap(addr_, len_) != 0) {
-    LOG(ERROR) << "munmap";
+bool ScopedMmap::ResetAddrLen(void* addr, size_t len) {
+  const uintptr_t new_addr = reinterpret_cast<uintptr_t>(addr);
+
+  if (addr == MAP_FAILED) {
+    DCHECK_EQ(len, 0u);
+  } else {
+    DCHECK_NE(len, 0u);
+    DCHECK_EQ(new_addr % getpagesize(), 0u);
+    DCHECK_EQ(len % getpagesize(), 0u);
+  }
+
+  bool result = true;
+  if (is_valid()) {
+    const uintptr_t old_addr = reinterpret_cast<uintptr_t>(addr_);
+    if (old_addr < new_addr) {
+      result &= Munmap(old_addr, std::min(len_, new_addr - old_addr));
+    }
+    if (old_addr + len_ > new_addr + len) {
+      uintptr_t unmap_start = std::max(old_addr, new_addr + len);
+      result &= Munmap(unmap_start, old_addr + len_ - unmap_start);
+    }
   }
 
   addr_ = addr;
   len_ = len;
+  return result;
 }
 
 bool ScopedMmap::ResetMmap(void* addr,
@@ -43,21 +83,27 @@ bool ScopedMmap::ResetMmap(void* addr,
                            int flags,
                            int fd,
                            off_t offset) {
+  // Reset() first, so that a new anonymous mapping can use the address space
+  // occupied by the old mapping if appropriate. The new mapping will be
+  // attempted even if there was something wrong with the old mapping, so don't
+  // consider the return value from Reset().
   Reset();
 
   void* new_addr = mmap(addr, len, prot, flags, fd, offset);
   if (new_addr == MAP_FAILED) {
-    LOG(ERROR) << "mmap";
+    PLOG(ERROR) << "mmap";
     return false;
   }
 
+  // The new mapping is effective even if there was something wrong with the old
+  // mapping, so don't consider the return value from ResetAddrLen().
   ResetAddrLen(new_addr, len);
   return true;
 }
 
 bool ScopedMmap::Mprotect(int prot) {
   if (mprotect(addr_, len_, prot) < 0) {
-    LOG(ERROR) << "mprotect";
+    PLOG(ERROR) << "mprotect";
     return false;
   }

util/posix/scoped_mmap.h

@@ -33,19 +33,22 @@ class ScopedMmap {
   //! \brief Releases the memory-mapped region by calling `munmap()`.
   //!
-  //! A message will be logged on failure.
-  void Reset();
+  //! \return `true` on success. `false` on failure, with a message logged.
+  bool Reset();
 
   //! \brief Releases any existing memory-mapped region and sets the object to
   //!     maintain an already-established mapping.
   //!
+  //! If \a addr and \a len indicate a region that overlaps with the existing
+  //! memory-mapped region, only the portion of the existing memory-mapped
+  //! region that does not overlap the new region, if any, will be released.
+  //!
   //! \param[in] addr The base address of the existing memory-mapped region to
   //!     maintain.
   //! \param[in] len The size of the existing memory-mapped region to maintain.
   //!
-  //! A message will be logged on failure to release any existing memory-mapped
-  //!     region, but the new mapping will be set regardless.
-  void ResetAddrLen(void* addr, size_t len);
+  //! \return `true` on success. `false` on failure, with a message logged.
+  bool ResetAddrLen(void* addr, size_t len);
 
   //! \brief Releases any existing memory-mapped region and establishes a new
   //!     one by calling `mmap()`.
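
The overlap rule documented above reduces to at most two munmap() calls: one
for any part of the old region that lies below the new region, and one for any
part that lies above it. Below is a standalone sketch of that range arithmetic
(illustration only; RangesToUnmap is not a Crashpad function, and the real
implementation operates on the object's addr_ and len_ members):

#include <stddef.h>
#include <stdint.h>

#include <algorithm>
#include <utility>
#include <vector>

// Given the old supervised region [old_addr, old_addr + old_len) and the new
// region [new_addr, new_addr + new_len), return the subranges that an
// overlap-aware reset would unmap. Mirrors the logic added to ResetAddrLen().
std::vector<std::pair<uintptr_t, size_t>> RangesToUnmap(uintptr_t old_addr,
                                                        size_t old_len,
                                                        uintptr_t new_addr,
                                                        size_t new_len) {
  std::vector<std::pair<uintptr_t, size_t>> ranges;
  if (old_addr < new_addr) {
    // The part of the old region below the start of the new region.
    ranges.emplace_back(old_addr,
                        std::min<uintptr_t>(old_len, new_addr - old_addr));
  }
  if (old_addr + old_len > new_addr + new_len) {
    // The part of the old region above the end of the new region.
    const uintptr_t start = std::max(old_addr, new_addr + new_len);
    ranges.emplace_back(start, old_addr + old_len - start);
  }
  return ranges;
}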

util/posix/scoped_mmap_test.cc

@@ -20,6 +20,7 @@
 
 #include "base/numerics/safe_conversions.h"
 #include "base/rand_util.h"
+#include "base/strings/stringprintf.h"
 #include "gtest/gtest.h"
 
 namespace crashpad {
@@ -31,17 +32,24 @@ bool ScopedMmapResetMmap(ScopedMmap* mapping, size_t len) {
       nullptr, len, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
 }
 
+void* BareMmap(size_t len) {
+  return mmap(
+      nullptr, len, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
+}
+
 // A weird class. This is used to test that memory-mapped regions are freed
 // as expected by calling munmap(). This is difficult to test well because once
 // a region has been unmapped, the address space it formerly occupied becomes
 // eligible for reuse.
 //
-// The strategy taken here is that a 64-bit cookie value is written into a
-// mapped region by SetUp(). While the mapping is active, Check() should
-// succeed. After the region is unmapped, calling Check() should fail, either
-// because the region has been unmapped and the address not reused, the address
-// has been reused but is protected against reading (unlikely), or because the
-// address has been reused but the cookie value is no longer present there.
+// The strategy taken here is that a random 64-bit cookie value is written into
+// a mapped region by SetUp(). While the mapping is active, Check() should not
+// crash, or for a gtest expectation, Expected() and Observed() should not crash
+// and should be equal. After the region is unmapped, Check() should crash,
+// either because the region has been unmapped and the address not reused, the
+// address has been reused but is protected against reading (unlikely), or
+// because the address has been reused but the cookie value is no longer present
+// there.
 class TestCookie {
  public:
   // A weird constructor for a weird class. The member variable initialization
@@ -56,8 +64,11 @@ class TestCookie {
     *address_ = cookie_;
   }
 
-  void Check() {
-    if (*address_ != cookie_) {
+  uint64_t Expected() const { return cookie_; }
+  uint64_t Observed() const { return *address_; }
+
+  void Check() const {
+    if (Observed() != Expected()) {
       __builtin_trap();
     }
   }
@@ -77,7 +88,7 @@ TEST(ScopedMmap, Mmap) {
   EXPECT_EQ(MAP_FAILED, mapping.addr());
   EXPECT_EQ(0u, mapping.len());
 
-  mapping.Reset();
+  ASSERT_TRUE(mapping.Reset());
   EXPECT_FALSE(mapping.is_valid());
 
   const size_t kPageSize = base::checked_cast<size_t>(getpagesize());
@@ -87,9 +98,9 @@ TEST(ScopedMmap, Mmap) {
   EXPECT_EQ(kPageSize, mapping.len());
 
   cookie.SetUp(mapping.addr_as<uint64_t*>());
-  cookie.Check();
+  EXPECT_EQ(cookie.Expected(), cookie.Observed());
 
-  mapping.Reset();
+  ASSERT_TRUE(mapping.Reset());
   EXPECT_FALSE(mapping.is_valid());
 }
@@ -122,11 +133,139 @@ TEST(ScopedMmapDeathTest, Reset) {
   TestCookie cookie;
   cookie.SetUp(mapping.addr_as<uint64_t*>());
 
-  mapping.Reset();
+  ASSERT_TRUE(mapping.Reset());
   EXPECT_DEATH(cookie.Check(), "");
 }
 
+TEST(ScopedMmapDeathTest, ResetAddrLen_Shrink) {
+  ScopedMmap mapping;
+
+  // Start with three pages mapped.
+  const size_t kPageSize = base::checked_cast<size_t>(getpagesize());
+  ASSERT_TRUE(ScopedMmapResetMmap(&mapping, 3 * kPageSize));
+  EXPECT_TRUE(mapping.is_valid());
+  EXPECT_NE(MAP_FAILED, mapping.addr());
+  EXPECT_EQ(3 * kPageSize, mapping.len());
+
+  TestCookie cookies[3];
+  for (size_t index = 0; index < arraysize(cookies); ++index) {
+    cookies[index].SetUp(reinterpret_cast<uint64_t*>(
+        mapping.addr_as<uintptr_t>() + index * kPageSize));
+  }
+
+  // Reset to the second page. The first and third pages should be unmapped.
+  void* const new_addr =
+      reinterpret_cast<void*>(mapping.addr_as<uintptr_t>() + kPageSize);
+  ASSERT_TRUE(mapping.ResetAddrLen(new_addr, kPageSize));
+  EXPECT_TRUE(mapping.is_valid());
+  EXPECT_EQ(new_addr, mapping.addr());
+  EXPECT_EQ(kPageSize, mapping.len());
+
+  EXPECT_EQ(cookies[1].Expected(), cookies[1].Observed());
+
+  EXPECT_DEATH(cookies[0].Check(), "");
+  EXPECT_DEATH(cookies[2].Check(), "");
+}
+
+TEST(ScopedMmap, ResetAddrLen_Grow) {
+  // Start with three pages mapped, but ScopedMmap only aware of the second
+  // page.
+  const size_t kPageSize = base::checked_cast<size_t>(getpagesize());
+  void* pages = BareMmap(3 * kPageSize);
+  ASSERT_NE(MAP_FAILED, pages);
+
+  ScopedMmap mapping;
+  void* const old_addr =
+      reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(pages) + kPageSize);
+  ASSERT_TRUE(mapping.ResetAddrLen(old_addr, kPageSize));
+  EXPECT_TRUE(mapping.is_valid());
+  EXPECT_EQ(old_addr, mapping.addr());
+  EXPECT_EQ(kPageSize, mapping.len());
+
+  TestCookie cookies[3];
+  for (size_t index = 0; index < arraysize(cookies); ++index) {
+    cookies[index].SetUp(reinterpret_cast<uint64_t*>(
+        reinterpret_cast<uintptr_t>(pages) + index * kPageSize));
+  }
+
+  // Reset to all three pages. Nothing should be unmapped until destruction.
+  ASSERT_TRUE(mapping.ResetAddrLen(pages, 3 * kPageSize));
+  EXPECT_TRUE(mapping.is_valid());
+  EXPECT_EQ(pages, mapping.addr());
+  EXPECT_EQ(3 * kPageSize, mapping.len());
+
+  for (size_t index = 0; index < arraysize(cookies); ++index) {
+    SCOPED_TRACE(base::StringPrintf("index %zu", index));
+    EXPECT_EQ(cookies[index].Expected(), cookies[index].Observed());
+  }
+}
+
+TEST(ScopedMmapDeathTest, ResetAddrLen_MoveDownAndGrow) {
+  // Start with three pages mapped, but ScopedMmap only aware of the third
+  // page.
+  const size_t kPageSize = base::checked_cast<size_t>(getpagesize());
+  void* pages = BareMmap(3 * kPageSize);
+  ASSERT_NE(MAP_FAILED, pages);
+
+  ScopedMmap mapping;
+  void* const old_addr = reinterpret_cast<void*>(
+      reinterpret_cast<uintptr_t>(pages) + 2 * kPageSize);
+  ASSERT_TRUE(mapping.ResetAddrLen(old_addr, kPageSize));
+  EXPECT_TRUE(mapping.is_valid());
+  EXPECT_EQ(old_addr, mapping.addr());
+  EXPECT_EQ(kPageSize, mapping.len());
+
+  TestCookie cookies[3];
+  for (size_t index = 0; index < arraysize(cookies); ++index) {
+    cookies[index].SetUp(reinterpret_cast<uint64_t*>(
+        reinterpret_cast<uintptr_t>(pages) + index * kPageSize));
+  }
+
+  // Reset to the first two pages. The third page should be unmapped.
+  ASSERT_TRUE(mapping.ResetAddrLen(pages, 2 * kPageSize));
+  EXPECT_TRUE(mapping.is_valid());
+  EXPECT_EQ(pages, mapping.addr());
+  EXPECT_EQ(2 * kPageSize, mapping.len());
+
+  EXPECT_EQ(cookies[0].Expected(), cookies[0].Observed());
+  EXPECT_EQ(cookies[1].Expected(), cookies[1].Observed());
+
+  EXPECT_DEATH(cookies[2].Check(), "");
+}
+
+TEST(ScopedMmapDeathTest, ResetAddrLen_MoveUpAndShrink) {
+  // Start with three pages mapped, but ScopedMmap only aware of the first two
+  // pages.
+  const size_t kPageSize = base::checked_cast<size_t>(getpagesize());
+  void* pages = BareMmap(3 * kPageSize);
+  ASSERT_NE(MAP_FAILED, pages);
+
+  ScopedMmap mapping;
+  ASSERT_TRUE(mapping.ResetAddrLen(pages, 2 * kPageSize));
+  EXPECT_TRUE(mapping.is_valid());
+  EXPECT_EQ(pages, mapping.addr());
+  EXPECT_EQ(2 * kPageSize, mapping.len());
+
+  TestCookie cookies[3];
+  for (size_t index = 0; index < arraysize(cookies); ++index) {
+    cookies[index].SetUp(reinterpret_cast<uint64_t*>(
+        reinterpret_cast<uintptr_t>(pages) + index * kPageSize));
+  }
+
+  // Reset to the third page. The first two pages should be unmapped.
+  void* const new_addr =
+      reinterpret_cast<void*>(mapping.addr_as<uintptr_t>() + 2 * kPageSize);
+  ASSERT_TRUE(mapping.ResetAddrLen(new_addr, kPageSize));
+  EXPECT_TRUE(mapping.is_valid());
+  EXPECT_EQ(new_addr, mapping.addr());
+  EXPECT_EQ(kPageSize, mapping.len());
+
+  EXPECT_EQ(cookies[2].Expected(), cookies[2].Observed());
+
+  EXPECT_DEATH(cookies[0].Check(), "");
+  EXPECT_DEATH(cookies[1].Check(), "");
+}
+
 TEST(ScopedMmapDeathTest, ResetMmap) {
   ScopedMmap mapping;