Add most of ProcessReader and its test.

TEST=util_test ProcessReader.*
R=rsesek@chromium.org

Review URL: https://codereview.chromium.org/491963002
Mark Mentovai 2014-08-25 17:51:09 -04:00
parent d0fcfa42e4
commit 8256f9fc23
5 changed files with 1357 additions and 0 deletions

util/mac/process_reader.cc (new file)

@@ -0,0 +1,529 @@
// Copyright 2014 The Crashpad Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "util/mac/process_reader.h"
#include <AvailabilityMacros.h>
#include <mach/mach_vm.h>
#include <mach-o/loader.h>
#include <algorithm>
#include "base/logging.h"
#include "base/mac/mach_logging.h"
#include "base/mac/scoped_mach_port.h"
#include "base/mac/scoped_mach_vm.h"
namespace {
void MachTimeValueToTimeval(const time_value& mach, timeval* tv) {
tv->tv_sec = mach.seconds;
tv->tv_usec = mach.microseconds;
}
kern_return_t MachVMRegionRecurseDeepest(mach_port_t task,
mach_vm_address_t* address,
mach_vm_size_t* size,
natural_t* depth,
vm_prot_t* protection,
unsigned int* user_tag) {
vm_region_submap_short_info_64 submap_info;
mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
while (true) {
kern_return_t kr = mach_vm_region_recurse(
task,
address,
size,
depth,
reinterpret_cast<vm_region_recurse_info_t>(&submap_info),
&count);
if (kr != KERN_SUCCESS) {
return kr;
}
if (!submap_info.is_submap) {
*protection = submap_info.protection;
*user_tag = submap_info.user_tag;
return KERN_SUCCESS;
}
++*depth;
}
}
} // namespace
namespace crashpad {
ProcessReaderThread::ProcessReaderThread()
: thread_context(),
float_context(),
debug_context(),
id(0),
stack_region_address(0),
stack_region_size(0),
thread_specific_data_address(0),
port(MACH_PORT_NULL),
suspend_count(0),
priority(0) {
}
ProcessReaderModule::ProcessReaderModule() : name(), address(0), timestamp(0) {
}
ProcessReaderModule::~ProcessReaderModule() {
}
ProcessReader::ProcessReader()
: kern_proc_info_(),
threads_(),
modules_(),
task_memory_(),
task_(MACH_PORT_NULL),
initialized_(),
is_64_bit_(false),
initialized_threads_(false),
initialized_modules_(false) {
}
ProcessReader::~ProcessReader() {
for (const ProcessReaderThread& thread : threads_) {
kern_return_t kr = mach_port_deallocate(mach_task_self(), thread.port);
MACH_LOG_IF(ERROR, kr != KERN_SUCCESS, kr) << "mach_port_deallocate";
}
}
bool ProcessReader::Initialize(mach_port_t task) {
INITIALIZATION_STATE_SET_INITIALIZING(initialized_);
pid_t pid;
kern_return_t kr = pid_for_task(task, &pid);
if (kr != KERN_SUCCESS) {
MACH_LOG(ERROR, kr) << "pid_for_task";
return false;
}
int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, pid};
size_t len = sizeof(kern_proc_info_);
if (sysctl(mib, arraysize(mib), &kern_proc_info_, &len, NULL, 0) != 0) {
PLOG(ERROR) << "sysctl for pid " << pid;
return false;
}
DCHECK_EQ(kern_proc_info_.kp_proc.p_pid, pid);
is_64_bit_ = kern_proc_info_.kp_proc.p_flag & P_LP64;
task_memory_.reset(new TaskMemory(task));
task_ = task;
INITIALIZATION_STATE_SET_VALID(initialized_);
return true;
}
void ProcessReader::StartTime(timeval* start_time) const {
INITIALIZATION_STATE_DCHECK_VALID(initialized_);
*start_time = kern_proc_info_.kp_proc.p_starttime;
}
bool ProcessReader::CPUTimes(timeval* user_time, timeval* system_time) const {
INITIALIZATION_STATE_DCHECK_VALID(initialized_);
// Calculate user and system time the same way the kernel does for
// getrusage(). See 10.9.2 xnu-2422.90.20/bsd/kern/kern_resource.c calcru().
timerclear(user_time);
timerclear(system_time);
// As of the 10.8 SDK, the preferred routine is MACH_TASK_BASIC_INFO.
// TASK_BASIC_INFO_64 is equivalent and works on earlier systems.
task_basic_info_64 task_basic_info;
mach_msg_type_number_t task_basic_info_count = TASK_BASIC_INFO_64_COUNT;
kern_return_t kr = task_info(task_,
TASK_BASIC_INFO_64,
reinterpret_cast<task_info_t>(&task_basic_info),
&task_basic_info_count);
if (kr != KERN_SUCCESS) {
MACH_LOG(WARNING, kr) << "task_info TASK_BASIC_INFO_64";
return false;
}
task_thread_times_info_data_t task_thread_times;
mach_msg_type_number_t task_thread_times_count = TASK_THREAD_TIMES_INFO_COUNT;
kr = task_info(task_,
TASK_THREAD_TIMES_INFO,
reinterpret_cast<task_info_t>(&task_thread_times),
&task_thread_times_count);
if (kr != KERN_SUCCESS) {
MACH_LOG(WARNING, kr) << "task_info TASK_THREAD_TIMES";
return false;
}
MachTimeValueToTimeval(task_basic_info.user_time, user_time);
MachTimeValueToTimeval(task_basic_info.system_time, system_time);
timeval thread_user_time;
MachTimeValueToTimeval(task_thread_times.user_time, &thread_user_time);
timeval thread_system_time;
MachTimeValueToTimeval(task_thread_times.system_time, &thread_system_time);
timeradd(user_time, &thread_user_time, user_time);
timeradd(system_time, &thread_system_time, system_time);
return true;
}
const std::vector<ProcessReaderThread>& ProcessReader::Threads() {
INITIALIZATION_STATE_DCHECK_VALID(initialized_);
if (!initialized_threads_) {
InitializeThreads();
}
return threads_;
}
const std::vector<ProcessReaderModule>& ProcessReader::Modules() {
INITIALIZATION_STATE_DCHECK_VALID(initialized_);
if (!initialized_modules_) {
InitializeModules();
}
return modules_;
}
void ProcessReader::InitializeThreads() {
DCHECK(!initialized_threads_);
DCHECK(threads_.empty());
initialized_threads_ = true;
thread_act_array_t threads;
mach_msg_type_number_t thread_count = 0;
kern_return_t kr = task_threads(task_, &threads, &thread_count);
if (kr != KERN_SUCCESS) {
MACH_LOG(WARNING, kr) << "task_threads";
return;
}
// The send rights in the |threads| array won't be managed by anything until
// they're added to |threads_| by the loop below. Any early
// return (or exception) that happens between here and the completion of the
// loop below will leak thread port send rights.
base::mac::ScopedMachVM threads_vm(
reinterpret_cast<vm_address_t>(threads),
mach_vm_round_page(thread_count * sizeof(*threads)));
for (size_t index = 0; index < thread_count; ++index) {
ProcessReaderThread thread;
thread.port = threads[index];
#if defined(ARCH_CPU_X86_FAMILY)
const thread_state_flavor_t kThreadStateFlavor =
Is64Bit() ? x86_THREAD_STATE64 : x86_THREAD_STATE32;
mach_msg_type_number_t thread_state_count =
Is64Bit() ? x86_THREAD_STATE64_COUNT : x86_THREAD_STATE32_COUNT;
// TODO(mark): Use the AVX variants instead of the FLOAT variants? They're
// supported on 10.6 and later.
const thread_state_flavor_t kFloatStateFlavor =
Is64Bit() ? x86_FLOAT_STATE64 : x86_FLOAT_STATE32;
mach_msg_type_number_t float_state_count =
Is64Bit() ? x86_FLOAT_STATE64_COUNT : x86_FLOAT_STATE32_COUNT;
const thread_state_flavor_t kDebugStateFlavor =
Is64Bit() ? x86_DEBUG_STATE64 : x86_DEBUG_STATE32;
mach_msg_type_number_t debug_state_count =
Is64Bit() ? x86_DEBUG_STATE64_COUNT : x86_DEBUG_STATE32_COUNT;
#endif
kr = thread_get_state(
thread.port,
kThreadStateFlavor,
reinterpret_cast<thread_state_t>(&thread.thread_context),
&thread_state_count);
if (kr != KERN_SUCCESS) {
MACH_LOG(ERROR, kr) << "thread_get_state(" << kThreadStateFlavor << ")";
continue;
}
kr = thread_get_state(
thread.port,
kFloatStateFlavor,
reinterpret_cast<thread_state_t>(&thread.float_context),
&float_state_count);
if (kr != KERN_SUCCESS) {
MACH_LOG(ERROR, kr) << "thread_get_state(" << kFloatStateFlavor << ")";
continue;
}
kr = thread_get_state(
thread.port,
kDebugStateFlavor,
reinterpret_cast<thread_state_t>(&thread.debug_context),
&debug_state_count);
if (kr != KERN_SUCCESS) {
MACH_LOG(ERROR, kr) << "thread_get_state(" << kDebugStateFlavor << ")";
continue;
}
thread_basic_info basic_info;
mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
kr = thread_info(thread.port,
THREAD_BASIC_INFO,
reinterpret_cast<thread_info_t>(&basic_info),
&count);
if (kr != KERN_SUCCESS) {
MACH_LOG(WARNING, kr) << "thread_info(THREAD_BASIC_INFO)";
} else {
thread.suspend_count = basic_info.suspend_count;
}
thread_identifier_info identifier_info;
count = THREAD_IDENTIFIER_INFO_COUNT;
kr = thread_info(thread.port,
THREAD_IDENTIFIER_INFO,
reinterpret_cast<thread_info_t>(&identifier_info),
&count);
if (kr != KERN_SUCCESS) {
MACH_LOG(WARNING, kr) << "thread_info(THREAD_IDENTIFIER_INFO)";
} else {
thread.id = identifier_info.thread_id;
// thread_identifier_info::thread_handle contains the base of the
// thread-specific data area, which on x86 and x86_64 is the thread's base
// address of the %gs segment. 10.9.2 xnu-2422.90.20/osfmk/kern/thread.c
// thread_info_internal() gets the value from
// machine_thread::cthread_self, which is the same value used to set the
// %gs base in xnu-2422.90.20/osfmk/i386/pcb_native.c
// act_machine_switch_pcb().
//
// This address is the internal pthread's _pthread::tsd[], an array of
// void* values that can be indexed by pthread_key_t values.
thread.thread_specific_data_address = identifier_info.thread_handle;
}
thread_precedence_policy precedence;
count = THREAD_PRECEDENCE_POLICY_COUNT;
boolean_t get_default = FALSE;
kr = thread_policy_get(thread.port,
THREAD_PRECEDENCE_POLICY,
reinterpret_cast<thread_policy_t>(&precedence),
&count,
&get_default);
if (kr != KERN_SUCCESS) {
MACH_LOG(INFO, kr) << "thread_policy_get";
} else {
thread.priority = precedence.importance;
}
#if defined(ARCH_CPU_X86_FAMILY)
mach_vm_address_t stack_pointer = Is64Bit()
? thread.thread_context.t64.__rsp
: thread.thread_context.t32.__esp;
#endif
thread.stack_region_address =
CalculateStackRegion(stack_pointer, &thread.stack_region_size);
threads_.push_back(thread);
}
}
void ProcessReader::InitializeModules() {
DCHECK(!initialized_modules_);
DCHECK(modules_.empty());
initialized_modules_ = true;
// TODO(mark): Complete this implementation. The implementation depends on
// process_types, which cannot land yet because it depends on this file,
// process_reader. This temporary “cut” was made to avoid a review that's too
// large. Yes, this circular dependency is unfortunate. Suggestions are
// welcome.
}
mach_vm_address_t ProcessReader::CalculateStackRegion(
mach_vm_address_t stack_pointer,
mach_vm_size_t* stack_region_size) {
INITIALIZATION_STATE_DCHECK_VALID(initialized_);
// For pthreads, it may be possible to compute the stack region based on the
// internal _pthread::stackaddr and _pthread::stacksize. The _pthread struct
// for a thread can be located at TSD slot 0, or the known offsets of
// stackaddr and stacksize from the TSD area could be used.
mach_vm_address_t region_base = stack_pointer;
mach_vm_size_t region_size;
natural_t depth = 0;
vm_prot_t protection;
unsigned int user_tag;
kern_return_t kr = MachVMRegionRecurseDeepest(
task_, &region_base, &region_size, &depth, &protection, &user_tag);
if (kr != KERN_SUCCESS) {
MACH_LOG(INFO, kr) << "mach_vm_region_recurse";
*stack_region_size = 0;
return 0;
}
if (region_base > stack_pointer) {
// There's nothing mapped at the stack pointer's address. Something may have
// trashed the stack pointer. Note that this shouldn't happen for a normal
// stack guard region violation because the guard region is mapped but has
// VM_PROT_NONE protection.
*stack_region_size = 0;
return 0;
}
mach_vm_address_t start_address = stack_pointer;
if ((protection & VM_PROT_READ) == 0) {
// If the region isn't readable, the stack pointer probably points to the
// guard region. Don't include it as part of the stack, and don't include
// anything at any lower memory address. The code below may still possibly
// find the real stack region at a memory address higher than this region.
start_address = region_base + region_size;
} else {
// If the ABI requires a red zone, adjust the region to include it if
// possible.
LocateRedZone(&start_address, &region_base, &region_size, user_tag);
// Regardless of whether the ABI requires a red zone, capture up to
// kExtraCaptureSize additional bytes of stack, but only if present in the
// region that was already found.
const mach_vm_size_t kExtraCaptureSize = 128;
start_address = std::max(start_address >= kExtraCaptureSize
? start_address - kExtraCaptureSize
: start_address,
region_base);
// Align start_address to a 16-byte boundary, which can help readers by
// ensuring that data is aligned properly. This could page-align instead,
// but that might be wasteful.
const mach_vm_size_t kDesiredAlignment = 16;
start_address &= ~(kDesiredAlignment - 1);
DCHECK_GE(start_address, region_base);
}
region_size -= (start_address - region_base);
region_base = start_address;
mach_vm_size_t total_region_size = region_size;
// The stack region may have gotten split up into multiple abutting regions.
// Try to coalesce them. This frequently happens for the main thread's stack
// when setrlimit(RLIMIT_STACK, …) is called. It may also happen if a region
// is split up due to an mprotect() or vm_protect() call.
//
// Stack regions created by the kernel and the pthreads library will be marked
// with the VM_MEMORY_STACK user tag. Scanning for multiple adjacent regions
// with the same tag should find an entire stack region. Checking that the
// protection on individual regions is not VM_PROT_NONE should guarantee that
// this algorithm doesn't collect map entries belonging to another thread's
// stack: well-behaved stacks (such as those created by the kernel and the
// pthreads library) have VM_PROT_NONE guard regions at their low-address
// ends.
//
// Other stack regions may not be so well-behaved and thus if user_tag is not
// VM_MEMORY_STACK, the single region that was found is used as-is without
// trying to merge it with other adjacent regions.
if (user_tag == VM_MEMORY_STACK) {
mach_vm_address_t try_address = region_base;
mach_vm_address_t original_try_address;
while (try_address += region_size,
original_try_address = try_address,
(kr = MachVMRegionRecurseDeepest(task_,
&try_address,
&region_size,
&depth,
&protection,
&user_tag)) == KERN_SUCCESS &&
try_address == original_try_address &&
(protection & VM_PROT_READ) != 0 &&
user_tag == VM_MEMORY_STACK) {
total_region_size += region_size;
}
if (kr != KERN_SUCCESS && kr != KERN_INVALID_ADDRESS) {
// Tolerate KERN_INVALID_ADDRESS because it will be returned when there
// are no more regions in the map at or above the specified |try_address|.
MACH_LOG(INFO, kr) << "mach_vm_region_recurse";
}
}
*stack_region_size = total_region_size;
return region_base;
}
void ProcessReader::LocateRedZone(mach_vm_address_t* const start_address,
mach_vm_address_t* const region_base,
mach_vm_address_t* const region_size,
const unsigned int user_tag) {
#if defined(ARCH_CPU_X86_FAMILY)
if (Is64Bit()) {
// x86_64 has a red zone. See AMD64 ABI 0.99.6,
// http://www.x86-64.org/documentation/abi.pdf, section 3.2.2, “The Stack
// Frame”.
const mach_vm_size_t kRedZoneSize = 128;
mach_vm_address_t red_zone_base =
*start_address >= kRedZoneSize ? *start_address - kRedZoneSize : 0;
bool red_zone_ok = false;
if (red_zone_base >= *region_base) {
// The red zone is within the region already discovered.
red_zone_ok = true;
} else if (red_zone_base < *region_base && user_tag == VM_MEMORY_STACK) {
// Probe to see if there's a region immediately below the one already
// discovered.
mach_vm_address_t red_zone_region_base = red_zone_base;
mach_vm_size_t red_zone_region_size;
natural_t red_zone_depth = 0;
vm_prot_t red_zone_protection;
unsigned int red_zone_user_tag;
kern_return_t kr = MachVMRegionRecurseDeepest(task_,
&red_zone_region_base,
&red_zone_region_size,
&red_zone_depth,
&red_zone_protection,
&red_zone_user_tag);
if (kr != KERN_SUCCESS) {
MACH_LOG(INFO, kr) << "mach_vm_region_recurse";
*start_address = *region_base;
} else if (red_zone_region_base + red_zone_region_size == *region_base &&
(red_zone_protection & VM_PROT_READ) != 0 &&
red_zone_user_tag == user_tag) {
// The region containing the red zone is immediately below the region
// already found, it's readable (not the guard region), and it has the
// same user tag as the region already found, so merge them.
red_zone_ok = true;
*region_base -= red_zone_region_size;
*region_size += red_zone_region_size;
}
}
if (red_zone_ok) {
// Begin capturing from the base of the red zone (but not the entire
// region that encompasses the red zone).
*start_address = red_zone_base;
} else {
// The red zone would go lower into another region in memory, but no
// region was found. Memory can only be captured to an address as low as
// the base address of the region already found.
*start_address = *region_base;
}
}
#endif
}
} // namespace crashpad
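
A note on the arithmetic in CalculateStackRegion(): when the region containing the stack pointer is readable, capture begins up to kExtraCaptureSize (128) bytes below the stack pointer, clamped to the region base and then rounded down to a 16-byte boundary. The fragment below is an illustrative, standalone reduction of that math; the sample addresses and the DumpCaptureStart name are hypothetical and are not part of this change.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Illustrative sketch only: mirrors the start-address computation performed in
// ProcessReader::CalculateStackRegion() for a readable region with no red-zone
// adjustment. All values are hypothetical.
void DumpCaptureStart() {
  const uint64_t kExtraCaptureSize = 128;  // extra bytes captured below sp
  const uint64_t kDesiredAlignment = 16;   // capture start is 16-byte aligned

  uint64_t region_base = 0x7fff5f800000;    // base of the region that was found
  uint64_t stack_pointer = 0x7fff5fbff8c4;  // thread's current stack pointer

  // Capture up to kExtraCaptureSize bytes below the stack pointer, but never
  // below the base of the region.
  uint64_t start_address =
      std::max(stack_pointer >= kExtraCaptureSize
                   ? stack_pointer - kExtraCaptureSize
                   : stack_pointer,
               region_base);

  // Round down to a 16-byte boundary: 0x7fff5fbff844 & ~0xf == 0x7fff5fbff840.
  start_address &= ~(kDesiredAlignment - 1);

  printf("capture starts at 0x%llx\n",
         static_cast<unsigned long long>(start_address));
}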

util/mac/process_reader.h (new file)

@@ -0,0 +1,209 @@
// Copyright 2014 The Crashpad Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef CRASHPAD_UTIL_MAC_PROCESS_READER_H_
#define CRASHPAD_UTIL_MAC_PROCESS_READER_H_
#include <mach/mach.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <string>
#include <vector>
#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
#include "build/build_config.h"
#include "util/mach/task_memory.h"
#include "util/misc/initialization_state_dcheck.h"
namespace crashpad {
//! \brief Contains information about a thread that belongs to a task (process).
struct ProcessReaderThread {
#if defined(ARCH_CPU_X86_FAMILY)
union ThreadContext {
x86_thread_state64_t t64;
x86_thread_state32_t t32;
};
union FloatContext {
x86_float_state64_t f64;
x86_float_state32_t f32;
};
union DebugContext {
x86_debug_state64_t d64;
x86_debug_state32_t d32;
};
#endif
ProcessReaderThread();
~ProcessReaderThread() {}
ThreadContext thread_context;
FloatContext float_context;
DebugContext debug_context;
uint64_t id;
mach_vm_address_t stack_region_address;
mach_vm_size_t stack_region_size;
mach_vm_address_t thread_specific_data_address;
mach_port_t port;
int suspend_count;
int priority;
};
//! \brief Contains information about a module loaded into a process.
struct ProcessReaderModule {
ProcessReaderModule();
~ProcessReaderModule();
std::string name;
mach_vm_address_t address;
time_t timestamp;
};
//! \brief Accesses information about another process, identified by a Mach
//! task.
class ProcessReader {
public:
ProcessReader();
~ProcessReader();
//! \brief Initializes this object. This method must be called before any
//! other.
//!
//! \param[in] task A send right to the target task's task port. This object
//! does not take ownership of the send right.
//!
//! \return `true` on success, indicating that this object will respond
//! validly to further method calls. `false` on failure. On failure, no
//! further method calls should be made.
bool Initialize(mach_port_t task);
//! \return `true` if the target task is a 64-bit process.
bool Is64Bit() const { return is_64_bit_; }
//! \return The target task's process ID.
pid_t ProcessID() const { return kern_proc_info_.kp_proc.p_pid; }
//! \return The target task's parent process ID.
pid_t ParentProcessID() const { return kern_proc_info_.kp_eproc.e_ppid; }
//! \param[out] start_time The time that the process started.
void StartTime(timeval* start_time) const;
//! \param[out] user_time The amount of time the process has executed code in
//! user mode.
//! \param[out] system_time The amount of time the process has executed code
//! in system mode.
//!
//! \return `true` on success, `false` on failure, with a warning logged. On
//! failure, \a user_time and \a system_time will be set to represent no
//! time spent executing code in user or system mode.
bool CPUTimes(timeval* user_time, timeval* system_time) const;
//! \return Accesses the memory of the target task.
TaskMemory* Memory() { return task_memory_.get(); }
//! \return The threads that are in the task (process).
const std::vector<ProcessReaderThread>& Threads();
//! \return The modules loaded in the process.
const std::vector<ProcessReaderModule>& Modules();
private:
//! Performs lazy initialization of the \a threads_ vector on behalf of
//! Threads().
void InitializeThreads();
//! Performs lazy initialization of the \a modules_ vector on behalf of
//! Modules().
void InitializeModules();
//! \brief Calculates the base address and size of the region used as a
//! thread's stack.
//!
//! The region returned by this method may be formed by merging multiple
//! adjacent regions in a process memory map if appropriate. The base address
//! of the returned region may be lower than the \a stack_pointer passed in
//! when the ABI mandates a red zone below the stack pointer.
//!
//! \param[in] stack_pointer The stack pointer, referring to the top (lowest
//! address) of a thread's stack.
//! \param[out] stack_region_size The size of the memory region used as the
//! thread's stack.
//!
//! \return The base address (lowest address) of the memory region used as the
//! thread's stack.
mach_vm_address_t CalculateStackRegion(mach_vm_address_t stack_pointer,
mach_vm_size_t* stack_region_size);
//! \brief Adjusts the region for the red zone, if the ABI requires one.
//!
//! This method performs red zone calculation for CalculateStackRegion(). Its
//! parameters are local variables used within that method, and may be
//! modified as needed.
//!
//! Where a red zone is required, the region of memory captured for a thread's
//! stack will be extended to include the red zone below the stack pointer,
//! provided that such memory is mapped, readable, and has the correct user
//! tag value. If these conditions cannot be met fully, as much of the red
//! zone will be captured as is possible while meeting these conditions.
//!
//! \param[inout] start_address The base address of the region to begin
//! capturing stack memory from. On entry, \a start_address is the stack
//! pointer. On return, \a start_address may be decreased to encompass a
//! red zone.
//! \param[inout] region_base The base address of the region that contains
//! stack memory. This is distinct from \a start_address in that \a
//! region_base will be page-aligned. On entry, \a region_base is the
//! base address of a region that contains \a start_address. On return,
//! if \a start_address is decremented and is outside of the region
//! originally described by \a region_base, \a region_base will also be
//! decremented appropriately.
//! \param[inout] region_size The size of the region that contains stack
//! memory. This region begins at \a region_base. On return, if \a
//! region_base is decremented, \a region_size will be incremented
//! appropriately.
//! \param[in] user_tag The Mach VM system's user tag for the region described
//! by the initial values of \a region_base and \a region_size. The red
//! zone will only be allowed to extend out of the region described by
//! these initial values if the user tag is appropriate for stack memory
//! and the expanded region has the same user tag value.
void LocateRedZone(mach_vm_address_t* start_address,
mach_vm_address_t* region_base,
mach_vm_address_t* region_size,
unsigned int user_tag);
kinfo_proc kern_proc_info_;
std::vector<ProcessReaderThread> threads_; // owns send rights
std::vector<ProcessReaderModule> modules_;
scoped_ptr<TaskMemory> task_memory_;
mach_port_t task_; // weak
InitializationStateDcheck initialized_;
// This shadows a bit in kern_proc_info_, but it's accessed so frequently that
// it's given a first-class field to save a few bit operations on each access.
bool is_64_bit_;
bool initialized_threads_;
bool initialized_modules_;
DISALLOW_COPY_AND_ASSIGN(ProcessReader);
};
} // namespace crashpad
#endif // CRASHPAD_UTIL_MAC_PROCESS_READER_H_
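
The header above is the entire public surface that later snapshot code will build on. A minimal caller against that interface might look roughly like the sketch below; the DumpSelfThreads helper is illustrative and not part of this commit, and Modules() is omitted because it is still a stub in this change.

#include <mach/mach.h>

#include "base/logging.h"
#include "util/mac/process_reader.h"

// Illustrative sketch: read basic information about the calling task.
bool DumpSelfThreads() {
  crashpad::ProcessReader process_reader;
  if (!process_reader.Initialize(mach_task_self())) {
    // Initialize() has already logged the reason for the failure.
    return false;
  }

  LOG(INFO) << "pid " << process_reader.ProcessID()
            << (process_reader.Is64Bit() ? ", 64-bit" : ", 32-bit");

  // Threads() is lazily initialized on first use and returns a snapshot of the
  // task's threads, including register state, stack extents, and suspend
  // counts.
  for (const crashpad::ProcessReaderThread& thread : process_reader.Threads()) {
    LOG(INFO) << "thread " << thread.id
              << " suspend count " << thread.suspend_count;
  }
  return true;
}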

util/mac/process_reader_test.cc (new file)

@@ -0,0 +1,572 @@
// Copyright 2014 The Crashpad Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "util/mac/process_reader.h"
#include <dispatch/dispatch.h>
#include <mach/mach.h>
#include <string.h>
#include <map>
#include <string>
#include "base/logging.h"
#include "base/mac/scoped_mach_port.h"
#include "base/posix/eintr_wrapper.h"
#include "build/build_config.h"
#include "gtest/gtest.h"
#include "util/file/fd_io.h"
#include "util/stdlib/pointer_container.h"
#include "util/test/mac/mach_errors.h"
#include "util/test/mac/mach_multiprocess.h"
#include "util/test/errors.h"
namespace {
using namespace crashpad;
using namespace crashpad::test;
TEST(ProcessReader, SelfBasic) {
ProcessReader process_reader;
ASSERT_TRUE(process_reader.Initialize(mach_task_self()));
#if !defined(ARCH_CPU_64_BITS)
EXPECT_FALSE(process_reader.Is64Bit());
#else
EXPECT_TRUE(process_reader.Is64Bit());
#endif
EXPECT_EQ(getpid(), process_reader.ProcessID());
EXPECT_EQ(getppid(), process_reader.ParentProcessID());
const char kTestMemory[] = "Some test memory";
char buffer[arraysize(kTestMemory)];
ASSERT_TRUE(process_reader.Memory()->Read(
reinterpret_cast<mach_vm_address_t>(kTestMemory),
sizeof(kTestMemory),
&buffer));
EXPECT_STREQ(kTestMemory, buffer);
}
const char kTestMemory[] = "Read me from another process";
class ProcessReaderChild final : public MachMultiprocess {
public:
ProcessReaderChild() : MachMultiprocess() {}
~ProcessReaderChild() {}
protected:
void Parent() override {
ProcessReader process_reader;
ASSERT_TRUE(process_reader.Initialize(ChildTask()));
#if !defined(ARCH_CPU_64_BITS)
EXPECT_FALSE(process_reader.Is64Bit());
#else
EXPECT_TRUE(process_reader.Is64Bit());
#endif
EXPECT_EQ(getpid(), process_reader.ParentProcessID());
EXPECT_EQ(ChildPID(), process_reader.ProcessID());
int read_fd = ReadPipeFD();
mach_vm_address_t address;
int rv = ReadFD(read_fd, &address, sizeof(address));
ASSERT_EQ(static_cast<ssize_t>(sizeof(address)), rv)
<< ErrnoMessage("read");
std::string read_string;
ASSERT_TRUE(process_reader.Memory()->ReadCString(address, &read_string));
EXPECT_EQ(kTestMemory, read_string);
// Tell the child that it's OK to exit. The child needed to be kept alive
// until the parent finished working with it.
int write_fd = WritePipeFD();
char c = '\0';
rv = WriteFD(write_fd, &c, 1);
ASSERT_EQ(1, rv) << ErrnoMessage("write");
}
void Child() override {
int write_fd = WritePipeFD();
mach_vm_address_t address =
reinterpret_cast<mach_vm_address_t>(kTestMemory);
int rv = WriteFD(write_fd, &address, sizeof(address));
ASSERT_EQ(static_cast<ssize_t>(sizeof(address)), rv)
<< ErrnoMessage("write");
// Wait for the parent to say that it's OK to exit.
int read_fd = ReadPipeFD();
char c;
rv = ReadFD(read_fd, &c, 1);
ASSERT_EQ(1, rv) << ErrnoMessage("read");
}
private:
DISALLOW_COPY_AND_ASSIGN(ProcessReaderChild);
};
TEST(ProcessReader, ChildBasic) {
ProcessReaderChild process_reader_child;
process_reader_child.Run();
}
// Returns a thread ID given a pthread_t. This wraps pthread_threadid_np() but
// that function has a cumbersome interface because it returns a success value.
// This function CHECKs success and returns the thread ID directly.
uint64_t PthreadToThreadID(pthread_t pthread) {
uint64_t thread_id;
int rv = pthread_threadid_np(pthread, &thread_id);
CHECK_EQ(rv, 0);
return thread_id;
}
TEST(ProcessReader, SelfOneThread) {
ProcessReader process_reader;
ASSERT_TRUE(process_reader.Initialize(mach_task_self()));
const std::vector<ProcessReaderThread>& threads = process_reader.Threads();
// If other tests ran in this process previously, threads may have been
// created and may still be running. This check must look for at least one
// thread, not exactly one thread.
ASSERT_GE(threads.size(), 1u);
EXPECT_EQ(PthreadToThreadID(pthread_self()), threads[0].id);
base::mac::ScopedMachSendRight thread_self(mach_thread_self());
EXPECT_EQ(thread_self, threads[0].port);
EXPECT_EQ(0, threads[0].suspend_count);
}
class TestThreadPool {
public:
struct ThreadExpectation {
mach_vm_address_t stack_address;
int suspend_count;
};
TestThreadPool() : thread_infos_() {
}
// Resumes suspended threads, signals each thread's exit semaphore asking it
// to exit, and joins each thread, blocking until they have all exited.
~TestThreadPool() {
for (ThreadInfo* thread_info : thread_infos_) {
mach_port_t thread_port = pthread_mach_thread_np(thread_info->pthread);
while (thread_info->suspend_count > 0) {
kern_return_t kr = thread_resume(thread_port);
EXPECT_EQ(KERN_SUCCESS, kr) << MachErrorMessage(kr, "thread_resume");
--thread_info->suspend_count;
}
}
for (const ThreadInfo* thread_info : thread_infos_) {
dispatch_semaphore_signal(thread_info->exit_semaphore);
}
for (const ThreadInfo* thread_info : thread_infos_) {
int rv = pthread_join(thread_info->pthread, NULL);
CHECK_EQ(0, rv);
}
}
// Starts |thread_count| threads and waits on each thread's ready semaphore,
// so that when this function returns, all threads have been started and have
// all run to the point that they've signalled that they are ready.
void StartThreads(size_t thread_count) {
ASSERT_TRUE(thread_infos_.empty());
for (size_t thread_index = 0; thread_index < thread_count; ++thread_index) {
ThreadInfo* thread_info = new ThreadInfo();
thread_infos_.push_back(thread_info);
int rv = pthread_create(&thread_info->pthread,
NULL,
ThreadMain,
thread_info);
ASSERT_EQ(0, rv);
}
for (const ThreadInfo* thread_info : thread_infos_) {
long rv = dispatch_semaphore_wait(thread_info->ready_semaphore,
DISPATCH_TIME_FOREVER);
ASSERT_EQ(0, rv);
}
// If present, suspend the threads at indices 1 through 3 the same number of
// times as their index. This tests reporting of suspend counts.
for (size_t thread_index = 1;
thread_index < thread_infos_.size() && thread_index < 4;
++thread_index) {
mach_port_t thread_port =
pthread_mach_thread_np(thread_infos_[thread_index]->pthread);
for (size_t suspend_count = 0;
suspend_count < thread_index;
++suspend_count) {
kern_return_t kr = thread_suspend(thread_port);
EXPECT_EQ(KERN_SUCCESS, kr) << MachErrorMessage(kr, "thread_suspend");
if (kr == KERN_SUCCESS) {
++thread_infos_[thread_index]->suspend_count;
}
}
}
}
uint64_t GetThreadInfo(size_t thread_index,
ThreadExpectation* expectation) {
CHECK_LT(thread_index, thread_infos_.size());
const ThreadInfo* thread_info = thread_infos_[thread_index];
expectation->stack_address = thread_info->stack_address;
expectation->suspend_count = thread_info->suspend_count;
return PthreadToThreadID(thread_info->pthread);
}
private:
struct ThreadInfo {
ThreadInfo()
: pthread(NULL),
stack_address(0),
ready_semaphore(dispatch_semaphore_create(0)),
exit_semaphore(dispatch_semaphore_create(0)),
suspend_count(0) {
}
~ThreadInfo() {
dispatch_release(exit_semaphore);
dispatch_release(ready_semaphore);
}
// The thread's ID, set at the time the thread is created.
pthread_t pthread;
// An address somewhere within the thread's stack. The thread sets this in
// its ThreadMain().
mach_vm_address_t stack_address;
// The worker thread signals ready_semaphore to indicate that it's done
// setting up its ThreadInfo structure. The main thread waits on this
// semaphore before using any data that the worker thread is responsible for
// setting.
dispatch_semaphore_t ready_semaphore;
// The worker thread waits on exit_semaphore to determine when it's safe to
// exit. The main thread signals exit_semaphore when it no longer needs the
// worker thread.
dispatch_semaphore_t exit_semaphore;
// The thread's suspend count.
int suspend_count;
};
static void* ThreadMain(void* argument) {
ThreadInfo* thread_info = static_cast<ThreadInfo*>(argument);
thread_info->stack_address =
reinterpret_cast<mach_vm_address_t>(&thread_info);
dispatch_semaphore_signal(thread_info->ready_semaphore);
dispatch_semaphore_wait(thread_info->exit_semaphore, DISPATCH_TIME_FOREVER);
// Check this here after everything's known to be synchronized, otherwise
// there's a race between the parent thread storing this thread's pthread_t
// in thread_info->pthread and this thread starting and attempting to access
// it.
CHECK_EQ(pthread_self(), thread_info->pthread);
return NULL;
}
// This is a PointerVector because the address of a ThreadInfo object is
// passed to each thread's ThreadMain(), so they cannot move around in memory.
PointerVector<ThreadInfo> thread_infos_;
DISALLOW_COPY_AND_ASSIGN(TestThreadPool);
};
typedef std::map<uint64_t, TestThreadPool::ThreadExpectation> ThreadMap;
// Verifies that all of the threads in |threads|, obtained from ProcessReader,
// agree with the expectation in |thread_map|. If |tolerate_extra_threads| is
// true, |threads| is allowed to contain threads that are not listed in
// |thread_map|. This is useful when testing situations where code outside of
// the test's control (such as system libraries) may start threads, or may have
// started threads prior to a test's execution.
void ExpectSeveralThreads(ThreadMap* thread_map,
const std::vector<ProcessReaderThread>& threads,
const bool tolerate_extra_threads) {
if (tolerate_extra_threads) {
ASSERT_GE(threads.size(), thread_map->size());
} else {
ASSERT_EQ(thread_map->size(), threads.size());
}
for (size_t thread_index = 0; thread_index < threads.size(); ++thread_index) {
const ProcessReaderThread& thread = threads[thread_index];
mach_vm_address_t thread_stack_region_end =
thread.stack_region_address + thread.stack_region_size;
const auto& iterator = thread_map->find(thread.id);
if (!tolerate_extra_threads) {
// Make sure that the thread is in the expectation map.
ASSERT_NE(thread_map->end(), iterator);
}
if (iterator != thread_map->end()) {
EXPECT_GE(iterator->second.stack_address, thread.stack_region_address);
EXPECT_LT(iterator->second.stack_address, thread_stack_region_end);
EXPECT_EQ(iterator->second.suspend_count, thread.suspend_count);
// Remove the thread from the expectation map since it's already been
// found. This makes it easy to check for duplicate thread IDs, and makes
// it easy to check that all expected threads were found.
thread_map->erase(iterator);
}
// Make sure that this thread's ID, stack region, and port don't conflict
// with any other thread's. Each thread should have a unique value for its
// ID and port, and each should have its own stack that doesn't touch any
// other thread's stack.
for (size_t other_thread_index = 0;
other_thread_index < threads.size();
++other_thread_index) {
if (thread_index == other_thread_index) {
continue;
}
const ProcessReaderThread& other_thread = threads[other_thread_index];
EXPECT_NE(thread.id, other_thread.id);
EXPECT_NE(thread.port, other_thread.port);
mach_vm_address_t other_thread_stack_region_end =
other_thread.stack_region_address + other_thread.stack_region_size;
EXPECT_FALSE(
thread.stack_region_address >= other_thread.stack_region_address &&
thread.stack_region_address < other_thread_stack_region_end);
EXPECT_FALSE(
thread_stack_region_end > other_thread.stack_region_address &&
thread_stack_region_end <= other_thread_stack_region_end);
}
}
// Make sure that each expected thread was found.
EXPECT_TRUE(thread_map->empty());
}
TEST(ProcessReader, SelfSeveralThreads) {
// Set up the ProcessReader here, before any other threads are running. This
// tests that the threads it returns are lazily initialized as a snapshot of
// the threads at the time of the first call to Threads(), and not at the
// time the ProcessReader was created or initialized.
ProcessReader process_reader;
ASSERT_TRUE(process_reader.Initialize(mach_task_self()));
TestThreadPool thread_pool;
const size_t kChildThreads = 16;
thread_pool.StartThreads(kChildThreads);
if (Test::HasFatalFailure()) {
return;
}
// Build a map of all expected threads, keyed by each thread's ID. The values
// are addresses that should lie somewhere within each thread's stack.
ThreadMap thread_map;
const uint64_t self_thread_id = PthreadToThreadID(pthread_self());
TestThreadPool::ThreadExpectation expectation;
expectation.stack_address = reinterpret_cast<mach_vm_address_t>(&thread_map);
expectation.suspend_count = 0;
thread_map[self_thread_id] = expectation;
for (size_t thread_index = 0; thread_index < kChildThreads; ++thread_index) {
uint64_t thread_id = thread_pool.GetThreadInfo(thread_index, &expectation);
// There can't be any duplicate thread IDs.
EXPECT_EQ(0u, thread_map.count(thread_id));
thread_map[thread_id] = expectation;
}
const std::vector<ProcessReaderThread>& threads = process_reader.Threads();
// Other tests that have run previously may have resulted in the creation of
// threads that still exist, so pass true for |tolerate_extra_threads|.
ExpectSeveralThreads(&thread_map, threads, true);
// When testing in-process, verify that when this thread shows up in the
// vector, it has the expected thread port, and that this thread port only
// shows up once.
base::mac::ScopedMachSendRight thread_self(mach_thread_self());
bool found_thread_self = false;
for (const ProcessReaderThread& thread : threads) {
if (thread.port == thread_self) {
EXPECT_FALSE(found_thread_self);
found_thread_self = true;
EXPECT_EQ(self_thread_id, thread.id);
}
}
EXPECT_TRUE(found_thread_self);
}
class ProcessReaderThreadedChild final : public MachMultiprocess {
public:
explicit ProcessReaderThreadedChild(size_t thread_count)
: MachMultiprocess(),
thread_count_(thread_count) {
}
~ProcessReaderThreadedChild() {}
protected:
void Parent() override {
ProcessReader process_reader;
ASSERT_TRUE(process_reader.Initialize(ChildTask()));
int read_fd = ReadPipeFD();
// Build a map of all expected threads, keyed by each thread's ID, and with
// addresses that should lie somewhere within each thread's stack as values.
// These IDs and addresses all come from the child process via the pipe.
ThreadMap thread_map;
for (size_t thread_index = 0;
thread_index < thread_count_ + 1;
++thread_index) {
uint64_t thread_id;
int rv = ReadFD(read_fd, &thread_id, sizeof(thread_id));
ASSERT_EQ(static_cast<ssize_t>(sizeof(thread_id)), rv)
<< ErrnoMessage("read");
TestThreadPool::ThreadExpectation expectation;
rv = ReadFD(read_fd,
&expectation.stack_address,
sizeof(expectation.stack_address));
ASSERT_EQ(static_cast<ssize_t>(sizeof(expectation.stack_address)), rv)
<< ErrnoMessage("read");
rv = ReadFD(read_fd,
&expectation.suspend_count,
sizeof(expectation.suspend_count));
ASSERT_EQ(static_cast<ssize_t>(sizeof(expectation.suspend_count)), rv)
<< ErrnoMessage("read");
// There can't be any duplicate thread IDs.
EXPECT_EQ(0u, thread_map.count(thread_id));
thread_map[thread_id] = expectation;
}
const std::vector<ProcessReaderThread>& threads = process_reader.Threads();
// The child shouldn't have any threads other than its main thread and the
// ones it created in its pool, so pass false for |tolerate_extra_threads|.
ExpectSeveralThreads(&thread_map, threads, false);
// Tell the child that it's OK to exit. The child needed to be kept alive
// until the parent finished working with it.
int write_fd = WritePipeFD();
char c = '\0';
int rv = WriteFD(write_fd, &c, 1);
ASSERT_EQ(1, rv) << ErrnoMessage("write");
}
void Child() override {
TestThreadPool thread_pool;
thread_pool.StartThreads(thread_count_);
if (testing::Test::HasFatalFailure()) {
return;
}
int write_fd = WritePipeFD();
// This thread isn't part of the thread pool, but the parent will be able
// to inspect it. Write an entry for it.
uint64_t thread_id = PthreadToThreadID(pthread_self());
int rv = WriteFD(write_fd, &thread_id, sizeof(thread_id));
ASSERT_EQ(static_cast<ssize_t>(sizeof(thread_id)), rv)
<< ErrnoMessage("write");
TestThreadPool::ThreadExpectation expectation;
expectation.stack_address = reinterpret_cast<mach_vm_address_t>(&thread_id);
expectation.suspend_count = 0;
rv = WriteFD(write_fd,
&expectation.stack_address,
sizeof(expectation.stack_address));
ASSERT_EQ(static_cast<ssize_t>(sizeof(expectation.stack_address)), rv)
<< ErrnoMessage("write");
rv = WriteFD(write_fd,
&expectation.suspend_count,
sizeof(expectation.suspend_count));
ASSERT_EQ(static_cast<ssize_t>(sizeof(expectation.suspend_count)), rv)
<< ErrnoMessage("write");
// Write an entry for everything in the thread pool.
for (size_t thread_index = 0;
thread_index < thread_count_;
++thread_index) {
uint64_t thread_id =
thread_pool.GetThreadInfo(thread_index, &expectation);
rv = WriteFD(write_fd, &thread_id, sizeof(thread_id));
ASSERT_EQ(static_cast<ssize_t>(sizeof(thread_id)), rv)
<< ErrnoMessage("write");
rv = WriteFD(write_fd,
&expectation.stack_address,
sizeof(expectation.stack_address));
ASSERT_EQ(static_cast<ssize_t>(sizeof(expectation.stack_address)), rv)
<< ErrnoMessage("write");
rv = WriteFD(write_fd,
&expectation.suspend_count,
sizeof(expectation.suspend_count));
ASSERT_EQ(static_cast<ssize_t>(sizeof(expectation.suspend_count)), rv)
<< ErrnoMessage("write");
}
// Wait for the parent to say that it's OK to exit.
int read_fd = ReadPipeFD();
char c;
rv = ReadFD(read_fd, &c, 1);
ASSERT_EQ(1, rv) << ErrnoMessage("read");
}
private:
size_t thread_count_;
DISALLOW_COPY_AND_ASSIGN(ProcessReaderThreadedChild);
};
TEST(ProcessReader, ChildOneThread) {
// The main thread plus zero child threads equals one thread.
const size_t kChildThreads = 0;
ProcessReaderThreadedChild process_reader_threaded_child(kChildThreads);
process_reader_threaded_child.Run();
}
TEST(ProcessReader, ChildSeveralThreads) {
const size_t kChildThreads = 64;
ProcessReaderThreadedChild process_reader_threaded_child(kChildThreads);
process_reader_threaded_child.Run();
}
} // namespace

util/stdlib/pointer_container.h (new file)

@@ -0,0 +1,43 @@
// Copyright 2014 The Crashpad Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef CRASHPAD_UTIL_STDLIB_POINTER_CONTAINER_H_
#define CRASHPAD_UTIL_STDLIB_POINTER_CONTAINER_H_
#include <vector>
#include "base/stl_util.h"
namespace crashpad {
// PointerContainer allows an STL container such as std::vector<> to “own”
// pointer elements stored in it. When the container is destroyed, “delete” will
// be called on its pointer elements.
template <typename ContainerType>
class PointerContainer : public ContainerType {
public:
PointerContainer() : ContainerType(), pointer_deleter_(this) {}
~PointerContainer() {}
private:
STLElementDeleter<ContainerType> pointer_deleter_;
};
template <typename T>
class PointerVector : public PointerContainer<std::vector<T*> > {};
} // namespace crashpad
#endif // CRASHPAD_UTIL_STDLIB_POINTER_CONTAINER_H_
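
PointerVector is what lets TestThreadPool hand out stable ThreadInfo addresses while still cleaning them up automatically. A hedged standalone sketch of that ownership pattern follows; the Widget and UseWidgets names are hypothetical and used only for illustration.

#include "util/stdlib/pointer_container.h"

struct Widget {
  int value;
};

// Illustrative sketch: elements are pushed as raw pointers; the container
// deletes every element when it goes out of scope, so no manual delete is
// needed and the pointed-to objects never move in memory.
void UseWidgets() {
  crashpad::PointerVector<Widget> widgets;
  for (int index = 0; index < 4; ++index) {
    Widget* widget = new Widget();
    widget->value = index;
    widgets.push_back(widget);
  }
}  // STLElementDeleter destroys all Widget objects here.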

util/util.gyp

@@ -38,6 +38,8 @@
'mac/mac_util.h',
'mac/service_management.cc',
'mac/service_management.h',
'mac/process_reader.cc',
'mac/process_reader.h',
'mach/bootstrap.cc',
'mach/bootstrap.h',
'mach/task_memory.cc',
@@ -54,6 +56,7 @@
'posix/process_util_mac.cc',
'stdlib/cxx.h',
'stdlib/objc.h',
'stdlib/pointer_container.h',
'stdlib/strlcpy.cc',
'stdlib/strlcpy.h',
],
@@ -102,6 +105,7 @@
'file/string_file_writer_test.cc',
'mac/launchd_test.mm',
'mac/mac_util_test.mm',
'mac/process_reader_test.cc',
'mac/service_management_test.mm',
'mach/bootstrap_test.cc',
'mach/task_memory_test.cc',