feat: add gperftools

tqcq
2024-03-17 10:11:06 +08:00
parent 719fecd4bc
commit 0b9103e276
163 changed files with 49593 additions and 8 deletions

@@ -0,0 +1,439 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef _BASICTYPES_H_
#define _BASICTYPES_H_
#include <config.h>
#include <string.h> // for memcpy()
#include <inttypes.h> // gets us PRId64, etc
// To use this in an autoconf setting, make sure you run the following
// autoconf macros:
// AC_HEADER_STDC /* for stdint_h and inttypes_h */
// AC_CHECK_TYPES([__int64]) /* defined in some windows platforms */
#include <stdint.h> // to get uint16_t (ISO naming madness)
#include <sys/types.h> // our last best hope for uint16_t
// Standard typedefs
// All Google code is compiled with -funsigned-char to make "char"
// unsigned. Google code therefore doesn't need a "uchar" type.
// TODO(csilvers): how do we make sure unsigned-char works on non-gcc systems?
typedef signed char schar;
typedef int8_t int8;
typedef int16_t int16;
typedef int32_t int32;
typedef int64_t int64;
// NOTE: unsigned types are DANGEROUS in loops and other arithmetical
// places. Use the signed types unless your variable represents a bit
// pattern (eg a hash value) or you really need the extra bit. Do NOT
// use 'unsigned' to express "this value should always be positive";
// use assertions for this.
typedef uint8_t uint8;
typedef uint16_t uint16;
typedef uint32_t uint32;
typedef uint64_t uint64;
const uint16 kuint16max = ( (uint16) 0xFFFF);
const uint32 kuint32max = ( (uint32) 0xFFFFFFFF);
const uint64 kuint64max = ( (((uint64) kuint32max) << 32) | kuint32max );
const int8 kint8max = ( ( int8) 0x7F);
const int16 kint16max = ( ( int16) 0x7FFF);
const int32 kint32max = ( ( int32) 0x7FFFFFFF);
const int64 kint64max = ( ((( int64) kint32max) << 32) | kuint32max );
const int8 kint8min = ( ( int8) 0x80);
const int16 kint16min = ( ( int16) 0x8000);
const int32 kint32min = ( ( int32) 0x80000000);
const int64 kint64min = ( (((uint64) kint32min) << 32) | 0 );
// Define the "portable" printf and scanf macros, if they're not
// already there (via the inttypes.h we #included above, hopefully).
// Mostly it's old systems that don't support inttypes.h, so we assume
// they're 32 bit.
#ifndef PRIx64
#define PRIx64 "llx"
#endif
#ifndef SCNx64
#define SCNx64 "llx"
#endif
#ifndef PRId64
#define PRId64 "lld"
#endif
#ifndef SCNd64
#define SCNd64 "lld"
#endif
#ifndef PRIu64
#define PRIu64 "llu"
#endif
#ifndef PRIxPTR
#define PRIxPTR "lx"
#endif
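// Illustrative sketch (not part of the original header): with these
// macros, a 64-bit value can be printed portably, e.g.
//
//   int64 big = kint64max;
//   printf("big = %" PRId64 "\n", big);   // needs <stdio.h>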
// Also allow for printing of a pthread_t.
#define GPRIuPTHREAD "lu"
#define GPRIxPTHREAD "lx"
#if defined(__CYGWIN__) || defined(__CYGWIN32__) || defined(__APPLE__) || defined(__FreeBSD__)
#define PRINTABLE_PTHREAD(pthreadt) reinterpret_cast<uintptr_t>(pthreadt)
#elif defined(__QNXNTO__)
#define PRINTABLE_PTHREAD(pthreadt) static_cast<intptr_t>(pthreadt)
#else
#define PRINTABLE_PTHREAD(pthreadt) pthreadt
#endif
#if defined(__GNUC__)
#define PREDICT_TRUE(x) __builtin_expect(!!(x), 1)
#define PREDICT_FALSE(x) __builtin_expect(!!(x), 0)
#else
#define PREDICT_TRUE(x) (x)
#define PREDICT_FALSE(x) (x)
#endif
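// Illustrative usage (not part of the original header): annotate a rare
// error path so the compiler lays out the hot path first. The helper
// name is hypothetical:
//
//   if (PREDICT_FALSE(ptr == NULL)) {
//     return HandleAllocationFailure();
//   }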
// A macro to disallow the evil copy constructor and operator= functions
// This should be used in the private: declarations for a class
#define DISALLOW_EVIL_CONSTRUCTORS(TypeName) \
TypeName(const TypeName&); \
void operator=(const TypeName&)
// An alternate name that leaves out the moral judgment... :-)
#define DISALLOW_COPY_AND_ASSIGN(TypeName) DISALLOW_EVIL_CONSTRUCTORS(TypeName)
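// Illustrative usage (not part of the original header); 'Spinlock' is a
// hypothetical class name:
//
//   class Spinlock {
//    public:
//     Spinlock();
//    private:
//     DISALLOW_COPY_AND_ASSIGN(Spinlock);
//   };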
// The COMPILE_ASSERT macro can be used to verify that a compile time
// expression is true. For example, you could use it to verify the
// size of a static array:
//
// COMPILE_ASSERT(sizeof(num_content_type_names) == sizeof(int),
// content_type_names_incorrect_size);
//
// or to make sure a struct is smaller than a certain size:
//
// COMPILE_ASSERT(sizeof(foo) < 128, foo_too_large);
//
// The second argument to the macro is the name of the variable. If
// the expression is false, most compilers will issue a warning/error
// containing the name of the variable.
//
// Implementation details of COMPILE_ASSERT:
//
// - COMPILE_ASSERT works by defining an array type that has -1
// elements (and thus is invalid) when the expression is false.
//
// - The simpler definition
//
// #define COMPILE_ASSERT(expr, msg) typedef char msg[(expr) ? 1 : -1]
//
// does not work, as gcc supports variable-length arrays whose sizes
// are determined at run-time (this is gcc's extension and not part
// of the C++ standard). As a result, gcc fails to reject the
// following code with the simple definition:
//
// int foo;
// COMPILE_ASSERT(foo, msg); // not supposed to compile as foo is
// // not a compile-time constant.
//
// - By using the type CompileAssert<(bool(expr))>, we ensure that
// expr is a compile-time constant. (Template arguments must be
// determined at compile-time.)
//
// - The outer parentheses in CompileAssert<(bool(expr))> are necessary
// to work around a bug in gcc 3.4.4 and 4.0.1. If we had written
//
// CompileAssert<bool(expr)>
//
// instead, these compilers would refuse to compile
//
// COMPILE_ASSERT(5 > 0, some_message);
//
// (They seem to think the ">" in "5 > 0" marks the end of the
// template argument list.)
//
// - The array size is (bool(expr) ? 1 : -1), instead of simply
//
// ((expr) ? 1 : -1).
//
// This is to avoid running into a bug in MS VC 7.1, which
// causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1.
template <bool>
struct CompileAssert {
};
#ifdef HAVE___ATTRIBUTE__
# define ATTRIBUTE_UNUSED __attribute__((unused))
#else
# define ATTRIBUTE_UNUSED
#endif
#if defined(HAVE___ATTRIBUTE__) && defined(HAVE_TLS)
#define ATTR_INITIAL_EXEC __attribute__ ((tls_model ("initial-exec")))
#else
#define ATTR_INITIAL_EXEC
#endif
#define COMPILE_ASSERT(expr, msg) \
typedef CompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1] ATTRIBUTE_UNUSED
#define arraysize(a) (sizeof(a) / sizeof(*(a)))
#define OFFSETOF_MEMBER(strct, field) \
(reinterpret_cast<char*>(&reinterpret_cast<strct*>(16)->field) - \
reinterpret_cast<char*>(16))
// bit_cast<Dest,Source> implements the equivalent of
// "*reinterpret_cast<Dest*>(&source)".
//
// The reinterpret_cast method would produce undefined behavior
// according to ISO C++ specification section 3.10, paragraph 15.
// bit_cast<> calls memcpy() which is blessed by the standard,
// especially by the example in section 3.9.
//
// Fortunately memcpy() is very fast. In optimized mode, with a
// constant size, gcc 2.95.3, gcc 4.0.1, and msvc 7.1 produce inline
// code with the minimal amount of data movement. On a 32-bit system,
// memcpy(d,s,4) compiles to one load and one store, and memcpy(d,s,8)
// compiles to two loads and two stores.
template <class Dest, class Source>
inline Dest bit_cast(const Source& source) {
COMPILE_ASSERT(sizeof(Dest) == sizeof(Source), bitcasting_unequal_sizes);
Dest dest;
memcpy(&dest, &source, sizeof(dest));
return dest;
}
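// Illustrative usage (not part of the original header): inspect the bit
// pattern of a float without invoking undefined behavior.
//
//   float f = 1.0f;
//   uint32 bits = bit_cast<uint32>(f);  // 0x3F800000 on IEEE-754 platforms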
// bit_store<Dest,Source> implements the equivalent of
// "dest = *reinterpret_cast<Dest*>(&source)".
//
// This prevents undefined behavior when the dest pointer is unaligned.
template <class Dest, class Source>
inline void bit_store(Dest *dest, const Source *source) {
COMPILE_ASSERT(sizeof(Dest) == sizeof(Source), bitcasting_unequal_sizes);
memcpy(dest, source, sizeof(Dest));
}
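// Illustrative usage (not part of the original header): store a word
// into a possibly-unaligned byte buffer.
//
//   char buf[sizeof(uint32)];
//   uint32 value = 42;
//   bit_store(reinterpret_cast<uint32*>(buf), &value);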
#ifdef HAVE___ATTRIBUTE__
# define ATTRIBUTE_WEAK __attribute__((weak))
# define ATTRIBUTE_NOINLINE __attribute__((noinline))
#else
# define ATTRIBUTE_WEAK
# define ATTRIBUTE_NOINLINE
#endif
#ifdef _MSC_VER
#undef ATTRIBUTE_NOINLINE
#define ATTRIBUTE_NOINLINE __declspec(noinline)
#endif
#if defined(HAVE___ATTRIBUTE__) && defined(__ELF__)
# define ATTRIBUTE_VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
#else
# define ATTRIBUTE_VISIBILITY_HIDDEN
#endif
// Section attributes are supported for both ELF and Mach-O, but in
// very different ways. Here's the API we provide:
// 1) ATTRIBUTE_SECTION: put this with the declaration of all functions
// you want to be in the same linker section
// 2) DEFINE_ATTRIBUTE_SECTION_VARS: must be called once per unique
// name. You want to make sure this is executed before any
// DECLARE_ATTRIBUTE_SECTION_VARS; the easiest way is to put them
// in the same .cc file. Put this call at the global level.
// 3) INIT_ATTRIBUTE_SECTION_VARS: you can scatter calls to this in
// multiple places to help ensure execution before any
// DECLARE_ATTRIBUTE_SECTION_VARS. You must have at least one
// DEFINE, but you can have many INITs. Put each in its own scope.
// 4) DECLARE_ATTRIBUTE_SECTION_VARS: must be called before using
// ATTRIBUTE_SECTION_START or ATTRIBUTE_SECTION_STOP on a name.
// Put this call at the global level.
// 5) ATTRIBUTE_SECTION_START/ATTRIBUTE_SECTION_STOP: call this to say
// where in memory a given section is. All functions declared with
// ATTRIBUTE_SECTION are guaranteed to be between START and STOP.
#if defined(HAVE___ATTRIBUTE__) && defined(__ELF__)
# define ATTRIBUTE_SECTION(name) __attribute__ ((section (#name))) __attribute__((noinline))
// Weak section declaration to be used as a global declaration
// for ATTRIBUTE_SECTION_START|STOP(name) to compile and link
// even without functions with ATTRIBUTE_SECTION(name).
# define DECLARE_ATTRIBUTE_SECTION_VARS(name) \
extern char __start_##name[] ATTRIBUTE_WEAK; \
extern char __stop_##name[] ATTRIBUTE_WEAK
# define INIT_ATTRIBUTE_SECTION_VARS(name) // no-op for ELF
# define DEFINE_ATTRIBUTE_SECTION_VARS(name) // no-op for ELF
// Return void* pointers to start/end of a section of code with functions
// having ATTRIBUTE_SECTION(name), or 0 if no such function exists.
// One must DECLARE_ATTRIBUTE_SECTION(name) for this to compile and link.
# define ATTRIBUTE_SECTION_START(name) (reinterpret_cast<void*>(__start_##name))
# define ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast<void*>(__stop_##name))
# define HAVE_ATTRIBUTE_SECTION_START 1
#elif defined(HAVE___ATTRIBUTE__) && defined(__MACH__)
# define ATTRIBUTE_SECTION(name) __attribute__ ((section ("__TEXT, " #name))) __attribute__((noinline))
#include <mach-o/getsect.h>
#include <mach-o/dyld.h>
class AssignAttributeStartEnd {
public:
AssignAttributeStartEnd(const char* name, char** pstart, char** pend) {
// Find out what dynamic library name is defined in
for (int i = _dyld_image_count() - 1; i >= 0; --i) {
const mach_header* hdr = _dyld_get_image_header(i);
#ifdef MH_MAGIC_64
if (hdr->magic == MH_MAGIC_64) {
uint64_t len;
*pstart = getsectdatafromheader_64((mach_header_64*)hdr,
"__TEXT", name, &len);
if (*pstart) { // NULL if not defined in this dynamic library
*pstart += _dyld_get_image_vmaddr_slide(i); // correct for reloc
*pend = *pstart + len;
return;
}
}
#endif
if (hdr->magic == MH_MAGIC) {
uint32_t len;
*pstart = getsectdatafromheader(hdr, "__TEXT", name, &len);
if (*pstart) { // NULL if not defined in this dynamic library
*pstart += _dyld_get_image_vmaddr_slide(i); // correct for reloc
*pend = *pstart + len;
return;
}
}
}
// If we get here, not defined in a dll at all. See if defined statically.
unsigned long len; // don't ask me why this type isn't uint32_t too...
*pstart = getsectdata("__TEXT", name, &len);
*pend = *pstart + len;
}
};
#define DECLARE_ATTRIBUTE_SECTION_VARS(name) \
extern char* __start_##name; \
extern char* __stop_##name
#define INIT_ATTRIBUTE_SECTION_VARS(name) \
DECLARE_ATTRIBUTE_SECTION_VARS(name); \
static const AssignAttributeStartEnd __assign_##name( \
#name, &__start_##name, &__stop_##name)
#define DEFINE_ATTRIBUTE_SECTION_VARS(name) \
char* __start_##name, *__stop_##name; \
INIT_ATTRIBUTE_SECTION_VARS(name)
# define ATTRIBUTE_SECTION_START(name) (reinterpret_cast<void*>(__start_##name))
# define ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast<void*>(__stop_##name))
# define HAVE_ATTRIBUTE_SECTION_START 1
#else // not HAVE___ATTRIBUTE__ && __ELF__, nor HAVE___ATTRIBUTE__ && __MACH__
# define ATTRIBUTE_SECTION(name)
# define DECLARE_ATTRIBUTE_SECTION_VARS(name)
# define INIT_ATTRIBUTE_SECTION_VARS(name)
# define DEFINE_ATTRIBUTE_SECTION_VARS(name)
# define ATTRIBUTE_SECTION_START(name) (reinterpret_cast<void*>(0))
# define ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast<void*>(0))
#endif // HAVE___ATTRIBUTE__ and __ELF__ or __MACH__
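// Illustrative sketch of the API described above (not part of the
// original header); 'my_section' and the function names are hypothetical:
//
//   void Traced() ATTRIBUTE_SECTION(my_section);   // 1) tag functions
//   DEFINE_ATTRIBUTE_SECTION_VARS(my_section);     // 2) once, at global level
//   DECLARE_ATTRIBUTE_SECTION_VARS(my_section);    // 4) before START/STOP
//
//   bool InMySection(const void* pc) {             // 5) query section bounds
//     return ATTRIBUTE_SECTION_START(my_section) <= pc &&
//            pc < ATTRIBUTE_SECTION_STOP(my_section);
//   }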
#if defined(HAVE___ATTRIBUTE__)
# if (defined(__i386__) || defined(__x86_64__))
# define CACHELINE_ALIGNED __attribute__((aligned(64)))
# elif (defined(__PPC__) || defined(__PPC64__) || defined(__ppc__) || defined(__ppc64__))
# define CACHELINE_ALIGNED __attribute__((aligned(16)))
# elif (defined(__arm__))
# define CACHELINE_ALIGNED __attribute__((aligned(64)))
// some ARMs have shorter cache lines (ARM1176JZF-S is 32 bytes for example) but obviously 64-byte aligned implies 32-byte aligned
# elif (defined(__mips__))
# define CACHELINE_ALIGNED __attribute__((aligned(128)))
# elif (defined(__aarch64__))
# define CACHELINE_ALIGNED __attribute__((aligned(64)))
// implementation specific, Cortex-A53 and 57 should have 64 bytes
# elif (defined(__s390__))
# define CACHELINE_ALIGNED __attribute__((aligned(256)))
# elif (defined(__riscv) && __riscv_xlen == 64)
# define CACHELINE_ALIGNED __attribute__((aligned(64)))
# elif defined(__loongarch64)
# define CACHELINE_ALIGNED __attribute__((aligned(64)))
# else
# error Could not determine cache line length - unknown architecture
# endif
#else
# define CACHELINE_ALIGNED
#endif // defined(HAVE___ATTRIBUTE__)
#if defined(HAVE___ATTRIBUTE__ALIGNED_FN)
# define CACHELINE_ALIGNED_FN CACHELINE_ALIGNED
#else
# define CACHELINE_ALIGNED_FN
#endif
// Structure for discovering alignment
union MemoryAligner {
void* p;
double d;
size_t s;
} CACHELINE_ALIGNED;
#if defined(HAVE___ATTRIBUTE__) && defined(__ELF__)
#define ATTRIBUTE_HIDDEN __attribute__((visibility("hidden")))
#else
#define ATTRIBUTE_HIDDEN
#endif
#if defined(__GNUC__)
#define ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline))
#elif defined(_MSC_VER)
#define ATTRIBUTE_ALWAYS_INLINE __forceinline
#else
#define ATTRIBUTE_ALWAYS_INLINE
#endif
// The following enum should be used only as a constructor argument to indicate
// that the variable has static storage class, and that the constructor should
// do nothing to its state. It indicates to the reader that it is legal to
// declare a static instance of the class, provided the constructor is given
// the base::LINKER_INITIALIZED argument. Normally, it is unsafe to declare a
// static variable that has a constructor or a destructor because invocation
// order is undefined. However, IF the type can be initialized by filling with
// zeroes (which the loader does for static variables), AND the destructor also
// does nothing to the storage, then a constructor declared as
// explicit MyClass(base::LinkerInitialized x) {}
// and invoked as
// static MyClass my_variable_name(base::LINKER_INITIALIZED);
// is safe.
namespace base {
enum LinkerInitialized { LINKER_INITIALIZED };
}
#endif // _BASICTYPES_H_

@@ -0,0 +1,175 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// This file is a compatibility layer that defines Google's version of
// command line flags that are used for configuration.
//
// We put flags into their own namespace. It is purposefully
// named in an opaque way that people should have trouble typing
// directly. The idea is that DEFINE puts the flag in the weird
// namespace, and DECLARE imports the flag from there into the
// current namespace. The net result is to force people to use
// DECLARE to get access to a flag, rather than saying
// extern bool FLAGS_logtostderr;
// or some such instead. We want this so we can put extra
// functionality (like sanity-checking) in DECLARE if we want,
// and make sure it is picked up everywhere.
//
// We also put the type of the variable in the namespace, so that
// people can't DECLARE_int32 something that they DEFINE_bool'd
// elsewhere.
#ifndef BASE_COMMANDLINEFLAGS_H_
#define BASE_COMMANDLINEFLAGS_H_
#include <config.h>
#include <string>
#include <string.h> // for memchr
#include <stdlib.h> // for getenv
#include "base/basictypes.h"
#define DECLARE_VARIABLE(type, name) \
namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead { \
extern PERFTOOLS_DLL_DECL type FLAGS_##name; \
} \
using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name
#define DEFINE_VARIABLE(type, name, value, meaning) \
namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead { \
PERFTOOLS_DLL_DECL type FLAGS_##name(value); \
char FLAGS_no##name; \
} \
using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name
// bool specialization
#define DECLARE_bool(name) \
DECLARE_VARIABLE(bool, name)
#define DEFINE_bool(name, value, meaning) \
DEFINE_VARIABLE(bool, name, value, meaning)
// int32 specialization
#define DECLARE_int32(name) \
DECLARE_VARIABLE(int32, name)
#define DEFINE_int32(name, value, meaning) \
DEFINE_VARIABLE(int32, name, value, meaning)
// int64 specialization
#define DECLARE_int64(name) \
DECLARE_VARIABLE(int64, name)
#define DEFINE_int64(name, value, meaning) \
DEFINE_VARIABLE(int64, name, value, meaning)
#define DECLARE_uint64(name) \
DECLARE_VARIABLE(uint64, name)
#define DEFINE_uint64(name, value, meaning) \
DEFINE_VARIABLE(uint64, name, value, meaning)
// double specialization
#define DECLARE_double(name) \
DECLARE_VARIABLE(double, name)
#define DEFINE_double(name, value, meaning) \
DEFINE_VARIABLE(double, name, value, meaning)
// Special case for string, because we have to specify the namespace
// std::string, which doesn't play nicely with our FLAG__namespace hackery.
#define DECLARE_string(name) \
namespace FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead { \
extern std::string FLAGS_##name; \
} \
using FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead::FLAGS_##name
#define DEFINE_string(name, value, meaning) \
namespace FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead { \
std::string FLAGS_##name(value); \
char FLAGS_no##name; \
} \
using FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead::FLAGS_##name
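// Illustrative usage (not part of the original header); the flag name is
// hypothetical. One file defines the flag, any other file declares it:
//
//   // in exactly one .cc file:
//   DEFINE_int32(tcmalloc_example_limit, 16, "An example limit");
//
//   // in any file that reads it:
//   DECLARE_int32(tcmalloc_example_limit);
//   if (FLAGS_tcmalloc_example_limit > 0) { /* ... */ }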
// implemented in sysinfo.cc
namespace tcmalloc {
namespace commandlineflags {
inline bool StringToBool(const char *value, bool def) {
if (!value) {
return def;
}
switch (value[0]) {
case 't':
case 'T':
case 'y':
case 'Y':
case '1':
case '\0':
return true;
}
return false;
}
inline int StringToInt(const char *value, int def) {
if (!value) {
return def;
}
return strtol(value, NULL, 10);
}
inline long long StringToLongLong(const char *value, long long def) {
if (!value) {
return def;
}
return strtoll(value, NULL, 10);
}
inline double StringToDouble(const char *value, double def) {
if (!value) {
return def;
}
return strtod(value, NULL);
}
}
}
// These macros (could be functions, but I don't want to bother with a .cc
// file) make it easier to initialize flags from the environment.
#define EnvToString(envname, dflt) \
(!getenv(envname) ? (dflt) : getenv(envname))
#define EnvToBool(envname, dflt) \
tcmalloc::commandlineflags::StringToBool(getenv(envname), dflt)
#define EnvToInt(envname, dflt) \
tcmalloc::commandlineflags::StringToInt(getenv(envname), dflt)
#define EnvToInt64(envname, dflt) \
tcmalloc::commandlineflags::StringToLongLong(getenv(envname), dflt)
#define EnvToDouble(envname, dflt) \
tcmalloc::commandlineflags::StringToDouble(getenv(envname), dflt)
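// Illustrative usage (not part of the original header); the flag and
// environment variable names are hypothetical:
//
//   DEFINE_bool(sample_feature,
//               EnvToBool("TCMALLOC_SAMPLE_FEATURE", false),
//               "Enable the sample feature");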
#endif // BASE_COMMANDLINEFLAGS_H_

@@ -0,0 +1,60 @@
// -*- Mode: c; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2008-2009, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Kostya Serebryany
*/
#include "config.h"
#include <stdlib.h>
#include <string.h>
#include "base/dynamic_annotations.h"
#include "getenv_safe.h" // for TCMallocGetenvSafe
static int GetRunningOnValgrind(void) {
#ifdef RUNNING_ON_VALGRIND
if (RUNNING_ON_VALGRIND) return 1;
#endif
const char *running_on_valgrind_str = TCMallocGetenvSafe("RUNNING_ON_VALGRIND");
if (running_on_valgrind_str) {
return strcmp(running_on_valgrind_str, "0") != 0;
}
return 0;
}
/* See the comments in dynamic_annotations.h */
int RunningOnValgrind(void) {
static volatile int running_on_valgrind = -1;
int local_running_on_valgrind = running_on_valgrind;
if (local_running_on_valgrind == -1)
running_on_valgrind = local_running_on_valgrind = GetRunningOnValgrind();
return local_running_on_valgrind;
}

@@ -0,0 +1,86 @@
/* -*- Mode: c; c-basic-offset: 2; indent-tabs-mode: nil -*- */
/* Copyright (c) 2008, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Kostya Serebryany
*/
/* This file defines dynamic annotations for use with dynamic analysis
tools such as valgrind, PIN, etc.
Dynamic annotation is a source code annotation that affects
the generated code (that is, the annotation is not a comment).
Each such annotation is attached to a particular
instruction and/or to a particular object (address) in the program.
The annotations that should be used by users are macros in all upper-case
(e.g., ANNOTATE_NEW_MEMORY).
Actual implementation of these macros may differ depending on the
dynamic analysis tool being used.
See http://code.google.com/p/data-race-test/ for more information.
This file supports the following dynamic analysis tools:
- None (DYNAMIC_ANNOTATIONS_ENABLED is not defined or zero).
Macros are defined empty.
- ThreadSanitizer, Helgrind, DRD (DYNAMIC_ANNOTATIONS_ENABLED is 1).
Macros are defined as calls to non-inlinable empty functions
that are intercepted by Valgrind. */
#ifndef BASE_DYNAMIC_ANNOTATIONS_H_
#define BASE_DYNAMIC_ANNOTATIONS_H_
#ifdef __cplusplus
extern "C" {
#endif
/* Return non-zero value if running under valgrind.
If "valgrind.h" is included into dynamic_annotations.c,
the regular valgrind mechanism will be used.
See http://valgrind.org/docs/manual/manual-core-adv.html about
RUNNING_ON_VALGRIND and other valgrind "client requests".
The file "valgrind.h" may be obtained by doing
svn co svn://svn.valgrind.org/valgrind/trunk/include
If for some reason you can't use "valgrind.h" or want to fake valgrind,
there are two ways to make this function return non-zero:
- Use environment variable: export RUNNING_ON_VALGRIND=1
- Make your tool intercept the function RunningOnValgrind() and
change its return value.
*/
int RunningOnValgrind(void);
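/* Illustrative usage (not part of the original header): skip or scale
 * down timing-sensitive work when running under valgrind, e.g.
 *
 *   int iterations = RunningOnValgrind() ? 10 : 10000;
 */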
#ifdef __cplusplus
}
#endif
#endif /* BASE_DYNAMIC_ANNOTATIONS_H_ */

@@ -0,0 +1,434 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Paul Pluzhnikov
//
// Allow dynamic symbol lookup in an in-memory Elf image.
//
#include "base/elf_mem_image.h"
#ifdef HAVE_ELF_MEM_IMAGE // defined in elf_mem_image.h
#include <stddef.h> // for size_t, ptrdiff_t
#include "base/logging.h"
// From binutils/include/elf/common.h (this doesn't appear to be documented
// anywhere else).
//
// /* This flag appears in a Versym structure. It means that the symbol
// is hidden, and is only visible with an explicit version number.
// This is a GNU extension. */
// #define VERSYM_HIDDEN 0x8000
//
// /* This is the mask for the rest of the Versym information. */
// #define VERSYM_VERSION 0x7fff
#define VERSYM_VERSION 0x7fff
namespace base {
namespace {
template <int N> class ElfClass {
public:
static const int kElfClass = -1;
static int ElfBind(const ElfW(Sym) *) {
CHECK(false); // << "Unexpected word size";
return 0;
}
static int ElfType(const ElfW(Sym) *) {
CHECK(false); // << "Unexpected word size";
return 0;
}
};
template <> class ElfClass<32> {
public:
static const int kElfClass = ELFCLASS32;
static int ElfBind(const ElfW(Sym) *symbol) {
return ELF32_ST_BIND(symbol->st_info);
}
static int ElfType(const ElfW(Sym) *symbol) {
return ELF32_ST_TYPE(symbol->st_info);
}
};
template <> class ElfClass<64> {
public:
static const int kElfClass = ELFCLASS64;
static int ElfBind(const ElfW(Sym) *symbol) {
return ELF64_ST_BIND(symbol->st_info);
}
static int ElfType(const ElfW(Sym) *symbol) {
return ELF64_ST_TYPE(symbol->st_info);
}
};
typedef ElfClass<__WORDSIZE> CurrentElfClass;
// Extract an element from one of the ELF tables, cast it to desired type.
// This is just a simple arithmetic and a glorified cast.
// Callers are responsible for bounds checking.
template <class T>
const T* GetTableElement(const ElfW(Ehdr) *ehdr,
ElfW(Off) table_offset,
ElfW(Word) element_size,
size_t index) {
return reinterpret_cast<const T*>(reinterpret_cast<const char *>(ehdr)
+ table_offset
+ index * element_size);
}
} // namespace
const void *const ElfMemImage::kInvalidBase =
reinterpret_cast<const void *>(~0L);
ElfMemImage::ElfMemImage(const void *base) {
CHECK(base != kInvalidBase);
Init(base);
}
int ElfMemImage::GetNumSymbols() const {
if (!hash_) {
return 0;
}
// See http://www.caldera.com/developers/gabi/latest/ch5.dynamic.html#hash
return hash_[1];
}
const ElfW(Sym) *ElfMemImage::GetDynsym(int index) const {
CHECK_LT(index, GetNumSymbols());
return dynsym_ + index;
}
const ElfW(Versym) *ElfMemImage::GetVersym(int index) const {
CHECK_LT(index, GetNumSymbols());
return versym_ + index;
}
const ElfW(Phdr) *ElfMemImage::GetPhdr(int index) const {
CHECK_LT(index, ehdr_->e_phnum);
return GetTableElement<ElfW(Phdr)>(ehdr_,
ehdr_->e_phoff,
ehdr_->e_phentsize,
index);
}
const char *ElfMemImage::GetDynstr(ElfW(Word) offset) const {
CHECK_LT(offset, strsize_);
return dynstr_ + offset;
}
const void *ElfMemImage::GetSymAddr(const ElfW(Sym) *sym) const {
if (sym->st_shndx == SHN_UNDEF || sym->st_shndx >= SHN_LORESERVE) {
// Symbol corresponds to "special" (e.g. SHN_ABS) section.
return reinterpret_cast<const void *>(sym->st_value);
}
CHECK_LT(link_base_, sym->st_value);
return GetTableElement<char>(ehdr_, 0, 1, sym->st_value) - link_base_;
}
const ElfW(Verdef) *ElfMemImage::GetVerdef(int index) const {
CHECK_LE(index, verdefnum_);
const ElfW(Verdef) *version_definition = verdef_;
while (version_definition->vd_ndx < index && version_definition->vd_next) {
const char *const version_definition_as_char =
reinterpret_cast<const char *>(version_definition);
version_definition =
reinterpret_cast<const ElfW(Verdef) *>(version_definition_as_char +
version_definition->vd_next);
}
return version_definition->vd_ndx == index ? version_definition : NULL;
}
const ElfW(Verdaux) *ElfMemImage::GetVerdefAux(
const ElfW(Verdef) *verdef) const {
return reinterpret_cast<const ElfW(Verdaux) *>(verdef+1);
}
const char *ElfMemImage::GetVerstr(ElfW(Word) offset) const {
CHECK_LT(offset, strsize_);
return dynstr_ + offset;
}
void ElfMemImage::Init(const void *base) {
ehdr_ = NULL;
dynsym_ = NULL;
dynstr_ = NULL;
versym_ = NULL;
verdef_ = NULL;
hash_ = NULL;
strsize_ = 0;
verdefnum_ = 0;
link_base_ = ~0L; // Sentinel: PT_LOAD .p_vaddr can't possibly be this.
if (!base) {
return;
}
const intptr_t base_as_uintptr_t = reinterpret_cast<uintptr_t>(base);
// Fake VDSO has low bit set.
const bool fake_vdso = ((base_as_uintptr_t & 1) != 0);
base = reinterpret_cast<const void *>(base_as_uintptr_t & ~1);
const char *const base_as_char = reinterpret_cast<const char *>(base);
if (base_as_char[EI_MAG0] != ELFMAG0 || base_as_char[EI_MAG1] != ELFMAG1 ||
base_as_char[EI_MAG2] != ELFMAG2 || base_as_char[EI_MAG3] != ELFMAG3) {
RAW_DCHECK(false, "no ELF magic"); // at %p", base);
return;
}
int elf_class = base_as_char[EI_CLASS];
if (elf_class != CurrentElfClass::kElfClass) {
DCHECK_EQ(elf_class, CurrentElfClass::kElfClass);
return;
}
switch (base_as_char[EI_DATA]) {
case ELFDATA2LSB: {
if (__LITTLE_ENDIAN != __BYTE_ORDER) {
DCHECK_EQ(__LITTLE_ENDIAN, __BYTE_ORDER); // << ": wrong byte order";
return;
}
break;
}
case ELFDATA2MSB: {
if (__BIG_ENDIAN != __BYTE_ORDER) {
DCHECK_EQ(__BIG_ENDIAN, __BYTE_ORDER); // << ": wrong byte order";
return;
}
break;
}
default: {
RAW_DCHECK(false, "unexpected data encoding"); // << base_as_char[EI_DATA];
return;
}
}
ehdr_ = reinterpret_cast<const ElfW(Ehdr) *>(base);
const ElfW(Phdr) *dynamic_program_header = NULL;
for (int i = 0; i < ehdr_->e_phnum; ++i) {
const ElfW(Phdr) *const program_header = GetPhdr(i);
switch (program_header->p_type) {
case PT_LOAD:
if (link_base_ == ~0L) {
link_base_ = program_header->p_vaddr;
}
break;
case PT_DYNAMIC:
dynamic_program_header = program_header;
break;
}
}
if (link_base_ == ~0L || !dynamic_program_header) {
RAW_DCHECK(~0L != link_base_, "no PT_LOADs in VDSO");
RAW_DCHECK(dynamic_program_header, "no PT_DYNAMIC in VDSO");
// Mark this image as not present. Cannot recurse infinitely.
Init(0);
return;
}
ptrdiff_t relocation =
base_as_char - reinterpret_cast<const char *>(link_base_);
ElfW(Dyn) *dynamic_entry =
reinterpret_cast<ElfW(Dyn) *>(dynamic_program_header->p_vaddr +
relocation);
for (; dynamic_entry->d_tag != DT_NULL; ++dynamic_entry) {
ElfW(Xword) value = dynamic_entry->d_un.d_val;
if (fake_vdso) {
// A complication: in the real VDSO, dynamic entries are not relocated
// (it wasn't loaded by a dynamic loader). But when testing with a
// "fake" dlopen()ed vdso library, the loader relocates some (but
// not all!) of them before we get here.
if (dynamic_entry->d_tag == DT_VERDEF) {
// The only dynamic entry (of the ones we care about) libc-2.3.6
// loader doesn't relocate.
value += relocation;
}
} else {
// Real VDSO. Everything needs to be relocated.
value += relocation;
}
switch (dynamic_entry->d_tag) {
case DT_HASH:
hash_ = reinterpret_cast<ElfW(Word) *>(value);
break;
case DT_SYMTAB:
dynsym_ = reinterpret_cast<ElfW(Sym) *>(value);
break;
case DT_STRTAB:
dynstr_ = reinterpret_cast<const char *>(value);
break;
case DT_VERSYM:
versym_ = reinterpret_cast<ElfW(Versym) *>(value);
break;
case DT_VERDEF:
verdef_ = reinterpret_cast<ElfW(Verdef) *>(value);
break;
case DT_VERDEFNUM:
verdefnum_ = dynamic_entry->d_un.d_val;
break;
case DT_STRSZ:
strsize_ = dynamic_entry->d_un.d_val;
break;
default:
// Unrecognized entries explicitly ignored.
break;
}
}
if (!hash_ || !dynsym_ || !dynstr_ || !versym_ ||
!verdef_ || !verdefnum_ || !strsize_) {
RAW_DCHECK(hash_, "invalid VDSO (no DT_HASH)");
RAW_DCHECK(dynsym_, "invalid VDSO (no DT_SYMTAB)");
RAW_DCHECK(dynstr_, "invalid VDSO (no DT_STRTAB)");
RAW_DCHECK(versym_, "invalid VDSO (no DT_VERSYM)");
RAW_DCHECK(verdef_, "invalid VDSO (no DT_VERDEF)");
RAW_DCHECK(verdefnum_, "invalid VDSO (no DT_VERDEFNUM)");
RAW_DCHECK(strsize_, "invalid VDSO (no DT_STRSZ)");
// Mark this image as not present. Cannot recurse infinitely.
Init(0);
return;
}
}
bool ElfMemImage::LookupSymbol(const char *name,
const char *version,
int type,
SymbolInfo *info) const {
for (SymbolIterator it = begin(); it != end(); ++it) {
if (strcmp(it->name, name) == 0 && strcmp(it->version, version) == 0 &&
CurrentElfClass::ElfType(it->symbol) == type) {
if (info) {
*info = *it;
}
return true;
}
}
return false;
}
bool ElfMemImage::LookupSymbolByAddress(const void *address,
SymbolInfo *info_out) const {
for (SymbolIterator it = begin(); it != end(); ++it) {
const char *const symbol_start =
reinterpret_cast<const char *>(it->address);
const char *const symbol_end = symbol_start + it->symbol->st_size;
if (symbol_start <= address && address < symbol_end) {
if (info_out) {
// Client wants to know details for that symbol (the usual case).
if (CurrentElfClass::ElfBind(it->symbol) == STB_GLOBAL) {
// Strong symbol; just return it.
*info_out = *it;
return true;
} else {
// Weak or local. Record it, but keep looking for a strong one.
*info_out = *it;
}
} else {
// Client only cares if there is an overlapping symbol.
return true;
}
}
}
return false;
}
ElfMemImage::SymbolIterator::SymbolIterator(const void *const image, int index)
: index_(index), image_(image) {
}
const ElfMemImage::SymbolInfo *ElfMemImage::SymbolIterator::operator->() const {
return &info_;
}
const ElfMemImage::SymbolInfo& ElfMemImage::SymbolIterator::operator*() const {
return info_;
}
bool ElfMemImage::SymbolIterator::operator==(const SymbolIterator &rhs) const {
return this->image_ == rhs.image_ && this->index_ == rhs.index_;
}
bool ElfMemImage::SymbolIterator::operator!=(const SymbolIterator &rhs) const {
return !(*this == rhs);
}
ElfMemImage::SymbolIterator &ElfMemImage::SymbolIterator::operator++() {
this->Update(1);
return *this;
}
ElfMemImage::SymbolIterator ElfMemImage::begin() const {
SymbolIterator it(this, 0);
it.Update(0);
return it;
}
ElfMemImage::SymbolIterator ElfMemImage::end() const {
return SymbolIterator(this, GetNumSymbols());
}
void ElfMemImage::SymbolIterator::Update(int increment) {
const ElfMemImage *image = reinterpret_cast<const ElfMemImage *>(image_);
CHECK(image->IsPresent() || increment == 0);
if (!image->IsPresent()) {
return;
}
index_ += increment;
if (index_ >= image->GetNumSymbols()) {
index_ = image->GetNumSymbols();
return;
}
const ElfW(Sym) *symbol = image->GetDynsym(index_);
const ElfW(Versym) *version_symbol = image->GetVersym(index_);
CHECK(symbol && version_symbol);
const char *const symbol_name = image->GetDynstr(symbol->st_name);
const ElfW(Versym) version_index = version_symbol[0] & VERSYM_VERSION;
const ElfW(Verdef) *version_definition = NULL;
const char *version_name = "";
if (symbol->st_shndx == SHN_UNDEF) {
// Undefined symbols reference DT_VERNEED, not DT_VERDEF, and
// version_index could well be greater than verdefnum_, so calling
// GetVerdef(version_index) may trigger assertion.
} else {
version_definition = image->GetVerdef(version_index);
}
if (version_definition) {
// I am expecting 1 or 2 auxiliary entries: 1 for the version itself,
// optional 2nd if the version has a parent.
CHECK_LE(1, version_definition->vd_cnt);
CHECK_LE(version_definition->vd_cnt, 2);
const ElfW(Verdaux) *version_aux = image->GetVerdefAux(version_definition);
version_name = image->GetVerstr(version_aux->vda_name);
}
info_.name = symbol_name;
info_.version = version_name;
info_.address = image->GetSymAddr(symbol);
info_.symbol = symbol;
}
} // namespace base
#endif // HAVE_ELF_MEM_IMAGE

@@ -0,0 +1,135 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Paul Pluzhnikov
//
// Allow dynamic symbol lookup for in-memory Elf images.
#ifndef BASE_ELF_MEM_IMAGE_H_
#define BASE_ELF_MEM_IMAGE_H_
#include <config.h>
#ifdef HAVE_FEATURES_H
#include <features.h> // for __GLIBC__
#endif
// Maybe one day we can rewrite this file not to require the elf
// symbol extensions in glibc, but for right now we need them.
#if defined(__ELF__) && defined(__GLIBC__) && !defined(__native_client__)
#define HAVE_ELF_MEM_IMAGE 1
#include <stdlib.h>
#include <link.h> // for ElfW
namespace base {
// An in-memory ELF image (may not exist on disk).
class ElfMemImage {
public:
// Sentinel: there could never be an elf image at this address.
static const void *const kInvalidBase;
// Information about a single vdso symbol.
// All pointers are into .dynsym, .dynstr, or .text of the VDSO.
// Do not free() them or modify through them.
struct SymbolInfo {
const char *name; // E.g. "__vdso_getcpu"
const char *version; // E.g. "LINUX_2.6", could be ""
// for unversioned symbol.
const void *address; // Relocated symbol address.
const ElfW(Sym) *symbol; // Symbol in the dynamic symbol table.
};
// Supports iteration over all dynamic symbols.
class SymbolIterator {
public:
friend class ElfMemImage;
const SymbolInfo *operator->() const;
const SymbolInfo &operator*() const;
SymbolIterator& operator++();
bool operator!=(const SymbolIterator &rhs) const;
bool operator==(const SymbolIterator &rhs) const;
private:
SymbolIterator(const void *const image, int index);
void Update(int incr);
SymbolInfo info_;
int index_;
const void *const image_;
};
explicit ElfMemImage(const void *base);
void Init(const void *base);
bool IsPresent() const { return ehdr_ != NULL; }
const ElfW(Phdr)* GetPhdr(int index) const;
const ElfW(Sym)* GetDynsym(int index) const;
const ElfW(Versym)* GetVersym(int index) const;
const ElfW(Verdef)* GetVerdef(int index) const;
const ElfW(Verdaux)* GetVerdefAux(const ElfW(Verdef) *verdef) const;
const char* GetDynstr(ElfW(Word) offset) const;
const void* GetSymAddr(const ElfW(Sym) *sym) const;
const char* GetVerstr(ElfW(Word) offset) const;
int GetNumSymbols() const;
SymbolIterator begin() const;
SymbolIterator end() const;
// Look up versioned dynamic symbol in the image.
// Returns false if image is not present, or doesn't contain given
// symbol/version/type combination.
// If info_out != NULL, additional details are filled in.
bool LookupSymbol(const char *name, const char *version,
int symbol_type, SymbolInfo *info_out) const;
// Find info about symbol (if any) which overlaps given address.
// Returns true if symbol was found; false if image isn't present
// or doesn't have a symbol overlapping given address.
// If info_out != NULL, additional details are filled in.
bool LookupSymbolByAddress(const void *address, SymbolInfo *info_out) const;
private:
const ElfW(Ehdr) *ehdr_;
const ElfW(Sym) *dynsym_;
const ElfW(Versym) *versym_;
const ElfW(Verdef) *verdef_;
const ElfW(Word) *hash_;
const char *dynstr_;
size_t strsize_;
size_t verdefnum_;
ElfW(Addr) link_base_; // Link-time base (p_vaddr of first PT_LOAD).
};
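// Illustrative usage (not part of the original header): look up a vdso
// symbol by name and version, assuming 'vdso_base' was obtained
// elsewhere (e.g. from getauxval(AT_SYSINFO_EHDR)):
//
//   base::ElfMemImage image(vdso_base);
//   base::ElfMemImage::SymbolInfo info;
//   if (image.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info)) {
//     // info.address is the relocated entry point.
//   }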
} // namespace base
#endif // __ELF__ and __GLIBC__ and !__native_client__
#endif // BASE_ELF_MEM_IMAGE_H_

@@ -0,0 +1,74 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Jacob Hoffman-Andrews
#ifndef _GOOGLEINIT_H
#define _GOOGLEINIT_H
#include "base/logging.h"
class GoogleInitializer {
public:
typedef void (*VoidFunction)(void);
GoogleInitializer(const char* name, VoidFunction ctor, VoidFunction dtor)
: name_(name), destructor_(dtor) {
RAW_VLOG(10, "<GoogleModuleObject> constructing: %s\n", name_);
if (ctor)
ctor();
}
~GoogleInitializer() {
RAW_VLOG(10, "<GoogleModuleObject> destroying: %s\n", name_);
if (destructor_)
destructor_();
}
private:
const char* const name_;
const VoidFunction destructor_;
};
#define REGISTER_MODULE_INITIALIZER(name, body) \
namespace { \
static void google_init_module_##name () { body; } \
GoogleInitializer google_initializer_module_##name(#name, \
google_init_module_##name, NULL); \
}
#define REGISTER_MODULE_DESTRUCTOR(name, body) \
namespace { \
static void google_destruct_module_##name () { body; } \
GoogleInitializer google_destructor_module_##name(#name, \
NULL, google_destruct_module_##name); \
}
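// Illustrative usage (not part of the original header); the module name
// and body are hypothetical:
//
//   REGISTER_MODULE_INITIALIZER(my_module, {
//     SetupMyModule();   // runs when static initializers execute
//   });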
#endif /* _GOOGLEINIT_H */

@@ -0,0 +1,727 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2005-2007, Google Inc.
* Copyright (c) 2023, gperftools Contributors
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Markus Gutschke
*
* Substantial upgrades by Aliaksey Kandratsenka. All bugs are mine.
*/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include "base/linuxthreads.h"
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/prctl.h>
#include <sys/ptrace.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>
#include <atomic>
#include "base/basictypes.h"
#include "base/logging.h"
#ifndef CLONE_UNTRACED
#define CLONE_UNTRACED 0x00800000
#endif
#ifndef PR_SET_PTRACER
#define PR_SET_PTRACER 0x59616d61
#endif
namespace {
class SetPTracerSetup {
public:
~SetPTracerSetup() {
if (need_cleanup_) {
prctl(PR_SET_PTRACER, 0, 0, 0, 0);
}
}
void Prepare(int clone_pid) {
if (prctl(PR_SET_PTRACER, clone_pid, 0, 0, 0) == 0) {
need_cleanup_ = true;
}
}
private:
bool need_cleanup_ = false;
};
class UniqueFD {
public:
explicit UniqueFD(int fd) : fd_(fd) {}
int ReleaseFD() {
int retval = fd_;
fd_ = -1;
return retval;
}
~UniqueFD() {
if (fd_ < 0) {
return;
}
(void)close(fd_);
}
private:
int fd_;
};
template <typename Body>
struct SimpleCleanup {
const Body body;
explicit SimpleCleanup(const Body& body) : body(body) {}
~SimpleCleanup() {
body();
}
};
template <typename Body>
SimpleCleanup<Body> MakeSimpleCleanup(const Body& body) {
return SimpleCleanup<Body>{body};
};
} // namespace
/* Synchronous signals that should not be blocked while in the lister thread.
*/
static const int sync_signals[] = {
SIGABRT, SIGILL,
SIGFPE, SIGSEGV, SIGBUS,
#ifdef SIGEMT
SIGEMT,
#endif
SIGSYS, SIGTRAP,
SIGXCPU, SIGXFSZ };
ATTRIBUTE_NOINLINE
static int local_clone (int (*fn)(void *), void *arg) {
#ifdef __PPC64__
/* To avoid the gap crossing page boundaries, increase by the large page
* size mostly used on PowerPC systems. */
// FIXME(alk): I don't really understand why ppc needs this and why
// 64k pages matter. I.e. some other architectures have 64k pages,
// so should we do the same there?
uintptr_t clone_stack_size = 64 << 10;
#else
uintptr_t clone_stack_size = 4 << 10;
#endif
bool grows_to_low = (&arg < arg);
if (grows_to_low) {
// Negate clone_stack_size if stack grows to lower addresses
// (common for arch-es that matter).
clone_stack_size = ~clone_stack_size + 1;
}
#if defined(__i386__) || defined(__x86_64__) || defined(__riscv) || defined(__arm__) || defined(__aarch64__)
// Sanity check code above. We know that those arch-es grow stack to
// lower addresses.
CHECK(grows_to_low);
#endif
/* Leave 4kB of gap between the callers stack and the new clone. This
* should be more than sufficient for the caller to call waitpid() until
* the cloned thread terminates.
*
* It is important that we set the CLONE_UNTRACED flag, because newer
* versions of "gdb" otherwise attempt to attach to our thread, and will
* attempt to reap its status codes. This subsequently results in the
* caller hanging indefinitely in waitpid(), waiting for a change in
* status that will never happen. By setting the CLONE_UNTRACED flag, we
* prevent "gdb" from stealing events, but we still expect the thread
* lister to fail, because it cannot PTRACE_ATTACH to the process that
* is being debugged. This is OK and the error code will be reported
* correctly.
*/
uintptr_t stack_addr = reinterpret_cast<uintptr_t>(&arg) + clone_stack_size;
stack_addr &= ~63; // align stack address on 64 bytes (x86 needs 16, but let's be generous)
return clone(fn, reinterpret_cast<void*>(stack_addr),
CLONE_VM|CLONE_FS|CLONE_FILES|CLONE_UNTRACED,
arg, 0, 0, 0);
}
/* Local substitute for the atoi() function, which is not necessarily safe
* to call once threads are suspended (depending on whether libc looks up
* locale information when executing atoi()).
*/
static int local_atoi(const char *s) {
int n = 0;
int neg = *s == '-';
if (neg)
s++;
while (*s >= '0' && *s <= '9')
n = 10*n + (*s++ - '0');
return neg ? -n : n;
}
static int ptrace_detach(pid_t pid) {
return ptrace(PTRACE_DETACH, pid, nullptr, nullptr);
}
/* Re-runs fn until it doesn't cause EINTR
*/
#define NO_INTR(fn) do {} while ((fn) < 0 && errno == EINTR)
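/* Illustrative usage (not part of the original file); 'fd', 'buf' and
 * 'rc' are hypothetical:
 *
 *   ssize_t rc;
 *   NO_INTR(rc = read(fd, buf, sizeof(buf)));  // retries read() on EINTR
 */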
/* abort() is not safely reentrant, and changes its behavior each time
* it is called. This means, if the main application ever called abort()
* we cannot safely call it again. This would happen if we were called
* from a SIGABRT signal handler in the main application. So, document
* that calling SIGABRT from the thread lister makes it not signal safe
* (and vice-versa).
* Also, since we share address space with the main application, we
* cannot call abort() from the callback and expect the main application
* to behave correctly afterwards. In fact, the only thing we can do, is
* to terminate the main application with extreme prejudice (aka
* PTRACE_KILL).
* We set up our own SIGABRT handler to do this.
* In order to find the main application from the signal handler, we
* need to store information about it in global variables. This is
* safe, because the main application should be suspended at this
* time. If the callback ever called TCMalloc_ResumeAllProcessThreads(), then
* we are running a higher risk, though. So, try to avoid calling
* abort() after calling TCMalloc_ResumeAllProcessThreads.
*/
static volatile int *sig_pids, sig_num_threads;
/* Signal handler to help us recover from dying while we are attached to
* other threads.
*/
static void SignalHandler(int signum, siginfo_t *si, void *data) {
RAW_LOG(ERROR, "Got fatal signal %d inside ListerThread", signum);
if (sig_pids != NULL) {
if (signum == SIGABRT) {
prctl(PR_SET_PDEATHSIG, 0);
while (sig_num_threads-- > 0) {
/* Not sure if sched_yield is really necessary here, but it does not */
/* hurt, and it might be necessary for the same reasons that we have */
/* to do so in ptrace_detach(). */
sched_yield();
ptrace(PTRACE_KILL, sig_pids[sig_num_threads], 0, 0);
}
} else if (sig_num_threads > 0) {
TCMalloc_ResumeAllProcessThreads(sig_num_threads, (int *)sig_pids);
}
}
sig_pids = NULL;
syscall(SYS_exit, signum == SIGABRT ? 1 : 2);
}
/* Try to dirty the stack, and hope that the compiler is not smart enough
* to optimize this function away. Or worse, the compiler could inline the
* function and permanently allocate the data on the stack.
*/
static void DirtyStack(size_t amount) {
char buf[amount];
memset(buf, 0, amount);
read(-1, buf, amount);
}
/* Data structure for passing arguments to the lister thread.
*/
#define ALT_STACKSIZE (MINSIGSTKSZ + 4096)
struct ListerParams {
int result, err;
pid_t ppid;
int start_pipe_rd;
int start_pipe_wr;
char *altstack_mem;
ListAllProcessThreadsCallBack callback;
void *parameter;
va_list ap;
int proc_fd;
};
struct kernel_dirent64 { // see man 2 getdents
int64_t d_ino; /* 64-bit inode number */
int64_t d_off; /* 64-bit offset to next structure */
unsigned short d_reclen; /* Size of this dirent */
unsigned char d_type; /* File type */
char d_name[]; /* Filename (null-terminated) */
};
static const kernel_dirent64 *BumpDirentPtr(const kernel_dirent64 *ptr, uintptr_t by_bytes) {
return reinterpret_cast<kernel_dirent64*>(reinterpret_cast<uintptr_t>(ptr) + by_bytes);
}
static int ListerThread(struct ListerParams *args) {
int found_parent = 0;
pid_t clone_pid = syscall(SYS_gettid);
int proc = args->proc_fd, num_threads = 0;
int max_threads = 0, sig;
struct stat proc_sb;
stack_t altstack;
/* Wait for the parent thread to set appropriate permissions to allow
 * ptrace activity. Note we use a pipe pair here, which also ensures
 * we don't sleep past the parent's death.
 */
(void)close(args->start_pipe_wr);
{
char tmp;
read(args->start_pipe_rd, &tmp, sizeof(tmp));
}
// No point in continuing if parent dies before/during ptracing.
prctl(PR_SET_PDEATHSIG, SIGKILL);
/* Catch signals on an alternate pre-allocated stack. This way, we can
* safely execute the signal handler even if we ran out of memory.
*/
memset(&altstack, 0, sizeof(altstack));
altstack.ss_sp = args->altstack_mem;
altstack.ss_flags = 0;
altstack.ss_size = ALT_STACKSIZE;
sigaltstack(&altstack, nullptr);
/* Some kernels forget to wake up traced processes, when the
* tracer dies. So, intercept synchronous signals and make sure
* that we wake up our tracees before dying. It is the caller's
* responsibility to ensure that asynchronous signals do not
* interfere with this function.
*/
for (sig = 0; sig < sizeof(sync_signals)/sizeof(*sync_signals); sig++) {
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_sigaction = SignalHandler;
sigfillset(&sa.sa_mask);
sa.sa_flags = SA_ONSTACK|SA_SIGINFO|SA_RESETHAND;
sigaction(sync_signals[sig], &sa, nullptr);
}
/* Read process directories in /proc/... */
for (;;) {
if (lseek(proc, 0, SEEK_SET) < 0) {
goto failure;
}
if (fstat(proc, &proc_sb) < 0) {
goto failure;
}
/* Since we are suspending threads, we cannot call any libc
* functions that might acquire locks. Most notably, we cannot
* call malloc(). So, we have to allocate memory on the stack,
* instead. Since we do not know how much memory we need, we
* make a best guess. And if we guessed incorrectly we retry on
* a second iteration (by jumping to "detach_threads").
*
* Unless the number of threads is increasing very rapidly, we
* should never need to do so, though, as our guesstimate is very
* conservative.
*/
if (max_threads < proc_sb.st_nlink + 100) {
max_threads = proc_sb.st_nlink + 100;
}
/* scope */ {
pid_t pids[max_threads];
int added_entries = 0;
sig_num_threads = num_threads;
sig_pids = pids;
for (;;) {
// let's make sure buf is aligned properly to store kernel_dirent64-s.
int64_t buf[4096 / sizeof(int64_t)];
ssize_t nbytes = syscall(SYS_getdents64, proc, buf, sizeof(buf));
// fprintf(stderr, "nbytes = %zd\n", nbytes);
if (nbytes < 0) {
goto failure;
}
if (nbytes == 0) {
if (added_entries) {
/* Need to keep iterating over "/proc" in multiple
* passes until we no longer find any more threads. This
* algorithm eventually completes, when all threads have
* been suspended.
*/
added_entries = 0;
lseek(proc, 0, SEEK_SET);
continue;
}
break;
}
const kernel_dirent64 *entry = reinterpret_cast<kernel_dirent64*>(buf);
const kernel_dirent64 *end = BumpDirentPtr(entry, nbytes);
for (;entry < end; entry = BumpDirentPtr(entry, entry->d_reclen)) {
if (entry->d_ino == 0) {
continue;
}
const char *ptr = entry->d_name;
// fprintf(stderr, "name: %s\n", ptr);
pid_t pid;
/* Some kernels hide threads by preceding the pid with a '.' */
if (*ptr == '.')
ptr++;
/* If the directory is not numeric, it cannot be a
* process/thread
*/
if (*ptr < '0' || *ptr > '9')
continue;
pid = local_atoi(ptr);
// fprintf(stderr, "pid = %d (%d)\n", pid, getpid());
if (!pid || pid == clone_pid) {
continue;
}
/* Attach (and suspend) all threads */
long i, j;
/* Found one of our threads, make sure it is no duplicate */
for (i = 0; i < num_threads; i++) {
/* Linear search is slow, but should not matter much for
* the typically small number of threads.
*/
if (pids[i] == pid) {
/* Found a duplicate; most likely on second pass */
goto next_entry;
}
}
/* Check whether data structure needs growing */
if (num_threads >= max_threads) {
/* Back to square one, this time with more memory */
goto detach_threads;
}
/* Attaching to thread suspends it */
pids[num_threads++] = pid;
sig_num_threads = num_threads;
if (ptrace(PTRACE_ATTACH, pid, (void *)0,
(void *)0) < 0) {
/* If operation failed, ignore thread. Maybe it
* just died? There might also be a race
* condition with a concurrent core dumper or
* with a debugger. In that case, we will just
* make a best effort, rather than failing
* entirely.
*/
num_threads--;
sig_num_threads = num_threads;
goto next_entry;
}
while (waitpid(pid, (int *)0, __WALL) < 0) {
if (errno != EINTR) {
ptrace_detach(pid);
num_threads--;
sig_num_threads = num_threads;
goto next_entry;
}
}
if (syscall(SYS_ptrace, PTRACE_PEEKDATA, pid, &i, &j) || i++ != j ||
syscall(SYS_ptrace, PTRACE_PEEKDATA, pid, &i, &j) || i != j) {
/* Address spaces are distinct. This is probably
* a forked child process rather than a thread.
*/
ptrace_detach(pid);
num_threads--;
sig_num_threads = num_threads;
goto next_entry;
}
found_parent |= pid == args->ppid;
added_entries++;
next_entry:;
} // entries iterations loop
} // getdents loop
/* If we never found the parent process, something is very wrong.
* Most likely, we are running under a debugger. Any attempt to operate
* on the threads would be very incomplete. Let's just report an
* error to the caller.
*/
if (!found_parent) {
TCMalloc_ResumeAllProcessThreads(num_threads, pids);
return 3;
}
/* Now we are ready to call the callback,
* which takes care of resuming the threads for us.
*/
args->result = args->callback(args->parameter, num_threads,
pids, args->ap);
args->err = errno;
/* Callback should have resumed threads, but better safe than sorry */
if (TCMalloc_ResumeAllProcessThreads(num_threads, pids)) {
/* Callback forgot to resume at least one thread, report error */
args->err = EINVAL;
args->result = -1;
}
return 0;
detach_threads:
/* Resume all threads prior to retrying the operation */
TCMalloc_ResumeAllProcessThreads(num_threads, pids);
sig_pids = NULL;
num_threads = 0;
sig_num_threads = num_threads;
max_threads += 100;
} // pids[max_threads] scope
} // for (;;)
failure:
args->result = -1;
args->err = errno;
return 1;
}
/* This function gets the list of all linux threads of the current process
 * and passes them to the 'callback' along with the 'parameter' pointer; at
 * the time the callback is called, all the threads are paused via
 * PTRACE_ATTACH.
* The callback is executed from a separate thread which shares only the
* address space, the filesystem, and the filehandles with the caller. Most
* notably, it does not share the same pid and ppid; and if it terminates,
* the rest of the application is still there. 'callback' is supposed to
* call, or arrange for a call to, TCMalloc_ResumeAllProcessThreads. This
* happens automatically, if
* the thread raises a synchronous signal (e.g. SIGSEGV); asynchronous
* signals are blocked. If the 'callback' decides to unblock them, it must
* ensure that they cannot terminate the application, or that
* TCMalloc_ResumeAllProcessThreads will get called.
* It is an error for the 'callback' to make any library calls that could
* acquire locks. Most notably, this means that most system calls have to
* avoid going through libc. Also, this means that it is not legal to call
* exit() or abort().
* We return -1 on error and the return value of 'callback' on success.
*/
int TCMalloc_ListAllProcessThreads(void *parameter,
ListAllProcessThreadsCallBack callback, ...) {
char altstack_mem[ALT_STACKSIZE];
struct ListerParams args;
pid_t clone_pid;
int dumpable = 1;
int need_sigprocmask = 0;
sigset_t sig_blocked, sig_old;
int status, rc;
SetPTracerSetup ptracer_setup;
auto cleanup = MakeSimpleCleanup([&] () {
int old_errno = errno;
if (need_sigprocmask) {
sigprocmask(SIG_SETMASK, &sig_old, nullptr);
}
if (!dumpable) {
prctl(PR_SET_DUMPABLE, dumpable);
}
errno = old_errno;
});
va_start(args.ap, callback);
/* If we are short on virtual memory, initializing the alternate stack
* might trigger a SIGSEGV. Let's do this early, before it could get us
* into more trouble (i.e. before signal handlers try to use the alternate
* stack, and before we attach to other threads).
*/
memset(altstack_mem, 0, sizeof(altstack_mem));
/* Some of our cleanup functions could conceivably use more stack space.
 * Try to touch the stack right now. This could be defeated by the compiler
 * being too smart for its own good, so try really hard.
*/
DirtyStack(32768);
/* Make this process "dumpable". This is necessary in order to ptrace()
* after having called setuid().
*/
dumpable = prctl(PR_GET_DUMPABLE, 0);
if (!dumpable) {
prctl(PR_SET_DUMPABLE, 1);
}
/* Fill in argument block for dumper thread */
args.result = -1;
args.err = 0;
args.ppid = getpid();
args.altstack_mem = altstack_mem;
args.parameter = parameter;
args.callback = callback;
NO_INTR(args.proc_fd = open("/proc/self/task/", O_RDONLY|O_DIRECTORY|O_CLOEXEC));
UniqueFD proc_closer{args.proc_fd};
if (args.proc_fd < 0) {
return -1;
}
int pipefds[2];
if (pipe2(pipefds, O_CLOEXEC)) {
return -1;
}
UniqueFD pipe_rd_closer{pipefds[0]};
UniqueFD pipe_wr_closer{pipefds[1]};
args.start_pipe_rd = pipefds[0];
args.start_pipe_wr = pipefds[1];
/* Before cloning the thread lister, block all asynchronous signals, as we */
/* are not prepared to handle them. */
sigfillset(&sig_blocked);
for (int sig = 0; sig < sizeof(sync_signals)/sizeof(*sync_signals); sig++) {
sigdelset(&sig_blocked, sync_signals[sig]);
}
if (sigprocmask(SIG_BLOCK, &sig_blocked, &sig_old)) {
return -1;
}
need_sigprocmask = 1;
// Make sure all functions used by the parent, from local_clone to after
// waitpid, have their PLT entries fully initialized. We cannot afford the
// dynamic linker running relocations and messing with errno (see the
// comment just below).
(void)prctl(PR_GET_PDEATHSIG, 0);
(void)close(-1);
(void)waitpid(INT_MIN, nullptr, 0);
/* After cloning, both the parent and the child share the same
* instance of errno. We deal with this by being very
* careful. Specifically, child immediately calls into sem_wait
* which never fails (cannot even EINTR), so doesn't touch errno.
*
* Parent sets up PR_SET_PTRACER prctl (if it fails, which usually
* doesn't happen, we ignore that failure). Then parent does close
* on write side of start pipe. After that child runs complex code,
* including arbitrary callback. So parent avoids screwing with
* errno by immediately calling waitpid with async signals disabled.
*
* I.e. errno is parent's up until close below. Then errno belongs
* to child up until it exits.
*/
clone_pid = local_clone((int (*)(void *))ListerThread, &args);
if (clone_pid < 0) {
return -1;
}
/* Most Linux kernels in the wild have the Yama LSM enabled, which
 * requires us to explicitly give permission for the child to ptrace
 * us. See man 2 ptrace for details. This then requires us to
* synchronize with the child (see close on start pipe
* below). I.e. so that child doesn't start ptracing before we've
* completed this prctl call.
*/
ptracer_setup.Prepare(clone_pid);
/* Closing write side of pipe works like releasing the lock. It
* allows the ListerThread to run past read() call on read side of
* pipe and ptrace us.
*/
close(pipe_wr_closer.ReleaseFD());
/* So here child runs (see ListerThread), it finds and ptraces all
* threads, runs whatever callback is setup and then
* detaches/resumes everything. In any case we wait for child's
* completion to gather status and synchronize everything. */
rc = waitpid(clone_pid, &status, __WALL);
if (rc < 0) {
if (errno == EINTR) {
RAW_LOG(FATAL, "BUG: EINTR from waitpid shouldn't be possible!");
}
// Any error waiting for the child is a sign of some bug, so abort
// ASAP. Continuing is unsafe anyway, with the child potentially writing
// to our stack.
RAW_LOG(FATAL, "BUG: waitpid inside TCMalloc_ListAllProcessThreads cannot fail, but it did. Raw errno: %d\n", errno);
} else if (WIFEXITED(status)) {
errno = args.err;
switch (WEXITSTATUS(status)) {
case 0: break; /* Normal process termination */
case 2: args.err = EFAULT; /* Some fault (e.g. SIGSEGV) detected */
args.result = -1;
break;
case 3: args.err = EPERM; /* Process is already being traced */
args.result = -1;
break;
default:args.err = ECHILD; /* Child died unexpectedly */
args.result = -1;
break;
}
} else {
args.err = EFAULT; /* Terminated due to an unhandled signal*/
args.result = -1;
}
errno = args.err;
return args.result;
}
/* This function resumes the list of all linux threads that
* TCMalloc_ListAllProcessThreads pauses before giving to its callback.
* The function returns non-zero if at least one thread was
* suspended and has now been resumed.
*/
int TCMalloc_ResumeAllProcessThreads(int num_threads, pid_t *thread_pids) {
int detached_at_least_one = 0;
while (num_threads-- > 0) {
detached_at_least_one |= (ptrace_detach(thread_pids[num_threads]) >= 0);
}
return detached_at_least_one;
}
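/* Illustrative sketch (not part of this file's interface): a minimal
 * callback that records the number of suspended threads and resumes
 * them, as the contract above requires. All names are hypothetical.
 */
#if 0
static int CountThreadsCallback(void *parameter, int num_threads,
                                pid_t *thread_pids, va_list ap) {
  *static_cast<int *>(parameter) = num_threads;
  /* The callback must resume the threads (or arrange for it). */
  TCMalloc_ResumeAllProcessThreads(num_threads, thread_pids);
  return num_threads;
}

static int CountProcessThreads() {
  int count = 0;
  if (TCMalloc_ListAllProcessThreads(&count, CountThreadsCallback) < 0)
    return -1;
  return count;
}
#endif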

View File

@@ -0,0 +1,75 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2005-2007, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Markus Gutschke
*/
#ifndef _LINUXTHREADS_H
#define _LINUXTHREADS_H
#include <stdarg.h>
#include <sys/types.h>
typedef int (*ListAllProcessThreadsCallBack)(void *parameter,
int num_threads,
pid_t *thread_pids,
va_list ap);
/* This function gets the list of all linux threads of the current process
 * and passes them to the 'callback' along with the 'parameter' pointer; at
 * the time the callback is called, all the threads are paused via
 * PTRACE_ATTACH.
* The callback is executed from a separate thread which shares only the
* address space, the filesystem, and the filehandles with the caller. Most
* notably, it does not share the same pid and ppid; and if it terminates,
* the rest of the application is still there. 'callback' is supposed to
* call, or arrange for a call to, TCMalloc_ResumeAllProcessThreads. This
* happens automatically, if
* the thread raises a synchronous signal (e.g. SIGSEGV); asynchronous
* signals are blocked. If the 'callback' decides to unblock them, it must
* ensure that they cannot terminate the application, or that
* TCMalloc_ResumeAllProcessThreads will get called.
* It is an error for the 'callback' to make any library calls that could
* acquire locks. Most notably, this means that most system calls have to
* avoid going through libc. Also, this means that it is not legal to call
* exit() or abort().
* We return -1 on error and the return value of 'callback' on success.
*/
int TCMalloc_ListAllProcessThreads(void *parameter,
ListAllProcessThreadsCallBack callback, ...);
/* This function resumes the list of all linux threads that
* TCMalloc_ListAllProcessThreads pauses before giving to its
* callback. The function returns non-zero if at least one thread was
* suspended and has now been resumed.
*/
int TCMalloc_ResumeAllProcessThreads(int num_threads, pid_t *thread_pids);
#endif /* _LINUXTHREADS_H */

View File

@@ -0,0 +1,108 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2007, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// This file provides storage for FLAGS_verbose, plus the low-level
// "raw" file-IO routines declared in base/logging.h.
#include <config.h>
#include "base/logging.h"
#include "base/commandlineflags.h"
DEFINE_int32(verbose, EnvToInt("PERFTOOLS_VERBOSE", 0),
"Set to numbers >0 for more verbose output, or <0 for less. "
"--verbose == -4 means we log fatal errors only.");
#if defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__)
// While windows does have a POSIX-compatible API
// (_open/_write/_close), it allocates memory. Using this lower-level
// windows API is the closest we can get to being "raw".
RawFD RawOpenForWriting(const char* filename) {
// CreateFile allocates memory if file_name isn't absolute, so if
// that ever becomes a problem then we ought to compute the absolute
// path on its behalf (perhaps the ntdll/kernel function isn't aware
// of the working directory?)
RawFD fd = CreateFileA(filename, GENERIC_WRITE, 0, NULL,
CREATE_ALWAYS, 0, NULL);
if (fd != kIllegalRawFD && GetLastError() == ERROR_ALREADY_EXISTS)
SetEndOfFile(fd); // truncate the existing file
return fd;
}
void RawWrite(RawFD handle, const char* buf, size_t len) {
while (len > 0) {
DWORD wrote;
BOOL ok = WriteFile(handle, buf, len, &wrote, NULL);
// We do not use an asynchronous file handle, so ok==false means an error
if (!ok) break;
buf += wrote;
len -= wrote;
}
}
void RawClose(RawFD handle) {
CloseHandle(handle);
}
#else // _WIN32 || __CYGWIN__ || __CYGWIN32__
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif
// Re-run fn until it doesn't cause EINTR.
#define NO_INTR(fn) do {} while ((fn) < 0 && errno == EINTR)
RawFD RawOpenForWriting(const char* filename) {
return open(filename, O_WRONLY|O_CREAT|O_TRUNC, 0664);
}
void RawWrite(RawFD fd, const char* buf, size_t len) {
while (len > 0) {
ssize_t r;
NO_INTR(r = write(fd, buf, len));
if (r <= 0) break;
buf += r;
len -= r;
}
}
void RawClose(RawFD fd) {
close(fd);
}
#endif // _WIN32 || __CYGWIN__ || __CYGWIN32__

View File

@@ -0,0 +1,259 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// This file contains #include information about logging-related stuff.
// Pretty much everybody needs to #include this file so that they can
// log various happenings.
//
#ifndef _LOGGING_H_
#define _LOGGING_H_
#include <config.h>
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h> // for write()
#endif
#include <string.h> // for strlen(), strcmp()
#include <assert.h>
#include <errno.h> // for errno
#include "base/commandlineflags.h"
// On some systems (like freebsd), we can't call write() at all in a
// global constructor, perhaps because errno hasn't been set up.
// (In windows, we can't call it because it might call malloc.)
// Calling the write syscall is safer (it doesn't set errno), so we
// prefer that. Note we don't care about errno for logging: we just
// do logging on a best-effort basis.
#if defined(_MSC_VER)
#define WRITE_TO_STDERR(buf, len) WriteToStderr(buf, len); // in port.cc
#elif HAVE_SYS_SYSCALL_H && !defined(__APPLE__)
#include <sys/syscall.h>
#define WRITE_TO_STDERR(buf, len) syscall(SYS_write, STDERR_FILENO, buf, len)
#else
#define WRITE_TO_STDERR(buf, len) write(STDERR_FILENO, buf, len)
#endif
// MSVC and mingw define their own, safe version of vsnprintf (the
// windows one is broken) in port.cc. Everyone else can use the
// version here. We had to give it a unique name for windows.
#ifndef _WIN32
# define perftools_vsnprintf vsnprintf
#endif
// We log all messages at this log-level and below.
// INFO == -1, WARNING == -2, ERROR == -3, FATAL == -4
DECLARE_int32(verbose);
// CHECK dies with a fatal error if condition is not true. It is *not*
// controlled by NDEBUG, so the check will be executed regardless of
// compilation mode. Therefore, it is safe to do things like:
// CHECK(fp->Write(x) == 4)
// Note we use write instead of printf/puts to avoid the risk we'll
// call malloc().
#define CHECK(condition) \
do { \
if (!(condition)) { \
WRITE_TO_STDERR("Check failed: " #condition "\n", \
sizeof("Check failed: " #condition "\n")-1); \
abort(); \
} \
} while (0)
// This takes a message to print. The name is historical.
#define RAW_CHECK(condition, message) \
do { \
if (!(condition)) { \
WRITE_TO_STDERR("Check failed: " #condition ": " message "\n", \
sizeof("Check failed: " #condition ": " message "\n")-1);\
abort(); \
} \
} while (0)
// This is like RAW_CHECK, but only in debug-mode
#ifdef NDEBUG
enum { DEBUG_MODE = 0 };
#define RAW_DCHECK(condition, message)
#else
enum { DEBUG_MODE = 1 };
#define RAW_DCHECK(condition, message) RAW_CHECK(condition, message)
#endif
// This prints errno as well. Note we use write instead of printf/puts to
// avoid the risk we'll call malloc().
#define PCHECK(condition) \
do { \
if (!(condition)) { \
const int err_no = errno; \
WRITE_TO_STDERR("Check failed: " #condition ": ", \
sizeof("Check failed: " #condition ": ")-1); \
WRITE_TO_STDERR(strerror(err_no), strlen(strerror(err_no))); \
WRITE_TO_STDERR("\n", sizeof("\n")-1); \
abort(); \
} \
} while (0)
// Helper macro for binary operators; prints the two values on error
// Don't use this macro directly in your code, use CHECK_EQ et al below
// WARNING: These don't compile correctly if one of the arguments is a pointer
// and the other is NULL. To work around this, simply static_cast NULL to the
// type of the desired pointer.
// TODO(jandrews): Also print the values in case of failure. Requires some
// sort of type-sensitive ToString() function.
#define CHECK_OP(op, val1, val2) \
do { \
if (!((val1) op (val2))) { \
fprintf(stderr, "%s:%d Check failed: %s %s %s\n", __FILE__, __LINE__, #val1, #op, #val2); \
abort(); \
} \
} while (0)
#define CHECK_EQ(val1, val2) CHECK_OP(==, val1, val2)
#define CHECK_NE(val1, val2) CHECK_OP(!=, val1, val2)
#define CHECK_LE(val1, val2) CHECK_OP(<=, val1, val2)
#define CHECK_LT(val1, val2) CHECK_OP(< , val1, val2)
#define CHECK_GE(val1, val2) CHECK_OP(>=, val1, val2)
#define CHECK_GT(val1, val2) CHECK_OP(> , val1, val2)
// Synonyms for CHECK_* that are used in some unittests.
#define EXPECT_EQ(val1, val2) CHECK_EQ(val1, val2)
#define EXPECT_NE(val1, val2) CHECK_NE(val1, val2)
#define EXPECT_LE(val1, val2) CHECK_LE(val1, val2)
#define EXPECT_LT(val1, val2) CHECK_LT(val1, val2)
#define EXPECT_GE(val1, val2) CHECK_GE(val1, val2)
#define EXPECT_GT(val1, val2) CHECK_GT(val1, val2)
#define ASSERT_EQ(val1, val2) EXPECT_EQ(val1, val2)
#define ASSERT_NE(val1, val2) EXPECT_NE(val1, val2)
#define ASSERT_LE(val1, val2) EXPECT_LE(val1, val2)
#define ASSERT_LT(val1, val2) EXPECT_LT(val1, val2)
#define ASSERT_GE(val1, val2) EXPECT_GE(val1, val2)
#define ASSERT_GT(val1, val2) EXPECT_GT(val1, val2)
// As are these variants.
#define EXPECT_TRUE(cond) CHECK(cond)
#define EXPECT_FALSE(cond) CHECK(!(cond))
#define EXPECT_STREQ(a, b) CHECK(strcmp(a, b) == 0)
#define ASSERT_TRUE(cond) EXPECT_TRUE(cond)
#define ASSERT_FALSE(cond) EXPECT_FALSE(cond)
#define ASSERT_STREQ(a, b) EXPECT_STREQ(a, b)
// Used for (libc) functions that return -1 and set errno
#define CHECK_ERR(invocation) PCHECK((invocation) != -1)
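// Example usage (with hypothetical arguments):
//   CHECK_ERR(munmap(addr, size)); // aborts, printing strerror(errno), on -1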
// A few more checks that only happen in debug mode
#ifdef NDEBUG
#define DCHECK_EQ(val1, val2)
#define DCHECK_NE(val1, val2)
#define DCHECK_LE(val1, val2)
#define DCHECK_LT(val1, val2)
#define DCHECK_GE(val1, val2)
#define DCHECK_GT(val1, val2)
#else
#define DCHECK_EQ(val1, val2) CHECK_EQ(val1, val2)
#define DCHECK_NE(val1, val2) CHECK_NE(val1, val2)
#define DCHECK_LE(val1, val2) CHECK_LE(val1, val2)
#define DCHECK_LT(val1, val2) CHECK_LT(val1, val2)
#define DCHECK_GE(val1, val2) CHECK_GE(val1, val2)
#define DCHECK_GT(val1, val2) CHECK_GT(val1, val2)
#endif
#ifdef ERROR
#undef ERROR // may conflict with ERROR macro on windows
#endif
enum LogSeverity {INFO = -1, WARNING = -2, ERROR = -3, FATAL = -4};
// NOTE: we add a newline to the end of the output if it's not there already
inline void LogPrintf(int severity, const char* pat, va_list ap) {
// We write directly to the stderr file descriptor and avoid FILE
// buffering because that may invoke malloc()
char buf[600];
perftools_vsnprintf(buf, sizeof(buf)-1, pat, ap);
if (buf[0] != '\0' && buf[strlen(buf)-1] != '\n') {
assert(strlen(buf)+1 < sizeof(buf));
strcat(buf, "\n");
}
WRITE_TO_STDERR(buf, strlen(buf));
if ((severity) == FATAL)
abort(); // LOG(FATAL) indicates a big problem, so don't run atexit() calls
}
// Note that since the order of global constructors is unspecified,
// global code that calls RAW_LOG may execute before FLAGS_verbose is set.
// Such code will run with verbosity == 0 no matter what.
#define VLOG_IS_ON(severity) (FLAGS_verbose >= severity)
// In a better world, we'd use __VA_ARGS__, but VC++ 7 doesn't support it.
#define LOG_PRINTF(severity, pat) do { \
if (VLOG_IS_ON(severity)) { \
va_list ap; \
va_start(ap, pat); \
LogPrintf(severity, pat, ap); \
va_end(ap); \
} \
} while (0)
// RAW_LOG is the main function; some synonyms are used in unittests.
inline void RAW_LOG(int lvl, const char* pat, ...) { LOG_PRINTF(lvl, pat); }
inline void RAW_VLOG(int lvl, const char* pat, ...) { LOG_PRINTF(lvl, pat); }
inline void LOG(int lvl, const char* pat, ...) { LOG_PRINTF(lvl, pat); }
inline void VLOG(int lvl, const char* pat, ...) { LOG_PRINTF(lvl, pat); }
inline void LOG_IF(int lvl, bool cond, const char* pat, ...) {
if (cond) LOG_PRINTF(lvl, pat);
}
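// Example (printf-style, as used elsewhere in this commit; avoid format
// arguments that could allocate):
//   RAW_LOG(ERROR, "Got fatal signal %d inside ListerThread", signum);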
// This isn't technically logging, but it's also IO and also is an
// attempt to be "raw" -- that is, to not use any higher-level libc
// routines that might allocate memory or (ideally) try to acquire
// locks. We use an opaque file handle (not necessarily an int)
// to allow even more low-level stuff in the future.
// Like other "raw" routines, these functions are best effort, and
// thus don't return error codes (except RawOpenForWriting()).
#if defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__)
#ifndef NOMINMAX
#define NOMINMAX // @#!$& windows
#endif
#include <windows.h>
typedef HANDLE RawFD;
const RawFD kIllegalRawFD = INVALID_HANDLE_VALUE;
#else
typedef int RawFD;
const RawFD kIllegalRawFD = -1; // what open returns if it fails
#endif // defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__)
RawFD RawOpenForWriting(const char* filename); // uses default permissions
void RawWrite(RawFD fd, const char* buf, size_t len);
void RawClose(RawFD fd);
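// Illustrative usage sketch (hypothetical path; these routines are
// best-effort by design, so error handling is deliberately minimal):
//
//   RawFD fd = RawOpenForWriting("/tmp/profile.out");
//   if (fd != kIllegalRawFD) {
//     RawWrite(fd, "hello\n", 6);
//     RawClose(fd);
//   }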
#endif // _LOGGING_H_

View File

@@ -0,0 +1,561 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// A low-level allocator that can be used by other low-level
// modules without introducing dependency cycles.
// This allocator is slow and wasteful of memory;
// it should not be used when performance is key.
#include "base/low_level_alloc.h"
#include "base/dynamic_annotations.h"
#include "base/spinlock.h"
#include "base/logging.h"
#include "malloc_hook-inl.h"
#include <gperftools/malloc_hook.h>
#include "mmap_hook.h"
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <new> // for placement-new
// A first-fit allocator with amortized logarithmic free() time.
LowLevelAlloc::PagesAllocator::~PagesAllocator() {
}
// ---------------------------------------------------------------------------
static const int kMaxLevel = 30;
// We put this class-only struct in a namespace to avoid polluting the
// global namespace with this struct name (thus risking an ODR violation).
namespace low_level_alloc_internal {
// This struct describes one allocated block, or one free block.
struct AllocList {
struct Header {
intptr_t size; // size of entire region, including this field. Must be
// first. Valid in both allocated and unallocated blocks
intptr_t magic; // kMagicAllocated or kMagicUnallocated xor this
LowLevelAlloc::Arena *arena; // pointer to parent arena
void *dummy_for_alignment; // aligns regions to 0 mod 2*sizeof(void*)
} header;
// Next two fields: in unallocated blocks: freelist skiplist data
// in allocated blocks: overlaps with client data
int levels; // levels in skiplist used
AllocList *next[kMaxLevel]; // actually has levels elements.
// The AllocList node may not have room for
// all kMaxLevel entries. See max_fit in
// LLA_SkiplistLevels()
};
}
using low_level_alloc_internal::AllocList;
// ---------------------------------------------------------------------------
// A trivial skiplist implementation. This is used to keep the freelist
// in address order while taking only logarithmic time per insert and delete.
// An integer approximation of log2(size/base)
// Requires size >= base.
static int IntLog2(size_t size, size_t base) {
int result = 0;
for (size_t i = size; i > base; i >>= 1) { // i == floor(size/2**result)
result++;
}
// floor(size / 2**result) <= base < floor(size / 2**(result-1))
// => log2(size/(base+1)) <= result < 1+log2(size/base)
// => result ~= log2(size/base)
return result;
}
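// For example, IntLog2(4096, 16) == 8, since floor(4096 / 2**8) == 16.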
// Return a random integer n: p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1.
static int Random() {
static uint32 r = 1; // no locking---it's not critical
int result = 1;
while ((((r = r*1103515245 + 12345) >> 30) & 1) == 0) {
result++;
}
return result;
}
// Return a number of skiplist levels for a node of size bytes, where
// base is the minimum node size. Compute level=log2(size / base)+n
// where n is 1 if random is false and otherwise a random number generated with
// the standard distribution for a skiplist: See Random() above.
// Bigger nodes tend to have more skiplist levels due to the log2(size / base)
// term, so first-fit searches touch fewer nodes. "level" is clipped so
// level<kMaxLevel and next[level-1] will fit in the node.
// 0 < LLA_SkiplistLevels(x,y,false) <= LLA_SkiplistLevels(x,y,true) < kMaxLevel
static int LLA_SkiplistLevels(size_t size, size_t base, bool random) {
// max_fit is the maximum number of levels that will fit in a node for the
// given size. We can't return more than max_fit, no matter what the
// random number generator says.
int max_fit = (size-OFFSETOF_MEMBER(AllocList, next)) / sizeof (AllocList *);
int level = IntLog2(size, base) + (random? Random() : 1);
if (level > max_fit) level = max_fit;
if (level > kMaxLevel-1) level = kMaxLevel - 1;
RAW_CHECK(level >= 1, "block not big enough for even one level");
return level;
}
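// For example, with base == 16 and random == false, a 1024-byte block gets
// IntLog2(1024, 16) + 1 == 7 levels (max_fit does not clip a block this
// large on typical 64-bit builds).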
// Return "atleast", the first element of AllocList *head s.t. *atleast >= *e.
// For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater
// points to the last element at level i in the AllocList less than *e, or is
// head if no such element exists.
static AllocList *LLA_SkiplistSearch(AllocList *head,
AllocList *e, AllocList **prev) {
AllocList *p = head;
for (int level = head->levels - 1; level >= 0; level--) {
for (AllocList *n; (n = p->next[level]) != 0 && n < e; p = n) {
}
prev[level] = p;
}
return (head->levels == 0) ? 0 : prev[0]->next[0];
}
// Insert element *e into AllocList *head. Set prev[] as LLA_SkiplistSearch.
// Requires that e->levels be previously set by the caller (using
// LLA_SkiplistLevels())
static void LLA_SkiplistInsert(AllocList *head, AllocList *e,
AllocList **prev) {
LLA_SkiplistSearch(head, e, prev);
for (; head->levels < e->levels; head->levels++) { // extend prev pointers
prev[head->levels] = head; // to all *e's levels
}
for (int i = 0; i != e->levels; i++) { // add element to list
e->next[i] = prev[i]->next[i];
prev[i]->next[i] = e;
}
}
// Remove element *e from AllocList *head. Set prev[] as LLA_SkiplistSearch().
// Requires that e->levels be previous set by the caller (using
// LLA_SkiplistLevels())
static void LLA_SkiplistDelete(AllocList *head, AllocList *e,
AllocList **prev) {
AllocList *found = LLA_SkiplistSearch(head, e, prev);
RAW_CHECK(e == found, "element not in freelist");
for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) {
prev[i]->next[i] = e->next[i];
}
while (head->levels > 0 && head->next[head->levels - 1] == 0) {
head->levels--; // reduce head->levels if level unused
}
}
// ---------------------------------------------------------------------------
// Arena implementation
struct LowLevelAlloc::Arena {
Arena() : mu(SpinLock::LINKER_INITIALIZED) {} // does nothing; for static init
explicit Arena(int) : pagesize(0) {} // set pagesize to zero explicitly
// for non-static init
SpinLock mu; // protects freelist, allocation_count,
// pagesize, roundup, min_size
AllocList freelist; // head of free list; sorted by addr (under mu)
int32 allocation_count; // count of allocated blocks (under mu)
int32 flags; // flags passed to NewArena (ro after init)
size_t pagesize; // ==getpagesize() (init under mu, then ro)
size_t roundup; // lowest power of 2 >= max(16,sizeof (AllocList))
// (init under mu, then ro)
size_t min_size; // smallest allocation block size
// (init under mu, then ro)
PagesAllocator *allocator;
};
// The default arena, which is used when 0 is passed instead of an Arena
// pointer.
static struct LowLevelAlloc::Arena default_arena;
// Non-malloc-hooked arenas: used only to allocate metadata for arenas that
// do not want malloc hook reporting, so that for them there's no malloc hook
// reporting even during arena creation.
static struct LowLevelAlloc::Arena unhooked_arena;
static struct LowLevelAlloc::Arena unhooked_async_sig_safe_arena;
namespace {
class DefaultPagesAllocator : public LowLevelAlloc::PagesAllocator {
public:
virtual ~DefaultPagesAllocator() {}
virtual void *MapPages(int32 flags, size_t size);
virtual void UnMapPages(int32 flags, void *addr, size_t size);
};
}
// magic numbers to identify allocated and unallocated blocks
static const intptr_t kMagicAllocated = 0x4c833e95;
static const intptr_t kMagicUnallocated = ~kMagicAllocated;
namespace {
class SCOPED_LOCKABLE ArenaLock {
public:
explicit ArenaLock(LowLevelAlloc::Arena *arena)
EXCLUSIVE_LOCK_FUNCTION(arena->mu)
: left_(false), mask_valid_(false), arena_(arena) {
if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
// We've decided not to support async-signal-safe arena use until
// there is a demonstrated need. Here's how one could do it, though
// (it would need to be made more portable).
#if 0
sigset_t all;
sigfillset(&all);
this->mask_valid_ =
(pthread_sigmask(SIG_BLOCK, &all, &this->mask_) == 0);
#else
RAW_CHECK(false, "We do not yet support async-signal-safe arena.");
#endif
}
this->arena_->mu.Lock();
}
~ArenaLock() { RAW_CHECK(this->left_, "haven't left Arena region"); }
void Leave() UNLOCK_FUNCTION() {
this->arena_->mu.Unlock();
#if 0
if (this->mask_valid_) {
pthread_sigmask(SIG_SETMASK, &this->mask_, 0);
}
#endif
this->left_ = true;
}
private:
bool left_; // whether left region
bool mask_valid_;
#if 0
sigset_t mask_; // old mask of blocked signals
#endif
LowLevelAlloc::Arena *arena_;
DISALLOW_COPY_AND_ASSIGN(ArenaLock);
};
} // anonymous namespace
// create an appropriate magic number for an object at "ptr"
// "magic" should be kMagicAllocated or kMagicUnallocated
inline static intptr_t Magic(intptr_t magic, AllocList::Header *ptr) {
return magic ^ reinterpret_cast<intptr_t>(ptr);
}
// Initialize the fields of an Arena
static void ArenaInit(LowLevelAlloc::Arena *arena) {
if (arena->pagesize == 0) {
arena->pagesize = getpagesize();
// Round up block sizes to a power of two close to the header size.
arena->roundup = 16;
while (arena->roundup < sizeof (arena->freelist.header)) {
arena->roundup += arena->roundup;
}
// Don't allocate blocks less than twice the roundup size to avoid tiny
// free blocks.
arena->min_size = 2 * arena->roundup;
arena->freelist.header.size = 0;
arena->freelist.header.magic =
Magic(kMagicUnallocated, &arena->freelist.header);
arena->freelist.header.arena = arena;
arena->freelist.levels = 0;
memset(arena->freelist.next, 0, sizeof (arena->freelist.next));
arena->allocation_count = 0;
if (arena == &default_arena) {
// Default arena should be hooked, e.g. for heap-checker to trace
// pointer chains through objects in the default arena.
arena->flags = LowLevelAlloc::kCallMallocHook;
} else if (arena == &unhooked_async_sig_safe_arena) {
arena->flags = LowLevelAlloc::kAsyncSignalSafe;
} else {
arena->flags = 0; // other arenas' flags may be overridden by client,
// but unhooked_arena will have 0 in 'flags'.
}
arena->allocator = LowLevelAlloc::GetDefaultPagesAllocator();
}
}
// L < meta_data_arena->mu
LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32 flags,
Arena *meta_data_arena) {
return NewArenaWithCustomAlloc(flags, meta_data_arena, NULL);
}
// L < meta_data_arena->mu
LowLevelAlloc::Arena *LowLevelAlloc::NewArenaWithCustomAlloc(int32 flags,
Arena *meta_data_arena,
PagesAllocator *allocator) {
RAW_CHECK(meta_data_arena != 0, "must pass a valid arena");
if (meta_data_arena == &default_arena) {
if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
meta_data_arena = &unhooked_async_sig_safe_arena;
} else if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
meta_data_arena = &unhooked_arena;
}
}
// Arena(0) uses the constructor for non-static contexts
Arena *result =
new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(0);
ArenaInit(result);
result->flags = flags;
if (allocator) {
result->allocator = allocator;
}
return result;
}
// L < arena->mu, L < arena->arena->mu
bool LowLevelAlloc::DeleteArena(Arena *arena) {
RAW_CHECK(arena != 0 && arena != &default_arena && arena != &unhooked_arena,
"may not delete default arena");
ArenaLock section(arena);
bool empty = (arena->allocation_count == 0);
section.Leave();
if (empty) {
while (arena->freelist.next[0] != 0) {
AllocList *region = arena->freelist.next[0];
size_t size = region->header.size;
arena->freelist.next[0] = region->next[0];
RAW_CHECK(region->header.magic ==
Magic(kMagicUnallocated, &region->header),
"bad magic number in DeleteArena()");
RAW_CHECK(region->header.arena == arena,
"bad arena pointer in DeleteArena()");
RAW_CHECK(size % arena->pagesize == 0,
"empty arena has non-page-aligned block size");
RAW_CHECK(reinterpret_cast<intptr_t>(region) % arena->pagesize == 0,
"empty arena has non-page-aligned block");
int munmap_result = tcmalloc::DirectMUnMap((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0,
region, size);
RAW_CHECK(munmap_result == 0,
"LowLevelAlloc::DeleteArena: munmap failed address");
}
Free(arena);
}
return empty;
}
// ---------------------------------------------------------------------------
// Return value rounded up to next multiple of align.
// align must be a power of two.
static intptr_t RoundUp(intptr_t addr, intptr_t align) {
return (addr + align - 1) & ~(align - 1);
}
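// For example, RoundUp(23, 16) == 32 and RoundUp(32, 16) == 32.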
// Equivalent to "return prev->next[i]" but with sanity checking
// that the freelist is in the correct order, that it
// consists of regions marked "unallocated", and that no two regions
// are adjacent in memory (they should have been coalesced).
// L < arena->mu
static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) {
RAW_CHECK(i < prev->levels, "too few levels in Next()");
AllocList *next = prev->next[i];
if (next != 0) {
RAW_CHECK(next->header.magic == Magic(kMagicUnallocated, &next->header),
"bad magic number in Next()");
RAW_CHECK(next->header.arena == arena,
"bad arena pointer in Next()");
if (prev != &arena->freelist) {
RAW_CHECK(prev < next, "unordered freelist");
RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size <
reinterpret_cast<char *>(next), "malformed freelist");
}
}
return next;
}
// Coalesce list item "a" with its successor if they are adjacent.
static void Coalesce(AllocList *a) {
AllocList *n = a->next[0];
if (n != 0 && reinterpret_cast<char *>(a) + a->header.size ==
reinterpret_cast<char *>(n)) {
LowLevelAlloc::Arena *arena = a->header.arena;
a->header.size += n->header.size;
n->header.magic = 0;
n->header.arena = 0;
AllocList *prev[kMaxLevel];
LLA_SkiplistDelete(&arena->freelist, n, prev);
LLA_SkiplistDelete(&arena->freelist, a, prev);
a->levels = LLA_SkiplistLevels(a->header.size, arena->min_size, true);
LLA_SkiplistInsert(&arena->freelist, a, prev);
}
}
// Adds block at location "v" to the free list
// L >= arena->mu
static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) {
AllocList *f = reinterpret_cast<AllocList *>(
reinterpret_cast<char *>(v) - sizeof (f->header));
RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
"bad magic number in AddToFreelist()");
RAW_CHECK(f->header.arena == arena,
"bad arena pointer in AddToFreelist()");
f->levels = LLA_SkiplistLevels(f->header.size, arena->min_size, true);
AllocList *prev[kMaxLevel];
LLA_SkiplistInsert(&arena->freelist, f, prev);
f->header.magic = Magic(kMagicUnallocated, &f->header);
Coalesce(f); // maybe coalesce with successor
Coalesce(prev[0]); // maybe coalesce with predecessor
}
// Frees storage allocated by LowLevelAlloc::Alloc().
// L < arena->mu
void LowLevelAlloc::Free(void *v) {
if (v != 0) {
AllocList *f = reinterpret_cast<AllocList *>(
reinterpret_cast<char *>(v) - sizeof (f->header));
RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
"bad magic number in Free()");
LowLevelAlloc::Arena *arena = f->header.arena;
if ((arena->flags & kCallMallocHook) != 0) {
MallocHook::InvokeDeleteHook(v);
}
ArenaLock section(arena);
AddToFreelist(v, arena);
RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
arena->allocation_count--;
section.Leave();
}
}
// allocates and returns a block of size bytes, to be freed with Free()
// L < arena->mu
static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
void *result = 0;
if (request != 0) {
AllocList *s; // will point to region that satisfies request
ArenaLock section(arena);
ArenaInit(arena);
// round up with header
size_t req_rnd = RoundUp(request + sizeof (s->header), arena->roundup);
for (;;) { // loop until we find a suitable region
// find the minimum levels that a block of this size must have
int i = LLA_SkiplistLevels(req_rnd, arena->min_size, false) - 1;
if (i < arena->freelist.levels) { // potential blocks exist
AllocList *before = &arena->freelist; // predecessor of s
while ((s = Next(i, before, arena)) != 0 && s->header.size < req_rnd) {
before = s;
}
if (s != 0) { // we found a region
break;
}
}
// we unlock before mmap() both because mmap() may call a callback hook,
// and because it may be slow.
arena->mu.Unlock();
// mmap generous 64K chunks to decrease
// the chances/impact of fragmentation:
size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
void *new_pages = arena->allocator->MapPages(arena->flags, new_pages_size);
arena->mu.Lock();
s = reinterpret_cast<AllocList *>(new_pages);
s->header.size = new_pages_size;
// Pretend the block is allocated; call AddToFreelist() to free it.
s->header.magic = Magic(kMagicAllocated, &s->header);
s->header.arena = arena;
AddToFreelist(&s->levels, arena); // insert new region into free list
}
AllocList *prev[kMaxLevel];
LLA_SkiplistDelete(&arena->freelist, s, prev); // remove from free list
// s points to the first free region that's big enough
if (req_rnd + arena->min_size <= s->header.size) { // big enough to split
AllocList *n = reinterpret_cast<AllocList *>
(req_rnd + reinterpret_cast<char *>(s));
n->header.size = s->header.size - req_rnd;
n->header.magic = Magic(kMagicAllocated, &n->header);
n->header.arena = arena;
s->header.size = req_rnd;
AddToFreelist(&n->levels, arena);
}
s->header.magic = Magic(kMagicAllocated, &s->header);
RAW_CHECK(s->header.arena == arena, "");
arena->allocation_count++;
section.Leave();
result = &s->levels;
}
return result;
}
void *LowLevelAlloc::Alloc(size_t request) {
void *result = DoAllocWithArena(request, &default_arena);
if ((default_arena.flags & kCallMallocHook) != 0) {
// this call must be directly in the user-called allocator function
// for MallocHook::GetCallerStackTrace to work properly
MallocHook::InvokeNewHook(result, request);
}
return result;
}
void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
RAW_CHECK(arena != 0, "must pass a valid arena");
void *result = DoAllocWithArena(request, arena);
if ((arena->flags & kCallMallocHook) != 0) {
// this call must be directly in the user-called allocator function
// for MallocHook::GetCallerStackTrace to work properly
MallocHook::InvokeNewHook(result, request);
}
return result;
}
LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
return &default_arena;
}
static DefaultPagesAllocator *default_pages_allocator;
static union {
char chars[sizeof(DefaultPagesAllocator)];
void *ptr;
} debug_pages_allocator_space;
LowLevelAlloc::PagesAllocator *LowLevelAlloc::GetDefaultPagesAllocator(void) {
if (default_pages_allocator) {
return default_pages_allocator;
}
default_pages_allocator = new (debug_pages_allocator_space.chars) DefaultPagesAllocator();
return default_pages_allocator;
}
void *DefaultPagesAllocator::MapPages(int32 flags, size_t size) {
const bool invoke_hooks = ((flags & LowLevelAlloc::kAsyncSignalSafe) == 0);
auto result = tcmalloc::DirectAnonMMap(invoke_hooks, size);
RAW_CHECK(result.success, "mmap error");
return result.addr;
}
void DefaultPagesAllocator::UnMapPages(int32 flags, void *region, size_t size) {
const bool invoke_hooks = ((flags & LowLevelAlloc::kAsyncSignalSafe) == 0);
int munmap_result = tcmalloc::DirectMUnMap(invoke_hooks, region, size);
RAW_CHECK(munmap_result == 0,
"LowLevelAlloc::DeleteArena: munmap failed address");
}

View File

@@ -0,0 +1,130 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#if !defined(_BASE_LOW_LEVEL_ALLOC_H_)
#define _BASE_LOW_LEVEL_ALLOC_H_
// A simple thread-safe memory allocator that does not depend on
// mutexes or thread-specific data. It is intended to be used
// sparingly, and only when malloc() would introduce an unwanted
// dependency, such as inside the heap-checker.
#include <config.h>
#include <stddef.h> // for size_t
#include "base/basictypes.h"
#ifndef __APPLE__
// As of now, whatever clang version apple ships (clang-1205.0.22.11)
// somehow miscompiles LowLevelAlloc when we try this section
// thingy. Thankfully, we only need this section stuff for the heap
// leak checker, which is Linux-only anyway.
#define ATTR_MALLOC_SECTION ATTRIBUTE_SECTION(malloc_hook)
#else
#define ATTR_MALLOC_SECTION
#endif
class LowLevelAlloc {
public:
class PagesAllocator {
public:
virtual ~PagesAllocator();
virtual void *MapPages(int32 flags, size_t size) = 0;
virtual void UnMapPages(int32 flags, void *addr, size_t size) = 0;
};
static PagesAllocator *GetDefaultPagesAllocator(void);
struct Arena; // an arena from which memory may be allocated
// Returns a pointer to a block of at least "request" bytes
// that has been newly allocated from the specified arena.
// For the Alloc() call, DefaultArena() is used.
// Returns 0 if passed request==0.
// Does not return 0 under other circumstances; it crashes if memory
// is not available.
static void *Alloc(size_t request)
ATTR_MALLOC_SECTION;
static void *AllocWithArena(size_t request, Arena *arena)
ATTR_MALLOC_SECTION;
// Deallocates a region of memory that was previously allocated with
// Alloc(). Does nothing if passed 0. "s" must be either 0,
// or must have been returned from a call to Alloc() and not yet passed to
// Free() since that call to Alloc(). The space is returned to the arena
// from which it was allocated.
static void Free(void *s) ATTR_MALLOC_SECTION;
// The ATTR_MALLOC_SECTION annotations on Alloc* and Free
// put all callers of MallocHook::Invoke* in this module
// into a special section,
// so that MallocHook::GetCallerStackTrace can function accurately.
// Create a new arena.
// The root metadata for the new arena is allocated in the
// meta_data_arena; the DefaultArena() can be passed for meta_data_arena.
// These values may be ORed into flags:
enum {
// Report calls to Alloc() and Free() via the MallocHook interface.
// Set in the DefaultArena.
kCallMallocHook = 0x0001,
// Make calls to Alloc(), Free() be async-signal-safe. Not set in
// DefaultArena().
kAsyncSignalSafe = 0x0002,
// When used with DefaultArena(), the NewArena() and DeleteArena() calls
// obey the flags given explicitly in the NewArena() call, even if those
// flags differ from the settings in DefaultArena(). So the call
// NewArena(kAsyncSignalSafe, DefaultArena()) is itself async-signal-safe,
// as well as generating an arena that provides async-signal-safe
// Alloc/Free.
};
static Arena *NewArena(int32 flags, Arena *meta_data_arena);
// Note: the pages allocator will never be destroyed and allocated pages
// will never be freed.
// When allocator is NULL, this is the same as NewArena().
static Arena *NewArenaWithCustomAlloc(int32 flags, Arena *meta_data_arena, PagesAllocator *allocator);
// Destroys an arena allocated by NewArena and returns true,
// provided no allocated blocks remain in the arena.
// If allocated blocks remain in the arena, does nothing and
// returns false.
// It is illegal to attempt to destroy the DefaultArena().
static bool DeleteArena(Arena *arena);
// The default arena that always exists.
static Arena *DefaultArena();
private:
LowLevelAlloc(); // no instances
};
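// Illustrative usage sketch (comment only; names are hypothetical):
//
//   LowLevelAlloc::Arena *arena =
//       LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());
//   void *p = LowLevelAlloc::AllocWithArena(128, arena);
//   // ... use the block ...
//   LowLevelAlloc::Free(p);            // returns the block to 'arena'
//   LowLevelAlloc::DeleteArena(arena); // returns true: no blocks remain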
#endif

View File

@@ -0,0 +1,332 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2007, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// ---
// Author: Craig Silverstein.
//
// A simple mutex wrapper, supporting locks and read-write locks.
// You should assume the locks are *not* re-entrant.
//
// To use: you should define the following macros in your configure.ac:
// ACX_PTHREAD
// AC_RWLOCK
// The latter is defined in ../autoconf.
//
// This class is meant to be internal-only and should be wrapped by an
// internal namespace. Before you use this module, please give the
// name of your internal namespace for this module. Or, if you want
// to expose it, you'll want to move it to the Google namespace. We
// cannot put this class in global namespace because there can be some
// problems when we have multiple versions of Mutex in each shared object.
//
// NOTE: TryLock() is broken for NO_THREADS mode, at least in NDEBUG
// mode.
//
// CYGWIN NOTE: Cygwin support for rwlock seems to be buggy:
// http://www.cygwin.com/ml/cygwin/2008-12/msg00017.html
// Because of that, we might as well use windows locks for
// cygwin. They seem to be more reliable than the cygwin pthreads layer.
//
// TRICKY IMPLEMENTATION NOTE:
// This class is designed to be safe to use during
// dynamic-initialization -- that is, by global constructors that are
// run before main() starts. The issue in this case is that
// dynamic-initialization happens in an unpredictable order, and it
// could be that someone else's dynamic initializer could call a
// function that tries to acquire this mutex -- but that all happens
// before this mutex's constructor has run. (This can happen even if
// the mutex and the function that uses the mutex are in the same .cc
// file.) Basically, because Mutex does non-trivial work in its
// constructor, it's not, in the naive implementation, safe to use
// before dynamic initialization has run on it.
//
// The solution used here is to pair the actual mutex primitive with a
// bool that is set to true when the mutex is dynamically initialized.
// (Before that it's false.) Then we modify all mutex routines to
// look at the bool, and not try to lock/unlock until the bool makes
// it to true (which happens after the Mutex constructor has run.)
//
// This works because before main() starts -- particularly, during
// dynamic initialization -- there are no threads, so a) it's ok that
// the mutex operations are a no-op, since we don't need locking then
// anyway; and b) we can be quite confident our bool won't change
// state between a call to Lock() and a call to Unlock() (that would
// require a global constructor in one translation unit to call Lock()
// and another global constructor in another translation unit to call
// Unlock() later, which is pretty perverse).
//
// That said, it's tricky, and can conceivably fail; it's safest to
// avoid trying to acquire a mutex in a global constructor, if you
// can. One way it can fail is that a really smart compiler might
// initialize the bool to true at static-initialization time (too
// early) rather than at dynamic-initialization time. To discourage
// that, we set is_safe_ to true in code (not the constructor
// colon-initializer), via a function that always evaluates to true
// but that the compiler can't know always evaluates to true. This
// should be good enough.
//
// A related issue is code that could try to access the mutex
// after it's been destroyed in the global destructors (because
// the Mutex global destructor runs before some other global
// destructor, that tries to acquire the mutex). The way we
// deal with this is by taking a constructor arg that global
// mutexes should pass in, that causes the destructor to do no
// work. We still depend on the compiler not doing anything
// weird to a Mutex's memory after it is destroyed, but for a
// static global variable, that's pretty safe.
#ifndef GOOGLE_MUTEX_H_
#define GOOGLE_MUTEX_H_
#include <config.h>
#if defined(NO_THREADS)
typedef int MutexType; // to keep a lock-count
#elif defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__)
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN // We only need minimal includes
# endif
// We need Windows NT or later for TryEnterCriticalSection(). If you
// don't need that functionality, you can remove these _WIN32_WINNT
// lines, and change TryLock() to assert(0) or something.
# ifndef _WIN32_WINNT
# define _WIN32_WINNT 0x0400
# endif
# include <windows.h>
typedef CRITICAL_SECTION MutexType;
#elif defined(HAVE_PTHREAD) && defined(HAVE_RWLOCK)
// Needed for pthread_rwlock_*. If it causes problems, you could take it
// out, but then you'd have to unset HAVE_RWLOCK (at least on linux -- it
// *does* cause problems for FreeBSD, or MacOSX, but isn't needed
// for locking there.)
# ifdef __linux__
# define _XOPEN_SOURCE 500 // may be needed to get the rwlock calls
# endif
# include <pthread.h>
typedef pthread_rwlock_t MutexType;
#elif defined(HAVE_PTHREAD)
# include <pthread.h>
typedef pthread_mutex_t MutexType;
#else
# error Need to implement mutex.h for your architecture, or #define NO_THREADS
#endif
#include <assert.h>
#include <stdlib.h> // for abort()
#define MUTEX_NAMESPACE perftools_mutex_namespace
namespace MUTEX_NAMESPACE {
class Mutex {
public:
// This is used for the single-arg constructor
enum LinkerInitialized { LINKER_INITIALIZED };
// Create a Mutex that is not held by anybody. This constructor is
// typically used for Mutexes allocated on the heap or the stack.
inline Mutex();
// This constructor should be used for global, static Mutex objects.
// It inhibits work being done by the destructor, which makes it
// safer for code that tries to acquire this mutex in its global
// destructor.
inline Mutex(LinkerInitialized);
// Destructor
inline ~Mutex();
inline void Lock(); // Block if needed until free then acquire exclusively
inline void Unlock(); // Release a lock acquired via Lock()
inline bool TryLock(); // If free, Lock() and return true, else return false
// Note that on systems that don't support read-write locks, these may
// be implemented as synonyms to Lock() and Unlock(). So you can use
// these for efficiency, but don't use them anyplace where being able
// to do shared reads is necessary to avoid deadlock.
inline void ReaderLock(); // Block until free or shared then acquire a share
inline void ReaderUnlock(); // Release a read share of this Mutex
inline void WriterLock() { Lock(); } // Acquire an exclusive lock
inline void WriterUnlock() { Unlock(); } // Release a lock from WriterLock()
private:
MutexType mutex_;
// We want to make sure that the compiler sets is_safe_ to true only
// when we tell it to, and never assumes it is always true. volatile
// is the most reliable way to do that.
volatile bool is_safe_;
// This indicates which constructor was called.
bool destroy_;
inline void SetIsSafe() { is_safe_ = true; }
// Catch the error of writing Mutex when intending MutexLock.
Mutex(Mutex* /*ignored*/) {}
// Disallow "evil" constructors
Mutex(const Mutex&);
void operator=(const Mutex&);
};
// Now the implementation of Mutex for various systems
#if defined(NO_THREADS)
// When we don't have threads, we can be either reading or writing,
// but not both. We can have lots of readers at once (in no-threads
// mode, that's most likely to happen in recursive function calls),
// but only one writer. We represent this by having mutex_ be -1 when
// writing and a number > 0 when reading (and 0 when no lock is held).
//
// In debug mode, we assert these invariants, while in non-debug mode
// we do nothing, for efficiency. That's why everything is in an
// assert.
Mutex::Mutex() : mutex_(0) { }
Mutex::Mutex(Mutex::LinkerInitialized) : mutex_(0) { }
Mutex::~Mutex() { assert(mutex_ == 0); }
void Mutex::Lock() { assert(--mutex_ == -1); }
void Mutex::Unlock() { assert(mutex_++ == -1); }
bool Mutex::TryLock() { if (mutex_) return false; Lock(); return true; }
void Mutex::ReaderLock() { assert(++mutex_ > 0); }
void Mutex::ReaderUnlock() { assert(mutex_-- > 0); }
#elif defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__)
Mutex::Mutex() : destroy_(true) {
InitializeCriticalSection(&mutex_);
SetIsSafe();
}
Mutex::Mutex(LinkerInitialized) : destroy_(false) {
InitializeCriticalSection(&mutex_);
SetIsSafe();
}
Mutex::~Mutex() { if (destroy_) DeleteCriticalSection(&mutex_); }
void Mutex::Lock() { if (is_safe_) EnterCriticalSection(&mutex_); }
void Mutex::Unlock() { if (is_safe_) LeaveCriticalSection(&mutex_); }
bool Mutex::TryLock() { return is_safe_ ?
TryEnterCriticalSection(&mutex_) != 0 : true; }
void Mutex::ReaderLock() { Lock(); } // we don't have read-write locks
void Mutex::ReaderUnlock() { Unlock(); }
#elif defined(HAVE_PTHREAD) && defined(HAVE_RWLOCK)
#define SAFE_PTHREAD(fncall) do { /* run fncall if is_safe_ is true */ \
if (is_safe_ && fncall(&mutex_) != 0) abort(); \
} while (0)
Mutex::Mutex() : destroy_(true) {
SetIsSafe();
if (is_safe_ && pthread_rwlock_init(&mutex_, NULL) != 0) abort();
}
Mutex::Mutex(Mutex::LinkerInitialized) : destroy_(false) {
SetIsSafe();
if (is_safe_ && pthread_rwlock_init(&mutex_, NULL) != 0) abort();
}
Mutex::~Mutex() { if (destroy_) SAFE_PTHREAD(pthread_rwlock_destroy); }
void Mutex::Lock() { SAFE_PTHREAD(pthread_rwlock_wrlock); }
void Mutex::Unlock() { SAFE_PTHREAD(pthread_rwlock_unlock); }
bool Mutex::TryLock() { return is_safe_ ?
pthread_rwlock_trywrlock(&mutex_) == 0 : true; }
void Mutex::ReaderLock() { SAFE_PTHREAD(pthread_rwlock_rdlock); }
void Mutex::ReaderUnlock() { SAFE_PTHREAD(pthread_rwlock_unlock); }
#undef SAFE_PTHREAD
#elif defined(HAVE_PTHREAD)
#define SAFE_PTHREAD(fncall) do { /* run fncall if is_safe_ is true */ \
if (is_safe_ && fncall(&mutex_) != 0) abort(); \
} while (0)
Mutex::Mutex() : destroy_(true) {
SetIsSafe();
if (is_safe_ && pthread_mutex_init(&mutex_, NULL) != 0) abort();
}
Mutex::Mutex(Mutex::LinkerInitialized) : destroy_(false) {
SetIsSafe();
if (is_safe_ && pthread_mutex_init(&mutex_, NULL) != 0) abort();
}
Mutex::~Mutex() { if (destroy_) SAFE_PTHREAD(pthread_mutex_destroy); }
void Mutex::Lock() { SAFE_PTHREAD(pthread_mutex_lock); }
void Mutex::Unlock() { SAFE_PTHREAD(pthread_mutex_unlock); }
bool Mutex::TryLock() { return is_safe_ ?
pthread_mutex_trylock(&mutex_) == 0 : true; }
void Mutex::ReaderLock() { Lock(); }
void Mutex::ReaderUnlock() { Unlock(); }
#undef SAFE_PTHREAD
#endif
// --------------------------------------------------------------------------
// Some helper classes
// MutexLock(mu) acquires mu when constructed and releases it when destroyed.
class MutexLock {
public:
explicit MutexLock(Mutex *mu) : mu_(mu) { mu_->Lock(); }
~MutexLock() { mu_->Unlock(); }
private:
Mutex * const mu_;
// Disallow "evil" constructors
MutexLock(const MutexLock&);
void operator=(const MutexLock&);
};
// ReaderMutexLock and WriterMutexLock do the same, for rwlocks
class ReaderMutexLock {
public:
explicit ReaderMutexLock(Mutex *mu) : mu_(mu) { mu_->ReaderLock(); }
~ReaderMutexLock() { mu_->ReaderUnlock(); }
private:
Mutex * const mu_;
// Disallow "evil" constructors
ReaderMutexLock(const ReaderMutexLock&);
void operator=(const ReaderMutexLock&);
};
class WriterMutexLock {
public:
explicit WriterMutexLock(Mutex *mu) : mu_(mu) { mu_->WriterLock(); }
~WriterMutexLock() { mu_->WriterUnlock(); }
private:
Mutex * const mu_;
// Disallow "evil" constructors
WriterMutexLock(const WriterMutexLock&);
void operator=(const WriterMutexLock&);
};
// Catch bug where variable name is omitted, e.g. MutexLock (&mu);
#define MutexLock(x) COMPILE_ASSERT(0, mutex_lock_decl_missing_var_name)
#define ReaderMutexLock(x) COMPILE_ASSERT(0, rmutex_lock_decl_missing_var_name)
#define WriterMutexLock(x) COMPILE_ASSERT(0, wmutex_lock_decl_missing_var_name)
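// Usage sketch (illustrative only; "mu" and Increment are hypothetical):
//
//   static Mutex mu(Mutex::LINKER_INITIALIZED);
//   void Increment(int* counter) {
//     MutexLock l(&mu);  // acquired here, released when l goes out of scope
//     ++*counter;
//   }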
} // namespace MUTEX_NAMESPACE
using namespace MUTEX_NAMESPACE;
#undef MUTEX_NAMESPACE
#endif /* GOOGLE_MUTEX_H_ */

View File

@@ -0,0 +1,144 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Sanjay Ghemawat
*/
#include <config.h>
#include "base/spinlock.h"
#include "base/spinlock_internal.h"
#include "base/sysinfo.h" /* for GetSystemCPUsCount() */
// NOTE on the Lock-state values:
//
// kSpinLockFree represents the unlocked state
// kSpinLockHeld represents the locked state with no waiters
// kSpinLockSleeper represents the locked state with waiters
static int adaptive_spin_count = 0;
const base::LinkerInitialized SpinLock::LINKER_INITIALIZED =
base::LINKER_INITIALIZED;
namespace {
struct SpinLock_InitHelper {
SpinLock_InitHelper() {
// On multi-cpu machines, spin for longer before yielding
// the processor or sleeping. Reduces idle time significantly.
if (GetSystemCPUsCount() > 1) {
adaptive_spin_count = 1000;
}
}
};
// Hook into global constructor execution:
// We do not do adaptive spinning before that,
// but nothing lock-intensive should be going on at that time.
static SpinLock_InitHelper init_helper;
inline void SpinlockPause(void) {
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
__asm__ __volatile__("rep; nop" : : );
#elif defined(__GNUC__) && defined(__aarch64__)
__asm__ __volatile__("isb" : : );
#endif
}
} // unnamed namespace
// Monitor the lock to see if its value changes within some time
// period (adaptive_spin_count loop iterations). The last value read
// from the lock is returned from the method.
int SpinLock::SpinLoop() {
int c = adaptive_spin_count;
while (lockword_.load(std::memory_order_relaxed) != kSpinLockFree && --c > 0) {
SpinlockPause();
}
int old = kSpinLockFree;
lockword_.compare_exchange_strong(old, kSpinLockSleeper, std::memory_order_acquire);
// Note: our attempt to set the lock word to the 'have sleeper' state
// might look unnecessary, but:
//
// *) pay attention to the second call to SpinLoop at the bottom of the
// SlowLock loop below
//
// *) note that we get there after sleeping in SpinLockDelay and
// being woken by Unlock
//
// *) also note that we don't "count" sleepers, so when Unlock wakes
// us, it also sets the lock word to "free" and we risk forgetting
// other sleepers. To prevent this, we become the "designated waker"
// by setting the lock word to "have sleeper"; then when we unlock,
// we also wake someone up.
return old;
}
void SpinLock::SlowLock() {
int lock_value = SpinLoop();
int lock_wait_call_count = 0;
while (lock_value != kSpinLockFree) {
// If the lock is currently held, but not marked as having a sleeper, mark
// it as having a sleeper.
if (lock_value == kSpinLockHeld) {
// Here, just "mark" that the thread is going to sleep. Don't
// store the lock wait time in the lock as that will cause the
// current lock owner to think it experienced contention. Note,
// compare_exchange updates lock_value with previous value of
// lock word.
lockword_.compare_exchange_strong(lock_value, kSpinLockSleeper,
std::memory_order_acquire);
if (lock_value == kSpinLockHeld) {
// Successfully transitioned to kSpinLockSleeper. Pass
// kSpinLockSleeper to the SpinLockDelay routine to properly indicate
// the last lock_value observed.
lock_value = kSpinLockSleeper;
} else if (lock_value == kSpinLockFree) {
// Lock is free again, so try and acquire it before sleeping. The
// new lock state will be the number of cycles this thread waited if
// this thread obtains the lock.
lockword_.compare_exchange_strong(lock_value, kSpinLockSleeper, std::memory_order_acquire);
continue; // skip the delay at the end of the loop
}
}
// Wait for an OS specific delay.
base::internal::SpinLockDelay(&lockword_, lock_value,
++lock_wait_call_count);
// Spin again after returning from the wait routine to give this thread
// some chance of obtaining the lock.
lock_value = SpinLoop();
}
}
void SpinLock::SlowUnlock() {
// wake waiter if necessary
base::internal::SpinLockWake(&lockword_, false);
}

View File

@@ -0,0 +1,166 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Sanjay Ghemawat
*/
// SpinLock is async signal safe.
// If used within a signal handler, all lock holders
// should block the signal even outside the signal handler.
#ifndef BASE_SPINLOCK_H_
#define BASE_SPINLOCK_H_
#include <config.h>
#include <atomic>
#include <type_traits>
#include "base/basictypes.h"
#include "base/dynamic_annotations.h"
#include "base/thread_annotations.h"
class LOCKABLE SpinLock {
public:
SpinLock() : lockword_(kSpinLockFree) { }
// Special constructor for use with static SpinLock objects. E.g.,
//
// static SpinLock lock(base::LINKER_INITIALIZED);
//
// When initialized using this constructor, we depend on the fact
// that the linker has already initialized the memory appropriately.
// A SpinLock constructed like this can be freely used from global
// initializers without worrying about the order in which global
// initializers run.
explicit SpinLock(base::LinkerInitialized /*x*/) {
// Does nothing; lockword_ is already initialized
}
// Acquire this SpinLock.
void Lock() EXCLUSIVE_LOCK_FUNCTION() {
int old = kSpinLockFree;
if (!lockword_.compare_exchange_weak(old, kSpinLockHeld, std::memory_order_acquire)) {
SlowLock();
}
}
// Try to acquire this SpinLock without blocking and return true if the
// acquisition was successful. If the lock was not acquired, false is
// returned. If this SpinLock is free at the time of the call, TryLock
// will return true with high probability.
bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true) {
int old = kSpinLockFree;
return lockword_.compare_exchange_weak(old, kSpinLockHeld);
}
// Release this SpinLock, which must be held by the calling thread.
void Unlock() UNLOCK_FUNCTION() {
int prev_value = lockword_.exchange(kSpinLockFree, std::memory_order_release);
if (prev_value != kSpinLockHeld) {
// Speed the wakeup of any waiter.
SlowUnlock();
}
}
// Determine if the lock is held. When the lock is held by the invoking
// thread, true will always be returned. Intended to be used as
// CHECK(lock.IsHeld()).
bool IsHeld() const {
return lockword_.load(std::memory_order_relaxed) != kSpinLockFree;
}
static const base::LinkerInitialized LINKER_INITIALIZED; // backwards compat
private:
enum { kSpinLockFree = 0 };
enum { kSpinLockHeld = 1 };
enum { kSpinLockSleeper = 2 };
std::atomic<int> lockword_;
void SlowLock();
void SlowUnlock();
int SpinLoop();
DISALLOW_COPY_AND_ASSIGN(SpinLock);
};
// Corresponding locker object that arranges to acquire a spinlock for
// the duration of a C++ scope.
class SCOPED_LOCKABLE SpinLockHolder {
private:
SpinLock* lock_;
public:
explicit SpinLockHolder(SpinLock* l) EXCLUSIVE_LOCK_FUNCTION(l)
: lock_(l) {
l->Lock();
}
SpinLockHolder(const SpinLockHolder&) = delete;
~SpinLockHolder() UNLOCK_FUNCTION() {
lock_->Unlock();
}
};
// Catch bug where variable name is omitted, e.g. SpinLockHolder (&lock);
#define SpinLockHolder(x) COMPILE_ASSERT(0, spin_lock_decl_missing_var_name)
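// Usage sketch (illustrative only; "lock" and Touch are hypothetical):
//
//   static SpinLock lock(base::LINKER_INITIALIZED);
//   void Touch(int* counter) {
//     SpinLockHolder h(&lock);  // released when h goes out of scope
//     ++*counter;               // keep the critical section short
//   }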
namespace tcmalloc {
class TrivialOnce {
public:
template <typename Body>
bool RunOnce(Body body) {
auto done_atomic = reinterpret_cast<std::atomic<int>*>(&done_flag_);
if (done_atomic->load(std::memory_order_acquire) == 1) {
return false;
}
SpinLockHolder h(reinterpret_cast<SpinLock*>(&lock_storage_));
if (done_atomic->load(std::memory_order_relaxed) == 1) {
// barrier provided by lock
return false;
}
body();
done_atomic->store(1, std::memory_order_release);
return true;
}
private:
int done_flag_;
alignas(alignof(SpinLock)) char lock_storage_[sizeof(SpinLock)];
};
static_assert(std::is_trivial<TrivialOnce>::value == true, "");
} // namespace tcmalloc
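// Usage sketch (illustrative only; "once" and EnsureInit are hypothetical).
// TrivialOnce is deliberately trivial so a static instance is
// zero-initialized and usable with no constructor-ordering concerns:
//
//   static tcmalloc::TrivialOnce once;
//   void EnsureInit() {
//     once.RunOnce([] { /* one-time setup */ });
//   }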
#endif // BASE_SPINLOCK_H_

View File

@@ -0,0 +1,83 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2010, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// The OS-specific header included below must provide two calls:
// base::internal::SpinLockDelay() and base::internal::SpinLockWake().
// See spinlock_internal.h for the spec of SpinLockWake().
// void SpinLockDelay(std::atomic<int> *w, int32 value, int loop)
// SpinLockDelay() generates an appropriate spin delay on iteration "loop" of a
// spin loop on location *w, whose previously observed value was "value".
// SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick,
// or may wait for a delay that can be truncated by a call to SpinLockWake(w).
// In all cases, it must return in bounded time even if SpinLockWake() is not
// called.
#include "base/spinlock_internal.h"
// forward declaration for use by spinlock_*-inl.h
namespace base { namespace internal { static int SuggestedDelayNS(int loop); }}
#if defined(_WIN32)
#include "base/spinlock_win32-inl.h"
#elif defined(__linux__)
#include "base/spinlock_linux-inl.h"
#else
#include "base/spinlock_posix-inl.h"
#endif
namespace base {
namespace internal {
// Return a suggested delay in nanoseconds for iteration number "loop"
static int SuggestedDelayNS(int loop) {
// Weak pseudo-random number generator to get some spread between threads
// when many are spinning.
static volatile uint64_t rand;
uint64 r = rand;
r = 0x5deece66dLL * r + 0xb; // numbers from nrand48()
rand = r;
r <<= 16; // 48-bit random number now in top 48-bits.
if (loop < 0 || loop > 32) { // limit loop to 0..32
loop = 32;
}
// loop>>3 cannot exceed 4 because loop cannot exceed 32.
// Select top 20..24 bits of lower 48 bits,
// giving approximately 0ms to 16ms.
// Mean is exponential in loop for first 32 iterations, then 8ms.
// The futex path multiplies this by 16, since we expect explicit wakeups
// almost always on that path.
return r >> (44 - (loop >> 3));
}
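// Worked example (illustrative): the shift is 44 - (loop >> 3), i.e. 44
// down to 40 as loop grows from 0 to 32. r >> 44 keeps the top 20 bits of
// the 48-bit random value, at most 2^20 ns (~1ms); r >> 40 keeps 24 bits,
// at most 2^24 ns (~16.8ms), matching the 0ms..16ms range noted above.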
} // namespace internal
} // namespace base

View File

@@ -0,0 +1,53 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2010, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* This file is an internal part of spinlock.cc and once.cc.
* It may not be used directly by code outside of //base.
*/
#ifndef BASE_SPINLOCK_INTERNAL_H_
#define BASE_SPINLOCK_INTERNAL_H_
#include <config.h>
#include <atomic>
#include "base/basictypes.h"
namespace base {
namespace internal {
void SpinLockWake(std::atomic<int> *w, bool all);
void SpinLockDelay(std::atomic<int> *w, int32 value, int loop);
} // namespace internal
} // namespace base
#endif

View File

@@ -0,0 +1,102 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2009, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* This file is a Linux-specific part of spinlock_internal.cc
*/
#include <errno.h>
#include <limits.h>
#include <sched.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
#define FUTEX_PRIVATE_FLAG 128
// Note: Instead of making direct system calls that are inlined, we rely
// on the syscall() function in glibc to do the right thing.
static bool have_futex;
static int futex_private_flag = FUTEX_PRIVATE_FLAG;
namespace {
static struct InitModule {
InitModule() {
int x = 0;
// futexes are ints, so we can use them only when int is
// the same size as the lockword_ in SpinLock.
have_futex = (syscall(__NR_futex, &x, FUTEX_WAKE, 1, NULL, NULL, 0) >= 0);
if (have_futex && syscall(__NR_futex, &x, FUTEX_WAKE | futex_private_flag,
1, NULL, NULL, 0) < 0) {
futex_private_flag = 0;
}
}
} init_module;
} // anonymous namespace
namespace base {
namespace internal {
void SpinLockDelay(std::atomic<int> *w, int32 value, int loop) {
if (loop != 0) {
int save_errno = errno;
struct timespec tm;
tm.tv_sec = 0;
if (have_futex) {
tm.tv_nsec = base::internal::SuggestedDelayNS(loop);
} else {
tm.tv_nsec = 2000001; // above 2ms so linux 2.4 doesn't spin
}
if (have_futex) {
tm.tv_nsec *= 16; // increase the delay; we expect explicit wakeups
syscall(__NR_futex, reinterpret_cast<int*>(w),
FUTEX_WAIT | futex_private_flag, value,
reinterpret_cast<struct kernel_timespec*>(&tm), NULL, 0);
} else {
nanosleep(&tm, NULL);
}
errno = save_errno;
}
}
void SpinLockWake(std::atomic<int> *w, bool all) {
if (have_futex) {
syscall(__NR_futex, reinterpret_cast<int*>(w),
FUTEX_WAKE | futex_private_flag, all ? INT_MAX : 1, NULL, NULL, 0);
}
}
} // namespace internal
} // namespace base

View File

@@ -0,0 +1,63 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2009, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* This file is a Posix-specific part of spinlock_internal.cc
*/
#include <config.h>
#include <errno.h>
#ifdef HAVE_SCHED_H
#include <sched.h> /* For sched_yield() */
#endif
#include <time.h> /* For nanosleep() */
namespace base {
namespace internal {
void SpinLockDelay(std::atomic<int> *w, int32 value, int loop) {
int save_errno = errno;
if (loop == 0) {
} else if (loop == 1) {
sched_yield();
} else {
struct timespec tm;
tm.tv_sec = 0;
tm.tv_nsec = base::internal::SuggestedDelayNS(loop);
nanosleep(&tm, NULL);
}
errno = save_errno;
}
void SpinLockWake(std::atomic<int> *w, bool all) {
}
} // namespace internal
} // namespace base

View File

@@ -0,0 +1,63 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2009, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* This file is a Win32-specific part of spinlock_internal.cc
*/
#include <windows.h>
#ifdef _MSC_VER
# pragma comment(lib, "Synchronization.lib")
#endif
namespace base {
namespace internal {
void SpinLockDelay(std::atomic<int> *w, int32 value, int loop) {
if (loop != 0) {
auto wait_ns = static_cast<uint64_t>(base::internal::SuggestedDelayNS(loop)) * 16;
auto wait_ms = wait_ns / 1000000;
WaitOnAddress(w, &value, 4, static_cast<DWORD>(wait_ms));
}
}
void SpinLockWake(std::atomic<int> *w, bool all) {
if (all) {
WakeByAddressAll((void*)w);
} else {
WakeByAddressSingle((void*)w);
}
}
} // namespace internal
} // namespace base

View File

@@ -0,0 +1,98 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Maxim Lifantsev
*/
#ifndef BASE_STL_ALLOCATOR_H_
#define BASE_STL_ALLOCATOR_H_
#include <config.h>
#include <stddef.h> // for ptrdiff_t
#include <limits>
#include "base/logging.h"
// Generic allocator class for STL objects
// that uses a given type-less allocator Alloc, which must provide:
// static void* Alloc::Allocate(size_t size);
// static void Alloc::Free(void* ptr, size_t size);
//
// STL_Allocator<T, MyAlloc> provides the same thread-safety
// guarantees as MyAlloc.
//
// Usage example:
// set<T, less<T>, STL_Allocator<T, MyAlloc> > my_set;
// CAVEAT: Parts of the code below are probably specific
// to the STL version(s) we are using.
// The code is simply lifted from what std::allocator<> provides.
template <typename T, class Alloc>
class STL_Allocator {
public:
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef T* pointer;
typedef const T* const_pointer;
typedef T& reference;
typedef const T& const_reference;
typedef T value_type;
template <class T1> struct rebind {
typedef STL_Allocator<T1, Alloc> other;
};
STL_Allocator() { }
STL_Allocator(const STL_Allocator&) { }
template <class T1> STL_Allocator(const STL_Allocator<T1, Alloc>&) { }
~STL_Allocator() { }
pointer address(reference x) const { return &x; }
const_pointer address(const_reference x) const { return &x; }
pointer allocate(size_type n, const void* = 0) {
RAW_DCHECK((n * sizeof(T)) / sizeof(T) == n, "n is too big to allocate");
return static_cast<T*>(Alloc::Allocate(n * sizeof(T)));
}
void deallocate(pointer p, size_type n) { Alloc::Free(p, n * sizeof(T)); }
size_type max_size() const { return size_t(-1) / sizeof(T); }
void construct(pointer p, const T& val) { ::new(p) T(val); }
void construct(pointer p) { ::new(p) T(); }
void destroy(pointer p) { p->~T(); }
// There's no state, so these allocators are always equal
bool operator==(const STL_Allocator&) const { return true; }
};
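// Usage sketch (illustrative only; "MyAlloc" is a hypothetical allocator
// satisfying the Allocate/Free contract documented above):
//
//   struct MyAlloc {
//     static void* Allocate(size_t size) { return malloc(size); }
//     static void Free(void* ptr, size_t /*size*/) { free(ptr); }
//   };
//   std::set<int, std::less<int>, STL_Allocator<int, MyAlloc> > my_set;
//   my_set.insert(42);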
#endif // BASE_STL_ALLOCATOR_H_

File diff suppressed because it is too large

View File

@@ -0,0 +1,230 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// All functions here are thread-hostile due to file caching unless
// commented otherwise.
#ifndef _SYSINFO_H_
#define _SYSINFO_H_
#include <config.h>
#include <time.h>
#if (defined(_WIN32) || defined(__MINGW32__)) && (!defined(__CYGWIN__) && !defined(__CYGWIN32__))
#include <windows.h> // for DWORD
#include <tlhelp32.h> // for CreateToolhelp32Snapshot
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h> // for pid_t
#endif
#include <stddef.h> // for size_t
#include <limits.h> // for PATH_MAX
#include "base/basictypes.h"
#include "base/logging.h" // for RawFD
// This getenv function is safe to call before the C runtime is initialized.
// On Windows, it utilizes GetEnvironmentVariable() and on unix it uses
// /proc/self/environ instead of calling getenv(). It's intended to be used in
// routines that run before main(), when the state required for getenv() may
// not be set up yet. In particular, errno isn't set up until relatively late
// (after the pthreads library has a chance to make it threadsafe), and
// getenv() doesn't work until then.
// On some platforms, this call will utilize the same, static buffer for
// repeated GetenvBeforeMain() calls. Callers should not expect pointers from
// this routine to be long lived.
// Note that on unix, /proc only has the environment at the time the
// application was started, so this routine ignores setenv() calls/etc. Also
// note it only reads the first 16K of the environment.
extern const char* GetenvBeforeMain(const char* name);
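// Usage sketch (illustrative only): safe to call from a global constructor,
// but copy the result out promptly since the backing buffer may be reused.
//
//   const char* val = GetenvBeforeMain("CPUPROFILE");
//   if (val != NULL) {
//     // use or copy val before the next GetenvBeforeMain() call
//   }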
// This takes as an argument an environment-variable name (like
// CPUPROFILE) whose value is supposed to be a file-path, and sets
// path to that path, and returns true. Non-trivial for surprising
// reasons, as documented in sysinfo.cc. path must have space for at
// least PATH_MAX characters.
extern bool GetUniquePathFromEnv(const char* env_name, char* path);
extern int GetSystemCPUsCount();
// Return true if we're running POSIX (e.g., NPTL on Linux) threads,
// as opposed to a non-POSIX thread library. The thing that we care
// about is whether a thread's pid is the same as the thread that
// spawned it. If so, this function returns true.
// Thread-safe.
// Note: We consider false negatives to be OK.
bool HasPosixThreads();
#ifndef SWIG // SWIG doesn't like struct Buffer and variable arguments.
// A ProcMapsIterator abstracts access to /proc/maps for a given
// process. Needs to be stack-allocatable and avoid using stdio/malloc
// so it can be used in the google stack dumper, heap-profiler, etc.
//
// On Windows and Mac OS X, this iterator iterates *only* over DLLs
// mapped into this process space. For Linux, FreeBSD, and Solaris,
// it iterates over *all* mapped memory regions, including anonymous
// mmaps. For other O/Ss, it is unlikely to work at all, and Valid()
// will always return false. Also note: this routine only works on
// FreeBSD if procfs is mounted: make sure this is in your /etc/fstab:
// proc /proc procfs rw 0 0
class ProcMapsIterator {
public:
struct Buffer {
#ifdef __FreeBSD__
// FreeBSD requires us to read all of the maps file at once, so
// we have to make a buffer that's "always" big enough
static const size_t kBufSize = 102400;
#else // a one-line buffer is good enough
static const size_t kBufSize = PATH_MAX + 1024;
#endif
char buf_[kBufSize];
};
// Create a new iterator for the specified pid. pid can be 0 for "self".
explicit ProcMapsIterator(pid_t pid);
// Create an iterator with specified storage (for use in signal
// handler). "buffer" should point to a ProcMapsIterator::Buffer
// buffer can be NULL in which case a bufer will be allocated.
ProcMapsIterator(pid_t pid, Buffer *buffer);
// Iterate through maps_backing instead of maps if use_maps_backing
// is true. Otherwise the same as above. buffer can be NULL and
// it will allocate a buffer itself.
ProcMapsIterator(pid_t pid, Buffer *buffer,
bool use_maps_backing);
// Returns true if the iterator was successfully initialized.
bool Valid() const;
// Returns a pointer to the most recently parsed line. Only valid
// after Next() returns true, and until the iterator is destroyed or
// Next() is called again. This may give strange results on non-Linux
// systems. Prefer FormatLine() if that may be a concern.
const char *CurrentLine() const { return stext_; }
// Writes the "canonical" form of the /proc/xxx/maps info for a single
// line to the passed-in buffer. Returns the number of bytes written,
// or 0 if it was not able to write the complete line. (To guarantee
// success, buffer should have size at least Buffer::kBufSize.)
// Takes as arguments values set via a call to Next(). The
// "canonical" form of the line (taken from linux's /proc/xxx/maps):
// <start_addr(hex)>-<end_addr(hex)> <perms(rwxp)> <offset(hex)> +
// <major_dev(hex)>:<minor_dev(hex)> <inode> <filename>
// e.g.
// 08048000-0804c000 r-xp 00000000 03:01 3793678 /bin/cat
// If you don't have the dev_t (dev), feel free to pass in 0.
// (Next() doesn't return a dev_t, though NextExt does.)
//
// Note: if filename and flags were obtained via a call to Next(),
// then the output of this function is only valid if Next() returned
// true, and only until the iterator is destroyed or Next() is
// called again. (Since filename, at least, points into CurrentLine.)
static int FormatLine(char* buffer, int bufsize,
uint64 start, uint64 end, const char *flags,
uint64 offset, int64 inode, const char *filename,
dev_t dev);
// Find the next entry in /proc/maps; return true if found or false
// if at the end of the file.
//
// Any of the result pointers can be NULL if you're not interested
// in those values.
//
// If "flags" and "filename" are passed, they end up pointing to
// storage within the ProcMapsIterator that is valid only until the
// iterator is destroyed or Next() is called again. The caller may
// modify the contents of these strings (up as far as the first NUL,
// and only until the subsequent call to Next()) if desired.
// The offsets are all uint64 in order to handle the case of a
// 32-bit process running on a 64-bit kernel
//
// IMPORTANT NOTE: see top-of-class notes for details about what
// mapped regions Next() iterates over, depending on O/S.
// TODO(csilvers): make flags and filename const.
bool Next(uint64 *start, uint64 *end, char **flags,
uint64 *offset, int64 *inode, char **filename);
bool NextExt(uint64 *start, uint64 *end, char **flags,
uint64 *offset, int64 *inode, char **filename,
uint64 *file_mapping, uint64 *file_pages,
uint64 *anon_mapping, uint64 *anon_pages,
dev_t *dev);
~ProcMapsIterator();
private:
void Init(pid_t pid, Buffer *buffer, bool use_maps_backing);
char *ibuf_; // input buffer
char *stext_; // start of text
char *etext_; // end of text
char *nextline_; // start of next line
char *ebuf_; // end of buffer (1 char for a nul)
#if (defined(_WIN32) || defined(__MINGW32__)) && (!defined(__CYGWIN__) && !defined(__CYGWIN32__))
HANDLE snapshot_; // filehandle on dll info
// In a change from the usual W-A pattern, there is no A variant of
// MODULEENTRY32. Tlhelp32.h #defines the W variant, but not the A.
// We want the original A variants, and this #undef is the only
// way I see to get them. Redefining it when we're done prevents us
// from affecting other .cc files.
# ifdef MODULEENTRY32 // Alias of W
# undef MODULEENTRY32
MODULEENTRY32 module_; // info about current dll (and dll iterator)
# define MODULEENTRY32 MODULEENTRY32W
# else // It's the ascii, the one we want.
MODULEENTRY32 module_; // info about current dll (and dll iterator)
# endif
#elif defined(__MACH__)
int current_image_; // dll's are called "images" in macos parlance
int current_load_cmd_; // the segment of this dll we're examining
#elif defined(__sun__) // Solaris
int fd_;
char current_filename_[PATH_MAX];
#else
int fd_; // filehandle on /proc/*/maps
#endif
pid_t pid_;
char flags_[10];
Buffer* dynamic_buffer_; // dynamically-allocated Buffer
bool using_maps_backing_; // true if we are looking at maps_backing instead of maps.
};
#endif /* #ifndef SWIG */
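// Usage sketch (illustrative only): walk this process's mappings.
//
//   ProcMapsIterator it(0);  // pid 0 means "self"
//   uint64 start, end, offset;
//   int64 inode;
//   char *flags, *filename;
//   while (it.Next(&start, &end, &flags, &offset, &inode, &filename)) {
//     // inspect [start, end); filename may be empty for anonymous maps
//   }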
// Helper routines
namespace tcmalloc {
int FillProcSelfMaps(char buf[], int size, bool* wrote_all);
void DumpProcSelfMaps(RawFD fd);
}
#endif /* #ifndef _SYSINFO_H_ */

View File

@@ -0,0 +1,133 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Le-Chun Wu
//
// This header file contains the macro definitions for thread safety
// annotations that allow the developers to document the locking policies
// of their multi-threaded code. The annotations can also help program
// analysis tools to identify potential thread safety issues.
//
// The annotations are implemented using clang's "attributes" extension.
// Using the macros defined here instead of the raw clang attributes allows
// for portability and future compatibility.
//
// This functionality is not yet fully implemented in perftools,
// but may be one day.
#ifndef BASE_THREAD_ANNOTATIONS_H_
#define BASE_THREAD_ANNOTATIONS_H_
#if defined(__clang__)
#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
#else
#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op
#endif
// Document if a shared variable/field needs to be protected by a lock.
// GUARDED_BY allows the user to specify a particular lock that should be
// held when accessing the annotated variable, while GUARDED_VAR only
// indicates a shared variable should be guarded (by any lock). GUARDED_VAR
// is primarily used when the client cannot express the name of the lock.
#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
#define GUARDED_VAR THREAD_ANNOTATION_ATTRIBUTE__(guarded)
// Document if the memory location pointed to by a pointer should be guarded
// by a lock when dereferencing the pointer. Similar to GUARDED_VAR,
// PT_GUARDED_VAR is primarily used when the client cannot express the name
// of the lock. Note that a pointer variable to a shared memory location
// could itself be a shared variable. For example, if a shared global pointer
// q, which is guarded by mu1, points to a shared memory location that is
// guarded by mu2, q should be annotated as follows:
// int *q GUARDED_BY(mu1) PT_GUARDED_BY(mu2);
#define PT_GUARDED_BY(x) \
THREAD_ANNOTATION_ATTRIBUTE__(point_to_guarded_by(x))
#define PT_GUARDED_VAR \
THREAD_ANNOTATION_ATTRIBUTE__(point_to_guarded)
// Document the acquisition order between locks that can be held
// simultaneously by a thread. For any two locks that need to be annotated
// to establish an acquisition order, only one of them needs the annotation.
// (i.e. You don't have to annotate both locks with both ACQUIRED_AFTER
// and ACQUIRED_BEFORE.)
#define ACQUIRED_AFTER(x) \
THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(x))
#define ACQUIRED_BEFORE(x) \
THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(x))
// The following three annotations document the lock requirements for
// functions/methods.
// Document if a function expects certain locks to be held before it is called
#define EXCLUSIVE_LOCKS_REQUIRED(x) \
THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(x))
#define SHARED_LOCKS_REQUIRED(x) \
THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(x))
// Document the locks acquired in the body of the function. These locks
// cannot be held when calling this function (as google3's Mutex locks are
// non-reentrant).
#define LOCKS_EXCLUDED(x) \
THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(x))
// Document the lock the annotated function returns without acquiring it.
#define LOCK_RETURNED(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
// Document if a class/type is a lockable type (such as the Mutex class).
#define LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(lockable)
// Document if a class is a scoped lockable type (such as the MutexLock class).
#define SCOPED_LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
// The following annotations specify lock and unlock primitives.
#define EXCLUSIVE_LOCK_FUNCTION(x) \
THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(x))
#define SHARED_LOCK_FUNCTION(x) \
THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(x))
#define EXCLUSIVE_TRYLOCK_FUNCTION(x) \
THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(x))
#define SHARED_TRYLOCK_FUNCTION(x) \
THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(x))
#define UNLOCK_FUNCTION(x) \
THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(x))
// An escape hatch for thread safety analysis to ignore the annotated function.
#define NO_THREAD_SAFETY_ANALYSIS \
THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
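// Usage sketch (illustrative only; the names below are hypothetical):
//
//   Mutex mu;
//   int counter GUARDED_BY(mu);                // protected by mu
//   void Bump() EXCLUSIVE_LOCKS_REQUIRED(mu);  // caller must hold mu
//   void Rebalance() LOCKS_EXCLUDED(mu);       // caller must not hold mu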
#endif // BASE_THREAD_ANNOTATIONS_H_

View File

@@ -0,0 +1,140 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Paul Pluzhnikov
//
// Allow dynamic symbol lookup in the kernel VDSO page.
//
// VDSOSupport -- a class representing kernel VDSO (if present).
//
#include "base/vdso_support.h"
#ifdef HAVE_VDSO_SUPPORT // defined in vdso_support.h
#include <fcntl.h>
#include <stddef.h> // for ptrdiff_t
#include <unistd.h> // for read() and close()
#include "base/logging.h"
#include "base/dynamic_annotations.h"
#include "base/basictypes.h" // for COMPILE_ASSERT
#ifndef AT_SYSINFO_EHDR
#define AT_SYSINFO_EHDR 33
#endif
namespace base {
const void *VDSOSupport::vdso_base_ = ElfMemImage::kInvalidBase;
VDSOSupport::VDSOSupport()
// If vdso_base_ is still set to kInvalidBase, we got here
// before VDSOSupport::Init has been called. Call it now.
: image_(vdso_base_ == ElfMemImage::kInvalidBase ? Init() : vdso_base_) {
}
// NOTE: we can't use GoogleOnceInit() below, because we can be
// called by tcmalloc, and none of the *once* stuff may be functional yet.
//
// In addition, we hope that the VDSOSupportHelper constructor
// causes this code to run before there are any threads, and before
// InitGoogle() has executed any chroot or setuid calls.
//
// Finally, even if there is a race here, it is harmless, because
// the operation should be idempotent.
const void *VDSOSupport::Init() {
if (vdso_base_ == ElfMemImage::kInvalidBase) {
// Valgrind zaps AT_SYSINFO_EHDR and friends from the auxv[]
// on the stack, so glibc behaves as if the VDSO were not present.
// But going directly to the kernel via /proc/self/auxv below
// bypasses Valgrind's zapping. So we check for Valgrind separately.
if (RunningOnValgrind()) {
vdso_base_ = NULL;
return NULL;
}
int fd = open("/proc/self/auxv", O_RDONLY);
if (fd == -1) {
// Kernel too old to have a VDSO.
vdso_base_ = NULL;
return NULL;
}
ElfW(auxv_t) aux;
while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
if (aux.a_type == AT_SYSINFO_EHDR) {
COMPILE_ASSERT(sizeof(vdso_base_) == sizeof(aux.a_un.a_val),
unexpected_sizeof_pointer_NE_sizeof_a_val);
vdso_base_ = reinterpret_cast<void *>(aux.a_un.a_val);
break;
}
}
close(fd);
if (vdso_base_ == ElfMemImage::kInvalidBase) {
// Didn't find AT_SYSINFO_EHDR in auxv[].
vdso_base_ = NULL;
}
}
return vdso_base_;
}
const void *VDSOSupport::SetBase(const void *base) {
CHECK(base != ElfMemImage::kInvalidBase);
const void *old_base = vdso_base_;
vdso_base_ = base;
image_.Init(base);
return old_base;
}
bool VDSOSupport::LookupSymbol(const char *name,
const char *version,
int type,
SymbolInfo *info) const {
return image_.LookupSymbol(name, version, type, info);
}
bool VDSOSupport::LookupSymbolByAddress(const void *address,
SymbolInfo *info_out) const {
return image_.LookupSymbolByAddress(address, info_out);
}
// We need to make sure VDSOSupport::Init() is called before main()
// runs, since main() might do something like setuid or chroot. If
// VDSOSupport is used in any global constructor, this will happen,
// since VDSOSupport's constructor calls Init. But if not, we need to
// ensure it here, with a global constructor of our own. This is an
// allowed exception to the normal rule against non-trivial global
// constructors.
static class VDSOInitHelper {
public:
VDSOInitHelper() { VDSOSupport::Init(); }
} vdso_init_helper;
} // namespace base
#endif // HAVE_VDSO_SUPPORT

View File

@@ -0,0 +1,137 @@
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Paul Pluzhnikov
//
// Allow dynamic symbol lookup in the kernel VDSO page.
//
// VDSO stands for "Virtual Dynamic Shared Object" -- a page of
// executable code, which looks like a shared library, but doesn't
// necessarily exist anywhere on disk, and which gets mmap()ed into
// every process by kernels which support VDSO, such as 2.6.x for 32-bit
// executables, and 2.6.24 and above for 64-bit executables.
//
// More details can be found here:
// http://www.trilithium.com/johan/2005/08/linux-gate/
//
// VDSOSupport -- a class representing kernel VDSO (if present).
//
// Example usage:
// VDSOSupport vdso;
// VDSOSupport::SymbolInfo info;
// typedef long (*FN)(unsigned *, void *, void *);
// FN fn = NULL;
// if (vdso.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info)) {
// fn = reinterpret_cast<FN>(info.address);
// }
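// (Continuing the sketch above; the getcpu-style signature is an
// assumption for illustration, not something this header guarantees:)
//   unsigned cpu;
//   if (fn != NULL) {
//     fn(&cpu, NULL, NULL);  // ask the vDSO which CPU we are running on
//   }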
#ifndef BASE_VDSO_SUPPORT_H_
#define BASE_VDSO_SUPPORT_H_
#include <config.h>
#include "base/basictypes.h"
#include "base/elf_mem_image.h"
#ifdef HAVE_ELF_MEM_IMAGE
// Enable VDSO support only for the architectures/operating systems that
// support it.
#if defined(__linux__) && (defined(__i386__) || defined(__PPC__))
#define HAVE_VDSO_SUPPORT 1
#endif
#include <stdlib.h> // for NULL
namespace base {
// NOTE: this class may be used from within tcmalloc, and cannot
// use any memory allocation routines.
class VDSOSupport {
public:
VDSOSupport();
typedef ElfMemImage::SymbolInfo SymbolInfo;
typedef ElfMemImage::SymbolIterator SymbolIterator;
// Answers whether we have a VDSO at all.
bool IsPresent() const { return image_.IsPresent(); }
// Allows iteration over all VDSO symbols.
SymbolIterator begin() const { return image_.begin(); }
SymbolIterator end() const { return image_.end(); }
// Look up a versioned dynamic symbol in the kernel VDSO.
// Returns false if the VDSO is not present, or doesn't contain the
// given symbol/version/type combination.
// If info_out != NULL, additional details are filled in.
bool LookupSymbol(const char *name, const char *version,
int symbol_type, SymbolInfo *info_out) const;
// Find info about the symbol (if any) which overlaps the given address.
// Returns true if the symbol was found; false if the VDSO isn't present
// or doesn't have a symbol overlapping the given address.
// If info_out != NULL, additional details are filled in.
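// For example (a sketch; assumes addr points into the VDSO and that
// SymbolInfo has a name field, as declared in elf_mem_image.h):
//   VDSOSupport::SymbolInfo info;
//   if (vdso.LookupSymbolByAddress(addr, &info)) {
//     // info.name now names the symbol covering addr.
//   }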
bool LookupSymbolByAddress(const void *address, SymbolInfo *info_out) const;
// Used only for testing. Replaces the real VDSO base with a mock.
// Returns the previous value of vdso_base_. After you are done testing,
// you are expected to call SetBase() with the previous value, in order
// to reset state to the way it was.
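// For example (a sketch of the test pattern described above; mock_base
// is a hypothetical page-aligned test image):
//   const void *old_base = vdso.SetBase(mock_base);
//   // ... run tests against the mock ...
//   vdso.SetBase(old_base);  // restore the real state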
const void *SetBase(const void *s);
// Computes vdso_base_ and returns it. Should be called as early as
// possible, before any thread creation, chroot, or setuid.
static const void *Init();
private:
// image_ represents VDSO ELF image in memory.
// image_.ehdr_ == NULL implies there is no VDSO.
ElfMemImage image_;
// Cached value of auxv AT_SYSINFO_EHDR, computed once.
// This is a tri-state:
// kInvalidBase => value hasn't been determined yet.
// 0 => there is no VDSO.
// else => vma of VDSO Elf{32,64}_Ehdr.
//
// When testing with mock VDSO, low bit is set.
// The low bit is always available because vdso_base_ is
// page-aligned.
static const void *vdso_base_;
DISALLOW_COPY_AND_ASSIGN(VDSOSupport);
};
} // namespace base
#endif // HAVE_ELF_MEM_IMAGE
#endif // BASE_VDSO_SUPPORT_H_