
Merge pull request #63 from rationalcoder/arm-fix

Fixed Issue #50 and Issue #61, added opt-in unaligned access support, and some documentation.
Commit e76f4d4a67 by Sergey Yagovtsev, 2017-09-14 09:38:01 +04:00, committed via GitHub.
4 changed files with 302 additions and 44 deletions
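
The "opt-in unaligned access support" mentioned above is controlled by the EASY_ENABLE_STRICT_ALIGNMENT macro introduced further down in this diff. A minimal sketch of how it might be enabled, assuming the definition is made visible wherever the profiler headers are compiled (the exact build setup is hypothetical):

// Sketch: opt in to the byte-wise (strict-alignment) code paths added by this commit.
// The macro must be defined before the profiler headers are compiled, e.g. as a
// project-wide compiler definition such as -DEASY_ENABLE_STRICT_ALIGNMENT.
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
# define EASY_ENABLE_STRICT_ALIGNMENT
#endif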


@@ -407,7 +407,7 @@ namespace profiler {
p.base = m_properties.base; // Use copy of m_properties to make sure m_properties will not be changed
// Stop another session
ControlTrace(NULL, KERNEL_LOGGER_NAME, reinterpret_cast<EVENT_TRACE_PROPERTIES*>(&p), EVENT_TRACE_CONTROL_STOP);
ControlTrace((TRACEHANDLE)NULL, KERNEL_LOGGER_NAME, reinterpret_cast<EVENT_TRACE_PROPERTIES*>(&p), EVENT_TRACE_CONTROL_STOP);
// Console window variant:
//if (32 >= (int)ShellExecute(NULL, NULL, "logman", "stop \"" KERNEL_LOGGER_NAME "\" -ets", NULL, SW_HIDE))


@@ -82,6 +82,8 @@
VarName = VarInitializer
# endif
#define EASY_FORCE_INLINE __forceinline
#elif defined (__clang__)
//////////////////////////////////////////////////////////////////////////
// Clang Compiler
@@ -102,6 +104,8 @@
# define EASY_FINAL
# endif
#define EASY_FORCE_INLINE inline __attribute__((always_inline))
#elif defined(__GNUC__)
//////////////////////////////////////////////////////////////////////////
// GNU Compiler
@@ -124,6 +128,8 @@
# define EASY_FINAL
# endif
#define EASY_FORCE_INLINE inline __attribute__((always_inline))
#endif
// END // TODO: Add support for other compilers
//////////////////////////////////////////////////////////////////////////
@@ -147,6 +153,10 @@
# define EASY_FINAL final
#endif
#ifndef EASY_FORCE_INLINE
# define EASY_FORCE_INLINE inline
#endif
#ifndef PROFILER_API
# define PROFILER_API
#endif
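
For reference, a minimal sketch of how the force-inline macro resolves across the branches above (the function itself is hypothetical):

// Expands to __forceinline on MSVC, to inline __attribute__((always_inline)) on
// Clang/GCC, and falls back to plain inline on any other compiler.
EASY_FORCE_INLINE int add_one(int x)
{
    return x + 1;
}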


@@ -531,7 +531,7 @@ extern "C" {
SerializedBlock::SerializedBlock(const Block& block, uint16_t name_length)
: BaseBlockData(block)
{
auto pName = const_cast<char*>(name());
char* pName = const_cast<char*>(name());
if (name_length) strncpy(pName, block.name(), name_length);
pName[name_length] = 0;
}
@@ -539,7 +539,7 @@ SerializedBlock::SerializedBlock(const Block& block, uint16_t name_length)
SerializedCSwitch::SerializedCSwitch(const CSwitchBlock& block, uint16_t name_length)
: CSwitchEvent(block)
{
auto pName = const_cast<char*>(name());
char* pName = const_cast<char*>(name());
if (name_length) strncpy(pName, block.name(), name_length);
pName[name_length] = 0;
}
@@ -678,15 +678,15 @@ void ThreadStorage::storeBlock(const profiler::Block& block)
EASY_THREAD_LOCAL static profiler::timestamp_t endTime = 0ULL;
#endif
auto name_length = static_cast<uint16_t>(strlen(block.name()));
auto size = static_cast<uint16_t>(sizeof(BaseBlockData) + name_length + 1);
uint16_t name_length = static_cast<uint16_t>(strlen(block.name()));
uint16_t size = static_cast<uint16_t>(sizeof(BaseBlockData) + name_length + 1);
#if EASY_OPTION_MEASURE_STORAGE_EXPAND != 0
const bool expanded = (desc->m_status & profiler::ON) && blocks.closedList.need_expand(size);
if (expanded) beginTime = getCurrentTime();
#endif
auto data = blocks.closedList.allocate(size);
char* data = (char*)blocks.closedList.allocate(size);
#if EASY_OPTION_MEASURE_STORAGE_EXPAND != 0
if (expanded) endTime = getCurrentTime();
@@ -711,9 +711,9 @@ void ThreadStorage::storeBlock(const profiler::Block& block)
void ThreadStorage::storeCSwitch(const CSwitchBlock& block)
{
auto name_length = static_cast<uint16_t>(strlen(block.name()));
auto size = static_cast<uint16_t>(sizeof(CSwitchEvent) + name_length + 1);
auto data = sync.closedList.allocate(size);
uint16_t name_length = static_cast<uint16_t>(strlen(block.name()));
uint16_t size = static_cast<uint16_t>(sizeof(CSwitchEvent) + name_length + 1);
void* data = sync.closedList.allocate(size);
::new (data) SerializedCSwitch(block, name_length);
sync.usedMemorySize += size;
}
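
Both storeBlock() and storeCSwitch() follow the same pattern: compute the serialized size (base struct plus name plus a terminating NUL), take that many bytes from the thread's chunk allocator, then placement-new the serialized object into them. A standalone sketch of that pattern, with hypothetical simplified types standing in for the real ones:

#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <new>

// Hypothetical stand-ins for BaseBlockData/SerializedBlock and closedList.allocate().
struct FakeHeader { uint64_t begin, end; };

struct FakeSerialized : FakeHeader
{
    char* name() { return reinterpret_cast<char*>(this) + sizeof(FakeHeader); }

    FakeSerialized(const FakeHeader& h, const char* n, uint16_t len) : FakeHeader(h)
    {
        if (len) std::strncpy(name(), n, len);
        name()[len] = 0; // always NUL-terminate, exactly as SerializedBlock does
    }
};

void* fake_allocate(uint16_t size) { return std::malloc(size); }

int main()
{
    FakeHeader header = {1, 2};
    const char* blockName = "example";

    const uint16_t name_length = static_cast<uint16_t>(std::strlen(blockName));
    const uint16_t size = static_cast<uint16_t>(sizeof(FakeHeader) + name_length + 1);

    void* data = fake_allocate(size);
    ::new (data) FakeSerialized(header, blockName, name_length); // same step as storeBlock()
    std::free(data);
    return 0;
}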


@@ -56,6 +56,9 @@ The Apache License, Version 2.0 (the "License");
#include <thread>
#include <atomic>
#include <list>
#include <type_traits>
#include <cstring>
#include <cstddef>
//////////////////////////////////////////////////////////////////////////
@@ -115,15 +118,14 @@ namespace profiler {
# define EASY_ENABLE_BLOCK_STATUS 1
#endif
#ifndef EASY_ENABLE_ALIGNMENT
# define EASY_ENABLE_ALIGNMENT 0
#endif
#ifndef EASY_ALIGNMENT_SIZE
# define EASY_ALIGNMENT_SIZE 64
# define EASY_ALIGNMENT_SIZE alignof(std::max_align_t)
#endif
#if EASY_ENABLE_ALIGNMENT == 0
# define EASY_ALIGNED(TYPE, VAR, A) TYPE VAR
# define EASY_MALLOC(MEMSIZE, A) malloc(MEMSIZE)
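
The default alignment above changes from a hard-coded 64 bytes to the strictest scalar alignment the platform guarantees. A quick way to see what that evaluates to on a given toolchain (a throwaway snippet, not part of the library):

#include <cstddef>
#include <cstdio>

int main()
{
    // Typically prints 16 on 64-bit desktop platforms; the previous default was 64.
    std::printf("%zu\n", alignof(std::max_align_t));
    return 0;
}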
@@ -144,7 +146,226 @@ namespace profiler {
# endif
#endif
template <const uint16_t N>
//! Checks if a pointer is aligned.
//! \param ptr The pointer to check.
//! \tparam ALIGNMENT The alignment (must be a power of 2).
//! \returns true if the memory is aligned.
//!
template <uint32_t ALIGNMENT>
EASY_FORCE_INLINE bool is_aligned(const void* ptr)
{
static_assert((ALIGNMENT & (ALIGNMENT - 1)) == 0, "Alignment must be a power of two.");
return ((uintptr_t)ptr & (ALIGNMENT-1)) == 0;
}
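
A standalone illustration of the test above, with the mask written out inline (the buffer and names are hypothetical):

#include <cstdint>
#include <cassert>

int main()
{
    alignas(8) char buffer[16] = {};
    void* aligned   = buffer;        // 8-byte aligned thanks to alignas
    void* unaligned = buffer + 1;    // misaligned for uint64_t on any common ABI

    // Same expression as is_aligned<alignof(uint64_t)>(ptr).
    const uintptr_t mask = alignof(uint64_t) - 1;
    assert(((uintptr_t)aligned   & mask) == 0);  // would take the single-store path
    assert(((uintptr_t)unaligned & mask) != 0);  // would take the byte-by-byte path
    return 0;
}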
EASY_FORCE_INLINE void unaligned_zero16(void* ptr)
{
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
*(uint16_t*)ptr = 0;
#else
((char*)ptr)[0] = 0;
((char*)ptr)[1] = 0;
#endif
}
EASY_FORCE_INLINE void unaligned_zero32(void* ptr)
{
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
*(uint32_t*)ptr = 0;
#else
((char*)ptr)[0] = 0;
((char*)ptr)[1] = 0;
((char*)ptr)[2] = 0;
((char*)ptr)[3] = 0;
#endif
}
EASY_FORCE_INLINE void unaligned_zero64(void* ptr)
{
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
*(uint64_t*)ptr = 0;
#else
// Assume unaligned is more common.
if (!is_aligned<alignof(uint64_t)>(ptr)) {
((char*)ptr)[0] = 0;
((char*)ptr)[1] = 0;
((char*)ptr)[2] = 0;
((char*)ptr)[3] = 0;
((char*)ptr)[4] = 0;
((char*)ptr)[5] = 0;
((char*)ptr)[6] = 0;
((char*)ptr)[7] = 0;
}
else {
*(uint64_t*)ptr = 0;
}
#endif
}
template <typename T>
EASY_FORCE_INLINE void unaligned_store16(void* ptr, T val)
{
static_assert(sizeof(T) == 2, "16 bit type required.");
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
*(T*)ptr = val;
#else
const char* const temp = (char*)&val;
((char*)ptr)[0] = temp[0];
((char*)ptr)[1] = temp[1];
#endif
}
template <typename T>
EASY_FORCE_INLINE void unaligned_store32(void* ptr, T val)
{
static_assert(sizeof(T) == 4, "32 bit type required.");
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
*(T*)ptr = val;
#else
const char* const temp = (char*)&val;
((char*)ptr)[0] = temp[0];
((char*)ptr)[1] = temp[1];
((char*)ptr)[2] = temp[2];
((char*)ptr)[3] = temp[3];
#endif
}
template <typename T>
EASY_FORCE_INLINE void unaligned_store64(void* ptr, T val)
{
static_assert(sizeof(T) == 8, "64 bit type required.");
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
*(T*)ptr = val;
#else
const char* const temp = (char*)&val;
// Assume unaligned is more common.
if (!is_aligned<alignof(T)>(ptr)) {
((char*)ptr)[0] = temp[0];
((char*)ptr)[1] = temp[1];
((char*)ptr)[2] = temp[2];
((char*)ptr)[3] = temp[3];
((char*)ptr)[4] = temp[4];
((char*)ptr)[5] = temp[5];
((char*)ptr)[6] = temp[6];
((char*)ptr)[7] = temp[7];
}
else {
*(T*)ptr = val;
}
#endif
}
template <typename T>
EASY_FORCE_INLINE T unaligned_load16(const void* ptr)
{
static_assert(sizeof(T) == 2, "16 bit type required.");
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
return *(T*)ptr;
#else
T value;
((char*)&value)[0] = ((char*)ptr)[0];
((char*)&value)[1] = ((char*)ptr)[1];
return value;
#endif
}
template <typename T>
EASY_FORCE_INLINE T unaligned_load16(const void* ptr, T* val)
{
static_assert(sizeof(T) == 2, "16 bit type required.");
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
*val = *(T*)ptr;
return *val;
#else
((char*)val)[0] = ((char*)ptr)[0];
((char*)val)[1] = ((char*)ptr)[1];
return *val;
#endif
}
template <typename T>
EASY_FORCE_INLINE T unaligned_load32(const void* ptr)
{
static_assert(sizeof(T) == 4, "32 bit type required.");
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
return *(T*)ptr;
#else
T value;
((char*)&value)[0] = ((char*)ptr)[0];
((char*)&value)[1] = ((char*)ptr)[1];
((char*)&value)[2] = ((char*)ptr)[2];
((char*)&value)[3] = ((char*)ptr)[3];
return value;
#endif
}
template <typename T>
EASY_FORCE_INLINE T unaligned_load32(const void* ptr, T* val)
{
static_assert(sizeof(T) == 4, "32 bit type required.");
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
*val = *(T*)ptr;
return *val;
#else
((char*)val)[0] = ((char*)ptr)[0];
((char*)val)[1] = ((char*)ptr)[1];
((char*)val)[2] = ((char*)ptr)[2];
((char*)val)[3] = ((char*)ptr)[3];
return *val;
#endif
}
template <typename T>
EASY_FORCE_INLINE T unaligned_load64(const void* ptr)
{
static_assert(sizeof(T) == 8, "64 bit type required.");
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
return *(T*)ptr;
#else
if (!is_aligned<alignof(T)>(ptr)) {
T value;
((char*)&value)[0] = ((char*)ptr)[0];
((char*)&value)[1] = ((char*)ptr)[1];
((char*)&value)[2] = ((char*)ptr)[2];
((char*)&value)[3] = ((char*)ptr)[3];
((char*)&value)[4] = ((char*)ptr)[4];
((char*)&value)[5] = ((char*)ptr)[5];
((char*)&value)[6] = ((char*)ptr)[6];
((char*)&value)[7] = ((char*)ptr)[7];
return value;
}
else {
return *(T*)ptr;
}
#endif
}
template <typename T>
EASY_FORCE_INLINE T unaligned_load64(const void* ptr, T* val)
{
static_assert(sizeof(T) == 8, "64 bit type required.");
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
*val = *(T*)ptr;
return *val;
#else
if (!is_aligned<alignof(T)>(ptr)) {
((char*)val)[0] = ((char*)ptr)[0];
((char*)val)[1] = ((char*)ptr)[1];
((char*)val)[2] = ((char*)ptr)[2];
((char*)val)[3] = ((char*)ptr)[3];
((char*)val)[4] = ((char*)ptr)[4];
((char*)val)[5] = ((char*)ptr)[5];
((char*)val)[6] = ((char*)ptr)[6];
((char*)val)[7] = ((char*)ptr)[7];
return *val;
}
else {
*val = *(T*)ptr;
return *val;
}
#endif
}
template <uint16_t N>
class chunk_allocator
{
struct chunk { EASY_ALIGNED(int8_t, data[N], EASY_ALIGNMENT_SIZE); chunk* prev = nullptr; };
@@ -177,7 +398,12 @@ class chunk_allocator
auto prev = last;
last = ::new (EASY_MALLOC(sizeof(chunk), EASY_ALIGNMENT_SIZE)) chunk();
last->prev = prev;
*(uint16_t*)last->data = 0;
// Although there is no need for the unaligned-access helpers here (a new chunk will
// usually be at least 8-byte aligned, and we only need 2-byte alignment), this is the
// only way I have found to silence the GCC strict-aliasing warning without using
// std::memset. It's an extra line, but it is just as fast as *(uint16_t*)last->data = 0;
char* const data = (char*)&last->data;
*(uint16_t*)data = 0;
}
/** Invert the current chunks list so that it can be iterated in direct order.
@@ -201,13 +427,16 @@ class chunk_allocator
//typedef std::list<chunk> chunk_list;
chunk_list m_chunks;
uint32_t m_size;
uint16_t m_shift;
// Used in serialize(): workaround for no constexpr support in MSVC 2013.
static const int_fast32_t MAX_CHUNK_OFFSET = N-sizeof(uint16_t);
chunk_list m_chunks; ///< List of chunks.
uint32_t m_size; ///< Number of elements stored (i.e. the number of times allocate() has been called).
uint16_t m_chunkOffset; ///< Number of bytes used in the current chunk.
public:
chunk_allocator() : m_size(0), m_shift(0)
chunk_allocator() : m_size(0), m_chunkOffset(0)
{
m_chunks.emplace_back();
}
@@ -223,42 +452,49 @@ public:
if (!need_expand(n))
{
int8_t* data = m_chunks.back().data + m_shift;
m_shift += n + sizeof(uint16_t);
// Local copy to avoid an extra load caused by aliasing through 'this'.
uint16_t chunkOffset = m_chunkOffset;
char* data = (char*)m_chunks.back().data + chunkOffset;
chunkOffset += n + sizeof(uint16_t);
m_chunkOffset = chunkOffset;
*(uint16_t*)data = n;
data = data + sizeof(uint16_t);
unaligned_store16(data, n);
data += sizeof(uint16_t);
if (m_shift + 1 < N)
*(uint16_t*)(data + n) = 0;
// If there is enough space for at least another payload size,
// set it to zero.
if (chunkOffset < N-1)
unaligned_zero16(data + n);
return data;
}
m_shift = n + sizeof(uint16_t);
m_chunkOffset = n + sizeof(uint16_t);
m_chunks.emplace_back();
auto data = m_chunks.back().data;
*(uint16_t*)data = n;
data = data + sizeof(uint16_t);
char* data = (char*)&m_chunks.back().data[0];
unaligned_store16(data, n);
data += sizeof(uint16_t);
// We assume here that it takes more than one element to fill a chunk.
unaligned_zero16(data + n);
*(uint16_t*)(data + n) = 0;
return data;
}
/** Check whether the current chunk does not have enough room to store an additional n bytes.
*/
inline bool need_expand(uint16_t n) const
bool need_expand(uint16_t n) const
{
return (m_shift + n + sizeof(uint16_t)) > N;
return (m_chunkOffset + n + sizeof(uint16_t)) > N;
}
inline uint32_t size() const
uint32_t size() const
{
return m_size;
}
inline bool empty() const
bool empty() const
{
return m_size == 0;
}
@@ -266,7 +502,7 @@ public:
void clear()
{
m_size = 0;
m_shift = 0;
m_chunkOffset = 0;
m_chunks.clear();
m_chunks.emplace_back();
}
@@ -278,20 +514,32 @@ public:
void serialize(profiler::OStream& _outputStream)
{
// Chunks are stored in reversed order (stack).
// To be able to iterate them in direct order we have to invert chunks list.
// To be able to iterate them in direct order we have to invert the chunks list.
m_chunks.invert();
// Iterate over chunks and perform blocks serialization
auto current = m_chunks.last;
// Each chunk is an array of N bytes that can hold between one element
// (if the list isn't empty) and however many elements fit in a chunk,
// where an element consists of a payload size followed by the payload:
// elementStart[0..1]: payload size as a uint16_t
// elementStart[2..2+size-1]: payload.
// The maximum chunk offset is N-sizeof(uint16_t) because, if we hit that (or go past),
// there is either no space left, 1 byte left, or 2 bytes left, all of which are
// too small to carry more than a zero-sized element.
chunk* current = m_chunks.last;
do {
const int8_t* data = current->data;
uint16_t i = 0;
while (i + 1 < N && *(uint16_t*)data != 0) {
const uint16_t size = sizeof(uint16_t) + *(uint16_t*)data;
_outputStream.write((const char*)data, size);
data = data + size;
i += size;
const char* data = (char*)current->data;
int_fast32_t chunkOffset = 0; // signed int so overflow is not checked.
uint16_t payloadSize = unaligned_load16<uint16_t>(data);
while ((chunkOffset < MAX_CHUNK_OFFSET) & (payloadSize != 0)) {
const uint16_t chunkSize = sizeof(uint16_t) + payloadSize;
_outputStream.write(data, chunkSize);
data += chunkSize;
chunkOffset += chunkSize;
unaligned_load16(data, &payloadSize);
}
current = current->prev;
} while (current != nullptr);
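
For reference, a standalone sketch of the layout this loop consumes: size-prefixed payloads packed into a fixed-size buffer, terminated by a zero size. The buffer, payloads, and the memcpy stand-ins for unaligned_store16/unaligned_load16 are hypothetical:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main()
{
    // A fake 32-byte "chunk": two size-prefixed payloads, then zero bytes as terminator.
    char chunk[32] = {};
    char* w = chunk;
    const char* payloads[] = { "abc", "defgh" };
    for (const char* s : payloads) {
        const uint16_t n = static_cast<uint16_t>(std::strlen(s));
        std::memcpy(w, &n, sizeof n);     // element header: payload size (~ unaligned_store16)
        std::memcpy(w + sizeof n, s, n);  // payload bytes
        w += sizeof n + n;
    }

    // Walk the buffer the same way serialize() walks a chunk.
    const int_fast32_t MAX_OFFSET = sizeof(chunk) - sizeof(uint16_t); // mirrors MAX_CHUNK_OFFSET
    const char* r = chunk;
    int_fast32_t offset = 0;
    uint16_t payloadSize;
    std::memcpy(&payloadSize, r, sizeof payloadSize); // ~ unaligned_load16
    while (offset < MAX_OFFSET && payloadSize != 0) {
        const uint16_t elementSize = sizeof(uint16_t) + payloadSize;
        std::printf("element with %u payload bytes\n", unsigned(payloadSize));
        r += elementSize;
        offset += elementSize;
        std::memcpy(&payloadSize, r, sizeof payloadSize);
    }
    return 0;
}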