0
0
mirror of https://github.com/yse/easy_profiler.git synced 2024-12-27 00:31:02 +08:00

#75 [Core] Removed the waiting behavior when dumping blocks. The current solution resolves a possible dead-lock, but it restricts any events outside frame bounds (you can see that the last event of the LoadingResources thread in profiler_sample is always absent).

This commit is contained in:
Victor Zarubkin 2017-12-28 22:21:54 +03:00
parent a9e6ac084b
commit d049a1339e
5 changed files with 169 additions and 118 deletions

View File

@ -91,7 +91,7 @@ The Apache License, Version 2.0 (the "License");
/// Returns true if \p ptr is aligned to an ALIGNMENT-byte boundary.
/// The mask test below is only valid for power-of-two alignments.
template <uint32_t ALIGNMENT>
EASY_FORCE_INLINE bool is_aligned(void* ptr)
{
    // BUGFIX: the previous assert (ALIGNMENT % 2 == 0, or the equivalent
    // (ALIGNMENT & 1) == 0) only verified evenness — e.g. 6 would pass.
    // A real power-of-two check is (A != 0 && (A & (A - 1)) == 0).
    static_assert(ALIGNMENT != 0 && (ALIGNMENT & (ALIGNMENT - 1)) == 0,
                  "Alignment must be a power of two.");
    return ((uintptr_t)ptr & (ALIGNMENT - 1)) == 0;
}
@ -119,9 +119,7 @@ EASY_FORCE_INLINE void unaligned_zero32(void* ptr)
/// Writes a 64-bit zero to \p ptr, byte-by-byte when strict alignment is
/// required and the pointer is not suitably aligned.
EASY_FORCE_INLINE void unaligned_zero64(void* ptr)
{
#ifdef EASY_ENABLE_STRICT_ALIGNMENT
    // Assume unaligned is more common.
    if (!is_aligned<alignof(uint64_t)>(ptr)) {
        ((char*)ptr)[0] = 0;
        ((char*)ptr)[1] = 0;
        ((char*)ptr)[2] = 0;
        ((char*)ptr)[3] = 0;
        ((char*)ptr)[4] = 0;
        ((char*)ptr)[5] = 0;
        ((char*)ptr)[6] = 0;
        ((char*)ptr)[7] = 0;
    }
    else
#endif
    *(uint64_t*)ptr = 0;
}
/// Stores a 16-bit value to \p ptr without assuming alignment.
template <typename T>
EASY_FORCE_INLINE void unaligned_store16(void* ptr, T val)
{
    static_assert(sizeof(T) == 2, "16 bit type required.");
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
    *(T*)ptr = val;
#else
    const char* const temp = (const char*)&val;
    ((char*)ptr)[0] = temp[0];
    ((char*)ptr)[1] = temp[1];
#endif
}
@ -156,10 +155,11 @@ template <typename T>
EASY_FORCE_INLINE void unaligned_store32(void* ptr, T val)
{
static_assert(sizeof(T) == 4, "32 bit type required.");
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
*(T*)ptr = val;
#else
const char* const temp = (char*)&val;
const char* const temp = (const char*)&val;
((char*)ptr)[0] = temp[0];
((char*)ptr)[1] = temp[1];
((char*)ptr)[2] = temp[2];
@ -171,12 +171,11 @@ template <typename T>
EASY_FORCE_INLINE void unaligned_store64(void* ptr, T val)
{
static_assert(sizeof(T) == 8, "64 bit type required.");
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
*(T*)ptr = val;
#else
const char* const temp = (char*)&val;
#ifdef EASY_ENABLE_STRICT_ALIGNMENT
// Assume unaligned is more common.
if (!is_aligned<alignof(T)>(ptr)) {
const char* const temp = (const char*)&val;
((char*)ptr)[0] = temp[0];
((char*)ptr)[1] = temp[1];
((char*)ptr)[2] = temp[2];
@ -186,22 +185,23 @@ EASY_FORCE_INLINE void unaligned_store64(void* ptr, T val)
((char*)ptr)[6] = temp[6];
((char*)ptr)[7] = temp[7];
}
else {
*(T*)ptr = val;
}
else
#endif
*(T*)ptr = val;
}
/// Loads a 16-bit value from \p ptr without assuming alignment.
template <typename T>
EASY_FORCE_INLINE T unaligned_load16(const void* ptr)
{
    static_assert(sizeof(T) == 2, "16 bit type required.");
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
    return *(const T*)ptr;
#else
    T value;
    ((char*)&value)[0] = ((const char*)ptr)[0];
    ((char*)&value)[1] = ((const char*)ptr)[1];
    return value;
#endif
}
@ -210,28 +210,30 @@ template <typename T>
EASY_FORCE_INLINE T unaligned_load16(const void* ptr, T* val)
{
static_assert(sizeof(T) == 2, "16 bit type required.");
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
*val = *(T*)ptr;
return *val;
*val = *(const T*)ptr;
#else
((char*)val)[0] = ((char*)ptr)[0];
((char*)val)[1] = ((char*)ptr)[1];
return *val;
((char*)val)[0] = ((const char*)ptr)[0];
((char*)val)[1] = ((const char*)ptr)[1];
#endif
return *val;
}
/// Loads a 32-bit value from \p ptr without assuming alignment.
template <typename T>
EASY_FORCE_INLINE T unaligned_load32(const void* ptr)
{
    static_assert(sizeof(T) == 4, "32 bit type required.");
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
    return *(const T*)ptr;
#else
    T value;
    ((char*)&value)[0] = ((const char*)ptr)[0];
    ((char*)&value)[1] = ((const char*)ptr)[1];
    ((char*)&value)[2] = ((const char*)ptr)[2];
    ((char*)&value)[3] = ((const char*)ptr)[3];
    return value;
#endif
}
@ -240,65 +242,63 @@ template <typename T>
EASY_FORCE_INLINE T unaligned_load32(const void* ptr, T* val)
{
static_assert(sizeof(T) == 4, "32 bit type required.");
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
*val = *(T*)ptr;
*val = *(const T*)ptr;
#else
((char*)&val)[0] = ((char*)ptr)[0];
((char*)&val)[1] = ((char*)ptr)[1];
((char*)&val)[2] = ((char*)ptr)[2];
((char*)&val)[3] = ((char*)ptr)[3];
return *val;
((char*)&val)[0] = ((const char*)ptr)[0];
((char*)&val)[1] = ((const char*)ptr)[1];
((char*)&val)[2] = ((const char*)ptr)[2];
((char*)&val)[3] = ((const char*)ptr)[3];
#endif
return *val;
}
/// Loads a 64-bit value from \p ptr, byte-by-byte when strict alignment is
/// required and the pointer is not suitably aligned.
template <typename T>
EASY_FORCE_INLINE T unaligned_load64(const void* ptr)
{
    static_assert(sizeof(T) == 8, "64 bit type required.");
#ifdef EASY_ENABLE_STRICT_ALIGNMENT
    if (!is_aligned<alignof(T)>(ptr)) {
        T value;
        ((char*)&value)[0] = ((const char*)ptr)[0];
        ((char*)&value)[1] = ((const char*)ptr)[1];
        ((char*)&value)[2] = ((const char*)ptr)[2];
        ((char*)&value)[3] = ((const char*)ptr)[3];
        ((char*)&value)[4] = ((const char*)ptr)[4];
        ((char*)&value)[5] = ((const char*)ptr)[5];
        ((char*)&value)[6] = ((const char*)ptr)[6];
        ((char*)&value)[7] = ((const char*)ptr)[7];
        return value;
    }
#endif
    return *(const T*)ptr;
}
/// Loads a 64-bit value from \p ptr into \p val and returns it.
template <typename T>
EASY_FORCE_INLINE T unaligned_load64(const void* ptr, T* val)
{
    static_assert(sizeof(T) == 8, "64 bit type required.");
#ifdef EASY_ENABLE_STRICT_ALIGNMENT
    if (!is_aligned<alignof(T)>(ptr)) {
        // BUGFIX: was ((char*)&val)[i], which rewrote the bytes of the
        // POINTER variable itself and never touched the pointee.
        ((char*)val)[0] = ((const char*)ptr)[0];
        ((char*)val)[1] = ((const char*)ptr)[1];
        ((char*)val)[2] = ((const char*)ptr)[2];
        ((char*)val)[3] = ((const char*)ptr)[3];
        ((char*)val)[4] = ((const char*)ptr)[4];
        ((char*)val)[5] = ((const char*)ptr)[5];
        ((char*)val)[6] = ((const char*)ptr)[6];
        ((char*)val)[7] = ((const char*)ptr)[7];
    }
    else
#endif
    *val = *(const T*)ptr;
    return *val;
}
//////////////////////////////////////////////////////////////////////////
@ -380,16 +380,19 @@ class chunk_allocator
};
// Used in serialize(): workaround for no constexpr support in MSVC 2013.
static const int_fast32_t MAX_CHUNK_OFFSET = N - sizeof(uint16_t);
static const uint16_t N_MINUS_ONE = N - 1;
EASY_STATIC_CONSTEXPR int_fast32_t MAX_CHUNK_OFFSET = N - sizeof(uint16_t);
EASY_STATIC_CONSTEXPR uint16_t N_MINUS_ONE = N - 1;
chunk_list m_chunks; ///< List of chunks.
uint32_t m_size; ///< Number of elements stored(# of times allocate() has been called.)
uint16_t m_chunkOffset; ///< Number of bytes used in the current chunk.
chunk_list m_chunks; ///< List of chunks.
const chunk* m_markedChunk; ///< Chunk marked by last closed frame
uint32_t m_size; ///< Number of elements stored(# of times allocate() has been called.)
uint32_t m_markedSize; ///< Number of elements to the moment when put_mark() has been called.
uint16_t m_chunkOffset; ///< Number of bytes used in the current chunk.
uint16_t m_markedChunkOffset; ///< Last byte in marked chunk for serializing.
public:

    /// Constructs an empty allocator: no frame mark, zero elements, zero offsets.
    chunk_allocator() : m_markedChunk(nullptr), m_size(0), m_markedSize(0), m_chunkOffset(0), m_markedChunkOffset(0)
    {
    }
@ -451,10 +454,22 @@ public:
return m_size == 0;
}
// Number of elements stored up to the moment put_mark() was called,
// i.e. how many elements serialize() will emit.
uint32_t markedSize() const
{
return m_markedSize;
}
// True if nothing has been stored up to the last put_mark() call.
bool markedEmpty() const
{
return m_markedSize == 0;
}
// Resets counters, offsets and the frame mark; previously allocated chunk
// memory (except the last chunk) is released by clear_all_except_last().
void clear()
{
m_size = 0;
m_markedSize = 0;
m_chunkOffset = 0;
m_markedChunk = nullptr;
m_chunks.clear_all_except_last(); // There is always at least one chunk
}
@ -479,11 +494,18 @@ public:
// too small to cary more than a zero-sized element.
chunk* current = m_chunks.last;
bool isMarked;
do {
isMarked = (current == m_markedChunk);
const char* data = current->data;
const int_fast32_t maxOffset = isMarked ? m_markedChunkOffset : MAX_CHUNK_OFFSET;
int_fast32_t chunkOffset = 0; // signed int so overflow is not checked.
uint16_t payloadSize = unaligned_load16<uint16_t>(data);
while (chunkOffset < MAX_CHUNK_OFFSET && payloadSize != 0) {
auto payloadSize = unaligned_load16<uint16_t>(data);
while (chunkOffset < maxOffset && payloadSize != 0)
{
const uint16_t chunkSize = sizeof(uint16_t) + payloadSize;
_outputStream.write(data, chunkSize);
data += chunkSize;
@ -492,11 +514,19 @@ public:
}
current = current->prev;
} while (current != nullptr);
} while (current != nullptr && !isMarked);
clear();
}
// Snapshots the current end of stored data (last chunk, byte offset and
// element count). serialize() stops at this snapshot, so elements stored
// after the mark (outside a closed frame) are not dumped.
void put_mark()
{
m_markedChunk = m_chunks.last;
m_markedSize = m_size;
m_markedChunkOffset = m_chunkOffset;
}
private:
chunk_allocator(const chunk_allocator&) = delete;

View File

@ -160,9 +160,9 @@ extern const uint32_t EASY_CURRENT_VERSION = EASY_VERSION_INT(EASY_PROFILER_VERS
//////////////////////////////////////////////////////////////////////////
// Profiler status values. After removing the intermediate DUMP state the
// status is effectively boolean (see m_profilerStatus / std::atomic_bool).
# define EASY_PROF_DISABLED false//0
# define EASY_PROF_ENABLED true//1
//# define EASY_PROF_DUMP 2
//////////////////////////////////////////////////////////////////////////
@ -644,7 +644,7 @@ ThreadGuard::~ThreadGuard()
{
bool isMarked = false;
EASY_EVENT_RES(isMarked, "ThreadFinished", EASY_COLOR_THREAD_END, ::profiler::FORCE_ON);
THIS_THREAD->profiledFrameOpened.store(false, std::memory_order_release);
THIS_THREAD->markProfilingFrameEnded();
THIS_THREAD->expired.store(isMarked ? 2 : 1, std::memory_order_release);
THIS_THREAD = nullptr;
}
@ -810,12 +810,13 @@ bool ProfileManager::storeBlock(const profiler::BaseBlockDescriptor* _desc, cons
if (state == EASY_PROF_DISABLED || (_desc->m_status & profiler::ON) == 0)
return false;
if (state == EASY_PROF_DUMP)
/*if (state == EASY_PROF_DUMP)
{
if (THIS_THREAD == nullptr || THIS_THREAD->halt)
return false;
}
else if (THIS_THREAD == nullptr)
else*/
if (THIS_THREAD == nullptr)
{
registerThread();
}
@ -837,12 +838,13 @@ bool ProfileManager::storeBlock(const profiler::BaseBlockDescriptor* _desc, cons
if (state == EASY_PROF_DISABLED || (_desc->m_status & profiler::ON) == 0)
return false;
if (state == EASY_PROF_DUMP)
/*if (state == EASY_PROF_DUMP)
{
if (THIS_THREAD == nullptr || THIS_THREAD->halt)
return false;
}
else if (THIS_THREAD == nullptr)
else*/
if (THIS_THREAD == nullptr)
{
registerThread();
}
@ -915,18 +917,19 @@ void ProfileManager::beginBlock(Block& _block)
bool empty = true;
const auto state = m_profilerStatus.load(std::memory_order_acquire);
switch (state)
{
case EASY_PROF_DISABLED:
if (state ==
//switch (state)
//{
EASY_PROF_DISABLED)
{
_block.m_status = profiler::OFF;
THIS_THREAD->halt = false;
//THIS_THREAD->halt = false;
THIS_THREAD->blocks.openedList.emplace_back(_block);
beginFrame();
return;
}
case EASY_PROF_DUMP:
/*case EASY_PROF_DUMP:
{
const bool halt = THIS_THREAD->halt;
if (halt || THIS_THREAD->blocks.openedList.empty())
@ -945,17 +948,18 @@ void ProfileManager::beginBlock(Block& _block)
empty = false;
break;
}
}*/
default:
//default:
else
{
empty = THIS_THREAD->blocks.openedList.empty();
break;
//break;
}
}
//}
THIS_THREAD->stackSize = 0;
THIS_THREAD->halt = false;
//THIS_THREAD->halt = false;
auto blockStatus = _block.m_status;
#if EASY_ENABLE_BLOCK_STATUS != 0
@ -981,7 +985,7 @@ void ProfileManager::beginBlock(Block& _block)
if (empty)
{
beginFrame();
THIS_THREAD->profiledFrameOpened.store(true, std::memory_order_release);
THIS_THREAD->markProfilingFrameStarted();
}
THIS_THREAD->blocks.openedList.emplace_back(_block);
@ -1017,7 +1021,7 @@ void ProfileManager::endBlock()
}
THIS_THREAD->stackSize = 0;
if (THIS_THREAD->halt || m_profilerStatus.load(std::memory_order_acquire) == EASY_PROF_DISABLED)
if (/*THIS_THREAD->halt ||*/ m_profilerStatus.load(std::memory_order_acquire) == EASY_PROF_DISABLED)
{
THIS_THREAD->popSilent();
endFrame();
@ -1046,7 +1050,7 @@ void ProfileManager::endBlock()
const bool empty = THIS_THREAD->blocks.openedList.empty();
if (empty)
{
THIS_THREAD->profiledFrameOpened.store(false, std::memory_order_release);
THIS_THREAD->markProfilingFrameEnded();
endFrame();
#if EASY_ENABLE_BLOCK_STATUS != 0
THIS_THREAD->allowChildren = true;
@ -1192,7 +1196,7 @@ void ProfileManager::setEnabled(bool isEnable)
auto time = getCurrentTime();
const auto status = isEnable ? EASY_PROF_ENABLED : EASY_PROF_DISABLED;
const auto prev = m_profilerStatus.exchange(status, std::memory_order_release);
const auto prev = m_profilerStatus.exchange(status, std::memory_order_acq_rel);
if (prev == status)
return;
@ -1286,24 +1290,19 @@ uint32_t ProfileManager::dumpBlocksToStream(profiler::OStream& _outputStream, bo
#endif
if (state == EASY_PROF_ENABLED) {
m_profilerStatus.store(EASY_PROF_DUMP, std::memory_order_release);
//m_profilerStatus.store(EASY_PROF_DUMP, std::memory_order_release);
m_profilerStatus.store(EASY_PROF_DISABLED, std::memory_order_release);
disableEventTracer();
m_endTime = getCurrentTime();
}
// This is to make sure that no new descriptors or new threads will be
// added until we finish sending data.
//m_spin.lock();
// This is the only place using both spins, so no dead-lock will occur
if (_async && m_stopDumping.load(std::memory_order_acquire))
{
if (_lockSpin)
m_dumpSpin.unlock();
return 0;
}
/*
// Wait for some time to be sure that all operations which began before setEnabled(false) will be finished.
// This is much better than inserting spin-lock or atomic variable check into each store operation.
std::this_thread::sleep_for(std::chrono::milliseconds(20));
@ -1345,9 +1344,13 @@ uint32_t ProfileManager::dumpBlocksToStream(profiler::OStream& _outputStream, bo
EASY_LOGMSG("All threads have closed frames\n");
EASY_LOGMSG("Disabled profiling\n");
*/
// This is to make sure that no new descriptors or new threads will be
// added until we finish sending data.
m_spin.lock();
m_storedSpin.lock();
// This is the only place using both spins, so no dead-lock will occur
// TODO: think about better solution because this one is not 100% safe...
const profiler::timestamp_t now = getCurrentTime();
@ -1421,7 +1424,7 @@ uint32_t ProfileManager::dumpBlocksToStream(profiler::OStream& _outputStream, bo
}
auto& thread = thread_it->second;
uint32_t num = static_cast<uint32_t>(thread.blocks.closedList.size()) + static_cast<uint32_t>(thread.sync.closedList.size());
uint32_t num = thread.blocks.closedList.markedSize() + thread.sync.closedList.size();
const char expired = ProfileManager::checkThreadExpired(thread);
#ifdef _WIN32
@ -1523,8 +1526,8 @@ uint32_t ProfileManager::dumpBlocksToStream(profiler::OStream& _outputStream, bo
if (!thread.sync.closedList.empty())
thread.sync.closedList.serialize(_outputStream);
_outputStream.write(thread.blocks.closedList.size());
if (!thread.blocks.closedList.empty())
_outputStream.write(thread.blocks.closedList.markedSize());
if (!thread.blocks.closedList.markedEmpty())
thread.blocks.closedList.serialize(_outputStream);
thread.clearClosed();
@ -1680,8 +1683,6 @@ void ProfileManager::stopListen()
if (m_listenThread.joinable())
m_listenThread.join();
m_isAlreadyListening.store(false, std::memory_order_release);
EASY_LOGMSG("Listening stopped\n");
}
bool ProfileManager::isListening() const
@ -1873,7 +1874,8 @@ void ProfileManager::listen(uint16_t _port)
m_dumpSpin.lock();
auto time = getCurrentTime();
const auto prev = m_profilerStatus.exchange(EASY_PROF_DUMP, std::memory_order_release);
//const auto prev = m_profilerStatus.exchange(EASY_PROF_DUMP, std::memory_order_release);
const auto prev = m_profilerStatus.exchange(EASY_PROF_DISABLED, std::memory_order_acq_rel);
if (prev == EASY_PROF_ENABLED) {
disableEventTracer();
m_endTime = time;
@ -2009,6 +2011,8 @@ void ProfileManager::listen(uint16_t _port)
m_stopDumping.store(true, std::memory_order_release);
join(dumpingResult);
}
EASY_LOGMSG("Listening stopped\n");
}
//////////////////////////////////////////////////////////////////////////

View File

@ -104,7 +104,7 @@ class ProfileManager
profiler::spin_lock m_storedSpin;
profiler::spin_lock m_dumpSpin;
std::atomic<profiler::thread_id_t> m_mainThreadId;
std::atomic_bool m_profilerStatus; ///< Boolean since the DUMP state was removed: EASY_PROF_DISABLED / EASY_PROF_ENABLED
std::atomic_bool m_isEventTracingEnabled;
std::atomic_bool m_isAlreadyListening;
std::atomic_bool m_frameMaxReset;

View File

@ -53,7 +53,7 @@ ThreadStorage::ThreadStorage()
, named(false)
, guarded(false)
, frameOpened(false)
, halt(false)
//, halt(false)
{
expired = ATOMIC_VAR_INIT(0);
profiledFrameOpened = ATOMIC_VAR_INIT(false);
@ -69,7 +69,7 @@ void ThreadStorage::storeValue(profiler::timestamp_t _timestamp, profiler::block
char* cdata = reinterpret_cast<char*>(data);
memcpy(cdata + sizeof(profiler::ArbitraryValue), _data, _size);
blocks.usedMemorySize += serializedDataSize;
blocks.frameMemorySize += serializedDataSize;
}
void ThreadStorage::storeBlock(const profiler::Block& block)
@ -98,7 +98,7 @@ void ThreadStorage::storeBlock(const profiler::Block& block)
#endif
::new (data) profiler::SerializedBlock(block, nameLength);
blocks.usedMemorySize += serializedDataSize;
blocks.frameMemorySize += serializedDataSize;
#if EASY_OPTION_MEASURE_STORAGE_EXPAND != 0
if (expanded)
@ -109,7 +109,7 @@ void ThreadStorage::storeBlock(const profiler::Block& block)
serializedDataSize = static_cast<uint16_t>(sizeof(profiler::BaseBlockData) + 1);
data = blocks.closedList.allocate(serializedDataSize);
::new (data) profiler::SerializedBlock(b, 0);
blocks.usedMemorySize += serializedDataSize;
blocks.frameMemorySize += serializedDataSize;
}
#endif
}
@ -155,3 +155,16 @@ profiler::timestamp_t ThreadStorage::endFrame()
frameOpened = false;
return getCurrentTime() - frameStartTime;
}
// Publishes (release ordering) that a profiled frame is now open on this thread.
void ThreadStorage::markProfilingFrameStarted()
{
profiledFrameOpened.store(true, std::memory_order_release);
}
// Called when a profiled frame closes: publishes the flag, snapshots the
// serialization boundary (put_mark) so only fully-closed-frame data is
// dumped, and folds this frame's bytes into the total used size.
void ThreadStorage::markProfilingFrameEnded()
{
profiledFrameOpened.store(false, std::memory_order_release);
blocks.closedList.put_mark();
blocks.usedMemorySize += blocks.frameMemorySize;
blocks.frameMemorySize = 0;
}

View File

@ -63,10 +63,12 @@ struct BlocksList
std::vector<T> openedList;
chunk_allocator<N> closedList;
uint64_t usedMemorySize = 0;
uint64_t frameMemorySize = 0;
// Resets memory accounting for the closed-blocks list.
// NOTE(review): closedList.clear() is deliberately commented out —
// presumably the stored chunks must survive beyond this call (the list is
// serialized up to its mark elsewhere); confirm before re-enabling.
void clearClosed() {
//closedList.clear();
usedMemorySize = 0;
frameMemorySize = 0;
}
private:
@ -90,8 +92,8 @@ public:
//////////////////////////////////////////////////////////////////////////
// SerializedBlock stores BaseBlockData + at least 1 character for name ('\0') + 2 bytes for size of serialized data
EASY_CONSTEXPR uint16_t SIZEOF_BLOCK = sizeof(profiler::BaseBlockData) + 1 + sizeof(uint16_t);
// SerializedCSwitch also stores additional 4 bytes to be able to save 64-bit thread_id
EASY_CONSTEXPR uint16_t SIZEOF_CSWITCH = sizeof(profiler::CSwitchEvent) + 1 + sizeof(uint16_t);
struct ThreadStorage EASY_FINAL
{
@ -109,7 +111,7 @@ struct ThreadStorage EASY_FINAL
bool named; ///< True if thread name was set
bool guarded; ///< True if thread has been registered using ThreadGuard
bool frameOpened; ///< Is new frame opened (this does not depend on profiling status) \sa profiledFrameOpened
bool halt; ///< This is set to true when new frame started while dumping blocks. Used to restrict collecting blocks during dumping process.
//bool halt; ///< This is set to true when new frame started while dumping blocks. Used to restrict collecting blocks during dumping process.
void storeValue(profiler::timestamp_t _timestamp, profiler::block_id_t _id, profiler::DataType _type, const void* _data, size_t _size, bool _isArray, profiler::ValueId _vin);
void storeBlock(const profiler::Block& _block);
@ -119,6 +121,8 @@ struct ThreadStorage EASY_FINAL
void beginFrame();
profiler::timestamp_t endFrame();
void markProfilingFrameStarted();
void markProfilingFrameEnded();
ThreadStorage();