Mirror of https://github.com/yse/easy_profiler.git

(Core) Update #29 - added non-scoped block functionality for beginning and ending block manually from different functions.

(Core) Added new API functions for getting current time (ticks) and converting it to nano- and microseconds.
Victor Zarubkin 2017-04-17 22:27:10 +03:00
parent c93464968e
commit c2b3a8f5dc
5 changed files with 384 additions and 77 deletions
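For illustration, here is a minimal usage sketch of the functionality added by this commit, based on the macros and C API functions introduced in the diff below (the surrounding functions and names are hypothetical):

    #include <easy/profiler.h>

    void onRequestStarted()
    {
        EASY_NONSCOPED_BLOCK("Request"); // block stays open after this function returns
    }

    void onRequestFinished()
    {
        EASY_END_BLOCK; // ends the last opened block, wherever it was begun
    }

    void reportElapsed(profiler::timestamp_t startTicks)
    {
        const profiler::timestamp_t elapsedUs =
            profiler::toMicroseconds(profiler::currentTime() - startTicks);
        (void)elapsedUs; // e.g. log the elapsed time in microseconds
    }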


@@ -74,14 +74,17 @@ Block::Block(Block&& that)
: BaseBlockData(that.m_begin, that.m_id)
, m_name(that.m_name)
, m_status(that.m_status)
, m_isScoped(that.m_isScoped)
{
m_end = that.m_end;
that.m_end = that.m_begin;
}
Block::Block(timestamp_t _begin_time, block_id_t _descriptor_id, const char* _runtimeName)
: BaseBlockData(_begin_time, _descriptor_id)
, m_name(_runtimeName)
, m_status(::profiler::ON)
, m_isScoped(true)
{
}
@@ -90,14 +93,16 @@ Block::Block(timestamp_t _begin_time, timestamp_t _end_time, block_id_t _descrip
: BaseBlockData(_begin_time, _end_time, _descriptor_id)
, m_name(_runtimeName)
, m_status(::profiler::ON)
, m_isScoped(true)
{
}
Block::Block(const BaseBlockDescriptor* _descriptor, const char* _runtimeName)
Block::Block(const BaseBlockDescriptor* _descriptor, const char* _runtimeName, bool _scoped)
: BaseBlockData(1ULL, _descriptor->id())
, m_name(_runtimeName)
, m_status(_descriptor->status())
, m_isScoped(_scoped)
{
}
@@ -136,10 +141,11 @@ BaseBlockData::BaseBlockData(timestamp_t, block_id_t)
}
Block::Block(Block&&)
Block::Block(Block&& that)
: BaseBlockData(0, ~0U)
, m_name("")
, m_status(::profiler::OFF)
, m_isScoped(that.m_isScoped)
{
}
@@ -147,14 +153,25 @@ Block::Block(timestamp_t, block_id_t, const char*)
: BaseBlockData(0, ~0U)
, m_name("")
, m_status(::profiler::OFF)
, m_isScoped(true)
{
}
Block::Block(const BaseBlockDescriptor*, const char*)
Block::Block(timestamp_t, timestamp_t, block_id_t, const char*)
: BaseBlockData(0, ~0U)
, m_name("")
, m_status(::profiler::OFF)
, m_isScoped(true)
{
}
Block::Block(const BaseBlockDescriptor*, const char*, bool _scoped)
: BaseBlockData(0, ~0U)
, m_name("")
, m_status(::profiler::OFF)
, m_isScoped(_scoped)
{
}


@@ -75,7 +75,7 @@ The Apache License, Version 2.0 (the "License");
// EasyProfiler core API:
/** Macro for beginning of a block with custom name and color.
/** Macro for beginning of a scoped block with custom name and color.
\code
#include <easy/profiler.h>
@@ -113,7 +113,44 @@ Block will be automatically completed by destructor.
EASY_UNIQUE_LINE_ID, EASY_COMPILETIME_NAME(name), __FILE__, __LINE__, ::profiler::BLOCK_TYPE_BLOCK, ::profiler::extract_color(__VA_ARGS__),\
::std::is_base_of<::profiler::ForceConstStr, decltype(name)>::value));\
::profiler::Block EASY_UNIQUE_BLOCK(__LINE__)(EASY_UNIQUE_DESC(__LINE__), EASY_RUNTIME_NAME(name));\
::profiler::beginBlock(EASY_UNIQUE_BLOCK(__LINE__)); // this is to avoid compiler warning about unused variable
::profiler::beginBlock(EASY_UNIQUE_BLOCK(__LINE__));
/** Macro for beginning of a non-scoped block with custom name and color.
You must end such block manually with EASY_END_BLOCK.
\code
#include <easy/profiler.h>
void foo() {
EASY_NONSCOPED_BLOCK("Callback"); // Begin block which would not be finished when function returns.
// some code ...
}
void bar() {
// some another code...
EASY_END_BLOCK; // This, as always, ends the last opened block. You have to take care of block ordering yourself.
}
void baz() {
foo(); // non-scoped block begins here
// some code...
bar(); // non-scoped block ends here
}
\endcode
The block is not completed automatically - you must end it with EASY_END_BLOCK.
\ingroup profiler
*/
#define EASY_NONSCOPED_BLOCK(name, ...)\
EASY_LOCAL_STATIC_PTR(const ::profiler::BaseBlockDescriptor*, EASY_UNIQUE_DESC(__LINE__), ::profiler::registerDescription(::profiler::extract_enable_flag(__VA_ARGS__),\
EASY_UNIQUE_LINE_ID, EASY_COMPILETIME_NAME(name), __FILE__, __LINE__, ::profiler::BLOCK_TYPE_BLOCK, ::profiler::extract_color(__VA_ARGS__),\
::std::is_base_of<::profiler::ForceConstStr, decltype(name)>::value));\
::profiler::beginNonScopedBlock(EASY_UNIQUE_DESC(__LINE__), EASY_RUNTIME_NAME(name));
/** Macro for beginning of a block with function name and custom color.
@@ -350,6 +387,7 @@ Otherwise, no log messages will be printed.
#else // #ifdef BUILD_WITH_EASY_PROFILER
# define EASY_BLOCK(...)
# define EASY_NONSCOPED_BLOCK(...)
# define EASY_FUNCTION(...)
# define EASY_END_BLOCK
# define EASY_PROFILER_ENABLE
@@ -395,6 +433,7 @@ Otherwise, no log messages will be printed.
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
class NonscopedBlock;
class ProfileManager;
struct ThreadStorage;
@@ -486,13 +525,15 @@ namespace profiler {
//***********************************************
class PROFILER_API Block EASY_FINAL : public BaseBlockData
class PROFILER_API Block : public BaseBlockData
{
friend ::ProfileManager;
friend ::ThreadStorage;
friend ::NonscopedBlock;
const char* m_name;
EasyBlockStatus m_status;
bool m_isScoped;
private:
@@ -507,7 +548,7 @@ namespace profiler {
public:
Block(Block&& that);
Block(const BaseBlockDescriptor* _desc, const char* _runtimeName);
Block(const BaseBlockDescriptor* _desc, const char* _runtimeName, bool _scoped = true);
Block(timestamp_t _begin_time, block_id_t _id, const char* _runtimeName);
Block(timestamp_t _begin_time, timestamp_t _end_time, block_id_t _id, const char* _runtimeName);
~Block();
@@ -537,11 +578,34 @@ namespace profiler {
#ifdef BUILD_WITH_EASY_PROFILER
extern "C" {
/** Returns current time in ticks.
You can use it if you want to store a block explicitly.
\ingroup profiler
*/
PROFILER_API timestamp_t currentTime();
/** Convert ticks to nanoseconds.
\ingroup profiler
*/
PROFILER_API timestamp_t toNanoseconds(timestamp_t _ticks);
/** Convert ticks to microseconds.
\ingroup profiler
*/
PROFILER_API timestamp_t toMicroseconds(timestamp_t _ticks);
/** Registers static description of a block.
It is general information which is common for all such blocks.
Includes color, block type (see BlockType), file-name, line-number, compile-time name of a block and enable-flag.
\note This API function is used by EASY_EVENT, EASY_BLOCK, EASY_FUNCTION macros.
There is no need to invoke this function explicitly.
\ingroup profiler
*/
PROFILER_API const BaseBlockDescriptor* registerDescription(EasyBlockStatus _status, const char* _autogenUniqueId, const char* _compiletimeName, const char* _filename, int _line, block_type_t _block_type, color_t _color, bool _copyName = false);
@@ -550,19 +614,56 @@ namespace profiler {
An event ends instantly and has zero duration.
\note There is no need to invoke this function explicitly - use EASY_EVENT macro instead.
\param _desc Reference to the previously registered description.
\param _runtimeName Standard zero-terminated string which will be copied to the events buffer.
\note _runtimeName must be an empty string ("") if you do not want to set a run-time name for the event.
\ingroup profiler
*/
PROFILER_API void storeEvent(const BaseBlockDescriptor* _desc, const char* _runtimeName);
PROFILER_API void storeEvent(const BaseBlockDescriptor* _desc, const char* _runtimeName = "");
/** Begins block.
/** Stores a block explicitly in the blocks list.
Use this function for additional flexibility if you want to set the block duration manually.
\param _desc Reference to the previously registered description.
\param _runtimeName Standard zero-terminated string which will be copied to the events buffer.
\param _beginTime Begin time of the block.
\param _endTime End time of the block.
\note _runtimeName must be an empty string ("") if you do not want to set a run-time name for the block.
\ingroup profiler
*/
PROFILER_API void storeBlock(const BaseBlockDescriptor* _desc, const char* _runtimeName, timestamp_t _beginTime, timestamp_t _endTime);
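/** For illustration (editorial example, not part of this commit): a hypothetical sketch of
storing a block with a manually measured duration using currentTime() and storeBlock().
The descriptor registration mirrors what the EASY_* macros do internally; doSomeWork() is a
placeholder, and profiler::colors::Default is assumed to be available from easy/profiler_colors.h.
\code
void measureManually()
{
    static const ::profiler::BaseBlockDescriptor* desc = ::profiler::registerDescription(
        ::profiler::ON, "manual_block_unique_id", "ManualBlock", __FILE__, __LINE__,
        ::profiler::BLOCK_TYPE_BLOCK, ::profiler::colors::Default);

    const ::profiler::timestamp_t beginTime = ::profiler::currentTime();
    doSomeWork(); // the code being measured (placeholder)
    const ::profiler::timestamp_t endTime = ::profiler::currentTime();

    // An empty run-time name ("") keeps the compile-time name "ManualBlock".
    ::profiler::storeBlock(desc, "", beginTime, endTime);
}
\endcode
*/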
/** Begins scoped block.
\ingroup profiler
*/
PROFILER_API void beginBlock(Block& _block);
/** Begins non-scoped block.
\param _desc Reference to the previously registered description (see registerDescription).
\param _runtimeName Standard zero-terminated string which will be copied to the blocks buffer when the block ends.
\note There is no need to invoke this function explicitly - use the EASY_NONSCOPED_BLOCK macro instead.
The EASY_NONSCOPED_BLOCK macro gives you the flexibility to begin a block in one function and end it in another.
\note _runtimeName must be an empty string ("") if you do not want to set a run-time name for the block.
\note _runtimeName is copied only when the block ends, so you must ensure its validity until then.
\warning You have to end this block explicitly.
\ingroup profiler
*/
PROFILER_API void beginNonScopedBlock(const BaseBlockDescriptor* _desc, const char* _runtimeName = "");
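/** For illustration (editorial example, not part of this commit): a hypothetical sketch of using
the C API directly instead of the EASY_NONSCOPED_BLOCK / EASY_END_BLOCK macros. The unique id
string "callback_block_id" is illustrative, and profiler::colors::Default is assumed to be
available from easy/profiler_colors.h.
\code
static const ::profiler::BaseBlockDescriptor* callbackDesc = ::profiler::registerDescription(
    ::profiler::ON, "callback_block_id", "Callback", __FILE__, __LINE__,
    ::profiler::BLOCK_TYPE_BLOCK, ::profiler::colors::Default);

void onCallbackBegin()
{
    ::profiler::beginNonScopedBlock(callbackDesc); // default _runtimeName is ""
}

void onCallbackEnd()
{
    ::profiler::endBlock(); // ends the non-scoped block begun in onCallbackBegin()
}
\endcode
*/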
/** Ends the last started block.
Use this only if you want to finish a block explicitly.
@@ -723,13 +824,18 @@ namespace profiler {
}
#else
inline timestamp_t currentTime() { return 0; }
inline timestamp_t toNanoseconds(timestamp_t) { return 0; }
inline timestamp_t toMicroseconds(timestamp_t) { return 0; }
inline const BaseBlockDescriptor* registerDescription(EasyBlockStatus, const char*, const char*, const char*, int, block_type_t, color_t, bool = false)
{ return reinterpret_cast<const BaseBlockDescriptor*>(0xbad); }
inline void endBlock() { }
inline void setEnabled(bool) { }
inline bool isEnabled() { return false; }
inline void storeEvent(const BaseBlockDescriptor*, const char*) { }
inline void storeEvent(const BaseBlockDescriptor*, const char* = "") { }
inline void storeBlock(const BaseBlockDescriptor*, const char*, timestamp_t, timestamp_t) { }
inline void beginBlock(Block&) { }
inline void beginNonScopedBlock(const BaseBlockDescriptor*, const char* = "") { }
inline uint32_t dumpBlocksToFile(const char*) { return 0; }
inline const char* registerThreadScoped(const char*, ThreadGuard&) { return ""; }
inline const char* registerThread(const char*) { return ""; }


@@ -248,6 +248,33 @@ EASY_THREAD_LOCAL static bool THIS_THREAD_FRAME_T_RESET_AVG = false;
extern "C" {
#if !defined(EASY_PROFILER_API_DISABLED)
PROFILER_API timestamp_t currentTime()
{
return getCurrentTime();
}
PROFILER_API timestamp_t toNanoseconds(timestamp_t _ticks)
{
#ifdef _WIN32
return _ticks * 1000000000LL / CPU_FREQUENCY;
#elif defined(USE_STD_CHRONO)
return _ticks;
#else
return _ticks / CPU_FREQUENCY.load(std::memory_order_acquire);
#endif
}
PROFILER_API timestamp_t toMicroseconds(timestamp_t _ticks)
{
#ifdef _WIN32
return _ticks * 1000000LL / CPU_FREQUENCY;
#elif defined(USE_STD_CHRONO)
return _ticks / 1000;
#else
return _ticks * 1000 / CPU_FREQUENCY.load(std::memory_order_acquire);
#endif
}
PROFILER_API const BaseBlockDescriptor* registerDescription(EasyBlockStatus _status, const char* _autogenUniqueId, const char* _name, const char* _filename, int _line, block_type_t _block_type, color_t _color, bool _copyName)
{
return MANAGER.addBlockDescriptor(_status, _autogenUniqueId, _name, _filename, _line, _block_type, _color, _copyName);
@@ -273,11 +300,21 @@ extern "C" {
MANAGER.storeBlock(_desc, _runtimeName);
}
PROFILER_API void storeBlock(const BaseBlockDescriptor* _desc, const char* _runtimeName, timestamp_t _beginTime, timestamp_t _endTime)
{
MANAGER.storeBlock(_desc, _runtimeName, _beginTime, _endTime);
}
PROFILER_API void beginBlock(Block& _block)
{
MANAGER.beginBlock(_block);
}
PROFILER_API void beginNonScopedBlock(const BaseBlockDescriptor* _desc, const char* _runtimeName)
{
MANAGER.beginNonScopedBlock(_desc, _runtimeName);
}
PROFILER_API uint32_t dumpBlocksToFile(const char* filename)
{
return MANAGER.dumpBlocksToFile(filename);
@@ -412,12 +449,17 @@ extern "C" {
}
#else
PROFILER_API timestamp_t currentTime() { return 0; }
PROFILER_API timestamp_t toNanoseconds(timestamp_t) { return 0; }
PROFILER_API timestamp_t toMicroseconds(timestamp_t) { return 0; }
PROFILER_API const BaseBlockDescriptor* registerDescription(EasyBlockStatus, const char*, const char*, const char*, int, block_type_t, color_t, bool) { return reinterpret_cast<const BaseBlockDescriptor*>(0xbad); }
PROFILER_API void endBlock() { }
PROFILER_API void setEnabled(bool) { }
PROFILER_API bool isEnabled() { return false; }
PROFILER_API void storeEvent(const BaseBlockDescriptor*, const char*) { }
PROFILER_API void storeBlock(const BaseBlockDescriptor*, const char*, timestamp_t, timestamp_t) { }
PROFILER_API void beginBlock(Block&) { }
PROFILER_API void beginNonScopedBlock(const BaseBlockDescriptor*, const char*) { }
PROFILER_API uint32_t dumpBlocksToFile(const char*) { return 0; }
PROFILER_API const char* registerThreadScoped(const char*, ThreadGuard&) { return ""; }
PROFILER_API const char* registerThread(const char*) { return ""; }
@@ -548,7 +590,29 @@ public:
//////////////////////////////////////////////////////////////////////////
ThreadStorage::ThreadStorage() : id(getCurrentThreadId()), allowChildren(true), named(false), guarded(false)
NonscopedBlock::NonscopedBlock(const profiler::BaseBlockDescriptor* _desc, const char* _runtimeName, bool)
: profiler::Block(_desc, _runtimeName, false)
{
}
NonscopedBlock::~NonscopedBlock()
{
m_end = m_begin; // prevents profiler::Block from invoking profiler::endBlock() in its destructor.
}
void NonscopedBlock::copyname()
{
if ((m_status & profiler::ON) != 0 && m_name[0] != 0)
{
m_runtimeName = m_name;
m_name = m_runtimeName.c_str();
}
}
//////////////////////////////////////////////////////////////////////////
ThreadStorage::ThreadStorage() : nonscopedBlocks(16), id(getCurrentThreadId()), allowChildren(true), named(false), guarded(false)
#ifndef _WIN32
, pthread_id(pthread_self())
#endif
@@ -619,9 +683,11 @@ void ThreadStorage::popSilent()
{
if (!blocks.openedList.empty())
{
Block& top = blocks.openedList.top();
Block& top = blocks.openedList.back();
top.m_end = top.m_begin;
blocks.openedList.pop();
if (!top.m_isScoped)
nonscopedBlocks.pop();
blocks.openedList.pop_back();
}
}
@@ -804,6 +870,34 @@ bool ProfileManager::storeBlock(const profiler::BaseBlockDescriptor* _desc, cons
return true;
}
bool ProfileManager::storeBlock(const profiler::BaseBlockDescriptor* _desc, const char* _runtimeName, profiler::timestamp_t _beginTime, profiler::timestamp_t _endTime)
{
const auto state = m_profilerStatus.load(std::memory_order_acquire);
if (state == EASY_PROF_DISABLED || !(_desc->m_status & profiler::ON))
return false;
if (state == EASY_PROF_DUMP)
{
if (THIS_THREAD == nullptr || THIS_THREAD->blocks.openedList.empty())
return false;
}
else if (THIS_THREAD == nullptr)
{
THIS_THREAD = &threadStorage(getCurrentThreadId());
}
#if EASY_ENABLE_BLOCK_STATUS != 0
if (!THIS_THREAD->allowChildren && !(_desc->m_status & FORCE_ON_FLAG))
return false;
#endif
profiler::Block b(_beginTime, _endTime, _desc->id(), _runtimeName);
THIS_THREAD->storeBlock(b);
b.m_end = b.m_begin;
return true;
}
//////////////////////////////////////////////////////////////////////////
void ProfileManager::storeBlockForce(const profiler::BaseBlockDescriptor* _desc, const char* _runtimeName, ::profiler::timestamp_t& _timestamp)
@@ -863,7 +957,7 @@ void ProfileManager::beginBlock(Block& _block)
if (++THIS_THREAD_STACK_SIZE > 1)
{
_block.m_status = profiler::OFF;
THIS_THREAD->blocks.openedList.emplace(_block);
THIS_THREAD->blocks.openedList.emplace_back(_block);
return;
}
@@ -872,7 +966,7 @@ void ProfileManager::beginBlock(Block& _block)
{
THIS_THREAD_HALT = false;
_block.m_status = profiler::OFF;
THIS_THREAD->blocks.openedList.emplace(_block);
THIS_THREAD->blocks.openedList.emplace_back(_block);
beginFrame();
return;
}
@@ -883,7 +977,7 @@ void ProfileManager::beginBlock(Block& _block)
if (THIS_THREAD_HALT || THIS_THREAD->blocks.openedList.empty())
{
_block.m_status = profiler::OFF;
THIS_THREAD->blocks.openedList.emplace(_block);
THIS_THREAD->blocks.openedList.emplace_back(_block);
if (!THIS_THREAD_HALT)
{
@@ -930,7 +1024,17 @@ void ProfileManager::beginBlock(Block& _block)
THIS_THREAD->frame.store(true, std::memory_order_release);
}
THIS_THREAD->blocks.openedList.emplace(_block);
THIS_THREAD->blocks.openedList.emplace_back(_block);
}
void ProfileManager::beginNonScopedBlock(const profiler::BaseBlockDescriptor* _desc, const char* _runtimeName)
{
if (THIS_THREAD == nullptr)
THIS_THREAD = &threadStorage(getCurrentThreadId());
NonscopedBlock& b = THIS_THREAD->nonscopedBlocks.push(_desc, _runtimeName, false);
beginBlock(b);
b.copyname();
}
void ProfileManager::beginContextSwitch(profiler::thread_id_t _thread_id, profiler::timestamp_t _time, profiler::thread_id_t _target_thread_id, const char* _target_process, bool _lockSpin)
@@ -939,7 +1043,7 @@ void ProfileManager::beginContextSwitch(profiler::thread_id_t _thread_id, profil
if (ts != nullptr)
// Dirty hack: _target_thread_id will be written to the field "block_id_t m_id"
// and will be available calling method id().
ts->sync.openedList.emplace(_time, _time, _target_thread_id, _target_process);
ts->sync.openedList.emplace_back(_time, _time, _target_thread_id, _target_process);
}
//////////////////////////////////////////////////////////////////////////
@@ -952,6 +1056,7 @@ void ProfileManager::endBlock()
return;
}
THIS_THREAD_STACK_SIZE = 0;
if (THIS_THREAD_HALT || m_profilerStatus.load(std::memory_order_acquire) == EASY_PROF_DISABLED)
{
THIS_THREAD->popSilent();
@@ -959,11 +1064,10 @@ void ProfileManager::endBlock()
return;
}
THIS_THREAD_STACK_SIZE = 0;
if (THIS_THREAD->blocks.openedList.empty())
return;
Block& top = THIS_THREAD->blocks.openedList.top();
Block& top = THIS_THREAD->blocks.openedList.back();
if (top.m_status & profiler::ON)
{
if (!top.finished())
@@ -975,7 +1079,10 @@ void ProfileManager::endBlock()
top.m_end = top.m_begin; // this prevents an endBlock() call inside ~Block()
}
THIS_THREAD->blocks.openedList.pop();
if (!top.m_isScoped)
THIS_THREAD->nonscopedBlocks.pop();
THIS_THREAD->blocks.openedList.pop_back();
const bool empty = THIS_THREAD->blocks.openedList.empty();
if (empty)
{
@@ -986,7 +1093,7 @@ void ProfileManager::endBlock()
}
else
{
THIS_THREAD->allowChildren = !(THIS_THREAD->blocks.openedList.top().get().m_status & profiler::OFF_RECURSIVE);
THIS_THREAD->allowChildren = !(THIS_THREAD->blocks.openedList.back().get().m_status & profiler::OFF_RECURSIVE);
}
#else
}
@@ -1006,11 +1113,11 @@ void ProfileManager::endContextSwitch(profiler::thread_id_t _thread_id, processi
if (ts == nullptr || ts->sync.openedList.empty())
return;
Block& lastBlock = ts->sync.openedList.top();
Block& lastBlock = ts->sync.openedList.back();
lastBlock.m_end = _endtime;
ts->storeCSwitch(lastBlock);
ts->sync.openedList.pop();
ts->sync.openedList.pop_back();
}
//////////////////////////////////////////////////////////////////////////


@@ -55,7 +55,7 @@ The Apache License, Version 2.0 (the "License");
#include <unordered_map>
#include <thread>
#include <atomic>
//#include <list>
#include <list>
//////////////////////////////////////////////////////////////////////////
@@ -69,6 +69,10 @@ The Apache License, Version 2.0 (the "License");
#include <time.h>
#endif
#ifdef max
#undef max
#endif
inline uint32_t getCurrentThreadId()
{
#ifdef _WIN32
@@ -159,6 +163,10 @@ class chunk_allocator
*(uint16_t*)last->data = 0;
}
/** Invert the current chunks list so that it can be iterated in direct order.
This method is used by serialize().
*/
void invert()
{
chunk* next = nullptr;
@@ -187,6 +195,11 @@ public:
m_chunks.emplace_back();
}
/** Allocate n bytes.
Automatically checks if there is enough reserved memory to store an additional n bytes
and allocates an additional chunk if needed.
*/
void* allocate(uint16_t n)
{
++m_size;
@@ -216,6 +229,8 @@ public:
return data;
}
/** Check whether the current chunk does not have enough free space to store an additional n bytes.
*/
inline bool need_expand(uint16_t n) const
{
return (m_shift + n + sizeof(uint16_t)) > N;
@@ -245,8 +260,11 @@ public:
*/
void serialize(profiler::OStream& _outputStream)
{
// Chunks are stored in reversed order (stack).
// To be able to iterate them in direct order we have to invert the chunks list.
m_chunks.invert();
// Iterate over chunks and perform blocks serialization
auto current = m_chunks.last;
do {
const int8_t* data = current->data;
@@ -262,55 +280,101 @@ public:
clear();
}
};
}; // END of class chunk_allocator.
//////////////////////////////////////////////////////////////////////////
const uint16_t SIZEOF_CSWITCH = sizeof(profiler::BaseBlockData) + 1 + sizeof(uint16_t);
class NonscopedBlock : public profiler::Block
{
std::string m_runtimeName; ///< A copy of _runtimeName to make it safe to begin a block in one function and end it in another
typedef std::vector<profiler::SerializedBlock*> serialized_list_t;
NonscopedBlock() = delete;
NonscopedBlock(const NonscopedBlock&) = delete;
NonscopedBlock(NonscopedBlock&&) = delete;
NonscopedBlock& operator = (const NonscopedBlock&) = delete;
NonscopedBlock& operator = (NonscopedBlock&&) = delete;
public:
NonscopedBlock(const profiler::BaseBlockDescriptor* _desc, const char* _runtimeName, bool = false);
~NonscopedBlock();
/** Copy string from m_name to m_runtimeName to make it safe to end the block in another function.
Performs work only if the block is ON and m_name != ""
*/
void copyname();
}; // END of class NonscopedBlock.
//////////////////////////////////////////////////////////////////////////
template <class T>
class StackBuffer
{
struct chunk { int8_t data[sizeof(T)]; };
std::list<chunk> m_overflow; ///< List of additional stack elements if current capacity of buffer is not enough
T* m_buffer; ///< Contiguous buffer used for stack
uint32_t m_size; ///< Current size of stack
uint32_t m_capacity; ///< Current capacity of m_buffer
uint32_t m_maxcapacity; ///< Maximum used capacity including m_buffer and m_overflow
public:
StackBuffer(uint32_t N) : m_buffer((T*)malloc(N * sizeof(T))), m_size(0), m_capacity(N), m_maxcapacity(N)
{
}
~StackBuffer()
{
free(m_buffer);
}
template <class ... TArgs>
T& push(TArgs ... _args)
{
if (m_size < m_capacity)
return *(::new (m_buffer + m_size++) T(_args...));
m_overflow.emplace_back();
const uint32_t cap = m_capacity + (uint32_t)m_overflow.size();
if (m_maxcapacity < cap)
m_maxcapacity = cap;
return *(::new (m_overflow.back().data + 0) T(_args...));
}
void pop()
{
if (m_overflow.empty())
{
// m_size should not be equal to 0 here because ProfileManager behavior does not allow such a situation
if (--m_size == 0 && m_maxcapacity > m_capacity)
{
// When the stack becomes empty we can resize the buffer so that it has enough space in the future
free(m_buffer);
m_maxcapacity = m_capacity = std::max(m_maxcapacity, m_capacity << 1);
m_buffer = (T*)malloc(m_capacity * sizeof(T));
}
return;
}
m_overflow.pop_back();
}
}; // END of class StackBuffer.
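/* For illustration (editorial example, not part of this commit): a minimal, hypothetical use of
   the StackBuffer above. In the profiler itself the buffer holds NonscopedBlock objects (see
   ThreadStorage::nonscopedBlocks below); the Item struct here is purely illustrative.

   struct Item {
       int value;
       explicit Item(int v) : value(v) { }
   };

   void stackBufferExample()
   {
       StackBuffer<Item> stack(4);   // contiguous buffer with room for 4 items
       for (int i = 0; i < 6; ++i)
           stack.push(i);            // the 5th and 6th items go to the overflow list
       for (int i = 0; i < 6; ++i)
           stack.pop();              // the buffer is regrown once the stack becomes empty
   }
*/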
//////////////////////////////////////////////////////////////////////////
template <class T, const uint16_t N>
struct BlocksList
{
BlocksList() = default;
class Stack {
//std::stack<T> m_stack;
std::vector<T> m_stack;
public:
inline void clear() { m_stack.clear(); }
inline bool empty() const { return m_stack.empty(); }
inline void emplace(profiler::Block& _block) {
//m_stack.emplace(_block);
m_stack.emplace_back(_block);
}
inline void emplace(profiler::Block&& _block) {
//m_stack.emplace(_block);
m_stack.emplace_back(std::forward<profiler::Block&&>(_block));
}
template <class ... TArgs> inline void emplace(TArgs ... _args) {
//m_stack.emplace(_args);
m_stack.emplace_back(_args...);
}
inline T& top() {
//return m_stack.top();
return m_stack.back();
}
inline void pop() {
//m_stack.pop();
m_stack.pop_back();
}
};
Stack openedList;
std::vector<T> openedList;
chunk_allocator<N> closedList;
uint64_t usedMemorySize = 0;
@@ -318,24 +382,31 @@ struct BlocksList
//closedList.clear();
usedMemorySize = 0;
}
};
}; // END of struct BlocksList.
//////////////////////////////////////////////////////////////////////////
const uint16_t SIZEOF_CSWITCH = sizeof(profiler::BaseBlockData) + 1 + sizeof(uint16_t);
struct ThreadStorage
{
StackBuffer<NonscopedBlock> nonscopedBlocks;
BlocksList<std::reference_wrapper<profiler::Block>, SIZEOF_CSWITCH * (uint16_t)128U> blocks;
BlocksList<profiler::Block, SIZEOF_CSWITCH * (uint16_t)128U> sync;
std::string name;
std::string name; ///< Thread name
#ifndef _WIN32
const pthread_t pthread_id;
const pthread_t pthread_id; ///< POSIX thread handle
#endif
const profiler::thread_id_t id;
std::atomic<char> expired;
std::atomic_bool frame; ///< is new frame working
bool allowChildren;
bool named;
bool guarded;
const profiler::thread_id_t id; ///< Thread ID
std::atomic<char> expired; ///< Is thread expired
std::atomic_bool frame; ///< Is new frame opened
bool allowChildren; ///< False if one of previously opened blocks has OFF_RECURSIVE or ON_WITHOUT_CHILDREN status
bool named; ///< True if thread name was set
bool guarded; ///< True if thread has been registered using ThreadGuard
void storeBlock(const profiler::Block& _block);
void storeCSwitch(const profiler::Block& _block);
@@ -343,7 +414,8 @@ struct ThreadStorage
void popSilent();
ThreadStorage();
};
}; // END of struct ThreadStorage.
//////////////////////////////////////////////////////////////////////////
@@ -417,7 +489,9 @@ public:
bool _copyName = false);
bool storeBlock(const profiler::BaseBlockDescriptor* _desc, const char* _runtimeName);
bool storeBlock(const profiler::BaseBlockDescriptor* _desc, const char* _runtimeName, profiler::timestamp_t _beginTime, profiler::timestamp_t _endTime);
void beginBlock(profiler::Block& _block);
void beginNonScopedBlock(const profiler::BaseBlockDescriptor* _desc, const char* _runtimeName);
void endBlock();
profiler::timestamp_t maxFrameDuration();
profiler::timestamp_t avgFrameDuration();
@@ -474,6 +548,9 @@ private:
guard_lock_t lock(m_spin);
return _findThreadStorage(_thread_id);
}
};
}; // END of class ProfileManager.
//////////////////////////////////////////////////////////////////////////
#endif // EASY_PROFILER_MANAGER_H


@@ -158,9 +158,9 @@ void modellingThread(){
#else
for (int i = 0; i < MODELLING_STEPS; i++){
#endif
//EASY_FRAME_COUNTER;
EASY_END_BLOCK;
EASY_NONSCOPED_BLOCK("Frame");
modellingStep();
//EASY_END_FRAME_COUNTER;
localSleep(1200000);
//std::this_thread::sleep_for(std::chrono::milliseconds(20));