/**
Lightweight profiler library for c++

Copyright(C) 2016-2017  Sergey Yagovtsev, Victor Zarubkin

Licensed under either of
    * MIT license (LICENSE.MIT or http://opensource.org/licenses/MIT)
    * Apache License, Version 2.0, (LICENSE.APACHE or http://www.apache.org/licenses/LICENSE-2.0)
at your option.

The MIT License
    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to deal
    in the Software without restriction, including without limitation the rights
    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
    of the Software, and to permit persons to whom the Software is furnished
    to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in all
    copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
    INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
    PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
    LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
    TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
    USE OR OTHER DEALINGS IN THE SOFTWARE.

The Apache License, Version 2.0 (the "License");
    You may not use this file except in compliance with the License.
    You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
**/

#ifndef EASY_PROFILER_MANAGER_H
#define EASY_PROFILER_MANAGER_H

#include <easy/profiler.h>
#include <easy/easy_socket.h>

#include "spin_lock.h"
#include "outstream.h"
#include "hashed_cstr.h"

#include <map>
#include <vector>
#include <unordered_map>
#include <thread>
#include <atomic>
#include <list>
#include <type_traits>
#include <cstring>
#include <cstddef>
#include <cstdint> // uintptr_t and fixed-width integer types used below

//////////////////////////////////////////////////////////////////////////

#ifdef _WIN32
# include <Windows.h>
#elif defined(__APPLE__)
# include <pthread.h>
# include <Availability.h>
#else
# include <sys/types.h>
# include <unistd.h>
# include <sys/syscall.h>
# include <chrono>
# include <time.h>
# include <malloc.h>
#endif

#ifdef max
#undef max
#endif

inline profiler::thread_id_t getCurrentThreadId()
{
#ifdef _WIN32
    return (profiler::thread_id_t)::GetCurrentThreadId();
#elif defined(__APPLE__)
# if (defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED >= __MAC_10_6) || \
     (defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && __IPHONE_OS_VERSION_MIN_REQUIRED >= __IPHONE_8_0)
    EASY_THREAD_LOCAL static uint64_t _id = 0;
    if (!_id)
        pthread_threadid_np(NULL, &_id);
    return (profiler::thread_id_t)_id;
# else
    return (profiler::thread_id_t)pthread_self();
# endif
#else
    EASY_THREAD_LOCAL static const profiler::thread_id_t _id = (profiler::thread_id_t)syscall(__NR_gettid);
    return _id;
#endif
}
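
// Usage note (an illustrative sketch, not part of the library API): the returned
// id is stable for the lifetime of a thread, so two calls from the same thread
// compare equal:
//
//     const profiler::thread_id_t tid = getCurrentThreadId();
//     assert(tid == getCurrentThreadId()); // same thread => same id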

namespace profiler {

    class SerializedBlock;

    struct do_not_calc_hash {
        template <class T> inline size_t operator()(T _value) const {
            return static_cast<size_t>(_value);
        }
    };

} // namespace profiler
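
// Illustrative sketch (an assumption, not library code): do_not_calc_hash is an
// identity hash for keys that are already well-distributed integer ids, e.g.:
//
//     std::unordered_map<profiler::block_id_t, uint32_t,
//                        profiler::do_not_calc_hash> counters; // no re-hashing of ids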
//////////////////////////////////////////////////////////////////////////

#ifndef EASY_ENABLE_BLOCK_STATUS
# define EASY_ENABLE_BLOCK_STATUS 1
#endif

#ifndef EASY_ENABLE_ALIGNMENT
# define EASY_ENABLE_ALIGNMENT 0
#endif

#ifndef EASY_ALIGNMENT_SIZE
# define EASY_ALIGNMENT_SIZE alignof(std::max_align_t)
#endif

#if EASY_ENABLE_ALIGNMENT == 0
# define EASY_ALIGNED(TYPE, VAR, A) TYPE VAR
# define EASY_MALLOC(MEMSIZE, A) malloc(MEMSIZE)
# define EASY_FREE(MEMPTR) free(MEMPTR)
#else
# if defined(_MSC_VER)
#  define EASY_ALIGNED(TYPE, VAR, A) __declspec(align(A)) TYPE VAR
#  define EASY_MALLOC(MEMSIZE, A) _aligned_malloc(MEMSIZE, A)
#  define EASY_FREE(MEMPTR) _aligned_free(MEMPTR)
# elif defined(__GNUC__)
#  define EASY_ALIGNED(TYPE, VAR, A) TYPE VAR __attribute__((aligned(A)))
#  define EASY_MALLOC(MEMSIZE, A) memalign(A, MEMSIZE)
#  define EASY_FREE(MEMPTR) free(MEMPTR)
# else
#  define EASY_ALIGNED(TYPE, VAR, A) TYPE VAR
#  define EASY_MALLOC(MEMSIZE, A) malloc(MEMSIZE)
#  define EASY_FREE(MEMPTR) free(MEMPTR)
# endif
#endif
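
// Illustrative sketch (an assumption, not library code): with alignment enabled,
// these macros pair an aligned declaration with a matching allocation/free:
//
//     EASY_ALIGNED(int8_t, buffer[256], 64);            // 64-byte aligned array
//     void* p = EASY_MALLOC(256, EASY_ALIGNMENT_SIZE);  // aligned heap block
//     EASY_FREE(p);                                     // matching deallocation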

//! Checks if a pointer is aligned.
//! \param ptr The pointer to check.
//! \tparam ALIGNMENT The alignment (must be a power of 2).
//! \returns true if the memory is aligned.
//!
template <uint32_t ALIGNMENT>
EASY_FORCE_INLINE bool is_aligned(void* ptr)
{
    // A power of two has exactly one bit set, so ALIGNMENT & (ALIGNMENT - 1) == 0;
    // checking ALIGNMENT % 2 == 0 would only verify evenness.
    static_assert(ALIGNMENT != 0 && (ALIGNMENT & (ALIGNMENT - 1)) == 0, "Alignment must be a power of two.");
    return ((uintptr_t)ptr & (ALIGNMENT - 1)) == 0;
}
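
// Illustrative sketch (an assumption, not library code):
//
//     alignas(8) char buf[16];
//     is_aligned<8>(buf);     // true: buf is 8-byte aligned by declaration
//     is_aligned<8>(buf + 1); // false: one past an 8-byte boundary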

EASY_FORCE_INLINE void unaligned_zero16(void* ptr)
{
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
    *(uint16_t*)ptr = 0;
#else
    ((char*)ptr)[0] = 0;
    ((char*)ptr)[1] = 0;
#endif
}

EASY_FORCE_INLINE void unaligned_zero32(void* ptr)
{
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
    *(uint32_t*)ptr = 0;
#else
    ((char*)ptr)[0] = 0;
    ((char*)ptr)[1] = 0;
    ((char*)ptr)[2] = 0;
    ((char*)ptr)[3] = 0;
#endif
}

EASY_FORCE_INLINE void unaligned_zero64(void* ptr)
{
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
    *(uint64_t*)ptr = 0;
#else
    // Assume unaligned is more common.
    if (!is_aligned<alignof(uint64_t)>(ptr)) {
        ((char*)ptr)[0] = 0;
        ((char*)ptr)[1] = 0;
        ((char*)ptr)[2] = 0;
        ((char*)ptr)[3] = 0;
        ((char*)ptr)[4] = 0;
        ((char*)ptr)[5] = 0;
        ((char*)ptr)[6] = 0;
        ((char*)ptr)[7] = 0;
    }
    else {
        *(uint64_t*)ptr = 0;
    }
#endif
}
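
// Illustrative sketch (an assumption, not library code): zeroing a 16-bit
// payload-size header at a possibly unaligned address; with
// EASY_ENABLE_STRICT_ALIGNMENT defined this falls back to byte-wise writes:
//
//     char buf[4];
//     unaligned_zero16(buf + 1); // buf + 1 need not be 2-byte aligned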

template <typename T>
EASY_FORCE_INLINE void unaligned_store16(void* ptr, T val)
{
    static_assert(sizeof(T) == 2, "16 bit type required.");
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
    *(T*)ptr = val;
#else
    const char* const temp = (char*)&val;
    ((char*)ptr)[0] = temp[0];
    ((char*)ptr)[1] = temp[1];
#endif
}

template <typename T>
EASY_FORCE_INLINE void unaligned_store32(void* ptr, T val)
{
    static_assert(sizeof(T) == 4, "32 bit type required.");
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
    *(T*)ptr = val;
#else
    const char* const temp = (char*)&val;
    ((char*)ptr)[0] = temp[0];
    ((char*)ptr)[1] = temp[1];
    ((char*)ptr)[2] = temp[2];
    ((char*)ptr)[3] = temp[3];
#endif
}

template <typename T>
EASY_FORCE_INLINE void unaligned_store64(void* ptr, T val)
{
    static_assert(sizeof(T) == 8, "64 bit type required.");
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
    *(T*)ptr = val;
#else
    const char* const temp = (char*)&val;
    // Assume unaligned is more common.
    if (!is_aligned<alignof(T)>(ptr)) {
        ((char*)ptr)[0] = temp[0];
        ((char*)ptr)[1] = temp[1];
        ((char*)ptr)[2] = temp[2];
        ((char*)ptr)[3] = temp[3];
        ((char*)ptr)[4] = temp[4];
        ((char*)ptr)[5] = temp[5];
        ((char*)ptr)[6] = temp[6];
        ((char*)ptr)[7] = temp[7];
    }
    else {
        *(T*)ptr = val;
    }
#endif
}

template <typename T>
EASY_FORCE_INLINE T unaligned_load16(const void* ptr)
{
    static_assert(sizeof(T) == 2, "16 bit type required.");
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
    return *(T*)ptr;
#else
    T value;
    ((char*)&value)[0] = ((char*)ptr)[0];
    ((char*)&value)[1] = ((char*)ptr)[1];
    return value;
#endif
}

template <typename T>
EASY_FORCE_INLINE T unaligned_load16(const void* ptr, T* val)
{
    static_assert(sizeof(T) == 2, "16 bit type required.");
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
    *val = *(T*)ptr;
    return *val;
#else
    ((char*)val)[0] = ((char*)ptr)[0];
    ((char*)val)[1] = ((char*)ptr)[1];
    return *val;
#endif
}

template <typename T>
EASY_FORCE_INLINE T unaligned_load32(const void* ptr)
{
    static_assert(sizeof(T) == 4, "32 bit type required.");
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
    return *(T*)ptr;
#else
    T value;
    ((char*)&value)[0] = ((char*)ptr)[0];
    ((char*)&value)[1] = ((char*)ptr)[1];
    ((char*)&value)[2] = ((char*)ptr)[2];
    ((char*)&value)[3] = ((char*)ptr)[3];
    return value;
#endif
}

template <typename T>
EASY_FORCE_INLINE T unaligned_load32(const void* ptr, T* val)
{
    static_assert(sizeof(T) == 4, "32 bit type required.");
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
    *val = *(T*)ptr;
    return *val;
#else
    // Write through val (not &val, which would overwrite the pointer itself).
    ((char*)val)[0] = ((char*)ptr)[0];
    ((char*)val)[1] = ((char*)ptr)[1];
    ((char*)val)[2] = ((char*)ptr)[2];
    ((char*)val)[3] = ((char*)ptr)[3];
    return *val;
#endif
}

template <typename T>
EASY_FORCE_INLINE T unaligned_load64(const void* ptr)
{
    static_assert(sizeof(T) == 8, "64 bit type required.");
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
    return *(T*)ptr;
#else
    if (!is_aligned<alignof(T)>(ptr)) {
        T value;
        ((char*)&value)[0] = ((char*)ptr)[0];
        ((char*)&value)[1] = ((char*)ptr)[1];
        ((char*)&value)[2] = ((char*)ptr)[2];
        ((char*)&value)[3] = ((char*)ptr)[3];
        ((char*)&value)[4] = ((char*)ptr)[4];
        ((char*)&value)[5] = ((char*)ptr)[5];
        ((char*)&value)[6] = ((char*)ptr)[6];
        ((char*)&value)[7] = ((char*)ptr)[7];
        return value;
    }
    else {
        return *(T*)ptr;
    }
#endif
}

template <typename T>
EASY_FORCE_INLINE T unaligned_load64(const void* ptr, T* val)
{
    static_assert(sizeof(T) == 8, "64 bit type required.");
#ifndef EASY_ENABLE_STRICT_ALIGNMENT
    *val = *(T*)ptr;
    return *val;
#else
    if (!is_aligned<alignof(T)>(ptr)) {
        // Write through val (not &val, which would overwrite the pointer itself).
        ((char*)val)[0] = ((char*)ptr)[0];
        ((char*)val)[1] = ((char*)ptr)[1];
        ((char*)val)[2] = ((char*)ptr)[2];
        ((char*)val)[3] = ((char*)ptr)[3];
        ((char*)val)[4] = ((char*)ptr)[4];
        ((char*)val)[5] = ((char*)ptr)[5];
        ((char*)val)[6] = ((char*)ptr)[6];
        ((char*)val)[7] = ((char*)ptr)[7];
        return *val;
    }
    else {
        *val = *(T*)ptr;
        return *val;
    }
#endif
}
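
// Illustrative round-trip sketch (an assumption, not library code): store a
// 32-bit value at an odd (unaligned) offset and read it back:
//
//     char buf[8];
//     unaligned_store32<uint32_t>(buf + 1, 0xDEADBEEFu);
//     const uint32_t v = unaligned_load32<uint32_t>(buf + 1); // v == 0xDEADBEEFu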

template <uint16_t N>
class chunk_allocator
{
    struct chunk { EASY_ALIGNED(int8_t, data[N], EASY_ALIGNMENT_SIZE); chunk* prev = nullptr; };

    struct chunk_list
    {
        chunk* last = nullptr;

        ~chunk_list()
        {
            clear();
        }

        void clear()
        {
            do {
                auto p = last;
                last = last->prev;
                EASY_FREE(p);
            } while (last != nullptr);
        }

        chunk& back()
        {
            return *last;
        }

        void emplace_back()
        {
            auto prev = last;
            last = ::new (EASY_MALLOC(sizeof(chunk), EASY_ALIGNMENT_SIZE)) chunk();
            last->prev = prev;

            // Although there is no need for unaligned access here, because a new chunk
            // will usually be at least 8-byte aligned (and we only need 2-byte alignment),
            // this is the only way found to silence the GCC strict-aliasing warning without
            // using std::memset. It's an extra line, but just as fast as *(uint16_t*)last->data = 0;
            char* const data = (char*)&last->data;
            *(uint16_t*)data = 0;
        }

        /** Invert the chunk list so that it can be iterated in insertion (direct) order.

        This method is used by serialize().
        */
        void invert()
        {
            chunk* next = nullptr;
            while (last->prev != nullptr) {
                auto p = last->prev;
                last->prev = next;
                next = last;
                last = p;
            }
            last->prev = next;
        }
    };

    //typedef std::list<chunk> chunk_list;

    // Used in serialize(): a workaround for the lack of constexpr support in MSVC 2013.
    static const int_fast32_t MAX_CHUNK_OFFSET = N - sizeof(uint16_t);

    chunk_list m_chunks;    ///< List of chunks.
    uint32_t m_size;        ///< Number of elements stored (i.e. the number of times allocate() has been called).
    uint16_t m_chunkOffset; ///< Number of bytes used in the current chunk.

public:

    chunk_allocator() : m_size(0), m_chunkOffset(0)
    {
        m_chunks.emplace_back();
    }

    /** Allocate n bytes.

    Automatically checks if there is enough reserved memory to store an additional
    n bytes and allocates an additional chunk if needed.
    */
    void* allocate(uint16_t n)
    {
        ++m_size;

        if (!need_expand(n))
        {
            // Temp variable to avoid an extra load caused by aliasing through this*.
            uint16_t chunkOffset = m_chunkOffset;
            char* data = (char*)m_chunks.back().data + chunkOffset;
            chunkOffset += n + sizeof(uint16_t);
            m_chunkOffset = chunkOffset;

            unaligned_store16(data, n);
            data += sizeof(uint16_t);

            // If there is enough space left for at least another payload size, zero it out.
            if (chunkOffset < N - 1)
                unaligned_zero16(data + n);

            return data;
        }

        m_chunkOffset = n + sizeof(uint16_t);
        m_chunks.emplace_back();

        char* data = (char*)&m_chunks.back().data[0];
        unaligned_store16(data, n);
        data += sizeof(uint16_t);

        // We assume here that it takes more than one element to fill a chunk.
        unaligned_zero16(data + n);

        return data;
    }

    /** Check whether the current chunk has too little space left to store an additional n bytes.
    */
    bool need_expand(uint16_t n) const
    {
        return (m_chunkOffset + n + sizeof(uint16_t)) > N;
    }

    uint32_t size() const
    {
        return m_size;
    }

    bool empty() const
    {
        return m_size == 0;
    }

    void clear()
    {
        m_size = 0;
        m_chunkOffset = 0;
        m_chunks.clear();
        m_chunks.emplace_back();
    }

    /** Serialize data to stream.

    \warning Data will be cleared after serialization.
    */
    void serialize(profiler::OStream& _outputStream)
    {
        // Chunks are stored in reverse order (stack).
        // To be able to iterate over them in direct order we have to invert the chunk list.
        m_chunks.invert();

        // Each chunk is an array of N bytes that can hold between 1 (if the list is
        // not empty) and however many elements fit in a chunk, where an element
        // consists of a payload size followed by the payload itself:
        //     elementStart[0..1]:      payload size as a uint16_t
        //     elementStart[2..size+1]: payload.
        // The maximum chunk offset is N - sizeof(uint16_t) because, if we hit that
        // (or go past it), there is either no space left, 1 byte left, or 2 bytes left,
        // all of which are too small to carry more than a zero-sized element.
        chunk* current = m_chunks.last;
        do {
            const char* data = (char*)current->data;
            int_fast32_t chunkOffset = 0; // Signed type so that overflow is not checked.
            uint16_t payloadSize = unaligned_load16<uint16_t>(data);
            while ((chunkOffset < MAX_CHUNK_OFFSET) & (payloadSize != 0)) {
                const uint16_t chunkSize = sizeof(uint16_t) + payloadSize;
                _outputStream.write(data, chunkSize);
                data += chunkSize;
                chunkOffset += chunkSize;
                unaligned_load16(data, &payloadSize);
            }

            current = current->prev;
        } while (current != nullptr);

        clear();
    }
}; // END of class chunk_allocator.
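
// Illustrative usage sketch (an assumption, not library code): allocate()
// prepends a 2-byte payload size to every block it returns, and serialize()
// later streams the "size + payload" pairs in insertion order:
//
//     chunk_allocator<1024> allocator;
//     void* mem = allocator.allocate(64); // 64-byte payload (+ 2-byte header)
//     // ... fill mem with up to 64 bytes of serialized block data ...
//     profiler::OStream stream;
//     allocator.serialize(stream);        // writes all elements, then clears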
//////////////////////////////////////////////////////////////////////////

class NonscopedBlock : public profiler::Block
{
    char* m_runtimeName; ///< A copy of _runtimeName, which makes it safe to begin the block in one function and end it in another.

    NonscopedBlock() = delete;
    NonscopedBlock(const NonscopedBlock&) = delete;
    NonscopedBlock(NonscopedBlock&&) = delete;
    NonscopedBlock& operator = (const NonscopedBlock&) = delete;
    NonscopedBlock& operator = (NonscopedBlock&&) = delete;

public:

    NonscopedBlock(const profiler::BaseBlockDescriptor* _desc, const char* _runtimeName, bool = false);
    ~NonscopedBlock();

    /** Copy the string from m_name to m_runtimeName to make it safe to end the block in another function.

    Performs work only if the block is ON and m_name != "".
    */
    void copyname();

    void destroy();
}; // END of class NonscopedBlock.
//////////////////////////////////////////////////////////////////////////

template <class T>
inline void destroy_elem(T*)
{
}

inline void destroy_elem(NonscopedBlock* _elem)
{
    _elem->destroy();
}

template <class T>
class StackBuffer
{
    struct chunk { int8_t data[sizeof(T)]; };

    std::list<chunk> m_overflow; ///< List of additional stack elements used when the current capacity of the buffer is not enough.
    T* m_buffer;                 ///< Contiguous buffer used for the stack.
    uint32_t m_size;             ///< Current size of the stack.
    uint32_t m_capacity;         ///< Current capacity of m_buffer.
    uint32_t m_maxcapacity;      ///< Maximum used capacity, including m_buffer and m_overflow.

public:

    StackBuffer(uint32_t N) : m_buffer(static_cast<T*>(malloc(N * sizeof(T)))), m_size(0), m_capacity(N), m_maxcapacity(N)
    {
    }

    ~StackBuffer()
    {
        for (uint32_t i = 0; i < m_size; ++i)
            destroy_elem(m_buffer + i);

        free(m_buffer);

        for (auto& elem : m_overflow)
            destroy_elem(reinterpret_cast<T*>(elem.data + 0));
    }

    template <class ... TArgs>
    T& push(TArgs ... _args)
    {
        if (m_size < m_capacity)
            return *(::new (m_buffer + m_size++) T(_args...));

        m_overflow.emplace_back();
        const uint32_t cap = m_capacity + static_cast<uint32_t>(m_overflow.size());
        if (m_maxcapacity < cap)
            m_maxcapacity = cap;

        return *(::new (m_overflow.back().data + 0) T(_args...));
    }

    void pop()
    {
        if (m_overflow.empty())
        {
            // m_size should never be 0 here because ProfileManager's behavior does not allow such a situation.
            destroy_elem(m_buffer + --m_size);

            if (m_size == 0 && m_maxcapacity > m_capacity)
            {
                // When the stack becomes empty we can grow the buffer so that it has enough space for the future.
                free(m_buffer);
                m_maxcapacity = m_capacity = std::max(m_maxcapacity, m_capacity << 1);
                m_buffer = static_cast<T*>(malloc(m_capacity * sizeof(T)));
            }

            return;
        }

        destroy_elem(reinterpret_cast<T*>(m_overflow.back().data + 0));
        m_overflow.pop_back();
    }
}; // END of class StackBuffer.
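
// Illustrative usage sketch (an assumption, not library code): elements are
// constructed in the contiguous buffer until its capacity is exhausted, then
// spill into the overflow list:
//
//     StackBuffer<NonscopedBlock> stack(16); // room for 16 elements before spilling
//     // desc: a hypothetical const profiler::BaseBlockDescriptor* for this sketch
//     NonscopedBlock& b = stack.push(desc, "runtime name"); // in-place construction
//     stack.pop();                                          // destroys the top element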
//////////////////////////////////////////////////////////////////////////

template <class T, const uint16_t N>
struct BlocksList
{
    BlocksList() = default;

    std::vector<T> openedList;
    chunk_allocator<N> closedList;
    uint64_t usedMemorySize = 0;

    void clearClosed() {
        //closedList.clear();
        usedMemorySize = 0;
    }
}; // END of struct BlocksList.
//////////////////////////////////////////////////////////////////////////

class CSwitchBlock : public profiler::CSwitchEvent
{
    const char* m_name;

public:

    CSwitchBlock(profiler::timestamp_t _begin_time, profiler::thread_id_t _tid, const char* _runtimeName);
    inline const char* name() const { return m_name; }
};
//////////////////////////////////////////////////////////////////////////

const uint16_t SIZEOF_BLOCK = sizeof(profiler::BaseBlockData) + 1 + sizeof(uint16_t); // SerializedBlock stores BaseBlockData + at least 1 character for the name ('\0') + 2 bytes for the size of the serialized data
const uint16_t SIZEOF_CSWITCH = sizeof(profiler::CSwitchEvent) + 1 + sizeof(uint16_t); // SerializedCSwitch also stores an additional 4 bytes to be able to save a 64-bit thread_id

struct ThreadStorage
{
    StackBuffer<NonscopedBlock> nonscopedBlocks;
    BlocksList<std::reference_wrapper<profiler::Block>, SIZEOF_BLOCK * (uint16_t)128U> blocks;
    BlocksList<CSwitchBlock, SIZEOF_CSWITCH * (uint16_t)128U> sync;

    std::string name; ///< Thread name

#ifndef _WIN32
    const pthread_t pthread_id; ///< Thread handle
#endif

    const profiler::thread_id_t id; ///< Thread ID
    std::atomic<char> expired;      ///< Is thread expired
    std::atomic_bool frame;         ///< Is new frame opened
    bool allowChildren;             ///< False if one of the previously opened blocks has the OFF_RECURSIVE or ON_WITHOUT_CHILDREN status
    bool named;                     ///< True if thread name was set
    bool guarded;                   ///< True if thread has been registered using ThreadGuard

    void storeBlock(const profiler::Block& _block);
    void storeCSwitch(const CSwitchBlock& _block);
    void clearClosed();
    void popSilent();

    ThreadStorage();
}; // END of struct ThreadStorage.
//////////////////////////////////////////////////////////////////////////

typedef uint64_t processid_t;

class BlockDescriptor;

class ProfileManager
{
#ifndef EASY_MAGIC_STATIC_CPP11
    friend class ProfileManagerInstance;
#endif

    ProfileManager();
    ProfileManager(const ProfileManager& p) = delete;
    ProfileManager& operator = (const ProfileManager&) = delete;

    typedef profiler::guard_lock<profiler::spin_lock> guard_lock_t;
    typedef std::map<profiler::thread_id_t, ThreadStorage> map_of_threads_stacks;
    typedef std::vector<BlockDescriptor*> block_descriptors_t;

#ifdef EASY_PROFILER_HASHED_CSTR_DEFINED
    typedef std::unordered_map<profiler::hashed_cstr, profiler::block_id_t> descriptors_map_t;
#else
    typedef std::unordered_map<profiler::hashed_stdstring, profiler::block_id_t> descriptors_map_t;
#endif

    const processid_t m_processId;

    map_of_threads_stacks m_threads;
    block_descriptors_t m_descriptors;
    descriptors_map_t m_descriptorsMap;
    uint64_t m_usedMemorySize;
    profiler::timestamp_t m_beginTime;
    profiler::timestamp_t m_endTime;
    std::atomic<profiler::timestamp_t> m_frameMax;
    std::atomic<profiler::timestamp_t> m_frameAvg;
    std::atomic<profiler::timestamp_t> m_frameCur;
    profiler::spin_lock m_spin;
    profiler::spin_lock m_storedSpin;
    profiler::spin_lock m_dumpSpin;
    std::atomic<profiler::thread_id_t> m_mainThreadId;
    std::atomic<char> m_profilerStatus;
    std::atomic_bool m_isEventTracingEnabled;
    std::atomic_bool m_isAlreadyListening;
    std::atomic_bool m_frameMaxReset;
    std::atomic_bool m_frameAvgReset;

    std::string m_csInfoFilename = "/tmp/cs_profiling_info.log";

    uint32_t dumpBlocksToStream(profiler::OStream& _outputStream, bool _lockSpin);
    void setBlockStatus(profiler::block_id_t _id, profiler::EasyBlockStatus _status);

    std::thread m_listenThread;
    void listen(uint16_t _port);

    std::atomic_bool m_stopListen;

public:

    static ProfileManager& instance();
    ~ProfileManager();

    const profiler::BaseBlockDescriptor* addBlockDescriptor(profiler::EasyBlockStatus _defaultStatus,
                                                            const char* _autogenUniqueId,
                                                            const char* _name,
                                                            const char* _filename,
                                                            int _line,
                                                            profiler::block_type_t _block_type,
                                                            profiler::color_t _color,
                                                            bool _copyName = false);

    bool storeBlock(const profiler::BaseBlockDescriptor* _desc, const char* _runtimeName);
    bool storeBlock(const profiler::BaseBlockDescriptor* _desc, const char* _runtimeName, profiler::timestamp_t _beginTime, profiler::timestamp_t _endTime);
    void beginBlock(profiler::Block& _block);
    void beginNonScopedBlock(const profiler::BaseBlockDescriptor* _desc, const char* _runtimeName);
    void endBlock();

    profiler::timestamp_t maxFrameDuration();
    profiler::timestamp_t avgFrameDuration();
    profiler::timestamp_t curFrameDuration() const;

    void setEnabled(bool isEnable);
    bool isEnabled() const;
    void setEventTracingEnabled(bool _isEnable);
    bool isEventTracingEnabled() const;

    uint32_t dumpBlocksToFile(const char* filename);
    const char* registerThread(const char* name, profiler::ThreadGuard& threadGuard);
    const char* registerThread(const char* name);

    void setContextSwitchLogFilename(const char* name)
    {
        m_csInfoFilename = name;
    }

    const char* getContextSwitchLogFilename() const
    {
        return m_csInfoFilename.c_str();
    }

    void beginContextSwitch(profiler::thread_id_t _thread_id, profiler::timestamp_t _time, profiler::thread_id_t _target_thread_id, const char* _target_process, bool _lockSpin = true);
    void endContextSwitch(profiler::thread_id_t _thread_id, processid_t _process_id, profiler::timestamp_t _endtime, bool _lockSpin = true);
    void startListen(uint16_t _port);
    void stopListen();
    bool isListening() const;

private:

    void beginFrame();
    void endFrame();

    void enableEventTracer();
    void disableEventTracer();

    char checkThreadExpired(ThreadStorage& _registeredThread);

    void storeBlockForce(const profiler::BaseBlockDescriptor* _desc, const char* _runtimeName, ::profiler::timestamp_t& _timestamp);
    void storeBlockForce2(const profiler::BaseBlockDescriptor* _desc, const char* _runtimeName, ::profiler::timestamp_t _timestamp);
    void storeBlockForce2(ThreadStorage& _registeredThread, const profiler::BaseBlockDescriptor* _desc, const char* _runtimeName, ::profiler::timestamp_t _timestamp);

    ThreadStorage& _threadStorage(profiler::thread_id_t _thread_id);
    ThreadStorage* _findThreadStorage(profiler::thread_id_t _thread_id);

    inline ThreadStorage& threadStorage(profiler::thread_id_t _thread_id)
    {
        guard_lock_t lock(m_spin);
        return _threadStorage(_thread_id);
    }

    inline ThreadStorage* findThreadStorage(profiler::thread_id_t _thread_id)
    {
        guard_lock_t lock(m_spin);
        return _findThreadStorage(_thread_id);
    }
}; // END of class ProfileManager.
//////////////////////////////////////////////////////////////////////////
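
// Illustrative usage sketch (an assumption: applications normally go through
// the public easy/profiler.h macros instead of calling the manager directly):
//
//     auto& mgr = ProfileManager::instance();
//     mgr.setEnabled(true);                 // start capturing blocks
//     mgr.registerThread("worker");         // name the calling thread
//     // ... profiled code runs; blocks are stored per thread ...
//     mgr.dumpBlocksToFile("profile.prof"); // serialize captured data to disk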
#endif // EASY_PROFILER_MANAGER_H