/************************************************************************
* file name         : reader.cpp
* ----------------- :
* creation time     : 2016/06/19
* authors           : Sergey Yagovtsev, Victor Zarubkin
* emails            : yse.sey@gmail.com, v.s.zarubkin@gmail.com
* ----------------- :
* description       : The file contains implementation of fillTreesFromFile function
*                   : which reads profiler file and fills profiler blocks tree.
* ----------------- :
* change log        : * 2016/06/19 Sergey Yagovtsev: First fillTreesFromFile implementation.
*                   :
*                   : * 2016/06/25 Victor Zarubkin: Removed unnecessary memory allocation and copy
*                   :       when creating and inserting blocks into the tree.
*                   :
*                   : * 2016/06/26 Victor Zarubkin: Added statistics gathering (min, max, average duration,
*                   :       number of block calls).
*                   : * 2016/06/26 Victor Zarubkin, Sergey Yagovtsev: Added statistics gathering for root
*                   :       blocks in the tree.
*                   :
*                   : * 2016/06/29 Victor Zarubkin: Added calculation of total children number for blocks.
*                   :
*                   : * 2016/06/30 Victor Zarubkin: Added this header.
*                   :       Added tree depth calculation.
*                   :
*                   : *
* ----------------- :
* license           : Lightweight profiler library for c++
*                   : Copyright(C) 2016-2017  Sergey Yagovtsev, Victor Zarubkin
*                   :
*                   : Licensed under either of
*                   :     * MIT license (LICENSE.MIT or http://opensource.org/licenses/MIT)
*                   :     * Apache License, Version 2.0, (LICENSE.APACHE or http://www.apache.org/licenses/LICENSE-2.0)
*                   : at your option.
*                   :
*                   : The MIT License
*                   :
*                   : Permission is hereby granted, free of charge, to any person obtaining a copy
*                   : of this software and associated documentation files (the "Software"), to deal
*                   : in the Software without restriction, including without limitation the rights
*                   : to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
*                   : of the Software, and to permit persons to whom the Software is furnished
*                   : to do so, subject to the following conditions:
*                   :
*                   : The above copyright notice and this permission notice shall be included in all
*                   : copies or substantial portions of the Software.
*                   :
*                   : THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
*                   : INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
*                   : PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
*                   : LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
*                   : TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
*                   : USE OR OTHER DEALINGS IN THE SOFTWARE.
*                   :
*                   : The Apache License, Version 2.0 (the "License")
*                   :
*                   : You may not use this file except in compliance with the License.
*                   : You may obtain a copy of the License at
*                   :
*                   : http://www.apache.org/licenses/LICENSE-2.0
*                   :
*                   : Unless required by applicable law or agreed to in writing, software
*                   : distributed under the License is distributed on an "AS IS" BASIS,
*                   : WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*                   : See the License for the specific language governing permissions and
*                   : limitations under the License.
************************************************************************/

#include <easy/reader.h>
#include "hashed_cstr.h"

#include <fstream>
#include <sstream>
#include <iterator>
#include <algorithm>
#include <unordered_map>
#include <thread>
#include <cstring> // for memcpy used in SerializedData::extend

//////////////////////////////////////////////////////////////////////////

typedef uint32_t processid_t;

extern const uint32_t PROFILER_SIGNATURE;
extern const uint32_t EASY_CURRENT_VERSION;

#define EASY_VERSION_INT(v_major, v_minor, v_patch) ((static_cast<uint32_t>(v_major) << 24) | (static_cast<uint32_t>(v_minor) << 16) | static_cast<uint32_t>(v_patch))
const uint32_t MIN_COMPATIBLE_VERSION = EASY_VERSION_INT(0, 1, 0); ///< minimal compatible version (.prof file format was not changed seriously since this version)
const uint32_t EASY_V_100 = EASY_VERSION_INT(1, 0, 0); ///< in v1.0.0 some additional data were added into .prof file
const uint32_t EASY_V_130 = EASY_VERSION_INT(1, 3, 0); ///< in v1.3.0 changed sizeof(thread_id_t) uint32_t -> uint64_t
#undef EASY_VERSION_INT
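
// For reference (derived from the macro above): EASY_VERSION_INT packs major/minor/patch into a
// single uint32_t, e.g. EASY_VERSION_INT(1, 3, 0) == 0x01030000, so versions can be compared
// with ordinary integer operators as isCompatibleVersion() does below.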

const uint64_t TIME_FACTOR = 1000000000ULL;

// TODO: use 128 bit integer operations for better accuracy
#define EASY_USE_FLOATING_POINT_CONVERSION
#ifdef EASY_USE_FLOATING_POINT_CONVERSION

// Suppress warnings about double to uint64 conversion
# ifdef _MSC_VER
#  pragma warning(disable:4244)
# elif defined(__GNUC__)
#  pragma GCC diagnostic push
#  pragma GCC diagnostic ignored "-Wconversion"
#  pragma GCC diagnostic ignored "-Wsign-conversion"
# elif defined(__clang__)
#  pragma clang diagnostic push
#  pragma clang diagnostic ignored "-Wconversion"
#  pragma clang diagnostic ignored "-Wsign-conversion"
# endif

# define EASY_CONVERT_TO_NANO(t, freq, factor) t *= factor

#else

# define EASY_CONVERT_TO_NANO(t, freq, factor) t *= TIME_FACTOR; t /= freq

#endif
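
// Both branches of EASY_CONVERT_TO_NANO implement the same conversion from CPU ticks to
// nanoseconds, t_ns = t_ticks * 1e9 / cpu_frequency; the floating-point branch multiplies by a
// pre-computed factor = 1e9 / cpu_frequency (see conversion_factor below), trading a bit of
// accuracy for speed, which is what the TODO about 128-bit integer math refers to.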

//////////////////////////////////////////////////////////////////////////

inline bool isCompatibleVersion(uint32_t _version)
{
    return _version >= MIN_COMPATIBLE_VERSION;
}

inline void write(::std::stringstream& _stream, const char* _value, size_t _size)
{
    _stream.write(_value, _size);
}

template <class T>
inline void write(::std::stringstream& _stream, const T& _value)
{
    _stream.write((const char*)&_value, sizeof(T));
}
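
// For reference, these helpers serialize plain values as raw bytes, e.g. write(str, someCounter)
// stores sizeof(someCounter) bytes (someCounter is just an illustrative name), mirroring the raw
// inFile.read((char*)&value, sizeof(value)) calls used for deserialization below.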

//////////////////////////////////////////////////////////////////////////

namespace profiler {

    void SerializedData::set(char* _data, uint64_t _size)
    {
        delete [] m_data;
        m_data = _data;
        m_size = _size;
    }

    void SerializedData::set(uint64_t _size)
    {
        if (_size != 0)
            set(new char[_size], _size);
        else
            set(nullptr, 0);
    }

    void SerializedData::extend(uint64_t _size)
    {
        auto olddata = m_data;
        auto oldsize = m_size;

        m_size = oldsize + _size;
        m_data = new char[m_size];

        if (olddata != nullptr) {
            memcpy(m_data, olddata, oldsize);
            delete [] olddata;
        }
    }

    // BlockStatistics instances are shared between similar blocks and use calls_number as a
    // reference counter, so the statistics object is destroyed only with its last owner.
    extern "C" PROFILER_API void release_stats(BlockStatistics*& _stats)
    {
        if (_stats == nullptr)
            return;

        if (--_stats->calls_number == 0)
            delete _stats;

        _stats = nullptr;
    }

}

//////////////////////////////////////////////////////////////////////////

#ifdef EASY_PROFILER_HASHED_CSTR_DEFINED

typedef ::std::unordered_map<::profiler::block_id_t, ::profiler::BlockStatistics*, ::profiler::passthrough_hash> StatsMap;

/** \note It is absolutely safe to use hashed_cstr (which simply stores pointer) because std::unordered_map,
which uses it as a key, exists only inside fillTreesFromFile function. */
typedef ::std::unordered_map<::profiler::hashed_cstr, ::profiler::block_id_t> IdMap;

typedef ::std::unordered_map<::profiler::hashed_cstr, ::profiler::BlockStatistics*> CsStatsMap;

#else

// TODO: Create optimized version of profiler::hashed_cstr for Linux too.
typedef ::std::unordered_map<::profiler::block_id_t, ::profiler::BlockStatistics*, ::profiler::passthrough_hash> StatsMap;
typedef ::std::unordered_map<::profiler::hashed_stdstring, ::profiler::block_id_t> IdMap;
typedef ::std::unordered_map<::profiler::hashed_stdstring, ::profiler::BlockStatistics*> CsStatsMap;

#endif
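
// Roles of these maps as used below: StatsMap accumulates per-id statistics for ordinary blocks,
// CsStatsMap accumulates statistics for context-switch blocks keyed by block name (see the
// CsStatsMap overload of update_statistics), and IdMap maps runtime block names to generated ids
// so that blocks with equal runtime names end up with equal ids.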

//////////////////////////////////////////////////////////////////////////

/** \brief Updates statistics for a profiler block.

\param _stats_map Storage of statistics for blocks.
\param _current Reference to the current block.
\param _current_index Index of the current block.
\param _parent_index Index of the parent block (or thread id for top-level blocks).
\param _blocks Reference to the array of all read blocks.

\return Pointer to the block statistics (existing and updated, or newly created).

\note All blocks with similar name have the same pointer to statistics information.

\note As all profiler blocks keep a pointer to their statistics, all similar blocks
automatically receive statistics update.

*/
::profiler::BlockStatistics* update_statistics(StatsMap& _stats_map, const ::profiler::BlocksTree& _current, ::profiler::block_index_t _current_index, ::profiler::block_index_t _parent_index, const ::profiler::blocks_t& _blocks, bool _calculate_children = true)
{
    auto duration = _current.node->duration();
    //StatsMap::key_type key(_current.node->name());
    //auto it = _stats_map.find(key);
    auto it = _stats_map.find(_current.node->id());
    if (it != _stats_map.end())
    {
        // Update already existing statistics

        auto stats = it->second; // write pointer to statistics into output (this is BlocksTree::per_thread_stats or per_parent_stats or per_frame_stats)

        ++stats->calls_number; // update calls number of this block
        stats->total_duration += duration; // update summary duration of all block calls

        if (_calculate_children)
        {
            for (auto i : _current.children)
                stats->total_children_duration += _blocks[i].node->duration();
        }

        if (duration > _blocks[stats->max_duration_block].node->duration())
        {
            // update max duration
            stats->max_duration_block = _current_index;
            //stats->max_duration = duration;
        }

        if (duration < _blocks[stats->min_duration_block].node->duration())
        {
            // update min duration
            stats->min_duration_block = _current_index;
            //stats->min_duration = duration;
        }

        // average duration is calculated inside average_duration() method by dividing total_duration by the calls_number

        return stats;
    }

    // This is the first time the block appears in the file.
    // Create new statistics.
    auto stats = new ::profiler::BlockStatistics(duration, _current_index, _parent_index);
    //_stats_map.emplace(key, stats);
    _stats_map.emplace(_current.node->id(), stats);

    if (_calculate_children)
    {
        for (auto i : _current.children)
            stats->total_children_duration += _blocks[i].node->duration();
    }

    return stats;
}

::profiler::BlockStatistics* update_statistics(CsStatsMap& _stats_map, const ::profiler::BlocksTree& _current, ::profiler::block_index_t _current_index, ::profiler::block_index_t _parent_index, const ::profiler::blocks_t& _blocks, bool _calculate_children = true)
{
    auto duration = _current.node->duration();
    CsStatsMap::key_type key(_current.node->name());
    auto it = _stats_map.find(key);
    if (it != _stats_map.end())
    {
        // Update already existing statistics

        auto stats = it->second; // write pointer to statistics into output (this is BlocksTree::per_thread_stats or per_parent_stats or per_frame_stats)

        ++stats->calls_number; // update calls number of this block
        stats->total_duration += duration; // update summary duration of all block calls

        if (_calculate_children)
        {
            for (auto i : _current.children)
                stats->total_children_duration += _blocks[i].node->duration();
        }

        if (duration > _blocks[stats->max_duration_block].node->duration())
        {
            // update max duration
            stats->max_duration_block = _current_index;
            //stats->max_duration = duration;
        }

        if (duration < _blocks[stats->min_duration_block].node->duration())
        {
            // update min duration
            stats->min_duration_block = _current_index;
            //stats->min_duration = duration;
        }

        // average duration is calculated inside average_duration() method by dividing total_duration by the calls_number

        return stats;
    }

    // This is the first time the block appears in the file.
    // Create new statistics.
    auto stats = new ::profiler::BlockStatistics(duration, _current_index, _parent_index);
    _stats_map.emplace(key, stats);

    if (_calculate_children)
    {
        for (auto i : _current.children)
            stats->total_children_duration += _blocks[i].node->duration();
    }

    return stats;
}
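
// For reference, the pointer returned by both overloads above is stored by the callers below into
// tree.per_thread_stats, tree.per_parent_stats or tree.per_frame_stats, depending on which
// statistics map was passed in; blocks sharing an id (or a name, for the CsStatsMap overload)
// therefore share one BlockStatistics object.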

//////////////////////////////////////////////////////////////////////////

void update_statistics_recursive(StatsMap& _stats_map, ::profiler::BlocksTree& _current, ::profiler::block_index_t _current_index, ::profiler::block_index_t _parent_index, ::profiler::blocks_t& _blocks)
{
    _current.per_frame_stats = update_statistics(_stats_map, _current, _current_index, _parent_index, _blocks, false);
    for (auto i : _current.children)
    {
        _current.per_frame_stats->total_children_duration += _blocks[i].node->duration();
        update_statistics_recursive(_stats_map, _blocks[i], i, _parent_index, _blocks);
    }
}

//////////////////////////////////////////////////////////////////////////

/*void validate_pointers(::std::atomic<int>& _progress, const char* _oldbase, ::profiler::SerializedData& _serialized_blocks, ::profiler::blocks_t& _blocks, size_t _size)
{
    if (_oldbase == nullptr)
    {
        _progress.store(25, ::std::memory_order_release);
        return;
    }

    for (size_t i = 0; i < _size; ++i)
    {
        auto& tree = _blocks[i];
        auto dist = ::std::distance(_oldbase, reinterpret_cast<const char*>(tree.node));
        tree.node = reinterpret_cast<::profiler::SerializedBlock*>(_serialized_blocks.data() + dist);
        _progress.store(20 + static_cast<int>(5 * i / _size), ::std::memory_order_release);
    }
}

void validate_pointers(::std::atomic<int>& _progress, const char* _oldbase, ::profiler::SerializedData& _serialized_descriptors, ::profiler::descriptors_list_t& _descriptors, size_t _size)
{
    if (_oldbase == nullptr)
    {
        _progress.store(5, ::std::memory_order_release);
        return;
    }

    for (size_t i = 0; i < _size; ++i)
    {
        auto dist = ::std::distance(_oldbase, reinterpret_cast<const char*>(_descriptors[i]));
        _descriptors[i] = reinterpret_cast<::profiler::SerializedBlockDescriptor*>(_serialized_descriptors.data() + dist);
        _progress.store(static_cast<int>(5 * i / _size));
    }
}*/

//////////////////////////////////////////////////////////////////////////

extern "C" {

    PROFILER_API ::profiler::block_index_t fillTreesFromFile(::std::atomic<int>& progress, const char* filename,
                                                             ::profiler::SerializedData& serialized_blocks,
                                                             ::profiler::SerializedData& serialized_descriptors,
                                                             ::profiler::descriptors_list_t& descriptors,
                                                             ::profiler::blocks_t& blocks,
                                                             ::profiler::thread_blocks_tree_t& threaded_trees,
                                                             uint32_t& total_descriptors_number,
                                                             bool gather_statistics,
                                                             ::std::stringstream& _log)
    {
        auto oldprogress = progress.exchange(0, ::std::memory_order_release);
        if (oldprogress < 0)
        {
            _log << "Reading was interrupted";
            return 0;
        }

        ::std::ifstream inFile(filename, ::std::fstream::binary);
        if (!inFile.is_open())
        {
            _log << "Cannot open file " << filename;
            return 0;
        }

        ::std::stringstream str;

        // Replace str buffer with inFile buffer to avoid redundant copying
        typedef ::std::basic_iostream<::std::stringstream::char_type, ::std::stringstream::traits_type> stringstream_parent;
        stringstream_parent& s = str;
        auto oldbuf = s.rdbuf(inFile.rdbuf());

        // Read data from file
        auto result = fillTreesFromStream(progress, str, serialized_blocks, serialized_descriptors, descriptors, blocks,
                                          threaded_trees, total_descriptors_number, gather_statistics, _log);

        // Restore old str buffer to avoid possible second memory free on stringstream destructor
        s.rdbuf(oldbuf);

        return result;
    }

    //////////////////////////////////////////////////////////////////////////

    PROFILER_API ::profiler::block_index_t fillTreesFromStream(::std::atomic<int>& progress, ::std::stringstream& inFile,
                                                               ::profiler::SerializedData& serialized_blocks,
                                                               ::profiler::SerializedData& serialized_descriptors,
                                                               ::profiler::descriptors_list_t& descriptors,
                                                               ::profiler::blocks_t& blocks,
                                                               ::profiler::thread_blocks_tree_t& threaded_trees,
                                                               uint32_t& total_descriptors_number,
                                                               bool gather_statistics,
                                                               ::std::stringstream& _log)
    {
        EASY_FUNCTION(::profiler::colors::Cyan);

        auto oldprogress = progress.exchange(0, ::std::memory_order_release);
        if (oldprogress < 0)
        {
            _log << "Reading was interrupted";
            return 0;
        }

        uint32_t signature = 0;
        inFile.read((char*)&signature, sizeof(uint32_t));
        if (signature != PROFILER_SIGNATURE)
        {
            _log << "Wrong signature " << signature << "\nThis is not EasyProfiler file/stream.";
            return 0;
        }

        uint32_t version = 0;
        inFile.read((char*)&version, sizeof(uint32_t));
        if (!isCompatibleVersion(version))
        {
            _log << "Incompatible version: v" << (version >> 24) << "." << ((version & 0x00ff0000) >> 16) << "." << (version & 0x0000ffff);
            return 0;
        }
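
        // Stream layout as parsed below: signature, version, [process id if version > 1.0.0],
        // CPU frequency, begin/end timestamps, total blocks number, blocks memory size,
        // descriptors number, descriptors memory size, the serialized descriptors, and finally
        // the per-thread data (thread id, thread name, context-switch blocks, profiler blocks).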

        processid_t pid = 0;
        if (version > EASY_V_100)
            inFile.read((char*)&pid, sizeof(processid_t));

        int64_t file_cpu_frequency = 0LL;
        inFile.read((char*)&file_cpu_frequency, sizeof(int64_t));
        uint64_t cpu_frequency = file_cpu_frequency;
        const double conversion_factor = static_cast<double>(TIME_FACTOR) / static_cast<double>(cpu_frequency);

        ::profiler::timestamp_t begin_time = 0ULL;
        ::profiler::timestamp_t end_time = 0ULL;
        inFile.read((char*)&begin_time, sizeof(::profiler::timestamp_t));
        inFile.read((char*)&end_time, sizeof(::profiler::timestamp_t));
        if (cpu_frequency != 0)
        {
            EASY_CONVERT_TO_NANO(begin_time, cpu_frequency, conversion_factor);
            EASY_CONVERT_TO_NANO(end_time, cpu_frequency, conversion_factor);
        }

        uint32_t total_blocks_number = 0;
        inFile.read((char*)&total_blocks_number, sizeof(uint32_t));
        if (total_blocks_number == 0)
        {
            _log << "Profiled blocks number == 0";
            return 0;
        }

        uint64_t memory_size = 0;
        inFile.read((char*)&memory_size, sizeof(decltype(memory_size)));
        if (memory_size == 0)
        {
            _log << "Wrong memory size == 0 for " << total_blocks_number << " blocks";
            return 0;
        }

        total_descriptors_number = 0;
        inFile.read((char*)&total_descriptors_number, sizeof(uint32_t));
        if (total_descriptors_number == 0)
        {
            _log << "Blocks description number == 0";
            return 0;
        }

        uint64_t descriptors_memory_size = 0;
        inFile.read((char*)&descriptors_memory_size, sizeof(decltype(descriptors_memory_size)));
        if (descriptors_memory_size == 0)
        {
            _log << "Wrong memory size == 0 for " << total_descriptors_number << " blocks descriptions";
            return 0;
        }

        descriptors.reserve(total_descriptors_number);
        //const char* olddata = append_regime ? serialized_descriptors.data() : nullptr;
        serialized_descriptors.set(descriptors_memory_size);
        //validate_pointers(progress, olddata, serialized_descriptors, descriptors, descriptors.size());

        uint64_t i = 0;
        while (!inFile.eof() && descriptors.size() < total_descriptors_number)
        {
            uint16_t sz = 0;
            inFile.read((char*)&sz, sizeof(sz));
            if (sz == 0)
            {
                descriptors.push_back(nullptr);
                continue;
            }

            //if (i + sz > descriptors_memory_size) {
            //    printf("FILE CORRUPTED\n");
            //    return 0;
            //}

            char* data = serialized_descriptors[i];
            inFile.read(data, sz);
            auto descriptor = reinterpret_cast<::profiler::SerializedBlockDescriptor*>(data);
            descriptors.push_back(descriptor);

            i += sz;
            auto oldprogress = progress.exchange(static_cast<int>(15 * i / descriptors_memory_size), ::std::memory_order_release);
            if (oldprogress < 0)
            {
                _log << "Reading was interrupted";
                return 0; // Loading interrupted
            }
        }

#ifdef _WIN64
        typedef ::std::unordered_map<::profiler::thread_id_t, StatsMap, ::profiler::passthrough_hash> PerThreadStats;
#else
        typedef ::std::unordered_map<::profiler::thread_id_t, StatsMap> PerThreadStats;
#endif

        PerThreadStats parent_statistics, frame_statistics;
        IdMap identification_table;

        blocks.reserve(total_blocks_number);
        //olddata = append_regime ? serialized_blocks.data() : nullptr;
        serialized_blocks.set(memory_size);
        //validate_pointers(progress, olddata, serialized_blocks, blocks, blocks.size());

        i = 0;
        uint32_t read_number = 0;
        ::profiler::block_index_t blocks_counter = 0;
        ::std::vector<char> name;

        const size_t thread_id_t_size = version < EASY_V_130 ? sizeof(uint32_t) : sizeof(::profiler::thread_id_t);
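
        // Files written before v1.3.0 stored thread ids as 32-bit values (see EASY_V_130 above),
        // so only thread_id_t_size bytes are read into thread_id below. Each per-thread record
        // consists of the thread id, an optional thread name, a set of context-switch blocks and
        // a set of ordinary profiler blocks, hence the two inner reading loops below.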

        while (!inFile.eof() && read_number < total_blocks_number)
        {
            EASY_BLOCK("Read thread data", ::profiler::colors::DarkGreen);

            ::profiler::thread_id_t thread_id = 0;
            inFile.read((char*)&thread_id, thread_id_t_size);

            auto& root = threaded_trees[thread_id];

            uint16_t name_size = 0;
            inFile.read((char*)&name_size, sizeof(uint16_t));
            if (name_size != 0)
            {
                name.resize(name_size);
                inFile.read(name.data(), name_size);
                root.thread_name = name.data();
            }

            CsStatsMap per_thread_statistics_cs;

            uint32_t blocks_number_in_thread = 0;
            inFile.read((char*)&blocks_number_in_thread, sizeof(decltype(blocks_number_in_thread)));
            auto threshold = read_number + blocks_number_in_thread;
            while (!inFile.eof() && read_number < threshold)
            {
                EASY_BLOCK("Read context switch", ::profiler::colors::Green);

                ++read_number;

                uint16_t sz = 0;
                inFile.read((char*)&sz, sizeof(sz));
                if (sz == 0)
                {
                    _log << "Bad CSwitch block size == 0";
                    return 0;
                }

                char* data = serialized_blocks[i];
                inFile.read(data, sz);
                i += sz;
                auto baseData = reinterpret_cast<::profiler::SerializedBlock*>(data);
                auto t_begin = reinterpret_cast<::profiler::timestamp_t*>(data);
                auto t_end = t_begin + 1;

                if (cpu_frequency != 0)
                {
                    EASY_CONVERT_TO_NANO(*t_begin, cpu_frequency, conversion_factor);
                    EASY_CONVERT_TO_NANO(*t_end, cpu_frequency, conversion_factor);
                }

                if (*t_end > begin_time)
                {
                    if (*t_begin < begin_time)
                        *t_begin = begin_time;

                    blocks.emplace_back();
                    ::profiler::BlocksTree& tree = blocks.back();
                    tree.node = baseData;
                    const auto block_index = blocks_counter++;

                    root.wait_time += baseData->duration();
                    root.sync.emplace_back(block_index);

                    if (gather_statistics)
                    {
                        EASY_BLOCK("Gather per thread statistics", ::profiler::colors::Coral);
                        tree.per_thread_stats = update_statistics(per_thread_statistics_cs, tree, block_index, thread_id, blocks);
                    }
                }

                auto oldprogress = progress.exchange(20 + static_cast<int>(70 * i / memory_size), ::std::memory_order_release);
                if (oldprogress < 0)
                {
                    _log << "Reading was interrupted";
                    return 0; // Loading interrupted
                }
            }

            if (inFile.eof())
                break;

            StatsMap per_thread_statistics;

            blocks_number_in_thread = 0;
            inFile.read((char*)&blocks_number_in_thread, sizeof(decltype(blocks_number_in_thread)));
            threshold = read_number + blocks_number_in_thread;
            while (!inFile.eof() && read_number < threshold)
            {
                EASY_BLOCK("Read block", ::profiler::colors::Green);

                ++read_number;

                uint16_t sz = 0;
                inFile.read((char*)&sz, sizeof(sz));
                if (sz == 0)
                {
                    _log << "Bad block size == 0";
                    return 0;
                }

                char* data = serialized_blocks[i];
                inFile.read(data, sz);
                i += sz;
                auto baseData = reinterpret_cast<::profiler::SerializedBlock*>(data);
                if (baseData->id() >= total_descriptors_number)
                {
                    _log << "Bad block id == " << baseData->id();
                    return 0;
                }

                auto desc = descriptors[baseData->id()];
                if (desc == nullptr)
                {
                    _log << "Bad block id == " << baseData->id() << ". Description is null.";
                    return 0;
                }

                auto t_begin = reinterpret_cast<::profiler::timestamp_t*>(data);
                auto t_end = t_begin + 1;

                if (cpu_frequency != 0)
                {
                    EASY_CONVERT_TO_NANO(*t_begin, cpu_frequency, conversion_factor);
                    EASY_CONVERT_TO_NANO(*t_end, cpu_frequency, conversion_factor);
                }

                if (*t_end >= begin_time)
                {
                    if (*t_begin < begin_time)
                        *t_begin = begin_time;

                    blocks.emplace_back();
                    ::profiler::BlocksTree& tree = blocks.back();
                    tree.node = baseData;
                    const auto block_index = blocks_counter++;

                    if (*tree.node->name() != 0)
                    {
                        // If the block has a runtime name then generate a new id for it.
                        // Blocks with the same name will have the same id.

                        IdMap::key_type key(tree.node->name());
                        auto it = identification_table.find(key);
                        if (it != identification_table.end())
                        {
                            // There is already a block with such name, use its id
                            baseData->setId(it->second);
                        }
                        else
                        {
                            // There were no blocks with such name, generate new id and save it in the table for further usage.
                            auto id = static_cast<::profiler::block_id_t>(descriptors.size());
                            identification_table.emplace(key, id);
                            if (descriptors.capacity() == descriptors.size())
                                descriptors.reserve((descriptors.size() * 3) >> 1);
                            descriptors.push_back(descriptors[baseData->id()]);
                            baseData->setId(id);
                        }
                    }

                    if (!root.children.empty())
                    {
                        auto& back = blocks[root.children.back()];
                        auto t1 = back.node->end();
                        auto mt0 = tree.node->begin();
                        if (mt0 < t1) // parent - starts earlier than last ends
                        {
                            //auto lower = ::std::lower_bound(root.children.begin(), root.children.end(), tree);
                            EASY_BLOCK("Find children", ::profiler::colors::Blue);

                            // Move the trailing entries of root.children which started no earlier
                            // than the current block into tree.children: they are children of the
                            // current block, read before their parent.
                            auto rlower1 = ++root.children.rbegin();
                            for (; rlower1 != root.children.rend() && !(mt0 > blocks[*rlower1].node->begin()); ++rlower1);
                            auto lower = rlower1.base();
                            ::std::move(lower, root.children.end(), ::std::back_inserter(tree.children));

                            root.children.erase(lower, root.children.end());
                            EASY_END_BLOCK;

                            if (gather_statistics)
                            {
                                EASY_BLOCK("Gather statistic within parent", ::profiler::colors::Magenta);
                                auto& per_parent_statistics = parent_statistics[thread_id];
                                per_parent_statistics.clear();

                                //per_parent_statistics.reserve(tree.children.size());     // this gives slow-down on Windows
                                //per_parent_statistics.reserve(tree.children.size() * 2); // this gives no speed-up on Windows
                                // TODO: check this behavior on Linux

                                for (auto i : tree.children)
                                {
                                    auto& child = blocks[i];
                                    child.per_parent_stats = update_statistics(per_parent_statistics, child, i, block_index, blocks);
                                    if (tree.depth < child.depth)
                                        tree.depth = child.depth;
                                }
                            }
                            else
                            {
                                for (auto i : tree.children)
                                {
                                    const auto& child = blocks[i];
                                    if (tree.depth < child.depth)
                                        tree.depth = child.depth;
                                }
                            }

                            if (tree.depth == 254)
                            {
                                // 254 because we need 1 additional level for root (thread).
                                // In other words: real stack depth = 1 root block + 254 children.

                                if (*tree.node->name() != 0)
                                    _log << "Stack depth exceeded value of 254\nfor block \"" << desc->name() << "\"";
                                else
                                    _log << "Stack depth exceeded value of 254\nfor block \"" << desc->name() << "\"\nfrom file \"" << desc->file() << "\":" << desc->line();

                                return 0;
                            }

                            ++tree.depth;
                        }
                    }

                    ++root.blocks_number;
                    root.children.emplace_back(block_index); // ::std::move(tree));
                    if (desc->type() == ::profiler::BLOCK_TYPE_EVENT)
                        root.events.emplace_back(block_index);

                    if (gather_statistics)
                    {
                        EASY_BLOCK("Gather per thread statistics", ::profiler::colors::Coral);
                        tree.per_thread_stats = update_statistics(per_thread_statistics, tree, block_index, thread_id, blocks);
                    }
                }

                auto oldprogress = progress.exchange(20 + static_cast<int>(70 * i / memory_size), ::std::memory_order_release);
                if (oldprogress < 0)
                {
                    _log << "Reading was interrupted";
                    return 0; // Loading interrupted
                }
            }
        }

        if (progress.load(::std::memory_order_acquire) < 0)
        {
            _log << "Reading was interrupted";
            return 0; // Loading interrupted
        }

        EASY_BLOCK("Gather statistics for roots", ::profiler::colors::Purple);
        if (gather_statistics)
        {
            ::std::vector<::std::thread> statistics_threads;
            statistics_threads.reserve(threaded_trees.size());

            for (auto& it : threaded_trees)
            {
                auto& root = it.second;
                root.thread_id = it.first;
                //root.tree.shrink_to_fit();

                auto& per_frame_statistics = frame_statistics[root.thread_id];
                auto& per_parent_statistics = parent_statistics[it.first];
                per_parent_statistics.clear();

                statistics_threads.emplace_back(::std::thread([&per_parent_statistics, &per_frame_statistics, &blocks](::profiler::BlocksTreeRoot& root)
                {
                    //::std::sort(root.sync.begin(), root.sync.end(), [&blocks](::profiler::block_index_t left, ::profiler::block_index_t right)
                    //{
                    //    return blocks[left].node->begin() < blocks[right].node->begin();
                    //});

                    ::profiler::block_index_t cs_index = 0;
                    for (auto i : root.children)
                    {
                        auto& frame = blocks[i];

                        frame.per_parent_stats = update_statistics(per_parent_statistics, frame, i, root.thread_id, blocks);

                        per_frame_statistics.clear();
                        update_statistics_recursive(per_frame_statistics, frame, i, i, blocks);

                        if (cs_index < root.sync.size())
                        {
                            CsStatsMap frame_stats_cs;
                            do {
                                auto j = root.sync[cs_index];
                                auto& cs = blocks[j];
                                if (cs.node->end() < frame.node->begin())
                                    continue;
                                if (cs.node->begin() > frame.node->end())
                                    break;
                                cs.per_frame_stats = update_statistics(frame_stats_cs, cs, cs_index, i, blocks);
                            } while (++cs_index < root.sync.size());
                        }

                        if (root.depth < frame.depth)
                            root.depth = frame.depth;

                        root.profiled_time += frame.node->duration();
                    }

                    ++root.depth;
                }, ::std::ref(root)));
            }

            int j = 0, n = static_cast<int>(statistics_threads.size());
            for (auto& t : statistics_threads)
            {
                t.join();
                progress.store(90 + (10 * ++j) / n, ::std::memory_order_release);
            }
        }
        else
        {
            int j = 0, n = static_cast<int>(threaded_trees.size());
            for (auto& it : threaded_trees)
            {
                auto& root = it.second;
                root.thread_id = it.first;

                //::std::sort(root.sync.begin(), root.sync.end(), [&blocks](::profiler::block_index_t left, ::profiler::block_index_t right)
                //{
                //    return blocks[left].node->begin() < blocks[right].node->begin();
                //});

                //root.tree.shrink_to_fit();
                for (auto i : root.children)
                {
                    auto& frame = blocks[i];

                    if (root.depth < frame.depth)
                        root.depth = frame.depth;

                    root.profiled_time += frame.node->duration();
                }

                ++root.depth;

                progress.store(90 + (10 * ++j) / n, ::std::memory_order_release);
            }
        }

        // No need to delete BlockStatistics instances - they will be deleted inside BlocksTree destructors
        return blocks_counter;
    }

    //////////////////////////////////////////////////////////////////////////

    PROFILER_API bool readDescriptionsFromStream(::std::atomic<int>& progress, ::std::stringstream& inFile,
                                                 ::profiler::SerializedData& serialized_descriptors,
                                                 ::profiler::descriptors_list_t& descriptors,
                                                 ::std::stringstream& _log)
    {
        EASY_FUNCTION(::profiler::colors::Cyan);

        progress.store(0);

        uint32_t signature = 0;
        inFile.read((char*)&signature, sizeof(uint32_t));
        if (signature != PROFILER_SIGNATURE)
        {
            _log << "Wrong file signature.\nThis is not EasyProfiler file/stream.";
            return false;
        }

        uint32_t version = 0;
        inFile.read((char*)&version, sizeof(uint32_t));
        if (!isCompatibleVersion(version))
        {
            _log << "Incompatible version: v" << (version >> 24) << "." << ((version & 0x00ff0000) >> 16) << "." << (version & 0x0000ffff);
            return false;
        }

        uint32_t total_descriptors_number = 0;
        inFile.read((char*)&total_descriptors_number, sizeof(decltype(total_descriptors_number)));
        if (total_descriptors_number == 0)
        {
            _log << "Blocks description number == 0";
            return false;
        }

        uint64_t descriptors_memory_size = 0;
        inFile.read((char*)&descriptors_memory_size, sizeof(decltype(descriptors_memory_size)));
        if (descriptors_memory_size == 0)
        {
            _log << "Wrong memory size == 0 for " << total_descriptors_number << " blocks descriptions";
            return false;
        }

        descriptors.reserve(total_descriptors_number);
        //const char* olddata = append_regime ? serialized_descriptors.data() : nullptr;
        serialized_descriptors.set(descriptors_memory_size);
        //validate_pointers(progress, olddata, serialized_descriptors, descriptors, descriptors.size());

        uint64_t i = 0;
        while (!inFile.eof() && descriptors.size() < total_descriptors_number)
        {
            uint16_t sz = 0;
            inFile.read((char*)&sz, sizeof(sz));
            if (sz == 0)
            {
                descriptors.push_back(nullptr);
                continue;
            }

            //if (i + sz > descriptors_memory_size) {
            //    printf("FILE CORRUPTED\n");
            //    return 0;
            //}

            char* data = serialized_descriptors[i];
            inFile.read(data, sz);
            auto descriptor = reinterpret_cast<::profiler::SerializedBlockDescriptor*>(data);
            descriptors.push_back(descriptor);

            i += sz;
            auto oldprogress = progress.exchange(static_cast<int>(100 * i / descriptors_memory_size), ::std::memory_order_release);
            if (oldprogress < 0)
            {
                _log << "Reading was interrupted";
                return false; // Loading interrupted
            }
        }

        return !descriptors.empty();
    }

    //////////////////////////////////////////////////////////////////////////

}

#undef EASY_CONVERT_TO_NANO

#ifdef EASY_USE_FLOATING_POINT_CONVERSION
# ifdef _MSC_VER
#  pragma warning(default:4244)
# elif defined(__GNUC__)
#  pragma GCC diagnostic pop
# elif defined(__clang__)
#  pragma clang diagnostic pop
# endif
# undef EASY_USE_FLOATING_POINT_CONVERSION
#endif