/*
    Copyright (c) 2007-2016 Contributors as noted in the AUTHORS file

    This file is part of libzmq, the ZeroMQ core engine in C++.

    libzmq is free software; you can redistribute it and/or modify it under
    the terms of the GNU Lesser General Public License (LGPL) as published
    by the Free Software Foundation; either version 3 of the License, or
    (at your option) any later version.

    As a special exception, the Contributors give you permission to link
    this library with independent modules to produce an executable,
    regardless of the license terms of these independent modules, and to
    copy and distribute the resulting executable under terms of your choice,
    provided that you also meet, for each linked independent module, the
    terms and conditions of the license of that module. An independent
    module is a module which is not derived from or based on this library.
    If you modify this library, you must extend this exception to your
    version of the library.

    libzmq is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
    License for more details.

    You should have received a copy of the GNU Lesser General Public License
    along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
|
|
|
|
|
2016-02-18 10:56:52 -06:00
|
|
|
#include "precompiled.hpp"
|
2010-09-26 16:55:54 +02:00
|
|
|
#include "clock.hpp"
|
|
|
|
#include "likely.hpp"
|
|
|
|
#include "config.hpp"
|
|
|
|
#include "err.hpp"
|
2013-11-07 14:06:54 -02:00
|
|
|
#include "mutex.hpp"
|
2010-09-26 16:55:54 +02:00
|
|
|
|
|
|
|
#include <stddef.h>
|
|
|
|
|
2010-10-26 15:35:56 +02:00
|
|
|
#if defined _MSC_VER
|
2013-02-19 16:49:23 +01:00
|
|
|
#if defined _WIN32_WCE
|
2012-03-14 19:12:28 +04:00
|
|
|
#include <cmnintrin.h>
|
|
|
|
#else
|
2010-10-26 15:35:56 +02:00
|
|
|
#include <intrin.h>
|
2020-08-06 18:41:15 +02:00
|
|
|
#if defined(_M_ARM) || defined(_M_ARM64)
|
|
|
|
#include <arm_neon.h>
|
|
|
|
#endif
|
2010-10-26 15:35:56 +02:00
|
|
|
#endif
|
2012-03-14 19:12:28 +04:00
|
|
|
#endif
|
2010-10-26 15:35:56 +02:00
|
|
|
|
2010-09-26 16:55:54 +02:00
|
|
|
#if !defined ZMQ_HAVE_WINDOWS
|
|
|
|
#include <sys/time.h>
|
|
|
|
#endif
|
|
|
|
|
2012-01-19 12:27:19 -06:00
|
|
|
#if defined HAVE_CLOCK_GETTIME || defined HAVE_GETHRTIME
|
2011-12-03 13:07:30 +01:00
|
|
|
#include <time.h>
|
|
|
|
#endif
|
|
|
|
|
2018-03-10 03:03:02 -08:00
|
|
|
#if defined ZMQ_HAVE_VXWORKS
|
|
|
|
#include "timers.h"
|
|
|
|
#endif
|
|
|
|
|
2015-02-13 22:50:28 -08:00
|
|
|
#if defined ZMQ_HAVE_OSX
//  Substitute for clock_gettime on macOS releases that lack it
//  (pre-10.12), implemented on top of the Mach host clock services.
//  Always reports success (0), mirroring the callers' expectations.
int alt_clock_gettime (int clock_id, timespec *ts)
{
    clock_serv_t host_clock;
    mach_timespec_t mach_ts;
    host_get_clock_service (mach_host_self (), clock_id, &host_clock);
    clock_get_time (host_clock, &mach_ts);
    mach_port_deallocate (mach_task_self (), host_clock);
    ts->tv_sec = mach_ts.tv_sec;
    ts->tv_nsec = mach_ts.tv_nsec;
    return 0;
}
#endif
|
|
|
|
|
2013-11-07 15:21:42 -02:00
|
|
|
#ifdef ZMQ_HAVE_WINDOWS
//  GetTickCount64 exists only on Windows Vista and later. To keep running on
//  older systems we resolve it dynamically at startup and fall back to an
//  emulation built on the 32-bit GetTickCount when it is unavailable.
typedef ULONGLONG (*f_compatible_get_tick_count64) ();

//  Guards the wrap-around bookkeeping inside the emulation below.
static zmq::mutex_t compatible_get_tick_count64_mutex;

//  Emulates GetTickCount64 by widening GetTickCount's 32-bit millisecond
//  counter: whenever the raw counter goes backwards we record one more
//  wrap-around and splice the wrap count into the upper 32 bits.
ULONGLONG compatible_get_tick_count64 ()
{
#ifdef ZMQ_HAVE_WINDOWS_UWP
    //  UWP targets always provide the genuine 64-bit API.
    return ::GetTickCount64 ();
#else
    zmq::scoped_lock_t locker (compatible_get_tick_count64_mutex);

    static DWORD s_wrap = 0;
    static DWORD s_last_tick = 0;
    const DWORD current_tick = ::GetTickCount ();

    //  A reading smaller than the previous one means the counter wrapped.
    if (current_tick < s_last_tick)
        ++s_wrap;
    s_last_tick = current_tick;

    return (static_cast<ULONGLONG> (s_wrap) << 32)
           + static_cast<ULONGLONG> (current_tick);
#endif
}

//  Resolves GetTickCount64 from Kernel32.dll, falling back to the emulation
//  above when the symbol is missing (pre-Vista) or under UWP restrictions.
f_compatible_get_tick_count64 init_compatible_get_tick_count64 ()
{
    f_compatible_get_tick_count64 resolved = NULL;
#if !defined ZMQ_HAVE_WINDOWS_UWP
    const HMODULE kernel32 = ::LoadLibraryA ("Kernel32.dll");
    if (kernel32 != NULL)
        resolved = reinterpret_cast<f_compatible_get_tick_count64> (
          ::GetProcAddress (kernel32, "GetTickCount64"));
#endif
    if (resolved == NULL)
        resolved = compatible_get_tick_count64;

#if !defined ZMQ_HAVE_WINDOWS_UWP
    //  NOTE(review): releasing the reference looks safe because Kernel32
    //  stays mapped for the lifetime of the process, keeping the resolved
    //  pointer valid.
    if (kernel32 != NULL)
        ::FreeLibrary (kernel32);
#endif
    return resolved;
}

//  Resolved once during static initialization.
static f_compatible_get_tick_count64 my_get_tick_count64 =
  init_compatible_get_tick_count64 ();
#endif
|
2013-11-07 14:06:54 -02:00
|
|
|
|
2021-05-02 16:49:27 -07:00
|
|
|
#ifndef ZMQ_HAVE_WINDOWS
//  Unit-conversion factors used by the non-Windows time paths below.
const uint64_t usecs_per_msec = 1000;
const uint64_t nsecs_per_usec = 1000;
#endif
//  Used by all platforms.
const uint64_t usecs_per_sec = 1000 * 1000;
|
2018-05-26 18:45:58 +02:00
|
|
|
|
2010-09-26 16:55:54 +02:00
|
|
|
//  Seed the cache used by now_ms () with an initial TSC reading and the
//  matching wall-clock time in milliseconds.
zmq::clock_t::clock_t () :
    _last_tsc (rdtsc ()),
#ifdef ZMQ_HAVE_WINDOWS
    _last_time (static_cast<uint64_t> (my_get_tick_count64 ()))
#else
    _last_time (now_us () / usecs_per_msec)
#endif
{
}
|
|
|
|
|
|
|
|
uint64_t zmq::clock_t::now_us ()
|
|
|
|
{
|
|
|
|
#if defined ZMQ_HAVE_WINDOWS
|
|
|
|
|
|
|
|
// Get the high resolution counter's accuracy.
|
2018-05-27 10:03:09 +02:00
|
|
|
// While QueryPerformanceFrequency only needs to be called once, since its
|
|
|
|
// value does not change during runtime, we query it here since this is a
|
|
|
|
// static function. It might make sense to cache it, though.
|
2018-05-25 22:41:05 +02:00
|
|
|
LARGE_INTEGER ticks_per_second;
|
|
|
|
QueryPerformanceFrequency (&ticks_per_second);
|
2010-09-26 16:55:54 +02:00
|
|
|
|
|
|
|
// What time is it?
|
|
|
|
LARGE_INTEGER tick;
|
|
|
|
QueryPerformanceCounter (&tick);
|
|
|
|
|
|
|
|
// Convert the tick number into the number of seconds
|
|
|
|
// since the system was started.
|
2018-05-26 18:45:58 +02:00
|
|
|
const double ticks_div =
|
|
|
|
static_cast<double> (ticks_per_second.QuadPart) / usecs_per_sec;
|
2018-05-18 15:54:00 +02:00
|
|
|
return static_cast<uint64_t> (tick.QuadPart / ticks_div);
|
2010-09-26 16:55:54 +02:00
|
|
|
|
2018-03-10 03:03:02 -08:00
|
|
|
#elif defined HAVE_CLOCK_GETTIME \
|
|
|
|
&& (defined CLOCK_MONOTONIC || defined ZMQ_HAVE_VXWORKS)
|
2011-12-03 13:07:30 +01:00
|
|
|
|
|
|
|
// Use POSIX clock_gettime function to get precise monotonic time.
|
|
|
|
struct timespec tv;
|
2016-10-28 11:04:21 +01:00
|
|
|
|
2017-04-20 00:13:06 +02:00
|
|
|
#if defined ZMQ_HAVE_OSX \
|
|
|
|
&& __MAC_OS_X_VERSION_MIN_REQUIRED < 101200 // less than macOS 10.12
|
|
|
|
int rc = alt_clock_gettime (SYSTEM_CLOCK, &tv);
|
2016-10-28 11:04:21 +01:00
|
|
|
#else
|
2011-12-03 13:07:30 +01:00
|
|
|
int rc = clock_gettime (CLOCK_MONOTONIC, &tv);
|
2016-10-28 11:04:21 +01:00
|
|
|
#endif
|
2015-08-20 07:46:34 -07:00
|
|
|
// Fix case where system has clock_gettime but CLOCK_MONOTONIC is not supported.
|
|
|
|
// This should be a configuration check, but I looked into it and writing an
|
|
|
|
// AC_FUNC_CLOCK_MONOTONIC seems beyond my powers.
|
|
|
|
if (rc != 0) {
|
2018-03-10 03:03:02 -08:00
|
|
|
#ifndef ZMQ_HAVE_VXWORKS
|
2015-08-20 07:46:34 -07:00
|
|
|
// Use POSIX gettimeofday function to get precise time.
|
|
|
|
struct timeval tv;
|
|
|
|
int rc = gettimeofday (&tv, NULL);
|
|
|
|
errno_assert (rc == 0);
|
2018-05-26 18:45:58 +02:00
|
|
|
return tv.tv_sec * usecs_per_sec + tv.tv_usec;
|
2018-03-10 03:03:02 -08:00
|
|
|
#endif
|
2015-08-20 07:46:34 -07:00
|
|
|
}
|
2018-05-26 18:45:58 +02:00
|
|
|
return tv.tv_sec * usecs_per_sec + tv.tv_nsec / nsecs_per_usec;
|
2011-12-03 13:07:30 +01:00
|
|
|
|
2012-01-19 12:27:19 -06:00
|
|
|
#elif defined HAVE_GETHRTIME
|
|
|
|
|
2018-05-26 18:45:58 +02:00
|
|
|
return gethrtime () / nsecs_per_usec;
|
2012-01-19 12:27:19 -06:00
|
|
|
|
2010-09-26 16:55:54 +02:00
|
|
|
#else
|
|
|
|
|
2021-07-22 21:53:19 -07:00
|
|
|
LIBZMQ_UNUSED (nsecs_per_usec);
|
2010-09-26 16:55:54 +02:00
|
|
|
// Use POSIX gettimeofday function to get precise time.
|
|
|
|
struct timeval tv;
|
|
|
|
int rc = gettimeofday (&tv, NULL);
|
|
|
|
errno_assert (rc == 0);
|
2018-05-26 18:45:58 +02:00
|
|
|
return tv.tv_sec * usecs_per_sec + tv.tv_usec;
|
2010-09-26 16:55:54 +02:00
|
|
|
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t zmq::clock_t::now_ms ()
|
|
|
|
{
|
2019-12-25 13:51:21 +01:00
|
|
|
const uint64_t tsc = rdtsc ();
|
2010-09-26 16:55:54 +02:00
|
|
|
|
|
|
|
// If TSC is not supported, get precise time and chop off the microseconds.
|
|
|
|
if (!tsc) {
|
2013-11-07 14:06:54 -02:00
|
|
|
#ifdef ZMQ_HAVE_WINDOWS
|
|
|
|
// Under Windows, now_us is not so reliable since QueryPerformanceCounter
|
|
|
|
// does not guarantee that it will use a hardware that offers a monotonic timer.
|
|
|
|
// So, lets use GetTickCount when GetTickCount64 is not available with an workaround
|
|
|
|
// to its 32 bit limitation.
|
|
|
|
return static_cast<uint64_t> ((*my_get_tick_count64) ());
|
|
|
|
#else
|
2018-05-26 18:45:58 +02:00
|
|
|
return now_us () / usecs_per_msec;
|
2013-11-07 14:06:54 -02:00
|
|
|
#endif
|
|
|
|
}
|
2010-09-26 16:55:54 +02:00
|
|
|
|
|
|
|
// If TSC haven't jumped back (in case of migration to a different
|
|
|
|
// CPU core) and if not too much time elapsed since last measurement,
|
|
|
|
// we can return cached time value.
|
2018-05-27 11:10:39 +02:00
|
|
|
if (likely (tsc - _last_tsc <= (clock_precision / 2) && tsc >= _last_tsc))
|
|
|
|
return _last_time;
|
2010-09-26 16:55:54 +02:00
|
|
|
|
2018-05-27 11:10:39 +02:00
|
|
|
_last_tsc = tsc;
|
2013-11-07 14:06:54 -02:00
|
|
|
#ifdef ZMQ_HAVE_WINDOWS
|
2018-05-27 11:10:39 +02:00
|
|
|
_last_time = static_cast<uint64_t> ((*my_get_tick_count64) ());
|
2013-11-07 14:06:54 -02:00
|
|
|
#else
|
2018-05-27 11:10:39 +02:00
|
|
|
_last_time = now_us () / usecs_per_msec;
|
2013-11-07 14:06:54 -02:00
|
|
|
#endif
|
2018-05-27 11:10:39 +02:00
|
|
|
return _last_time;
|
2010-09-26 16:55:54 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
//  Returns a cheap, high-frequency timestamp counter where the CPU/compiler
//  combination provides one; otherwise falls back to a monotonic clock
//  reading expressed in nanoseconds.
uint64_t zmq::clock_t::rdtsc ()
{
#if (defined _MSC_VER && (defined _M_IX86 || defined _M_X64))
    //  MSVC exposes the x86 instruction directly as an intrinsic.
    return __rdtsc ();
#elif defined(_MSC_VER) && defined(_M_ARM) // NC => added for windows ARM
    return __rdpmccntr64 ();
#elif defined(_MSC_VER) && defined(_M_ARM64) // NC => added for windows ARM64
    //return __rdpmccntr64 ();
    //return __rdtscp (nullptr);
    // todo: find proper implementation for ARM64
    static uint64_t snCounter = 0;
    return ++snCounter;
#elif (defined __GNUC__ && (defined __i386__ || defined __x86_64__))
    //  rdtsc leaves the low half in EAX ("=a") and the high half in EDX
    //  ("=d"); recombine them into a 64-bit value.
    uint32_t lo, hi;
    __asm__ volatile("rdtsc" : "=a"(lo), "=d"(hi));
    return static_cast<uint64_t> (hi) << 32 | lo;
#elif (defined __SUNPRO_CC && (__SUNPRO_CC >= 0x5100)                          \
       && (defined __i386 || defined __amd64 || defined __x86_64))
    //  Sun Studio: read the two 32-bit halves through a union.
    union
    {
        uint64_t u64val;
        uint32_t u32val[2];
    } tsc;
    asm("rdtsc" : "=a"(tsc.u32val[0]), "=d"(tsc.u32val[1]));
    return tsc.u64val;
#elif defined(__s390__)
    //  s390: the store-clock instruction yields a 64-bit TOD value.
    uint64_t tsc;
    asm("\tstck\t%0\n" : "=Q"(tsc) : : "cc");
    return tsc;
#else
    //  No TSC-style counter available: substitute a monotonic clock reading
    //  converted to nanoseconds.
    struct timespec ts;
#if defined ZMQ_HAVE_OSX                                                       \
  && __MAC_OS_X_VERSION_MIN_REQUIRED < 101200 // less than macOS 10.12
    alt_clock_gettime (SYSTEM_CLOCK, &ts);
#else
    clock_gettime (CLOCK_MONOTONIC, &ts);
#endif
    return static_cast<uint64_t> (ts.tv_sec) * nsecs_per_usec * usecs_per_sec
           + ts.tv_nsec;
#endif
}
|