windows: improve timer precision

Improve timing precision by using QueryPerformanceCounter.

This is part of the fix for Node.js' test-timers-first-fire.js.
This commit is contained in:
Alexis Campailla 2014-09-09 18:08:46 +02:00 committed by Saúl Ibarra Corretgé
parent 234b1e046b
commit 6ced8c2cc7
5 changed files with 47 additions and 68 deletions

View File

@ -316,8 +316,6 @@ RB_HEAD(uv_timer_tree_s, uv_timer_s);
HANDLE iocp; \ HANDLE iocp; \
/* The current time according to the event loop. in msecs. */ \ /* The current time according to the event loop. in msecs. */ \
uint64_t time; \ uint64_t time; \
/* GetTickCount() result when the event loop time was last updated. */ \
DWORD last_tick_count; \
/* Tail of a single-linked circular queue of pending reqs. If the queue */ \ /* Tail of a single-linked circular queue of pending reqs. If the queue */ \
/* is empty, tail_ is NULL. If there is only one item, */ \ /* is empty, tail_ is NULL. If there is only one item, */ \
/* tail_->next_req == tail_ */ \ /* tail_->next_req == tail_ */ \

View File

@ -137,7 +137,6 @@ int uv_loop_init(uv_loop_t* loop) {
* to zero before calling uv_update_time for the first time. * to zero before calling uv_update_time for the first time.
*/ */
loop->time = 0; loop->time = 0;
loop->last_tick_count = 0;
uv_update_time(loop); uv_update_time(loop);
QUEUE_INIT(&loop->wq); QUEUE_INIT(&loop->wq);
@ -314,13 +313,17 @@ static void uv_poll(uv_loop_t* loop, DWORD timeout) {
/* Package was dequeued */ /* Package was dequeued */
req = uv_overlapped_to_req(overlapped); req = uv_overlapped_to_req(overlapped);
uv_insert_pending_req(loop, req); uv_insert_pending_req(loop, req);
/* Some time might have passed waiting for I/O,
* so update the loop time here.
*/
uv_update_time(loop);
} else if (GetLastError() != WAIT_TIMEOUT) { } else if (GetLastError() != WAIT_TIMEOUT) {
/* Serious error */ /* Serious error */
uv_fatal_error(GetLastError(), "GetQueuedCompletionStatus"); uv_fatal_error(GetLastError(), "GetQueuedCompletionStatus");
} else { } else if (timeout > 0) {
/* We're sure that at least `timeout` milliseconds have expired, but /* GetQueuedCompletionStatus can occasionally return a little early.
* this may not be reflected yet in the GetTickCount() return value. * Make sure that the desired timeout is reflected in the loop time.
* Therefore we ensure it's taken into account here.
*/ */
uv__time_forward(loop, timeout); uv__time_forward(loop, timeout);
} }
@ -347,13 +350,17 @@ static void uv_poll_ex(uv_loop_t* loop, DWORD timeout) {
req = uv_overlapped_to_req(overlappeds[i].lpOverlapped); req = uv_overlapped_to_req(overlappeds[i].lpOverlapped);
uv_insert_pending_req(loop, req); uv_insert_pending_req(loop, req);
} }
/* Some time might have passed waiting for I/O,
* so update the loop time here.
*/
uv_update_time(loop);
} else if (GetLastError() != WAIT_TIMEOUT) { } else if (GetLastError() != WAIT_TIMEOUT) {
/* Serious error */ /* Serious error */
uv_fatal_error(GetLastError(), "GetQueuedCompletionStatusEx"); uv_fatal_error(GetLastError(), "GetQueuedCompletionStatusEx");
} else if (timeout > 0) { } else if (timeout > 0) {
/* We're sure that at least `timeout` milliseconds have expired, but /* GetQueuedCompletionStatus can occasionally return a little early.
* this may not be reflected yet in the GetTickCount() return value. * Make sure that the desired timeout is reflected in the loop time.
* Therefore we ensure it's taken into account here.
*/ */
uv__time_forward(loop, timeout); uv__time_forward(loop, timeout);
} }
@ -412,7 +419,6 @@ int uv_run(uv_loop_t *loop, uv_run_mode mode) {
* UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from * UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from
* the check. * the check.
*/ */
uv_update_time(loop);
uv_process_timers(loop); uv_process_timers(loop);
} }

View File

@ -322,6 +322,7 @@ void uv__fs_poll_endgame(uv_loop_t* loop, uv_fs_poll_t* handle);
*/ */
void uv__util_init(); void uv__util_init();
uint64_t uv__hrtime(double scale);
int uv_parent_pid(); int uv_parent_pid();
__declspec(noreturn) void uv_fatal_error(const int errorno, const char* syscall); __declspec(noreturn) void uv_fatal_error(const int errorno, const char* syscall);

View File

@ -28,39 +28,17 @@
#include "handle-inl.h" #include "handle-inl.h"
/* The number of milliseconds in one second. */
#define UV__MILLISEC 1000

/* Update the event loop's notion of "now", in milliseconds.
 *
 * The time is read from the high-resolution performance counter via
 * uv__hrtime(), scaled to milliseconds.  The loop time must never move
 * backwards: uv__time_forward() may already have pushed loop->time slightly
 * ahead of the clock (to account for GetQueuedCompletionStatus returning a
 * little early), so a new reading is stored only when it is greater than
 * the current loop time.
 */
void uv_update_time(uv_loop_t* loop) {
  uint64_t new_time = uv__hrtime(UV__MILLISEC);

  if (new_time > loop->time) {
    loop->time = new_time;
  }
}
/* Artificially advance the loop time by `msecs` milliseconds.
 * Called when the completion-port wait timed out: the wait is known to have
 * lasted at least `msecs`, even if the clock reading does not reflect it yet.
 */
void uv__time_forward(uv_loop_t* loop, uint64_t msecs) {
  loop->time += msecs;
}
@ -191,16 +169,9 @@ DWORD uv__next_timeout(const uv_loop_t* loop) {
timer = RB_MIN(uv_timer_tree_s, &((uv_loop_t*)loop)->timers); timer = RB_MIN(uv_timer_tree_s, &((uv_loop_t*)loop)->timers);
if (timer) { if (timer) {
delta = timer->due - loop->time; delta = timer->due - loop->time;
if (delta >= UINT_MAX >> 1) { if (delta >= UINT_MAX - 1) {
/* A timeout value of UINT_MAX means infinite, so that's no good. But /* A timeout value of UINT_MAX means infinite, so that's no good. */
* more importantly, there's always the risk that GetTickCount wraps. return UINT_MAX - 1;
* uv_update_time can detect this, but we must make sure that the
* tick counter never overflows twice between two subsequent
* uv_update_time calls. We do this by never sleeping more than half
* the time it takes to wrap the counter - which is huge overkill,
* but hey, it's not so bad to wake up every 25 days.
*/
return UINT_MAX >> 1;
} else if (delta < 0) { } else if (delta < 0) {
/* Negative timeout values are not allowed */ /* Negative timeout values are not allowed */
return 0; return 0;

View File

@ -52,16 +52,15 @@
#define MAX_TITLE_LENGTH 8192 #define MAX_TITLE_LENGTH 8192
/* The number of nanoseconds in one second. */ /* The number of nanoseconds in one second. */
#undef NANOSEC #define UV__NANOSEC 1000000000
#define NANOSEC 1000000000
/* Cached copy of the process title, plus a mutex guarding it. */ /* Cached copy of the process title, plus a mutex guarding it. */
static char *process_title; static char *process_title;
static CRITICAL_SECTION process_title_lock; static CRITICAL_SECTION process_title_lock;
/* Frequency (ticks per nanosecond) of the high-resolution clock. */ /* Interval (in seconds) of the high-resolution clock. */
static double hrtime_frequency_ = 0; static double hrtime_interval_ = 0;
/* /*
@ -73,11 +72,14 @@ void uv__util_init() {
/* Initialize process title access mutex. */ /* Initialize process title access mutex. */
InitializeCriticalSection(&process_title_lock); InitializeCriticalSection(&process_title_lock);
/* Retrieve high-resolution timer frequency. */ /* Retrieve high-resolution timer frequency
if (QueryPerformanceFrequency(&perf_frequency)) * and precompute its reciprocal.
hrtime_frequency_ = (double) perf_frequency.QuadPart / (double) NANOSEC; */
else if (QueryPerformanceFrequency(&perf_frequency)) {
hrtime_frequency_= 0; hrtime_interval_ = 1.0 / perf_frequency.QuadPart;
} else {
hrtime_interval_= 0;
}
} }
@ -463,26 +465,27 @@ int uv_get_process_title(char* buffer, size_t size) {
/* Public high-resolution clock: returns the performance-counter time scaled
 * to nanoseconds.  Ensures one-time initialization (which computes the
 * counter's interval) has run before reading the counter.
 */
uint64_t uv_hrtime(void) {
  uv__once_init();
  return uv__hrtime(UV__NANOSEC);
}
/* Read the high-resolution performance counter and return it scaled to
 * `scale` ticks per second (e.g. UV__NANOSEC for nanoseconds, UV__MILLISEC
 * for milliseconds).
 *
 * Returns 0 when the high-resolution clock is unsupported (the interval is
 * zero because QueryPerformanceFrequency failed during initialization) or
 * when reading the counter fails.  uv__util_init() must have run first so
 * that hrtime_interval_ is valid.
 */
uint64_t uv__hrtime(double scale) {
  LARGE_INTEGER counter;

  /* If the performance interval is zero, there's no support. */
  if (hrtime_interval_ == 0) {
    return 0;
  }

  if (!QueryPerformanceCounter(&counter)) {
    return 0;
  }

  /* Because we have no guarantee about the order of magnitude of the
   * performance counter interval, integer math could cause this computation
   * to overflow. Therefore we resort to floating point math.
   */
  return (uint64_t) ((double) counter.QuadPart * hrtime_interval_ * scale);
}