nanosecond timers wip

This commit is contained in:
Ben Noordhuis 2025-06-02 09:22:45 +02:00
parent b00c5d1a09
commit 79e1f7a59f
12 changed files with 146 additions and 95 deletions

View File

@ -64,10 +64,10 @@ int uv_timer_init(uv_loop_t* loop, uv_timer_t* handle) {
}
int uv_timer_start(uv_timer_t* handle,
uv_timer_cb cb,
uint64_t timeout,
uint64_t repeat) {
static int uv__timer_start(uv_timer_t* handle,
uv_timer_cb cb,
uint64_t timeout,
uint64_t repeat) {
uint64_t clamped_timeout;
if (uv__is_closing(handle) || cb == NULL)
@ -94,6 +94,23 @@ int uv_timer_start(uv_timer_t* handle,
}
/* Convert a millisecond interval to nanoseconds, saturating at
 * (uint64_t) -1 when the multiplication would overflow. The saturated
 * value doubles as the "infinite timeout" sentinel used by the timer code.
 */
static uint64_t uv__ms_to_ns_clamp(uint64_t ms) {
  const uint64_t ns_per_ms = 1000 * 1000;

  if (ms > (uint64_t) -1 / ns_per_ms)
    return (uint64_t) -1;

  return ms * ns_per_ms;
}
/* Public API entry point: timeout and repeat are in milliseconds.
 * Saturate-convert both intervals to nanoseconds and delegate to the
 * internal nanosecond-granularity starter.
 */
int uv_timer_start(uv_timer_t* handle,
                   uv_timer_cb cb,
                   uint64_t timeout,
                   uint64_t repeat) {
  return uv__timer_start(handle,
                         cb,
                         uv__ms_to_ns_clamp(timeout),
                         uv__ms_to_ns_clamp(repeat));
}
int uv_timer_stop(uv_timer_t* handle) {
if (uv__is_active(handle)) {
heap_remove(timer_heap(handle->loop),
@ -115,7 +132,7 @@ int uv_timer_again(uv_timer_t* handle) {
if (handle->repeat) {
uv_timer_stop(handle);
uv_timer_start(handle, handle->timer_cb, handle->repeat, handle->repeat);
uv__timer_start(handle, handle->timer_cb, handle->repeat, handle->repeat);
}
return 0;
@ -123,12 +140,12 @@ int uv_timer_again(uv_timer_t* handle) {
void uv_timer_set_repeat(uv_timer_t* handle, uint64_t repeat) {
handle->repeat = repeat;
handle->repeat = uv__ms_to_ns_clamp(repeat);
}
uint64_t uv_timer_get_repeat(const uv_timer_t* handle) {
return handle->repeat;
return uv__ns_to_ms(handle->repeat);
}
@ -136,14 +153,16 @@ uint64_t uv_timer_get_due_in(const uv_timer_t* handle) {
if (handle->loop->time >= handle->timeout)
return 0;
return handle->timeout - handle->loop->time;
if (handle->timeout == (uint64_t) -1)
return -1;
return uv__ns_to_ms(handle->timeout - handle->loop->time);
}
int uv__next_timeout(const uv_loop_t* loop) {
uint64_t uv__next_timeout(const uv_loop_t* loop) {
const struct heap_node* heap_node;
const uv_timer_t* handle;
uint64_t diff;
heap_node = heap_min(timer_heap(loop));
if (heap_node == NULL)
@ -153,11 +172,7 @@ int uv__next_timeout(const uv_loop_t* loop) {
if (handle->timeout <= loop->time)
return 0;
diff = handle->timeout - loop->time;
if (diff > INT_MAX)
diff = INT_MAX;
return (int) diff;
return handle->timeout - loop->time;
}
@ -197,3 +212,31 @@ void uv__run_timers(uv_loop_t* loop) {
/* Close hook for timer handles: stopping the timer removes it from the
 * loop's timer heap (see uv_timer_stop) so teardown can proceed safely. */
void uv__timer_close(uv_timer_t* handle) {
uv_timer_stop(handle);
}
/* Convert nanoseconds to milliseconds with round-half-to-even
 * ("bankers'") rounding so exact .5ms midpoints don't bias upward.
 * The (uint64_t) -1 "infinite" sentinel passes through unchanged.
 */
uint64_t uv__ns_to_ms(uint64_t ns) {
  const uint64_t ns_per_ms = 1000 * 1000;
  uint64_t whole;
  uint64_t rem;

  if (ns == (uint64_t) -1)
    return (uint64_t) -1;

  whole = ns / ns_per_ms;
  rem = ns - whole * ns_per_ms;

  if (rem > ns_per_ms / 2)
    return whole + 1;

  if (rem == ns_per_ms / 2)
    return whole + (whole & 1); /* Exact midpoint: round to even. */

  return whole;
}
/* Like uv__ns_to_ms() but clamps the result into the int range expected
 * by poll-style syscalls: the "infinite" sentinel maps to -1, anything
 * above INT_MAX saturates to INT_MAX.
 */
int uv__ns_to_ms_sat(uint64_t ns) {
  uint64_t ms;

  ms = uv__ns_to_ms(ns);

  if (ms == (uint64_t) -1)
    return -1;

  if (ms > INT_MAX)
    return INT_MAX;

  return (int) ms;
}

View File

@ -130,7 +130,7 @@ int uv__io_check_fd(uv_loop_t* loop, int fd) {
}
void uv__io_poll(uv_loop_t* loop, int timeout) {
void uv__io_poll(uv_loop_t* loop, uint64_t timeout) {
uv__loop_internal_fields_t* lfields;
struct pollfd events[1024];
struct pollfd pqry;
@ -147,8 +147,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
int i;
int rc;
int add_failed;
int user_timeout;
int reset_timeout;
uint64_t user_timeout;
uint64_t reset_timeout;
if (loop->nfds == 0) {
assert(uv__queue_empty(&loop->watcher_queue));
@ -216,12 +216,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
w->events = w->pevents;
}
assert(timeout >= -1);
base = loop->time;
count = 48; /* Benchmarks suggest this gives the best throughput. */
if (lfields->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
reset_timeout = 1000 * 1000; /* One millisecond. */
user_timeout = timeout;
timeout = 0;
} else {
@ -244,7 +243,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
nfds = pollset_poll(loop->backend_fd,
events,
ARRAY_SIZE(events),
timeout);
uv__ns_to_ms_sat(timeout));
/* Update loop->time unconditionally. It's tempting to skip the update when
* timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
@ -262,7 +261,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
goto update_timeout;
}
assert(timeout != -1);
assert(timeout != (uint64_t) -1);
return;
}
@ -276,7 +275,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
reset_timeout = 0;
}
if (timeout == -1)
if (timeout == (uint64_t) -1)
continue;
if (timeout == 0)
@ -360,14 +359,14 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (timeout == 0)
return;
if (timeout == -1)
if (timeout == (uint64_t) -1)
continue;
update_timeout:
assert(timeout > 0);
diff = loop->time - base;
if (diff >= (uint64_t) timeout)
if (diff >= timeout)
return;
timeout -= diff;

View File

@ -398,7 +398,7 @@ static int uv__loop_alive(const uv_loop_t* loop) {
}
static int uv__backend_timeout(const uv_loop_t* loop) {
static uint64_t uv__backend_timeout(const uv_loop_t* loop) {
if (loop->stop_flag == 0 &&
/* uv__loop_alive(loop) && */
(uv__has_active_handles(loop) || uv__has_active_reqs(loop)) &&
@ -413,7 +413,7 @@ static int uv__backend_timeout(const uv_loop_t* loop) {
int uv_backend_timeout(const uv_loop_t* loop) {
if (uv__queue_empty(&loop->watcher_queue))
return uv__backend_timeout(loop);
return uv__ns_to_ms_sat(uv__backend_timeout(loop));
/* Need to call uv_run to update the backend fd state. */
return 0;
}
@ -425,9 +425,9 @@ int uv_loop_alive(const uv_loop_t* loop) {
int uv_run(uv_loop_t* loop, uv_run_mode mode) {
int timeout;
int r;
int can_sleep;
uint64_t timeout;
r = uv__loop_alive(loop);
if (!r)

View File

@ -268,7 +268,7 @@ void uv__io_close(uv_loop_t* loop, uv__io_t* w);
void uv__io_feed(uv_loop_t* loop, uv__io_t* w);
int uv__io_active(const uv__io_t* w, unsigned int events);
int uv__io_check_fd(uv_loop_t* loop, int fd);
void uv__io_poll(uv_loop_t* loop, int timeout); /* in milliseconds or -1 */
void uv__io_poll(uv_loop_t* loop, uint64_t timeout); /* in nanoseconds or -1 */
int uv__io_fork(uv_loop_t* loop);
int uv__fd_exists(uv_loop_t* loop, int fd);
@ -406,7 +406,7 @@ void uv__fsevents_loop_delete(uv_loop_t* loop);
UV_UNUSED(static void uv__update_time(uv_loop_t* loop)) {
/* Use a fast time source if available. We only need millisecond precision.
*/
loop->time = uv__hrtime(UV_CLOCK_FAST) / 1000000;
loop->time = uv__ns_to_ms(uv__hrtime(UV_CLOCK_FAST)) * 1000 * 1000;
}
UV_UNUSED(static char* uv__basename_r(const char* path)) {

View File

@ -156,7 +156,7 @@ static void uv__kqueue_delete(int kqfd, const struct kevent *ev) {
}
void uv__io_poll(uv_loop_t* loop, int timeout) {
void uv__io_poll(uv_loop_t* loop, uint64_t timeout) {
uv__loop_internal_fields_t* lfields;
struct kevent events[1024];
struct kevent* ev;
@ -178,8 +178,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
int fd;
int op;
int i;
int user_timeout;
int reset_timeout;
uint64_t user_timeout;
uint64_t reset_timeout;
if (loop->nfds == 0) {
assert(uv__queue_empty(&loop->watcher_queue));
@ -250,12 +250,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
sigaddset(pset, SIGPROF);
}
assert(timeout >= -1);
base = loop->time;
count = 48; /* Benchmarks suggest this gives the best throughput. */
if (lfields->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
reset_timeout = 1000 * 1000; /* One millisecond. */
user_timeout = timeout;
timeout = 0;
} else {
@ -269,9 +268,9 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (timeout != 0)
uv__metrics_set_provider_entry_time(loop);
if (timeout != -1) {
spec.tv_sec = timeout / 1000;
spec.tv_nsec = (timeout % 1000) * 1000000;
if (timeout != (uint64_t) -1) {
spec.tv_sec = timeout / (1000 * 1000 * 1000);
spec.tv_nsec = timeout % (1000 * 1000 * 1000);
}
if (pset != NULL)
@ -294,7 +293,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
assert(errno == EINTR);
else if (nfds == 0)
/* Unlimited timeout should only return with events or signal. */
assert(timeout != -1);
assert(timeout != (uint64_t) -1);
if (pset != NULL)
pthread_sigmask(SIG_UNBLOCK, pset, NULL);
@ -462,13 +461,13 @@ update_timeout:
if (timeout == 0)
return;
if (timeout == -1)
if (timeout == (uint64_t) -1)
continue;
assert(timeout > 0);
diff = loop->time - base;
if (diff >= (uint64_t) timeout)
if (diff >= timeout)
return;
timeout -= diff;

View File

@ -1347,7 +1347,7 @@ static void uv__epoll_ctl_flush(int epollfd,
}
void uv__io_poll(uv_loop_t* loop, int timeout) {
void uv__io_poll(uv_loop_t* loop, uint64_t timeout) {
uv__loop_internal_fields_t* lfields;
struct epoll_event events[1024];
struct epoll_event prep[256];
@ -1356,12 +1356,13 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
struct epoll_event e;
struct uv__iou* ctl;
struct uv__iou* iou;
int real_timeout;
uint64_t real_timeout;
struct uv__queue* q;
uv__io_t* w;
sigset_t* sigmask;
sigset_t sigset;
uint64_t base;
uint64_t diff;
int have_iou_events;
int have_signals;
int nevents;
@ -1371,8 +1372,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
int fd;
int op;
int i;
int user_timeout;
int reset_timeout;
uint64_t user_timeout;
uint64_t reset_timeout;
lfields = uv__get_internal_fields(loop);
ctl = &lfields->ctl;
@ -1385,13 +1386,12 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
sigmask = &sigset;
}
assert(timeout >= -1);
base = loop->time;
count = 48; /* Benchmarks suggest this gives the best throughput. */
real_timeout = timeout;
if (lfields->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
reset_timeout = 1000 * 1000; /* One millisecond. */
user_timeout = timeout;
timeout = 0;
} else {
@ -1462,7 +1462,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
*/
lfields->current_timeout = timeout;
nfds = epoll_pwait(epollfd, events, ARRAY_SIZE(events), timeout, sigmask);
nfds = epoll_pwait(epollfd,
events,
ARRAY_SIZE(events),
uv__ns_to_ms_sat(timeout),
sigmask);
/* Update loop->time unconditionally. It's tempting to skip the update when
* timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
@ -1474,7 +1478,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
assert(errno == EINTR);
else if (nfds == 0)
/* Unlimited timeout should only return with events or signal. */
assert(timeout != -1);
assert(timeout != (uint64_t) -1);
if (nfds == 0 || nfds == -1) {
if (reset_timeout != 0) {
@ -1602,15 +1606,16 @@ update_timeout:
if (timeout == 0)
break;
if (timeout == -1)
if (timeout == (uint64_t) -1)
continue;
assert(timeout > 0);
real_timeout -= (loop->time - base);
if (real_timeout <= 0)
break;
diff = loop->time - base;
if (diff >= real_timeout)
return;
real_timeout -= diff;
timeout = real_timeout;
}

View File

@ -808,7 +808,7 @@ static int os390_message_queue_handler(uv__os390_epoll* ep) {
}
void uv__io_poll(uv_loop_t* loop, int timeout) {
void uv__io_poll(uv_loop_t* loop, uint64_t timeout) {
static const int max_safe_timeout = 1789569;
uv__loop_internal_fields_t* lfields;
struct epoll_event events[1024];
@ -816,17 +816,18 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
struct epoll_event e;
uv__os390_epoll* ep;
int have_signals;
int real_timeout;
uint64_t real_timeout;
struct uv__queue* q;
uv__io_t* w;
uint64_t base;
uint64_t diff;
int count;
int nfds;
int fd;
int op;
int i;
int user_timeout;
int reset_timeout;
uint64_t user_timeout;
uint64_t reset_timeout;
if (loop->nfds == 0) {
assert(uv__queue_empty(&loop->watcher_queue));
@ -875,7 +876,6 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
w->events = w->pevents;
}
assert(timeout >= -1);
base = loop->time;
count = 48; /* Benchmarks suggest this gives the best throughput. */
real_timeout = timeout;
@ -883,7 +883,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
have_signals = 0;
if (lfields->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
reset_timeout = 1000 * 1000; /* One millisecond. */
user_timeout = timeout;
timeout = 0;
} else {
@ -907,8 +907,10 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
*/
lfields->current_timeout = timeout;
nfds = epoll_wait(loop->ep, events,
ARRAY_SIZE(events), timeout);
nfds = epoll_wait(loop->ep,
events,
ARRAY_SIZE(events),
uv__ns_to_ms_sat(timeout));
/* Update loop->time unconditionally. It's tempting to skip the update when
* timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
@ -917,14 +919,14 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
base = loop->time;
SAVE_ERRNO(uv__update_time(loop));
if (nfds == 0) {
assert(timeout != -1);
assert(timeout != (uint64_t) -1);
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}
if (timeout == -1)
if (timeout == (uint64_t) -1)
continue;
if (timeout == 0)
@ -946,7 +948,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
reset_timeout = 0;
}
if (timeout == -1)
if (timeout == (uint64_t) -1)
continue;
if (timeout == 0)
@ -1044,16 +1046,17 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (timeout == 0)
return;
if (timeout == -1)
if (timeout == (uint64_t) -1)
continue;
update_timeout:
assert(timeout > 0);
real_timeout -= (loop->time - base);
if (real_timeout <= 0)
diff = loop->time - base;
if (diff >= real_timeout)
return;
real_timeout -= diff;
timeout = real_timeout;
}
}

View File

@ -131,7 +131,7 @@ static void uv__pollfds_del(uv_loop_t* loop, int fd) {
}
void uv__io_poll(uv_loop_t* loop, int timeout) {
void uv__io_poll(uv_loop_t* loop, uint64_t timeout) {
uv__loop_internal_fields_t* lfields;
sigset_t* pset;
sigset_t set;
@ -145,8 +145,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
int have_signals;
struct pollfd* pe;
int fd;
int user_timeout;
int reset_timeout;
uint64_t user_timeout;
uint64_t reset_timeout;
if (loop->nfds == 0) {
assert(uv__queue_empty(&loop->watcher_queue));
@ -179,11 +179,10 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
sigaddset(pset, SIGPROF);
}
assert(timeout >= -1);
time_base = loop->time;
if (lfields->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
reset_timeout = 1000 * 1000; /* One millisecond. */
user_timeout = timeout;
timeout = 0;
} else {
@ -210,7 +209,9 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (pset != NULL)
if (pthread_sigmask(SIG_BLOCK, pset, NULL))
abort();
nfds = poll(loop->poll_fds, (nfds_t)loop->poll_fds_used, timeout);
nfds = poll(loop->poll_fds,
(nfds_t)loop->poll_fds_used,
uv__ns_to_ms_sat(timeout));
if (pset != NULL)
if (pthread_sigmask(SIG_UNBLOCK, pset, NULL))
abort();
@ -225,13 +226,13 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
if (timeout == -1)
if (timeout == (uint64_t) -1)
continue;
if (timeout > 0)
goto update_timeout;
}
assert(timeout != -1);
assert(timeout != (uint64_t) -1);
return;
}
@ -244,7 +245,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
reset_timeout = 0;
}
if (timeout == -1)
if (timeout == (uint64_t) -1)
continue;
if (timeout == 0)
@ -334,7 +335,7 @@ update_timeout:
assert(timeout > 0);
time_diff = loop->time - time_base;
if (time_diff >= (uint64_t) timeout)
if (time_diff >= timeout)
return;
timeout -= time_diff;

View File

@ -144,7 +144,7 @@ int uv__io_check_fd(uv_loop_t* loop, int fd) {
}
void uv__io_poll(uv_loop_t* loop, int timeout) {
void uv__io_poll(uv_loop_t* loop, uint64_t timeout) {
struct port_event events[1024];
struct port_event* pe;
struct timespec spec;
@ -162,8 +162,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
int count;
int err;
int fd;
int user_timeout;
int reset_timeout;
uint64_t user_timeout;
uint64_t reset_timeout;
if (loop->nfds == 0) {
assert(uv__queue_empty(&loop->watcher_queue));
@ -197,12 +197,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
sigaddset(pset, SIGPROF);
}
assert(timeout >= -1);
base = loop->time;
count = 48; /* Benchmarks suggest this gives the best throughput. */
if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
reset_timeout = 1000 * 1000; /* One millisecond. */
user_timeout = timeout;
timeout = 0;
} else {
@ -216,9 +215,9 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (timeout != 0)
uv__metrics_set_provider_entry_time(loop);
if (timeout != -1) {
spec.tv_sec = timeout / 1000;
spec.tv_nsec = (timeout % 1000) * 1000000;
if (timeout != (uint64_t) -1) {
spec.tv_sec = timeout / (1000 * 1000 * 1000);
spec.tv_nsec = timeout % (1000 * 1000 * 1000);
}
/* Work around a kernel bug where nfds is not updated. */
@ -234,7 +233,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
events,
ARRAY_SIZE(events),
&nfds,
timeout == -1 ? NULL : &spec);
timeout == (uint64_t) -1 ? NULL : &spec);
if (pset != NULL)
pthread_sigmask(SIG_UNBLOCK, pset, NULL);
@ -266,14 +265,14 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (timeout == 0)
return;
if (timeout == -1)
if (timeout == (uint64_t) -1)
continue;
goto update_timeout;
}
if (nfds == 0) {
assert(timeout != -1);
assert(timeout != (uint64_t) -1);
return;
}
@ -348,21 +347,21 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
}
if (saved_errno == ETIME) {
assert(timeout != -1);
assert(timeout != (uint64_t) -1);
return;
}
if (timeout == 0)
return;
if (timeout == -1)
if (timeout == (uint64_t) -1)
continue;
update_timeout:
assert(timeout > 0);
diff = loop->time - base;
if (diff >= (uint64_t) timeout)
if (diff >= timeout)
return;
timeout -= diff;

View File

@ -636,7 +636,7 @@ void uv_stop(uv_loop_t* loop) {
uint64_t uv_now(const uv_loop_t* loop) {
return loop->time;
return uv__ns_to_ms(loop->time);
}

View File

@ -233,9 +233,11 @@ void uv__fs_scandir_cleanup(uv_fs_t* req);
void uv__fs_readdir_cleanup(uv_fs_t* req);
uv_dirent_type_t uv__fs_get_dirent_type(uv__dirent_t* dent);
int uv__next_timeout(const uv_loop_t* loop);
uint64_t uv__next_timeout(const uv_loop_t* loop);
void uv__run_timers(uv_loop_t* loop);
void uv__timer_close(uv_timer_t* handle);
uint64_t uv__ns_to_ms(uint64_t ns);
int uv__ns_to_ms_sat(uint64_t ns); /* Saturates to INT_MAX. */
void uv__process_title_cleanup(void);
void uv__signal_cleanup(void);
@ -431,7 +433,7 @@ struct uv__iou {
struct uv__loop_internal_fields_s {
unsigned int flags;
uv__loop_metrics_t loop_metrics;
int current_timeout;
uint64_t current_timeout;
#ifdef __linux__
struct uv__iou ctl;
struct uv__iou iou;

View File

@ -287,7 +287,7 @@ TEST_IMPL(timer_huge_timeout) {
0));
ASSERT_OK(uv_timer_start(&huge_timer2, tiny_timer_cb, (uint64_t) -1, 0));
ASSERT_UINT64_EQ(1, uv_timer_get_due_in(&tiny_timer));
ASSERT_UINT64_EQ(281474976710655, uv_timer_get_due_in(&huge_timer1));
ASSERT_UINT64_EQ(UINT64_MAX, uv_timer_get_due_in(&huge_timer1));
ASSERT_UINT64_LE(0, uv_timer_get_due_in(&huge_timer2));
ASSERT_OK(uv_run(uv_default_loop(), UV_RUN_DEFAULT));
MAKE_VALGRIND_HAPPY(uv_default_loop());