feat: use KEVENT_FLAG_IMMEDIATE when timeout is zero and EV64 is defined

This commit is contained in:
Andrew Johnston 2026-02-16 23:52:59 +00:00
parent 350f735786
commit 9da40101c7
4 changed files with 41 additions and 40 deletions

View File

@ -45,17 +45,17 @@ static int kqueue_evfilt_user_support = 1;
static void uv__kqueue_runtime_detection(void) {
int kq;
struct kevent ev[2];
KEVENT_S ev[2];
struct timespec timeout = {0, 0};
/* Perform the runtime detection to ensure that kqueue with
* EVFILT_USER actually works. */
kq = kqueue();
EV_SET(ev, UV__KQUEUE_EVFILT_USER_IDENT, EVFILT_USER,
EV_ADD | EV_CLEAR, 0, 0, 0);
EV_SET(ev + 1, UV__KQUEUE_EVFILT_USER_IDENT, EVFILT_USER,
0, NOTE_TRIGGER, 0, 0);
if (kevent(kq, ev, 2, ev, 1, &timeout) < 1 ||
SET_EVENT(ev, UV__KQUEUE_EVFILT_USER_IDENT, EVFILT_USER,
EV_ADD | EV_CLEAR, 0);
SET_EVENT(ev + 1, UV__KQUEUE_EVFILT_USER_IDENT, EVFILT_USER,
0, NOTE_TRIGGER);
if (KEVENT(kq, ev, 2, ev, 1, 0, &timeout) < 1 ||
ev[0].filter != EVFILT_USER ||
ev[0].ident != UV__KQUEUE_EVFILT_USER_IDENT ||
ev[0].flags & EV_ERROR)
@ -228,12 +228,12 @@ static void uv__async_send(uv_loop_t* loop) {
fd = loop->async_io_watcher.fd; /* eventfd */
}
#elif UV__KQUEUE_EVFILT_USER
struct kevent ev;
KEVENT_S ev;
if (kqueue_evfilt_user_support) {
fd = loop->async_io_watcher.fd; /* magic number for EVFILT_USER */
EV_SET(&ev, fd, EVFILT_USER, 0, NOTE_TRIGGER, 0, 0);
r = kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL);
SET_EVENT(&ev, fd, EVFILT_USER, 0, NOTE_TRIGGER);
r = KEVENT(loop->backend_fd, &ev, 1, NULL, 0, 0, NULL);
if (r == 0)
return;
abort();
@ -259,7 +259,7 @@ static int uv__async_start(uv_loop_t* loop) {
int pipefd[2];
int err;
#if UV__KQUEUE_EVFILT_USER
struct kevent ev;
KEVENT_S ev;
#endif
if (loop->async_io_watcher.fd != -1)
@ -288,13 +288,13 @@ static int uv__async_start(uv_loop_t* loop) {
pipefd[1] = -1;
/* When using EVFILT_USER event to wake up the kqueue, this event must be
* registered beforehand. Otherwise, calling kevent() to issue an
* registered beforehand. Otherwise, calling kevent()/kevent64() to issue an
* unregistered EVFILT_USER event will get an ENOENT.
* Since uv__async_send() may happen before uv__io_poll() with multi-threads,
* we can't defer this registration of EVFILT_USER event as we did for other
* events, but must perform it right away. */
EV_SET(&ev, err, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, 0);
err = kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL);
SET_EVENT(&ev, err, EVFILT_USER, EV_ADD | EV_CLEAR, 0);
err = KEVENT(loop->backend_fd, &ev, 1, NULL, 0, 0, NULL);
if (err < 0)
return UV__ERR(errno);
} else {

View File

@ -95,7 +95,7 @@ int uv__io_fork(uv_loop_t* loop) {
int uv__io_check_fd(uv_loop_t* loop, int fd) {
struct kevent ev[2];
KEVENT_S ev[2];
struct stat sb;
#ifdef __APPLE__
char path[MAXPATHLEN];
@ -130,21 +130,21 @@ int uv__io_check_fd(uv_loop_t* loop, int fd) {
}
#endif
EV_SET(ev, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
EV_SET(ev + 1, fd, EVFILT_READ, EV_DELETE, 0, 0, 0);
if (kevent(loop->backend_fd, ev, 2, NULL, 0, NULL))
SET_EVENT(ev, fd, EVFILT_READ, EV_ADD, 0);
SET_EVENT(ev + 1, fd, EVFILT_READ, EV_DELETE, 0);
if (KEVENT(loop->backend_fd, ev, 2, NULL, 0, 0, NULL))
return UV__ERR(errno);
return 0;
}
static void uv__kqueue_delete(int kqfd, const struct kevent *ev) {
struct kevent change;
static void uv__kqueue_delete(int kqfd, const KEVENT_S *ev) {
KEVENT_S change;
EV_SET(&change, ev->ident, ev->filter, EV_DELETE, 0, 0, 0);
SET_EVENT(&change, ev->ident, ev->filter, EV_DELETE, 0);
if (0 == kevent(kqfd, &change, 1, NULL, 0, NULL))
if (0 == KEVENT(kqfd, &change, 1, NULL, 0, 0, NULL))
return;
if (errno == EBADF || errno == ENOENT)
@ -156,8 +156,8 @@ static void uv__kqueue_delete(int kqfd, const struct kevent *ev) {
void uv__io_poll(uv_loop_t* loop, int timeout) {
uv__loop_internal_fields_t* lfields;
struct kevent events[1024];
struct kevent* ev;
KEVENT_S events[1024];
KEVENT_S* ev;
struct timespec spec;
unsigned int nevents;
unsigned int revents;
@ -209,30 +209,30 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
op = EV_ADD | EV_ONESHOT; /* Stop the event from firing repeatedly. */
}
EV_SET(events + nevents, w->fd, filter, op, fflags, 0, 0);
SET_EVENT(events + nevents, w->fd, filter, op, fflags);
if (++nevents == ARRAY_SIZE(events)) {
if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
if (KEVENT(loop->backend_fd, events, nevents, NULL, 0, 0, NULL))
abort();
nevents = 0;
}
}
if ((w->events & POLLOUT) == 0 && (w->pevents & POLLOUT) != 0) {
EV_SET(events + nevents, w->fd, EVFILT_WRITE, EV_ADD, 0, 0, 0);
SET_EVENT(events + nevents, w->fd, EVFILT_WRITE, EV_ADD, 0);
if (++nevents == ARRAY_SIZE(events)) {
if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
if (KEVENT(loop->backend_fd, events, nevents, NULL, 0, 0, NULL))
abort();
nevents = 0;
}
}
if ((w->events & UV__POLLPRI) == 0 && (w->pevents & UV__POLLPRI) != 0) {
EV_SET(events + nevents, w->fd, EV_OOBAND, EV_ADD, 0, 0, 0);
SET_EVENT(events + nevents, w->fd, EV_OOBAND, EV_ADD, 0);
if (++nevents == ARRAY_SIZE(events)) {
if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
if (KEVENT(loop->backend_fd, events, nevents, NULL, 0, 0, NULL))
abort();
nevents = 0;
}
@ -267,11 +267,12 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
}
uv__io_poll_prepare(loop, pset, timeout);
nfds = kevent(loop->backend_fd,
nfds = KEVENT(loop->backend_fd,
events,
nevents,
events,
ARRAY_SIZE(events),
timeout == 0 ? KEVENT_FLAG_IMMEDIATE : KEVENT_FLAG_NONE,
timeout == -1 ? NULL : &spec);
uv__io_poll_check(loop, pset);
@ -474,7 +475,7 @@ void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags) {
uv_fs_event_t* handle;
struct kevent ev;
KEVENT_S ev;
int events;
const char* path;
#if defined(F_GETPATH)
@ -527,9 +528,9 @@ void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags) {
fflags = NOTE_ATTRIB | NOTE_WRITE | NOTE_RENAME
| NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;
EV_SET(&ev, w->fd, EVFILT_VNODE, EV_ADD | EV_ONESHOT, fflags, 0, 0);
SET_EVENT(&ev, w->fd, EVFILT_VNODE, EV_ADD | EV_ONESHOT, fflags);
if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
if (KEVENT(loop->backend_fd, &ev, 1, NULL, 0, 0, NULL))
abort();
}

View File

@ -1035,9 +1035,9 @@ int uv_spawn(uv_loop_t* loop,
* with waitpid. */
if (exec_errorno == 0) {
#ifndef UV_USE_SIGCHLD
struct kevent event;
EV_SET(&event, pid, EVFILT_PROC, EV_ADD | EV_ONESHOT, NOTE_EXIT, 0, 0);
if (kevent(loop->backend_fd, &event, 1, NULL, 0, NULL)) {
KEVENT_S event;
SET_EVENT(&event, pid, EVFILT_PROC, EV_ADD | EV_ONESHOT, NOTE_EXIT);
if (KEVENT(loop->backend_fd, &event, 1, NULL, 0, 0, NULL)) {
if (errno != ESRCH)
abort();
/* Process already exited. Call waitpid on the next loop iteration. */

View File

@ -276,8 +276,8 @@ int uv__stream_try_select(uv_stream_t* stream, int* fd) {
* select(2) in separate thread for those fds
*/
struct kevent filter[1];
struct kevent events[1];
KEVENT_S filter[1];
KEVENT_S events[1];
struct timespec timeout;
uv__stream_select_t* s;
int fds[2];
@ -295,14 +295,14 @@ int uv__stream_try_select(uv_stream_t* stream, int* fd) {
return UV__ERR(errno);
}
EV_SET(&filter[0], *fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, 0);
SET_EVENT(&filter[0], *fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0);
/* Use small timeout, because we only want to capture EINVALs */
timeout.tv_sec = 0;
timeout.tv_nsec = 1;
do
ret = kevent(kq, filter, 1, events, 1, &timeout);
ret = KEVENT(kq, filter, 1, events, 1, 0, &timeout);
while (ret == -1 && errno == EINTR);
uv__close(kq);