unix: factor out common i/o poll code
This commit is contained in:
parent
2e2114ed89
commit
a0a4027cca
@ -229,28 +229,12 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
|
||||
}
|
||||
|
||||
for (;;) {
|
||||
/* Only need to set the provider_entry_time if timeout != 0. The function
|
||||
* will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
|
||||
*/
|
||||
if (timeout != 0)
|
||||
uv__metrics_set_provider_entry_time(loop);
|
||||
|
||||
/* Store the current timeout in a location that's globally accessible so
|
||||
* other locations like uv__work_done() can determine whether the queue
|
||||
* of events in the callback were waiting when poll was called.
|
||||
*/
|
||||
lfields->current_timeout = timeout;
|
||||
|
||||
uv__io_poll_prepare(loop, NULL, timeout);
|
||||
nfds = pollset_poll(loop->backend_fd,
|
||||
events,
|
||||
ARRAY_SIZE(events),
|
||||
timeout);
|
||||
|
||||
/* Update loop->time unconditionally. It's tempting to skip the update when
|
||||
* timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
|
||||
* operating system didn't reschedule our process while in the syscall.
|
||||
*/
|
||||
SAVE_ERRNO(uv__update_time(loop));
|
||||
uv__io_poll_check(loop, NULL);
|
||||
|
||||
if (nfds == 0) {
|
||||
if (reset_timeout != 0) {
|
||||
|
||||
@ -1056,6 +1056,39 @@ int uv__io_active(const uv__io_t* w, unsigned int events) {
|
||||
}
|
||||
|
||||
|
||||
/* Common pre-syscall bookkeeping shared by the per-platform uv__io_poll()
 * implementations.  Records the metrics provider entry time, publishes the
 * pending timeout, and — when a signal set is supplied — blocks those
 * signals for the duration of the upcoming poll syscall.
 */
void uv__io_poll_prepare(uv_loop_t* loop, sigset_t* pset, int timeout) {
  uv__loop_internal_fields_t* fields;

  /* provider_entry_time is only meaningful for a potentially blocking poll
   * (timeout != 0); the call itself returns early unless the loop was
   * configured with UV_METRICS_IDLE_TIME.
   */
  if (timeout != 0)
    uv__metrics_set_provider_entry_time(loop);

  /* Publish the timeout where other code, e.g. uv__work_done(), can read it
   * to decide whether queued callback events were already waiting when the
   * poll syscall was entered.
   */
  fields = uv__get_internal_fields(loop);
  fields->current_timeout = timeout;

  if (pset != NULL && pthread_sigmask(SIG_BLOCK, pset, NULL) != 0)
    abort();
}
|
||||
|
||||
/* Common post-syscall bookkeeping shared by the per-platform uv__io_poll()
 * implementations.  Restores the signal mask changed by
 * uv__io_poll_prepare() (when a set was supplied) and refreshes the loop's
 * cached time.
 */
void uv__io_poll_check(uv_loop_t* loop, sigset_t* pset) {
  if (pset != NULL && pthread_sigmask(SIG_UNBLOCK, pset, NULL) != 0)
    abort();

  /* Refresh loop->time unconditionally.  Even a non-blocking poll
   * (timeout == 0) may have taken real time: the operating system could
   * have rescheduled the process while it was inside the syscall.
   */
  SAVE_ERRNO(uv__update_time(loop));
}
|
||||
|
||||
/* Return non-zero when `fd` has a watcher registered on `loop`.  The
 * unsigned cast folds the negative-fd check into the bounds test.
 */
int uv__fd_exists(uv_loop_t* loop, int fd) {
  if ((unsigned) fd >= loop->nwatchers)
    return 0;

  return loop->watchers[fd] != NULL;
}
|
||||
|
||||
@ -328,6 +328,8 @@ int uv__io_active(const uv__io_t* w, unsigned int events);
|
||||
int uv__io_check_fd(uv_loop_t* loop, int fd);
|
||||
void uv__io_poll(uv_loop_t* loop, int timeout); /* in milliseconds or -1 */
|
||||
int uv__io_fork(uv_loop_t* loop);
|
||||
void uv__io_poll_prepare(uv_loop_t* loop, sigset_t* pset, int timeout);
|
||||
void uv__io_poll_check(uv_loop_t* loop, sigset_t* pset);
|
||||
int uv__fd_exists(uv_loop_t* loop, int fd);
|
||||
|
||||
/* async */
|
||||
|
||||
@ -261,32 +261,19 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
|
||||
}
|
||||
|
||||
for (;; nevents = 0) {
|
||||
/* Only need to set the provider_entry_time if timeout != 0. The function
|
||||
* will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
|
||||
*/
|
||||
if (timeout != 0)
|
||||
uv__metrics_set_provider_entry_time(loop);
|
||||
|
||||
if (timeout != -1) {
|
||||
spec.tv_sec = timeout / 1000;
|
||||
spec.tv_nsec = (timeout % 1000) * 1000000;
|
||||
}
|
||||
|
||||
if (pset != NULL)
|
||||
pthread_sigmask(SIG_BLOCK, pset, NULL);
|
||||
|
||||
/* Store the current timeout in a location that's globally accessible so
|
||||
* other locations like uv__work_done() can determine whether the queue
|
||||
* of events in the callback were waiting when poll was called.
|
||||
*/
|
||||
lfields->current_timeout = timeout;
|
||||
|
||||
uv__io_poll_prepare(loop, pset, timeout);
|
||||
nfds = kevent(loop->backend_fd,
|
||||
events,
|
||||
nevents,
|
||||
events,
|
||||
ARRAY_SIZE(events),
|
||||
timeout == -1 ? NULL : &spec);
|
||||
uv__io_poll_check(loop, pset);
|
||||
|
||||
if (nfds == -1)
|
||||
assert(errno == EINTR);
|
||||
@ -294,15 +281,6 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
|
||||
/* Unlimited timeout should only return with events or signal. */
|
||||
assert(timeout != -1);
|
||||
|
||||
if (pset != NULL)
|
||||
pthread_sigmask(SIG_UNBLOCK, pset, NULL);
|
||||
|
||||
/* Update loop->time unconditionally. It's tempting to skip the update when
|
||||
* timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
|
||||
* operating system didn't reschedule our process while in the syscall.
|
||||
*/
|
||||
uv__update_time(loop);
|
||||
|
||||
if (nfds == 0 || nfds == -1) {
|
||||
/* If kqueue is empty or interrupted, we might still have children ready
|
||||
* to reap immediately. */
|
||||
|
||||
@ -1447,25 +1447,9 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
|
||||
while (*ctl->sqhead != *ctl->sqtail)
|
||||
uv__epoll_ctl_flush(epollfd, ctl, &prep);
|
||||
|
||||
/* Only need to set the provider_entry_time if timeout != 0. The function
|
||||
* will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
|
||||
*/
|
||||
if (timeout != 0)
|
||||
uv__metrics_set_provider_entry_time(loop);
|
||||
|
||||
/* Store the current timeout in a location that's globally accessible so
|
||||
* other locations like uv__work_done() can determine whether the queue
|
||||
* of events in the callback were waiting when poll was called.
|
||||
*/
|
||||
lfields->current_timeout = timeout;
|
||||
|
||||
uv__io_poll_prepare(loop, NULL, timeout);
|
||||
nfds = epoll_pwait(epollfd, events, ARRAY_SIZE(events), timeout, sigmask);
|
||||
|
||||
/* Update loop->time unconditionally. It's tempting to skip the update when
|
||||
* timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
|
||||
* operating system didn't reschedule our process while in the syscall.
|
||||
*/
|
||||
SAVE_ERRNO(uv__update_time(loop));
|
||||
uv__io_poll_check(loop, NULL);
|
||||
|
||||
if (nfds == -1)
|
||||
assert(errno == EINTR);
|
||||
|
||||
@ -892,30 +892,15 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
|
||||
|
||||
nfds = 0;
|
||||
for (;;) {
|
||||
/* Only need to set the provider_entry_time if timeout != 0. The function
|
||||
* will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
|
||||
*/
|
||||
if (timeout != 0)
|
||||
uv__metrics_set_provider_entry_time(loop);
|
||||
|
||||
if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout)
|
||||
timeout = max_safe_timeout;
|
||||
|
||||
/* Store the current timeout in a location that's globally accessible so
|
||||
* other locations like uv__work_done() can determine whether the queue
|
||||
* of events in the callback were waiting when poll was called.
|
||||
*/
|
||||
lfields->current_timeout = timeout;
|
||||
|
||||
uv__io_poll_prepare(loop, NULL, timeout);
|
||||
nfds = epoll_wait(loop->ep, events,
|
||||
ARRAY_SIZE(events), timeout);
|
||||
|
||||
/* Update loop->time unconditionally. It's tempting to skip the update when
|
||||
* timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
|
||||
* operating system didn't reschedule our process while in the syscall.
|
||||
*/
|
||||
base = loop->time;
|
||||
SAVE_ERRNO(uv__update_time(loop));
|
||||
uv__io_poll_check(loop, NULL);
|
||||
|
||||
if (nfds == 0) {
|
||||
assert(timeout != -1);
|
||||
|
||||
|
||||
@ -195,31 +195,9 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
|
||||
* our caller then we need to loop around and poll() again.
|
||||
*/
|
||||
for (;;) {
|
||||
/* Only need to set the provider_entry_time if timeout != 0. The function
|
||||
* will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
|
||||
*/
|
||||
if (timeout != 0)
|
||||
uv__metrics_set_provider_entry_time(loop);
|
||||
|
||||
/* Store the current timeout in a location that's globally accessible so
|
||||
* other locations like uv__work_done() can determine whether the queue
|
||||
* of events in the callback were waiting when poll was called.
|
||||
*/
|
||||
lfields->current_timeout = timeout;
|
||||
|
||||
if (pset != NULL)
|
||||
if (pthread_sigmask(SIG_BLOCK, pset, NULL))
|
||||
abort();
|
||||
uv__io_poll_prepare(loop, pset, timeout);
|
||||
nfds = poll(loop->poll_fds, (nfds_t)loop->poll_fds_used, timeout);
|
||||
if (pset != NULL)
|
||||
if (pthread_sigmask(SIG_UNBLOCK, pset, NULL))
|
||||
abort();
|
||||
|
||||
/* Update loop->time unconditionally. It's tempting to skip the update when
|
||||
* timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
|
||||
* operating system didn't reschedule our process while in the syscall.
|
||||
*/
|
||||
SAVE_ERRNO(uv__update_time(loop));
|
||||
uv__io_poll_check(loop, pset);
|
||||
|
||||
if (nfds == 0) {
|
||||
if (reset_timeout != 0) {
|
||||
|
||||
@ -210,12 +210,6 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
|
||||
}
|
||||
|
||||
for (;;) {
|
||||
/* Only need to set the provider_entry_time if timeout != 0. The function
|
||||
* will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
|
||||
*/
|
||||
if (timeout != 0)
|
||||
uv__metrics_set_provider_entry_time(loop);
|
||||
|
||||
if (timeout != -1) {
|
||||
spec.tv_sec = timeout / 1000;
|
||||
spec.tv_nsec = (timeout % 1000) * 1000000;
|
||||
@ -227,17 +221,13 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
|
||||
nfds = 1;
|
||||
saved_errno = 0;
|
||||
|
||||
if (pset != NULL)
|
||||
pthread_sigmask(SIG_BLOCK, pset, NULL);
|
||||
|
||||
uv__io_poll_prepare(loop, pset, timeout);
|
||||
err = port_getn(loop->backend_fd,
|
||||
events,
|
||||
ARRAY_SIZE(events),
|
||||
&nfds,
|
||||
timeout == -1 ? NULL : &spec);
|
||||
|
||||
if (pset != NULL)
|
||||
pthread_sigmask(SIG_UNBLOCK, pset, NULL);
|
||||
uv__io_poll_check(loop, pset);
|
||||
|
||||
if (err) {
|
||||
/* Work around another kernel bug: port_getn() may return events even
|
||||
@ -251,12 +241,6 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
|
||||
}
|
||||
}
|
||||
|
||||
/* Update loop->time unconditionally. It's tempting to skip the update when
|
||||
* timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
|
||||
* operating system didn't reschedule our process while in the syscall.
|
||||
*/
|
||||
SAVE_ERRNO(uv__update_time(loop));
|
||||
|
||||
if (events[0].portev_source == 0) {
|
||||
if (reset_timeout != 0) {
|
||||
timeout = user_timeout;
|
||||
|
||||
Loading…
Reference in New Issue
Block a user