Compare commits

...

14 Commits

Author SHA1 Message Date
Stefan Stojanovic
44125af62a
win: fix watch loop logic (#5013)
Refs: https://github.com/nodejs/node/issues/61398
2026-03-30 22:09:25 -04:00
Jameson Nash
84816e064a
misc: revise security vulnerability reporting instructions
The project has been getting a flood of private vulnerability reports, most of which are invalid, and a few of which should have just been normal bugs. This has essentially been a DoS attack on maintainer time, since we're unable to change them into normal bugs after assessment. We now have a libuv-security@googlegroups.com list instead to help redirect those seeking CVE fame. The hope is to redirect most people to actually use the issue list as it has always been intended to be used.
2026-03-29 15:14:04 +02:00
Saúl Ibarra Corretgé
901e28384b
test: use correct type for uv_fileno arguments (#4673)
Fixes a -Wincompatible-pointer-types error on Windows, since uv_os_fd_t
is not int but HANDLE.
2026-03-25 20:29:59 -04:00
Jameson Nash
40d45efebf
unix: use posix_spawn instead of fork (#3520)
Remove the conditionals so that posix_spawn will be used whenever
possible, and not only on Apple.

Tests specifically if posix_spawn works before using it: it is broken
on QEMU with glibc, for example, since fork/clone is broken there.
2026-03-25 20:21:11 -04:00
Jameson Nash
d19855c702
win: use WSA_FLAG_NO_HANDLE_INHERIT in all WSASocketW (#5097)
Like #4810, but for all socket calls.
2026-03-25 20:17:11 -04:00
Jameson Nash
046aebe79b
win,pipe: skip IOCP for pipe handles (#5071)
Copy the optimization from tcp/udp stack: when read/writes don't require
IOCP to complete, put them directly right into the pending queue, so
they can be processed without a full loop through IOCP each time to
drain them asynchronously.
2026-03-24 10:45:00 -04:00
Jameson Nash
fa0ac9ec0c
io: make libuv 64-bit safe (#5076)
Because libuv truncates the result of every call to INT32_MAX, it needs
to internally limit operations to INT32_MAX to be safe to use libuv.
This isn't an API change, since these operations weren't guaranteed to
work, and in fact usually failed in bizarre ways already. This is very
long in coming, since we've had a lot of compiler warnings about this
and several PRs to fix this open for a decade, but the main consumers
that usually fix things didn't care (nodejs is 32-bit and julia patched
this downstream more than a decade ago, though it did run into this
again recently by mistake with sendfile).

Replaces #1501
Fixes #3360
2026-03-24 10:32:27 -04:00
BarryLhm
e3a27e0728
android: fix termux build (#5093)
Fixes: https://github.com/libuv/libuv/issues/5092
2026-03-24 09:59:50 +01:00
Ben Noordhuis
8877568581
unix: enforce recvmmsg buffer size requirements (#5095)
The documentation already stated that the receive buffer should be
a multiple of 64 KiB when the UV_UDP_RECVMMSG is used, but make that
more prominent in the documentation and enforce it in the code.

Refs: https://github.com/libuv/libuv/security/advisories/GHSA-r846-fxvr-f3rx
2026-03-23 21:25:33 +01:00
Jameson Nash
9f0101dcb8
linux: close streams without an extra read (#3250) 2026-03-21 15:10:05 -04:00
Ben Noordhuis
14f6c4cc1f
test: fix -Wunused-function warning (#5090)
The function is only used on BSDs. Introduced in commit 919b92d9 ("unix:
support long path names in pipe.c") from a few days ago.
2026-03-21 13:18:43 +01:00
Ben Noordhuis
1f56978a28
unix: check return value of fcntl call (#5089)
The fcntl cannot fail at this point but let's check the return code
anyway, just in case.

Fixes: https://github.com/libuv/libuv/issues/5080
2026-03-21 13:18:13 +01:00
Jameson Nash
58418d5310
process: better validation for process->pid usage (#3539)
Previously, the user might unknowingly close a uv_process_t before
doing waitpid on the zombie, leaving it forever undead. Track the state
of the child, so that the application wrapper can avoid this by calling
uv_process_kill and checking for UV_ESRCH error.
2026-03-19 15:28:02 -04:00
Ben Noordhuis
1899789be8
doc: remove random reference to libev (#5075)
The document doesn't reference libev anywhere else so it's rather
incongruous to mention it here.

Also add a comma to make the sentence flow better.
2026-03-18 22:53:00 +01:00
32 changed files with 760 additions and 300 deletions

View File

@ -634,6 +634,7 @@ if(LIBUV_BUILD_TESTS)
test/test-socket-buffer-size.c
test/test-spawn.c
test/test-stdio-over-pipes.c
test/test-io-64-safe.c
test/test-strscpy.c
test/test-strtok.c
test/test-tcp-alloc-cb-fail.c

View File

@ -259,6 +259,7 @@ test_run_tests_SOURCES = test/blackhole-server.c \
test/test-socket-buffer-size.c \
test/test-spawn.c \
test/test-stdio-over-pipes.c \
test/test-io-64-safe.c \
test/test-strscpy.c \
test/test-strtok.c \
test/test-tcp-alloc-cb-fail.c \

View File

@ -10,9 +10,9 @@ Currently, we are providing security updates for the latest release in the v1.x
## Reporting a Vulnerability
If you believe you have found a security vulnerability in `libuv`, please use the [GitHub's private vulnerability reporting feature](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability) in the [libuv repository](https://github.com/libuv/libuv) to report it to us.
If you believe you have found an active security vulnerability in `libuv`, please report it to libuv-security@googlegroups.com. Please report all other issues on the GitHub issue tracker. We have been forced to terminate the ability to use GitHub's private vulnerability reporting due to a flood of AI-generated report spam, and a lack of sufficient moderation tools to manage the false reports.
This will allow us to assess the risk, and make a fix available before we add a bug report to the GitHub repository.
This will allow us to assess the risk and make a fix available before we add a bug report to the GitHub repository, issue a GitHub security advisory, and assign a CVE.
Please do:
@ -24,4 +24,4 @@ Please do not:
* Post any information about the vulnerability in public places.
* Attempt to exploit the vulnerability yourself.
We take all security bugs seriously. Thank you for improving the security of `libuv`. We appreciate your efforts and responsible disclosure and will make every effort to acknowledge your contributions.
We take all security bugs seriously. Thank you for improving the security of `libuv`. We appreciate your efforts and responsible disclosure and will make every effort to acknowledge your contributions.

View File

@ -66,9 +66,9 @@ that other events can continue to be handled as fast as they come in [#]_.
processor, libuv and OSes will usually run background/worker threads and/or
polling to perform tasks in a non-blocking manner.
Bert Belder, one of the libuv core developers has a small video explaining the
Bert Belder, one of the libuv core developers, has a small video explaining the
architecture of libuv and its background. If you have no prior experience with
either libuv or libev, it is a quick, useful watch.
libuv, it is a quick, useful watch.
libuv's event loop is explained in more detail in the `documentation
<https://docs.libuv.org/en/v1.x/design.html#the-i-o-loop>`_.

View File

@ -297,6 +297,13 @@ API
`base` and `len` members of the uv_buf_t struct. The user is responsible for
freeing `base` after the uv_buf_t is done. Return struct passed by value.
.. warning:: It is discouraged to set `len` to a large value as that may
result in spurious failures. Specifically, Windows may fail on
writes larger than about 511 MB, and various Unices may fail
on I/O larger than about 2 GB (0x7ffff000 bytes). Instead it is
generally better to split the data into multiple `uv_write`
calls (attach the `write_cb` to the last one).
.. c:function:: char** uv_setup_args(int argc, char** argv)
Store the program arguments. Required for getting / setting the process title

View File

@ -172,7 +172,10 @@ Public members
.. c:member:: int uv_process_t.pid
The PID of the spawned process. It's set after calling :c:func:`uv_spawn`.
The PID of the spawned process. It is set after calling :c:func:`uv_spawn`
and retains the value even after the process exits. The value is only
unique while the process is alive; after exit, another process may be
reassigned the same PID.
.. note::
The :c:type:`uv_handle_t` members also apply.
@ -259,14 +262,20 @@ API
Initializes the process handle and starts the process. If the process is
successfully spawned, this function will return 0. Otherwise, the
negative error code corresponding to the reason it couldn't spawn is
returned. Note that either way you must eventually call :c:func:`uv_close`
to close the handle again.
returned. Note that either way — success or failure — you must eventually call
:c:func:`uv_close` to close the handle again before freeing the memory of
the handle, unlike the other init functions in libuv.
Possible reasons for failing to spawn would include (but not be limited to)
the file to execute not existing, not having permissions to use the setuid or
setgid specified, or not having enough memory to allocate for the new
process.
.. warning::
On unix, if the process has not yet exited when you call `uv_close`,
you will create a zombie that libuv cannot reap. You are responsible
for calling `waitpid` later. This is not relevant on Windows.
.. versionchanged:: 1.24.0 Added `UV_PROCESS_WINDOWS_HIDE_CONSOLE` and
`UV_PROCESS_WINDOWS_HIDE_GUI` flags.
@ -278,6 +287,11 @@ API
Sends the specified signal to the given process handle. Check the documentation
on :c:ref:`signal` for signal support, especially on Windows.
If the specified process is already dead, this will not kill a different
process which happened to reuse the same pid. By contrast, `uv_kill` may
kill an arbitrary other process if you use a cached value of
:c:func:`uv_process_get_pid`.
.. c:function:: int uv_kill(int pid, int signum)
Sends the specified signal to the given PID. Check the documentation

View File

@ -76,7 +76,8 @@ Data types
*/
UV_UDP_REUSEPORT = 64,
/*
* Indicates that recvmmsg should be used, if available.
* Indicates that recvmmsg should be used, if available. The uv_alloc_cb
* for this handle should create buffers that are multiples of 64 KiB.
*/
UV_UDP_RECVMMSG = 256
};
@ -168,7 +169,8 @@ API
The remaining bits can be used to set one of these flags:
* `UV_UDP_RECVMMSG`: if set, and the platform supports it, :man:`recvmmsg(2)` will
be used.
be used. The :c:type:`uv_alloc_cb` for this handle should create
buffers that are multiples of 64 KiB.
.. versionadded:: 1.7.0
.. versionchanged:: 1.37.0 added the `UV_UDP_RECVMMSG` flag.
@ -481,8 +483,8 @@ API
`suggested_size` in `alloc_cb` for udp_recv is always set to the size of 1 max size dgram.
.. versionchanged:: 1.35.0 added support for :man:`recvmmsg(2)` on supported platforms).
The use of this feature requires a buffer larger than
2 * 64KB to be passed to `alloc_cb`.
The :c:type:`uv_alloc_cb` for this handle should create
buffers that are multiples of 64 KiB.
.. versionchanged:: 1.37.0 :man:`recvmmsg(2)` support is no longer enabled implicitly,
it must be explicitly requested by passing the `UV_UDP_RECVMMSG` flag to
:c:func:`uv_udp_init_ex`.

View File

@ -705,7 +705,8 @@ enum uv_udp_flags {
*/
UV_UDP_REUSEPORT = 64,
/*
* Indicates that recvmmsg should be used, if available.
* Indicates that recvmmsg should be used, if available. The uv_alloc_cb
* for this handle should create buffers that are multiples of 64 KiB.
*/
UV_UDP_RECVMMSG = 256
};

View File

@ -167,7 +167,7 @@ static int uv__fs_close(int fd) {
}
static ssize_t uv__fs_fsync(uv_fs_t* req) {
static int uv__fs_fsync(uv_fs_t* req) {
#if defined(__APPLE__)
/* Apple's fdatasync and fsync explicitly do NOT flush the drive write cache
* to the drive platters. This is in contrast to Linux's fdatasync and fsync
@ -191,7 +191,7 @@ static ssize_t uv__fs_fsync(uv_fs_t* req) {
}
static ssize_t uv__fs_fdatasync(uv_fs_t* req) {
static int uv__fs_fdatasync(uv_fs_t* req) {
#if defined(__linux__) || defined(__sun) || defined(__NetBSD__)
return fdatasync(req->file);
#elif defined(__APPLE__)
@ -233,7 +233,7 @@ static struct timespec uv__fs_to_timespec(double time) {
#endif
static ssize_t uv__fs_futime(uv_fs_t* req) {
static int uv__fs_futime(uv_fs_t* req) {
#if defined(__APPLE__) \
|| defined(_AIX71) \
|| defined(__DragonFly__) \
@ -263,7 +263,7 @@ static ssize_t uv__fs_futime(uv_fs_t* req) {
}
static ssize_t uv__fs_mkdtemp(uv_fs_t* req) {
static int uv__fs_mkdtemp(uv_fs_t* req) {
return mkdtemp((char*) req->path) ? 0 : -1;
}
@ -359,7 +359,7 @@ clobber:
}
static ssize_t uv__fs_open(uv_fs_t* req) {
static int uv__fs_open(uv_fs_t* req) {
#ifdef O_CLOEXEC
return open(req->path, req->flags | O_CLOEXEC, req->mode);
#else /* O_CLOEXEC */
@ -388,11 +388,11 @@ static ssize_t uv__fs_open(uv_fs_t* req) {
}
static ssize_t uv__preadv_or_pwritev_emul(int fd,
const struct iovec* bufs,
size_t nbufs,
off_t off,
int is_pread) {
static int uv__preadv_or_pwritev_emul(int fd,
const struct iovec* bufs,
size_t nbufs,
off_t off,
int is_pread) {
ssize_t total;
ssize_t r;
size_t i;
@ -435,18 +435,18 @@ typedef size_t uv__iovcnt;
#endif
static ssize_t uv__preadv_emul(int fd,
const struct iovec* bufs,
uv__iovcnt nbufs,
off_t off) {
static int uv__preadv_emul(int fd,
const struct iovec* bufs,
uv__iovcnt nbufs,
off_t off) {
return uv__preadv_or_pwritev_emul(fd, bufs, nbufs, off, /*is_pread*/1);
}
static ssize_t uv__pwritev_emul(int fd,
const struct iovec* bufs,
uv__iovcnt nbufs,
off_t off) {
static int uv__pwritev_emul(int fd,
const struct iovec* bufs,
uv__iovcnt nbufs,
off_t off) {
return uv__preadv_or_pwritev_emul(fd, bufs, nbufs, off, /*is_pread*/0);
}
@ -454,14 +454,14 @@ static ssize_t uv__pwritev_emul(int fd,
/* The function pointer cache is an uintptr_t because _Atomic void*
* doesn't work on macos/ios/etc...
*/
static ssize_t uv__preadv_or_pwritev(int fd,
const struct iovec* bufs,
size_t nbufs,
off_t off,
_Atomic uintptr_t* cache,
int is_pread) {
static int uv__preadv_or_pwritev(int fd,
const struct iovec* bufs,
size_t nbufs,
off_t off,
_Atomic uintptr_t* cache,
int is_pread) {
union {
ssize_t (*f)(int, const struct iovec*, uv__iovcnt, off_t);
int (*f)(int, const struct iovec*, uv__iovcnt, off_t);
void* p;
} u;
@ -485,7 +485,7 @@ static ssize_t uv__preadv_or_pwritev(int fd,
}
static ssize_t uv__preadv(int fd,
static int uv__preadv(int fd,
const struct iovec* bufs,
size_t nbufs,
off_t off) {
@ -494,16 +494,16 @@ static ssize_t uv__preadv(int fd,
}
static ssize_t uv__pwritev(int fd,
const struct iovec* bufs,
size_t nbufs,
off_t off) {
static int uv__pwritev(int fd,
const struct iovec* bufs,
size_t nbufs,
off_t off) {
static _Atomic uintptr_t cache;
return uv__preadv_or_pwritev(fd, bufs, nbufs, off, &cache, /*is_pread*/0);
}
static ssize_t uv__fs_read(uv_fs_t* req) {
static int uv__fs_read(uv_fs_t* req) {
const struct iovec* bufs;
unsigned int iovmax;
size_t nbufs;
@ -520,17 +520,35 @@ static ssize_t uv__fs_read(uv_fs_t* req) {
if (nbufs > iovmax)
nbufs = iovmax;
/* Truncate multi-buf reads to UV__IO_MAX_BYTES total, dropping trailing bufs. */
if (nbufs > 1) {
size_t total;
size_t n;
for (total = 0, n = 0; n < nbufs; n++) {
if (bufs[n].iov_len > UV__IO_MAX_BYTES - total)
break;
total += bufs[n].iov_len;
}
nbufs = n > 0 ? n : 1;
}
r = 0;
if (off < 0) {
if (nbufs == 1)
r = read(fd, bufs->iov_base, bufs->iov_len);
else if (nbufs > 1)
if (nbufs == 1) {
r = read(fd, bufs->iov_base,
bufs->iov_len > UV__IO_MAX_BYTES ? UV__IO_MAX_BYTES : bufs->iov_len);
} else if (nbufs > 1) {
r = readv(fd, bufs, nbufs);
}
} else {
if (nbufs == 1)
r = pread(fd, bufs->iov_base, bufs->iov_len, off);
else if (nbufs > 1)
if (nbufs == 1) {
r = pread(fd, bufs->iov_base,
bufs->iov_len > UV__IO_MAX_BYTES ? UV__IO_MAX_BYTES : bufs->iov_len,
off);
}
else if (nbufs > 1) {
r = uv__preadv(fd, bufs, nbufs, off);
}
}
#ifdef __PASE__
@ -567,7 +585,7 @@ static int uv__fs_scandir_sort(const uv__dirent_t** a, const uv__dirent_t** b) {
}
static ssize_t uv__fs_scandir(uv_fs_t* req) {
static int uv__fs_scandir(uv_fs_t* req) {
uv__dirent_t** dents;
int n;
@ -732,7 +750,7 @@ static ssize_t uv__fs_pathmax_size(const char* path) {
return pathmax;
}
static ssize_t uv__fs_readlink(uv_fs_t* req) {
static int uv__fs_readlink(uv_fs_t* req) {
ssize_t maxlen;
ssize_t len;
char* buf;
@ -791,7 +809,7 @@ static ssize_t uv__fs_readlink(uv_fs_t* req) {
return 0;
}
static ssize_t uv__fs_realpath(uv_fs_t* req) {
static int uv__fs_realpath(uv_fs_t* req) {
char* buf;
char* tmp;
@ -829,7 +847,7 @@ static ssize_t uv__fs_realpath(uv_fs_t* req) {
return 0;
}
static ssize_t uv__fs_sendfile_emul(uv_fs_t* req) {
static int uv__fs_sendfile_emul(uv_fs_t* req) {
struct pollfd pfd;
int use_pread;
off_t offset;
@ -1027,7 +1045,7 @@ static ssize_t uv__fs_try_copy_file_range(int in_fd, off_t* off,
#endif /* __linux__ */
static ssize_t uv__fs_sendfile(uv_fs_t* req) {
static int uv__fs_sendfile(uv_fs_t* req) {
int in_fd;
int out_fd;
@ -1116,7 +1134,7 @@ static ssize_t uv__fs_sendfile(uv_fs_t* req) {
*/
if (r == 0 || ((errno == EAGAIN || errno == EINTR) && len != 0)) {
req->off += len;
return (ssize_t) len;
return len;
}
if (errno == EINVAL ||
@ -1139,7 +1157,7 @@ static ssize_t uv__fs_sendfile(uv_fs_t* req) {
}
static ssize_t uv__fs_utime(uv_fs_t* req) {
static int uv__fs_utime(uv_fs_t* req) {
#if defined(__APPLE__) \
|| defined(_AIX71) \
|| defined(__DragonFly__) \
@ -1174,7 +1192,7 @@ static ssize_t uv__fs_utime(uv_fs_t* req) {
}
static ssize_t uv__fs_lutime(uv_fs_t* req) {
static int uv__fs_lutime(uv_fs_t* req) {
#if defined(__APPLE__) \
|| defined(_AIX71) \
|| defined(__DragonFly__) \
@ -1196,7 +1214,7 @@ static ssize_t uv__fs_lutime(uv_fs_t* req) {
}
static ssize_t uv__fs_write(uv_fs_t* req) {
static int uv__fs_write(uv_fs_t* req) {
const struct iovec* bufs;
size_t nbufs;
ssize_t r;
@ -1225,7 +1243,7 @@ static ssize_t uv__fs_write(uv_fs_t* req) {
}
static ssize_t uv__fs_copyfile(uv_fs_t* req) {
static int uv__fs_copyfile(uv_fs_t* req) {
uv_fs_t fs_req;
uv_file srcfd;
uv_file dstfd;
@ -1633,7 +1651,7 @@ static size_t uv__fs_buf_offset(uv_buf_t* bufs, size_t size) {
return offset;
}
static ssize_t uv__fs_write_all(uv_fs_t* req) {
static int uv__fs_write_all(uv_fs_t* req) {
unsigned int iovmax;
unsigned int nbufs;
uv_buf_t* bufs;
@ -1682,7 +1700,7 @@ static ssize_t uv__fs_write_all(uv_fs_t* req) {
static void uv__fs_work(struct uv__work* w) {
int retry_on_eintr;
uv_fs_t* req;
ssize_t r;
int r;
req = container_of(w, uv_fs_t, work_req);
retry_on_eintr = !(req->fs_type == UV_FS_CLOSE ||
@ -2150,6 +2168,8 @@ int uv_fs_sendfile(uv_loop_t* loop,
req->flags = in_fd; /* hack */
req->file = out_fd;
req->off = off;
if (len > UV__IO_MAX_BYTES)
return UV_EINVAL;
req->bufsml[0].len = len;
POST;
}
@ -2217,6 +2237,9 @@ int uv_fs_write(uv_loop_t* loop,
if (bufs == NULL || nbufs == 0)
return UV_EINVAL;
if (uv__count_bufs(bufs, nbufs) > UV__IO_MAX_BYTES)
return UV_EINVAL;
req->file = file;
req->nbufs = nbufs;

View File

@ -27,6 +27,7 @@
#include "internal.h"
#include <inttypes.h>
#include <limits.h>
#include <stdatomic.h>
#include <stddef.h> /* offsetof */
#include <stdint.h>
@ -56,6 +57,14 @@
#include <time.h>
#include <unistd.h>
/* android ndk workaround */
#ifndef LLONG_MAX
#define LLONG_MAX 9223372036854775807LL
#endif
#ifndef LLONG_MIN
#define LLONG_MIN (-9223372036854775807LL - 1)
#endif
#ifndef __NR_io_uring_setup
# define __NR_io_uring_setup 425
#endif
@ -1516,25 +1525,6 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
*/
pe->events &= w->pevents | POLLERR | POLLHUP;
/* Work around an epoll quirk where it sometimes reports just the
* EPOLLERR or EPOLLHUP event. In order to force the event loop to
* move forward, we merge in the read/write events that the watcher
* is interested in; uv__read() and uv__write() will then deal with
* the error or hangup in the usual fashion.
*
* Note to self: happens when epoll reports EPOLLIN|EPOLLHUP, the user
* reads the available data, calls uv_read_stop(), then sometime later
* calls uv_read_start() again. By then, libuv has forgotten about the
* hangup and the kernel won't report EPOLLIN again because there's
* nothing left to read. If anything, libuv is to blame here. The
* current hack is just a quick bandaid; to properly fix it, libuv
* needs to remember the error/hangup event. We should get that for
* free when we switch over to edge-triggered I/O.
*/
if (pe->events == POLLERR || pe->events == POLLHUP)
pe->events |=
w->pevents & (POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
if (pe->events != 0) {
/* Run signal watchers last. This also affects child process watchers
* because those are implemented in terms of signal watchers.

View File

@ -60,6 +60,12 @@ void uv__poll_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
if (events & UV__POLLRDHUP)
pevents |= UV_DISCONNECT;
/* On error or hangup, mix in events the user is interested in so the
* appropriate read/write callbacks are invoked. */
if (events & (POLLERR | POLLHUP))
pevents |=
w->pevents & (UV_READABLE | UV_PRIORITIZED | UV_WRITABLE | UV_DISCONNECT);
handle->poll_cb(handle, 0, pevents);
}

View File

@ -33,15 +33,13 @@
#include <sys/wait.h>
#include <unistd.h>
#include <fcntl.h>
#include <poll.h>
#include <spawn.h>
#include <paths.h>
#include <dlfcn.h>
#if defined(__APPLE__)
# include <spawn.h>
# include <paths.h>
# include <sys/kauth.h>
# include <sys/types.h>
# include <sys/sysctl.h>
# include <dlfcn.h>
# include <crt_externs.h>
# include <xlocale.h>
# define environ (*_NSGetEnviron())
@ -53,6 +51,9 @@
#else
extern char **environ;
#ifndef POSIX_SPAWN_SETSID
# define POSIX_SPAWN_SETSID 0
#endif
#endif
#if defined(__linux__) || \
@ -70,6 +71,15 @@ extern char **environ;
#define UV_USE_SIGCHLD
#endif
static uv_once_t posix_spawn_init_once = UV_ONCE_INIT;
static int posix_spawn_can_use_setsid;
static volatile int posix_spawn_works;
static struct uv__posix_spawn_fncs_s {
struct {
int (*addchdir)(posix_spawn_file_actions_t *, const char *);
} file_actions;
} posix_spawn_fncs;
#ifdef UV_USE_SIGCHLD
static void uv__chld(uv_signal_t* handle, int signum) {
@ -145,6 +155,7 @@ void uv__wait_children(uv_loop_t* loop) {
}
assert(pid == process->pid);
process->flags |= UV_HANDLE_ESRCH; /* pid is no longer valid (or unique) */
process->status = status;
uv__queue_remove(&process->queue);
uv__queue_insert_tail(&pending, &process->queue);
@ -291,6 +302,7 @@ static void uv__process_child_init(const uv_process_options_t* options,
sigset_t signewset;
int close_fd;
int use_fd;
int err;
int fd;
int n;
@ -333,9 +345,9 @@ static void uv__process_child_init(const uv_process_options_t* options,
if (pipes[fd][1] == -1)
uv__write_errno(error_fd);
#ifndef F_DUPFD_CLOEXEC /* POSIX 2008 */
n = uv__cloexec(pipes[fd][1], 1);
if (n)
uv__write_int(error_fd, n);
err = uv__cloexec(pipes[fd][1], 1);
if (err)
uv__write_int(error_fd, err);
#endif
}
@ -360,9 +372,9 @@ static void uv__process_child_init(const uv_process_options_t* options,
if (fd == use_fd) {
if (close_fd == -1) {
n = uv__cloexec(use_fd, 0);
if (n)
uv__write_int(error_fd, n);
err = uv__cloexec(use_fd, 0);
if (err)
uv__write_int(error_fd, err);
}
}
else {
@ -372,8 +384,11 @@ static void uv__process_child_init(const uv_process_options_t* options,
if (fd == -1)
uv__write_errno(error_fd);
if (fd <= 2 && close_fd == -1)
uv__nonblock_fcntl(fd, 0);
if (fd <= 2 && close_fd == -1) {
err = uv__nonblock_fcntl(fd, 0);
if (err)
uv__write_int(error_fd, err);
}
if (close_fd >= stdio_count)
uv__close(close_fd);
@ -418,25 +433,6 @@ static void uv__process_child_init(const uv_process_options_t* options,
#if defined(__APPLE__)
typedef struct uv__posix_spawn_fncs_tag {
struct {
int (*addchdir_np)(const posix_spawn_file_actions_t *, const char *);
} file_actions;
} uv__posix_spawn_fncs_t;
static uv_once_t posix_spawn_init_once = UV_ONCE_INIT;
static uv__posix_spawn_fncs_t posix_spawn_fncs;
static int posix_spawn_can_use_setsid;
static void uv__spawn_init_posix_spawn_fncs(void) {
/* Try to locate all non-portable functions at runtime */
posix_spawn_fncs.file_actions.addchdir_np =
dlsym(RTLD_DEFAULT, "posix_spawn_file_actions_addchdir_np");
}
static void uv__spawn_init_can_use_setsid(void) {
int which[] = {CTL_KERN, KERN_OSRELEASE};
unsigned major;
@ -455,20 +451,52 @@ static void uv__spawn_init_can_use_setsid(void) {
posix_spawn_can_use_setsid = (major >= 19); /* macOS Catalina */
}
#endif
static void uv__spawn_init_posix_spawn(void) {
/* Init handles to all potentially non-defined functions */
uv__spawn_init_posix_spawn_fncs();
#if !defined(__linux__)
posix_spawn_works = 1;
#elif !defined(__ANDROID__)
pid_t pid;
int status;
/* Init feature detection for POSIX_SPAWN_SETSID flag */
/* Probe whether vfork()/clone(CLONE_VM) correctly shares the address space,
* i.e. a write by the child before _exit() is visible to the parent once it
* resumes. On Linux vfork() is equivalent to
* clone(CLONE_VM|CLONE_VFORK|SIGCHLD). On QEMU and WSL1, CLONE_VM is broken,
* resulting in glibc errors if we try to use posix_spawn(). */
posix_spawn_works = 0;
pid = vfork();
if (pid == 0) {
posix_spawn_works = 1;
_exit(0);
}
if (pid > 0)
waitpid(pid, &status, 0);
#endif
/* Try to locate all new functions at runtime.
* Expected on macOS, glibc, and musl. */
posix_spawn_fncs.file_actions.addchdir =
dlsym(RTLD_DEFAULT, "posix_spawn_file_actions_addchdir");
if (posix_spawn_fncs.file_actions.addchdir == NULL)
posix_spawn_fncs.file_actions.addchdir =
dlsym(RTLD_DEFAULT, "posix_spawn_file_actions_addchdir_np");
#ifdef __APPLE__
/* Init feature detection for POSIX_SPAWN_SETSID flag. */
uv__spawn_init_can_use_setsid();
#elif POSIX_SPAWN_SETSID != 0
/* Otherwise, if SETSID is defined, we can use it
* (added in glibc 2.26 circa 2017). */
posix_spawn_can_use_setsid = 1;
#endif
}
static int uv__spawn_set_posix_spawn_attrs(
posix_spawnattr_t* attrs,
const uv__posix_spawn_fncs_t* posix_spawn_fncs,
const uv_process_options_t* options) {
int err;
unsigned int flags;
@ -490,18 +518,17 @@ static int uv__spawn_set_posix_spawn_attrs(
}
/* Set flags for spawn behavior
* 1) POSIX_SPAWN_CLOEXEC_DEFAULT: (Apple Extension) All descriptors in the
* 1) POSIX_SPAWN_SETSIGDEF: Signals mentioned in spawn-sigdefault in the
* spawn attributes will be reset to behave as their default
* 2) POSIX_SPAWN_SETSIGMASK: Signal mask will be set to the value of
* spawn-sigmask in attributes
* 3) POSIX_SPAWN_SETSID: Make the process a new session leader if a detached
* session was requested.
* 4) POSIX_SPAWN_CLOEXEC_DEFAULT: (Apple Extension) All descriptors in the
* parent will be treated as if they had been created with O_CLOEXEC. The
* only fds that will be passed on to the child are those manipulated by
* the file actions
* 2) POSIX_SPAWN_SETSIGDEF: Signals mentioned in spawn-sigdefault in the
* spawn attributes will be reset to behave as their default
* 3) POSIX_SPAWN_SETSIGMASK: Signal mask will be set to the value of
* spawn-sigmask in attributes
* 4) POSIX_SPAWN_SETSID: Make the process a new session leader if a detached
* session was requested. */
flags = POSIX_SPAWN_CLOEXEC_DEFAULT |
POSIX_SPAWN_SETSIGDEF |
* the file actions */
flags = POSIX_SPAWN_SETSIGDEF |
POSIX_SPAWN_SETSIGMASK;
if (options->flags & UV_PROCESS_DETACHED) {
/* If running on a version of macOS where this flag is not supported,
@ -514,6 +541,9 @@ static int uv__spawn_set_posix_spawn_attrs(
flags |= POSIX_SPAWN_SETSID;
}
#ifdef __APPLE__
flags |= POSIX_SPAWN_CLOEXEC_DEFAULT;
#endif
err = posix_spawnattr_setflags(attrs, flags);
if (err != 0)
goto error;
@ -540,7 +570,6 @@ error:
static int uv__spawn_set_posix_spawn_file_actions(
posix_spawn_file_actions_t* actions,
const uv__posix_spawn_fncs_t* posix_spawn_fncs,
const uv_process_options_t* options,
int stdio_count,
int (*pipes)[2]) {
@ -557,12 +586,12 @@ static int uv__spawn_set_posix_spawn_file_actions(
/* Set the current working directory if requested */
if (options->cwd != NULL) {
if (posix_spawn_fncs->file_actions.addchdir_np == NULL) {
if (posix_spawn_fncs.file_actions.addchdir == NULL) {
err = ENOSYS;
goto error;
}
err = posix_spawn_fncs->file_actions.addchdir_np(actions, options->cwd);
err = posix_spawn_fncs.file_actions.addchdir(actions, options->cwd);
if (err != 0)
goto error;
}
@ -575,8 +604,16 @@ static int uv__spawn_set_posix_spawn_file_actions(
* stdout and stderr go to the same fd, which was not the intention. */
for (fd = 0; fd < stdio_count; fd++) {
use_fd = pipes[fd][1];
#if defined(__APPLE__) || defined(__linux__)
if (use_fd < 0 || use_fd >= fd)
continue;
#else
/* The behavior of posix_spawn_file_actions_adddup2 may be undefined if
* use_fd==fd, so we do this extra little dance to copy it up and back, on
* platforms where we aren't sure if it works. */
if (use_fd < 0 || use_fd > fd)
continue;
#endif
use_fd = stdio_count;
for (fd2 = 0; fd2 < stdio_count; fd2++) {
/* If we were not setting POSIX_SPAWN_CLOEXEC_DEFAULT, we would need to
@ -618,9 +655,11 @@ static int uv__spawn_set_posix_spawn_file_actions(
}
}
#ifdef __APPLE__
if (fd == use_fd)
err = posix_spawn_file_actions_addinherit_np(actions, fd);
else
#endif
err = posix_spawn_file_actions_adddup2(actions, use_fd, fd);
assert(err != ENOSYS);
if (err != 0)
@ -776,22 +815,24 @@ static int uv__spawn_resolve_and_spawn(const uv_process_options_t* options,
static int uv__spawn_and_init_child_posix_spawn(
uv_loop_t* loop,
const uv_process_options_t* options,
int stdio_count,
int (*pipes)[2],
pid_t* pid,
const uv__posix_spawn_fncs_t* posix_spawn_fncs) {
pid_t* pid) {
int err;
posix_spawnattr_t attrs;
posix_spawn_file_actions_t actions;
err = uv__spawn_set_posix_spawn_attrs(&attrs, posix_spawn_fncs, options);
if (!posix_spawn_works)
return UV_ENOSYS;
err = uv__spawn_set_posix_spawn_attrs(&attrs, options);
if (err != 0)
goto error;
/* This may mutate pipes. */
err = uv__spawn_set_posix_spawn_file_actions(&actions,
posix_spawn_fncs,
options,
stdio_count,
pipes);
@ -800,12 +841,23 @@ static int uv__spawn_and_init_child_posix_spawn(
goto error;
}
#ifndef __APPLE__
/* Acquire write lock to prevent opening new fds in worker threads.
* Unnecessary on Apple, since we set POSIX_SPAWN_CLOEXEC_DEFAULT. */
uv_rwlock_wrlock(&loop->cloexec_lock);
#endif
/* Try to spawn options->file resolving in the provided environment
* if any */
* if any. */
err = uv__spawn_resolve_and_spawn(options, &attrs, &actions, pid);
assert(err != ENOSYS);
/* Destroy the actions/attributes */
#ifndef __APPLE__
/* Release lock in parent process. */
uv_rwlock_wrunlock(&loop->cloexec_lock);
#endif
/* Destroy the actions/attributes. */
(void) posix_spawn_file_actions_destroy(&actions);
(void) posix_spawnattr_destroy(&attrs);
@ -814,7 +866,7 @@ error:
* already destroyed, only the happy path requires cleanup */
return UV__ERR(err);
}
#endif
static int uv__spawn_and_init_child_fork(const uv_process_options_t* options,
int stdio_count,
@ -869,36 +921,21 @@ static int uv__spawn_and_init_child(
int exec_errorno;
ssize_t r;
#if defined(__APPLE__)
uv_once(&posix_spawn_init_once, uv__spawn_init_posix_spawn);
/* Special child process spawn case for macOS Big Sur (11.0) onwards
*
* Big Sur introduced a significant performance degradation on a call to
* fork/exec when the process has many pages mmaped in with MAP_JIT, like, say
* a javascript interpreter. Electron-based applications, for example,
* are impacted; though the magnitude of the impact depends on how much the
* app relies on subprocesses.
*
* On macOS, though, posix_spawn is implemented in a way that does not
* exhibit the problem. This block implements the forking and preparation
* logic with posix_spawn and its related primitives. It also takes advantage of
* the macOS extension POSIX_SPAWN_CLOEXEC_DEFAULT that makes impossible to
* leak descriptors to the child process. */
err = uv__spawn_and_init_child_posix_spawn(options,
/* Calling posix_spawn is considerably faster, if it supports the given
* options. The posix_spawn flow will return UV_ENOSYS if any of the
* posix_spawn_x_np non-standard functions is both _needed_ and _undefined_.
* In those cases, default back to the fork/execve strategy. For all other
* errors, just fail. */
err = uv__spawn_and_init_child_posix_spawn(loop,
options,
stdio_count,
pipes,
pid,
&posix_spawn_fncs);
/* The posix_spawn flow will return UV_ENOSYS if any of the posix_spawn_x_np
* non-standard functions is both _needed_ and _undefined_. In those cases,
* default back to the fork/execve strategy. For all other errors, just fail. */
pid);
if (err != UV_ENOSYS)
return err;
#endif
/* This pipe is used by the parent to wait until
* the child has called `execve()`. We need this
* to avoid the following race condition:
@ -923,12 +960,12 @@ static int uv__spawn_and_init_child(
if (err)
return err;
/* Acquire write lock to prevent opening new fds in worker threads */
/* Acquire write lock to prevent opening new fds in worker threads. */
uv_rwlock_wrlock(&loop->cloexec_lock);
err = uv__spawn_and_init_child_fork(options, stdio_count, pipes, signal_pipe[1], pid);
/* Release lock in parent process */
/* Release lock in parent process. */
uv_rwlock_wrunlock(&loop->cloexec_lock);
uv__close(signal_pipe[1]);
@ -963,11 +1000,16 @@ static int uv__spawn_and_init_child(
}
#endif /* ISN'T TARGET_OS_TV || TARGET_OS_WATCH */
int uv_spawn(uv_loop_t* loop,
uv_process_t* process,
const uv_process_options_t* options) {
#if defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH)
/* fork is marked __WATCHOS_PROHIBITED __TVOS_PROHIBITED. */
uv__handle_init(loop, (uv_handle_t*)process, UV_PROCESS);
QUEUE_INIT(&process->queue);
process->status = 0;
process->pid = 0;
return UV_ENOSYS;
#else
int pipes_storage[8][2];
@ -991,6 +1033,7 @@ int uv_spawn(uv_loop_t* loop,
uv__handle_init(loop, (uv_handle_t*)process, UV_PROCESS);
uv__queue_init(&process->queue);
process->status = 0;
process->pid = 0;
stdio_count = options->stdio_count;
if (stdio_count < 3)
@ -1095,6 +1138,8 @@ error:
int uv_process_kill(uv_process_t* process, int signum) {
if (process->flags & UV_HANDLE_ESRCH)
return UV_ESRCH;
return uv_kill(process->pid, signum);
}
@ -1115,6 +1160,9 @@ int uv_kill(int pid, int signum) {
void uv__process_close(uv_process_t* handle) {
/* Warning: if UV_HANDLE_ESRCH is not set, the caller is creating a zombie
* that we cannot reap. We assume here that it is intentional, and that the
 * user will be wise and clean up later. */
uv__queue_remove(&handle->queue);
uv__handle_stop(handle);
#ifdef UV_USE_SIGCHLD

View File

@ -1058,11 +1058,14 @@ static void uv__read(uv_stream_t* stream) {
if (!is_ipc) {
do {
nread = read(uv__stream_fd(stream), buf.base, buf.len);
}
while (nread < 0 && errno == EINTR);
nread = read(uv__stream_fd(stream),
buf.base,
buf.len > UV__IO_MAX_BYTES ? UV__IO_MAX_BYTES : buf.len);
} while (nread < 0 && errno == EINTR);
} else {
/* ipc uses recvmsg */
if (buf.len > UV__IO_MAX_BYTES)
buf.len = UV__IO_MAX_BYTES;
msg.msg_flags = 0;
msg.msg_iov = (struct iovec*) &buf;
msg.msg_iovlen = 1;
@ -1074,8 +1077,7 @@ static void uv__read(uv_stream_t* stream) {
do {
nread = uv__recvmsg(uv__stream_fd(stream), &msg, 0);
}
while (nread < 0 && errno == EINTR);
} while (nread < 0 && errno == EINTR);
}
if (nread < 0) {
@ -1218,17 +1220,16 @@ void uv__stream_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
* operating systems, devices like PTYs sometimes produce partial reads even
* when more data is available.
*/
if ((events & POLLHUP) &&
if ((events & (POLLHUP | UV__POLLRDHUP)) &&
!(events & POLLIN) &&
(stream->flags & UV_HANDLE_READING) &&
!(stream->flags & UV_HANDLE_READ_EOF)) {
uv_buf_t buf = { NULL, 0 };
uv__stream_eof(stream, &buf);
if (uv__stream_fd(stream) == -1)
return; /* read_cb closed stream. */
}
if (uv__stream_fd(stream) == -1)
return; /* read_cb closed stream. */
if (events & (POLLOUT | POLLERR | POLLHUP)) {
uv__write(stream);
uv__write_callbacks(stream);
@ -1295,6 +1296,7 @@ static void uv__stream_connect(uv_stream_t* stream) {
static int uv__check_before_write(uv_stream_t* stream,
const uv_buf_t bufs[],
unsigned int nbufs,
uv_stream_t* send_handle) {
assert((stream->type == UV_TCP ||
@ -1309,6 +1311,13 @@ static int uv__check_before_write(uv_stream_t* stream,
if (nbufs < 1 || nbufs > 1024*1024)
return UV_EINVAL;
/* Reject writes above UV__IO_MAX_BYTES to be consistent with the EINVAL
 * returned on platforms such as macOS, which fail when the total size of the
 * iov exceeds 2GB, and to catch and prevent sign-extension bugs.
 */
if (uv__count_bufs(bufs, nbufs) > UV__IO_MAX_BYTES)
return UV_EINVAL;
if (uv__stream_fd(stream) < 0)
return UV_EBADF;
@ -1347,7 +1356,7 @@ int uv_write2(uv_write_t* req,
int empty_queue;
int err;
err = uv__check_before_write(stream, nbufs, send_handle);
err = uv__check_before_write(stream, bufs, nbufs, send_handle);
if (err < 0)
return err;
@ -1436,7 +1445,7 @@ int uv_try_write2(uv_stream_t* stream,
if (stream->connect_req != NULL || stream->write_queue_size != 0)
return UV_EAGAIN;
err = uv__check_before_write(stream, nbufs, send_handle);
err = uv__check_before_write(stream, bufs, nbufs, send_handle);
if (err < 0)
return err;

View File

@ -178,7 +178,11 @@ void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents) {
handle = container_of(w, uv_udp_t, io_watcher);
assert(handle->type == UV_UDP);
if (revents & POLLIN)
/* Trigger a recv and send to find out what POLLERR occurred. */
if (revents & POLLERR)
revents |= POLLIN | POLLOUT;
if (revents & (POLLIN | POLLERR))
uv__udp_recvmsg(handle, 0);
/* Just Linux support for now. */
@ -213,6 +217,8 @@ static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf, int flag) {
/* prepare structures for recvmmsg */
chunks = buf->len / UV__UDP_DGRAM_MAXSIZE;
if (chunks == 0)
return UV_EINVAL;
if (chunks > ARRAY_SIZE(iov))
chunks = ARRAY_SIZE(iov);
for (k = 0; k < chunks; ++k) {
@ -312,8 +318,11 @@ static void uv__udp_recvmsg(uv_udp_t* handle, int flag) {
if (uv_udp_using_recvmmsg(handle)) {
nread = uv__udp_recvmmsg(handle, &buf, flag);
if (nread > 0)
count -= nread;
if (nread <= 0) {
handle->recv_cb(handle, nread, &buf, NULL, 0);
return;
}
count -= nread;
continue;
}
@ -721,9 +730,6 @@ int uv__udp_try_send(uv_udp_t* handle,
unsigned int addrlen) {
int err;
if (nbufs < 1)
return UV_EINVAL;
/* already sending a message */
if (handle->send_queue_count != 0)
return UV_EAGAIN;

View File

@ -453,7 +453,10 @@ int uv__udp_is_connected(uv_udp_t* handle) {
}
int uv__udp_check_before_send(uv_udp_t* handle, const struct sockaddr* addr) {
int uv__udp_check_before_send(uv_udp_t* handle,
const uv_buf_t bufs[],
unsigned int nbufs,
const struct sockaddr* addr) {
unsigned int addrlen;
if (handle->type != UV_UDP)
@ -480,6 +483,12 @@ int uv__udp_check_before_send(uv_udp_t* handle, const struct sockaddr* addr) {
addrlen = 0;
}
if (nbufs < 1 || nbufs > 1024 * 1024)
return UV_EINVAL;
if (uv__count_bufs(bufs, nbufs) > UV__IO_MAX_BYTES)
return UV_EINVAL;
return addrlen;
}
@ -492,10 +501,7 @@ int uv_udp_send(uv_udp_send_t* req,
uv_udp_send_cb send_cb) {
int addrlen;
if (nbufs < 1 || nbufs > 1024 * 1024)
return UV_EINVAL;
addrlen = uv__udp_check_before_send(handle, addr);
addrlen = uv__udp_check_before_send(handle, bufs, nbufs, addr);
if (addrlen < 0)
return addrlen;
@ -509,10 +515,7 @@ int uv_udp_try_send(uv_udp_t* handle,
const struct sockaddr* addr) {
int addrlen;
if (nbufs < 1 || nbufs > 1024 * 1024)
return UV_EINVAL;
addrlen = uv__udp_check_before_send(handle, addr);
addrlen = uv__udp_check_before_send(handle, bufs, nbufs, addr);
if (addrlen < 0)
return addrlen;
@ -527,6 +530,7 @@ int uv_udp_try_send2(uv_udp_t* handle,
struct sockaddr* addrs[/*count*/],
unsigned int flags) {
unsigned int i;
int addrlen;
if (count < 1)
return UV_EINVAL;
@ -534,9 +538,11 @@ int uv_udp_try_send2(uv_udp_t* handle,
if (flags != 0)
return UV_EINVAL;
for (i = 0; i < count; i++)
if (nbufs[i] < 1 || nbufs[i] > 1024 * 1024)
return UV_EINVAL;
for (i = 0; i < count; i++) {
addrlen = uv__udp_check_before_send(handle, bufs[i], nbufs[i], addrs[i]);
if (addrlen < 0)
return addrlen;
}
if (handle->send_queue_count > 0)
return UV_EAGAIN;
@ -663,8 +669,11 @@ size_t uv__count_bufs(const uv_buf_t bufs[], unsigned int nbufs) {
size_t bytes;
bytes = 0;
for (i = 0; i < nbufs; i++)
bytes += (size_t) bufs[i].len;
for (i = 0; i < nbufs; i++) {
if (bufs[i].len > (size_t) INT32_MAX - bytes)
return INT32_MAX;
bytes += bufs[i].len;
}
return bytes;
}

View File

@ -137,6 +137,7 @@ enum {
UV_HANDLE_POLL_SLOW = 0x01000000,
/* Only used by uv_process_t handles. */
UV_HANDLE_ESRCH = 0x01000000,
UV_HANDLE_REAP = 0x10000000
};
@ -226,6 +227,12 @@ void uv__work_done(uv_async_t* handle);
size_t uv__count_bufs(const uv_buf_t bufs[], unsigned int nbufs);
/* On some platforms, notably macOS, attempting a read or write > 2GB returns
* an EINVAL. On Linux, IO syscalls will transfer at most this many bytes.
* Use this limit everywhere to avoid platform-specific failures.
*/
#define UV__IO_MAX_BYTES 0x7ffff000
int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value);
void uv__fs_scandir_cleanup(uv_fs_t* req);

View File

@ -433,6 +433,7 @@ void uv__process_fs_event_req(uv_loop_t* loop, uv_req_t* req,
WCHAR* filenamew = NULL;
WCHAR* long_filenamew = NULL;
DWORD offset = 0;
int dir_event_detected = 0;
assert(req->type == UV_FS_EVENT_REQ);
assert(handle->req_pending);
@ -459,6 +460,10 @@ void uv__process_fs_event_req(uv_loop_t* loop, uv_req_t* req,
assert(!filenamew);
assert(!long_filenamew);
if (file_info->FileNameLength == 0) {
dir_event_detected = 1;
}
/*
* Fire the event only if we were asked to watch a directory,
* or if the filename filter matches.
@ -587,6 +592,7 @@ void uv__process_fs_event_req(uv_loop_t* loop, uv_req_t* req,
sizeof(info)) &&
info.Directory &&
info.DeletePending) {
dir_event_detected = 1;
uv__convert_utf16_to_utf8(handle->dirw, -1, &filename);
handle->cb(handle, filename, UV_RENAME, 0);
uv__free(filename);
@ -599,6 +605,26 @@ void uv__process_fs_event_req(uv_loop_t* loop, uv_req_t* req,
if (handle->flags & UV_HANDLE_CLOSING) {
uv__want_endgame(loop, (uv_handle_t*)handle);
} else if (uv__is_active(handle)) {
/*
* Check if the handle has become a zombie pointing to \$Extend\$Deleted\.
* Only perform the check if we detected an event on the directory, which
* may indicate deletion.
*/
if (dir_event_detected) {
WCHAR path_buf[MAX_PATH];
DWORD path_len = GetFinalPathNameByHandleW(handle->dir_handle,
path_buf,
ARRAY_SIZE(path_buf),
FILE_NAME_NORMALIZED | VOLUME_NAME_NONE);
if (path_len > 0 && path_len < ARRAY_SIZE(path_buf)) {
if (wcsstr(path_buf, L"\\$Extend\\$Deleted\\") != NULL) {
handle->cb(handle, NULL, 0, UV_ENOENT);
return;
}
}
}
uv__fs_event_queue_readdirchanges(loop, handle);
}
}

View File

@ -889,6 +889,7 @@ void fs__read(uv_fs_t* req) {
bytes = 0;
do {
DWORD incremental_bytes;
DWORD to_read;
if (offset != -1) {
offset_.QuadPart = offset + bytes;
@ -896,9 +897,12 @@ void fs__read(uv_fs_t* req) {
overlapped.OffsetHigh = offset_.HighPart;
}
to_read = req->fs.info.bufs[index].len;
if (to_read > UV__IO_MAX_BYTES)
to_read = UV__IO_MAX_BYTES;
result = ReadFile(handle,
req->fs.info.bufs[index].base,
req->fs.info.bufs[index].len,
to_read,
&incremental_bytes,
overlapped_ptr);
bytes += incremental_bytes;
@ -1103,7 +1107,7 @@ void fs__write(uv_fs_t* req) {
result = WriteFile(handle,
req->fs.info.bufs[index].base,
req->fs.info.bufs[index].len,
(DWORD) req->fs.info.bufs[index].len,
&incremental_bytes,
overlapped_ptr);
bytes += incremental_bytes;
@ -3329,6 +3333,11 @@ int uv_fs_write(uv_loop_t* loop,
return UV_EINVAL;
}
if (uv__count_bufs(bufs, nbufs) > UV__IO_MAX_BYTES) {
SET_REQ_UV_ERROR(req, UV_EINVAL, ERROR_INVALID_PARAMETER);
return UV_EINVAL;
}
req->file.fd = fd;
req->fs.info.nbufs = nbufs;
@ -3698,6 +3707,8 @@ int uv_fs_sendfile(uv_loop_t* loop, uv_fs_t* req, uv_file fd_out,
req->file.fd = fd_in;
req->fs.info.fd_out = fd_out;
req->fs.info.offset = in_offset;
if (length > UV__IO_MAX_BYTES)
return UV_EINVAL;
req->fs.info.bufsml[0].len = length;
POST;
}

View File

@ -391,6 +391,32 @@ int uv_pipe(uv_file fds[2], int read_flags, int write_flags) {
}
/* Associate a pipe handle with the loop's I/O completion port and enable
 * completion-notification shortcuts.
 *
 * Returns 0 on success, otherwise the Windows error code of the failing
 * call. If the IOCP association fails (e.g. because some other process has
 * already associated this handle with a port — see the caller's note), the
 * handle is flagged UV_HANDLE_EMULATE_IOCP so completions are emulated via
 * events instead of the port. */
static DWORD uv__pipe_attach_iocp(HANDLE pipeHandle,
HANDLE iocp,
uv_pipe_t* handle) {
UCHAR sfcnm_flags;
DWORD err = 0;
/* Bind the pipe to the completion port, keyed by the uv handle pointer.
 * On failure, remember the error and fall back to emulated IOCP. */
if (CreateIoCompletionPort(pipeHandle, iocp, (ULONG_PTR) handle, 0) == NULL) {
err = GetLastError();
handle->flags |= UV_HANDLE_EMULATE_IOCP;
}
/* Always skip signaling the file's event object on completion. Only when
 * the IOCP association succeeded do we additionally ask the kernel to skip
 * queuing a completion packet for operations that finish synchronously. */
sfcnm_flags = FILE_SKIP_SET_EVENT_ON_HANDLE;
if (!err)
sfcnm_flags |= FILE_SKIP_COMPLETION_PORT_ON_SUCCESS;
if (SetFileCompletionNotificationModes(pipeHandle, sfcnm_flags)) {
/* Record that synchronous completions bypass the IOCP, so reads/writes
 * that succeed immediately can be queued as pending reqs directly. */
if (sfcnm_flags & FILE_SKIP_COMPLETION_PORT_ON_SUCCESS)
handle->flags |= UV_HANDLE_SYNC_BYPASS_IOCP;
} else {
err = GetLastError();
}
return err;
}
int uv__create_stdio_pipe_pair(uv_loop_t* loop,
uv_pipe_t* parent_pipe, HANDLE* child_pipe_ptr, unsigned int flags) {
/* The parent_pipe is always the server_pipe and kept by libuv.
@ -430,13 +456,9 @@ int uv__create_stdio_pipe_pair(uv_loop_t* loop,
if (err)
goto error;
if (CreateIoCompletionPort(server_pipe,
loop->iocp,
(ULONG_PTR) parent_pipe,
0) == NULL) {
err = GetLastError();
goto error;
}
err = uv__pipe_attach_iocp(server_pipe, loop->iocp, parent_pipe);
if (err)
uv_fatal_error(err, "uv__pipe_attach_iocp");
parent_pipe->handle = server_pipe;
*child_pipe_ptr = client_pipe;
@ -521,13 +543,10 @@ static int uv__set_pipe_handle(uv_loop_t* loop,
handle->pipe.conn.readfile_thread_handle = NULL;
InitializeCriticalSection(&handle->pipe.conn.readfile_thread_lock);
} else {
/* Overlapped pipe. Try to associate with IOCP. */
if (CreateIoCompletionPort(pipeHandle,
loop->iocp,
(ULONG_PTR) handle,
0) == NULL) {
handle->flags |= UV_HANDLE_EMULATE_IOCP;
}
/* Overlapped pipe. Try to associate with IOCP.
* Will set compatibility flags internally if this fails
* (because some other process already has activated IOCP). */
uv__pipe_attach_iocp(pipeHandle, loop->iocp, handle);
}
handle->handle = pipeHandle;
@ -540,6 +559,8 @@ static int uv__set_pipe_handle(uv_loop_t* loop,
static int pipe_alloc_accept(uv_loop_t* loop, uv_pipe_t* handle,
uv_pipe_accept_t* req, BOOL firstInstance) {
DWORD err;
assert(req->pipeHandle == INVALID_HANDLE_VALUE);
req->pipeHandle =
@ -554,12 +575,9 @@ static int pipe_alloc_accept(uv_loop_t* loop, uv_pipe_t* handle,
}
/* Associate it with IOCP so we can get events. */
if (CreateIoCompletionPort(req->pipeHandle,
loop->iocp,
(ULONG_PTR) handle,
0) == NULL) {
uv_fatal_error(GetLastError(), "CreateIoCompletionPort");
}
err = uv__pipe_attach_iocp(req->pipeHandle, loop->iocp, handle);
if (err)
uv_fatal_error(err, "uv__pipe_attach_iocp");
/* Stash a handle in the server object for use from places such as
* getsockname and chmod. As we transfer ownership of these to client
@ -664,7 +682,7 @@ void uv__pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle) {
FROM_PROTOCOL_INFO,
&xfer_queue_item->xfer_info.socket_info,
0,
WSA_FLAG_OVERLAPPED);
WSA_FLAG_OVERLAPPED | WSA_FLAG_NO_HANDLE_INHERIT);
uv__free(xfer_queue_item);
if (socket != INVALID_SOCKET)
@ -1106,6 +1124,8 @@ void uv__pipe_close(uv_loop_t* loop, uv_pipe_t* handle) {
static void uv__pipe_queue_accept(uv_loop_t* loop, uv_pipe_t* handle,
uv_pipe_accept_t* req, BOOL firstInstance) {
BOOL success;
assert(handle->flags & UV_HANDLE_LISTENING);
if (!firstInstance && !pipe_alloc_accept(loop, handle, req, FALSE)) {
@ -1120,22 +1140,22 @@ static void uv__pipe_queue_accept(uv_loop_t* loop, uv_pipe_t* handle,
/* Prepare the overlapped structure. */
memset(&(req->u.io.overlapped), 0, sizeof(req->u.io.overlapped));
if (!ConnectNamedPipe(req->pipeHandle, &req->u.io.overlapped) &&
GetLastError() != ERROR_IO_PENDING) {
success = ConnectNamedPipe(req->pipeHandle, &req->u.io.overlapped);
if (UV_SUCCEEDED_WITHOUT_IOCP(success)) {
/* Process the req without IOCP. */
SET_REQ_SUCCESS(req);
uv__insert_pending_req(loop, (uv_req_t*) req);
} else if (!UV_SUCCEEDED_WITH_IOCP(success)) {
if (GetLastError() == ERROR_PIPE_CONNECTED) {
SET_REQ_SUCCESS(req);
} else {
CloseHandle(req->pipeHandle);
req->pipeHandle = INVALID_HANDLE_VALUE;
/* Make this req pending reporting an error. */
SET_REQ_ERROR(req, GetLastError());
}
uv__insert_pending_req(loop, (uv_req_t*) req);
handle->reqs_pending++;
return;
}
/* Wait for completion via IOCP */
handle->reqs_pending++;
}
@ -1183,6 +1203,7 @@ int uv__pipe_accept(uv_pipe_t* server, uv_stream_t* client) {
/* Initialize the client handle and copy the pipeHandle to the client */
pipe_client->handle = req->pipeHandle;
pipe_client->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
pipe_client->flags |= UV_HANDLE_SYNC_BYPASS_IOCP;
/* Prepare the req to pick up a new connection */
server->pipe.serv.pending_accepts = req->next_pending;
@ -1392,8 +1413,6 @@ static void uv__pipe_queue_read(uv_loop_t* loop, uv_pipe_t* handle) {
if (!QueueUserWorkItem(&uv_pipe_zero_readfile_thread_proc,
req,
WT_EXECUTELONGFUNCTION)) {
/* Make this req pending reporting an error. */
SET_REQ_ERROR(req, GetLastError());
goto error;
}
} else {
@ -1410,18 +1429,15 @@ static void uv__pipe_queue_read(uv_loop_t* loop, uv_pipe_t* handle) {
NULL,
&req->u.io.overlapped);
if (!result && GetLastError() != ERROR_IO_PENDING) {
/* Make this req pending reporting an error. */
SET_REQ_ERROR(req, GetLastError());
if (UV_SUCCEEDED_WITHOUT_IOCP(result)) {
uv__insert_pending_req(loop, (uv_req_t*) req);
} else if (!UV_SUCCEEDED_WITH_IOCP(result)) {
goto error;
}
if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
} else if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
assert(req->wait_handle == INVALID_HANDLE_VALUE);
if (!RegisterWaitForSingleObject(&req->wait_handle,
req->event_handle, post_completion_read_wait, (void*) req,
INFINITE, WT_EXECUTEINWAITTHREAD | WT_EXECUTEONLYONCE)) {
SET_REQ_ERROR(req, GetLastError());
goto error;
}
}
@ -1434,6 +1450,8 @@ static void uv__pipe_queue_read(uv_loop_t* loop, uv_pipe_t* handle) {
return;
error:
/* Make this req pending reporting an error. */
SET_REQ_ERROR(req, GetLastError());
uv__insert_pending_req(loop, (uv_req_t*)req);
handle->flags |= UV_HANDLE_READ_PENDING;
handle->reqs_pending++;
@ -1622,6 +1640,9 @@ static int uv__pipe_write_data(uv_loop_t* loop,
return err;
}
if (write_buf.len > UV__IO_MAX_BYTES)
return ERROR_INVALID_PARAMETER; /* Maps to UV_EINVAL. */
if ((handle->flags &
(UV_HANDLE_BLOCKING_WRITES | UV_HANDLE_NON_OVERLAPPED_PIPE)) ==
(UV_HANDLE_BLOCKING_WRITES | UV_HANDLE_NON_OVERLAPPED_PIPE)) {
@ -1695,21 +1716,18 @@ static int uv__pipe_write_data(uv_loop_t* loop,
write_buf.len,
NULL,
&req->u.io.overlapped);
if (!result && GetLastError() != ERROR_IO_PENDING) {
return GetLastError();
}
if (result) {
/* Request completed immediately. */
req->u.io.queued_bytes = 0;
} else {
/* Request queued by the kernel. */
req->u.io.queued_bytes = write_buf.len;
handle->write_queue_size += req->u.io.queued_bytes;
}
if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
if (UV_SUCCEEDED_WITHOUT_IOCP(result)) {
uv__insert_pending_req(loop, (uv_req_t*) req);
} else if (!UV_SUCCEEDED_WITH_IOCP(result)) {
return GetLastError();
} else if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
if (!RegisterWaitForSingleObject(&req->wait_handle,
req->event_handle, post_completion_write_wait, (void*) req,
INFINITE, WT_EXECUTEINWAITTHREAD | WT_EXECUTEONLYONCE)) {
@ -1968,8 +1986,11 @@ static int uv__pipe_read_data(uv_loop_t* loop,
/* Ensure we read at most the smaller of:
* (a) the length of the user-allocated buffer.
* (b) the maximum data length as specified by the `max_bytes` argument.
* (c) the amount of data that can be read non-blocking
* (c) the amount of data that can be read non-blocking.
* (d) UV__IO_MAX_BYTES.
*/
if (buf.len > UV__IO_MAX_BYTES)
buf.len = UV__IO_MAX_BYTES;
if (max_bytes > buf.len)
max_bytes = buf.len;

View File

@ -836,10 +836,6 @@ void uv__process_proc_exit(uv_loop_t* loop, uv_process_t* handle) {
handle->wait_handle = INVALID_HANDLE_VALUE;
}
/* Set the handle to inactive: no callbacks will be made after the exit
* callback. */
uv__handle_stop(handle);
if (GetExitCodeProcess(handle->process_handle, &status)) {
exit_code = status;
} else {
@ -847,6 +843,15 @@ void uv__process_proc_exit(uv_loop_t* loop, uv_process_t* handle) {
exit_code = uv_translate_sys_error(GetLastError());
}
/* Clean-up the process handle eagerly. */
CloseHandle(handle->process_handle);
handle->process_handle = INVALID_HANDLE_VALUE;
handle->flags |= UV_HANDLE_ESRCH;
/* Set the handle to inactive: no callbacks will be made after the exit
* callback. */
uv__handle_stop(handle);
/* Fire the exit callback. */
if (handle->exit_cb) {
handle->exit_cb(handle, exit_code, handle->exit_signal);
@ -881,7 +886,8 @@ void uv__process_endgame(uv_loop_t* loop, uv_process_t* handle) {
assert(!(handle->flags & UV_HANDLE_CLOSED));
/* Clean-up the process handle. */
CloseHandle(handle->process_handle);
if (handle->process_handle != INVALID_HANDLE_VALUE)
CloseHandle(handle->process_handle);
uv__handle_close(handle);
}
@ -1364,8 +1370,8 @@ static int uv__kill(HANDLE process_handle, int signum) {
int uv_process_kill(uv_process_t* process, int signum) {
int err;
if (process->process_handle == INVALID_HANDLE_VALUE) {
return UV_EINVAL;
if (process->flags & UV_HANDLE_ESRCH) {
return UV_ESRCH;
}
err = uv__kill(process->process_handle, signum);

View File

@ -111,7 +111,9 @@ int uv_read_stop(uv_stream_t* handle) {
}
static int uv__check_before_write(uv_stream_t* handle, unsigned int nbufs) {
static int uv__check_before_write(uv_stream_t* handle,
const uv_buf_t bufs[],
unsigned int nbufs) {
/* We're not beholden to IOV_MAX but limit the buffer count to catch sign
* conversion bugs where a caller passes in a signed negative number that
* then gets converted to a really large unsigned number.
@ -120,6 +122,10 @@ static int uv__check_before_write(uv_stream_t* handle, unsigned int nbufs) {
return UV_EINVAL;
}
if (uv__count_bufs(bufs, nbufs) > UV__IO_MAX_BYTES) {
return UV_EINVAL;
}
if (!(handle->flags & UV_HANDLE_WRITABLE)) {
return UV_EPIPE;
}
@ -136,7 +142,7 @@ int uv_write(uv_write_t* req,
uv_loop_t* loop = handle->loop;
int err;
err = uv__check_before_write(handle, nbufs);
err = uv__check_before_write(handle, bufs, nbufs);
if (err != 0) {
return err;
}
@ -174,7 +180,7 @@ int uv_write2(uv_write_t* req,
return uv_write(req, handle, bufs, nbufs, cb);
}
err = uv__check_before_write(handle, nbufs);
err = uv__check_before_write(handle, bufs, nbufs);
if (err != 0) {
return err;
}
@ -194,7 +200,7 @@ int uv_try_write(uv_stream_t* stream,
unsigned int nbufs) {
int err;
err = uv__check_before_write(stream, nbufs);
err = uv__check_before_write(stream, bufs, nbufs);
if (err != 0) {
return err;
}

View File

@ -163,10 +163,6 @@ static int uv__tcp_set_socket(uv_loop_t* loop,
return WSAGetLastError();
}
/* Make the socket non-inheritable */
if (!SetHandleInformation((HANDLE) socket, HANDLE_FLAG_INHERIT, 0))
return GetLastError();
/* Associate it with the I/O completion port. Use uv_handle_t pointer as
* completion key. */
if (CreateIoCompletionPort((HANDLE)socket,
@ -248,7 +244,8 @@ int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* handle, unsigned int flags) {
SOCKET sock;
DWORD err;
sock = socket(domain, SOCK_STREAM, 0);
sock = WSASocketW(domain, SOCK_STREAM, 0, NULL, 0,
WSA_FLAG_OVERLAPPED | WSA_FLAG_NO_HANDLE_INHERIT);
if (sock == INVALID_SOCKET) {
err = WSAGetLastError();
uv__queue_remove(&handle->handle_queue);
@ -375,7 +372,8 @@ static int uv__tcp_try_bind(uv_tcp_t* handle,
if ((flags & UV_TCP_IPV6ONLY) && addr->sa_family != AF_INET6)
return ERROR_INVALID_PARAMETER;
sock = socket(addr->sa_family, SOCK_STREAM, 0);
sock = WSASocketW(addr->sa_family, SOCK_STREAM, 0, NULL, 0,
WSA_FLAG_OVERLAPPED | WSA_FLAG_NO_HANDLE_INHERIT);
if (sock == INVALID_SOCKET) {
return WSAGetLastError();
}
@ -478,7 +476,8 @@ static void uv__tcp_queue_accept(uv_tcp_t* handle, uv_tcp_accept_t* req) {
}
/* Open a socket for the accepted connection. */
accept_socket = socket(family, SOCK_STREAM, 0);
accept_socket = WSASocketW(family, SOCK_STREAM, 0, NULL, 0,
WSA_FLAG_OVERLAPPED | WSA_FLAG_NO_HANDLE_INHERIT);
if (accept_socket == INVALID_SOCKET) {
SET_REQ_ERROR(req, WSAGetLastError());
uv__insert_pending_req(loop, (uv_req_t*)req);
@ -486,15 +485,6 @@ static void uv__tcp_queue_accept(uv_tcp_t* handle, uv_tcp_accept_t* req) {
return;
}
/* Make the socket non-inheritable */
if (!SetHandleInformation((HANDLE) accept_socket, HANDLE_FLAG_INHERIT, 0)) {
SET_REQ_ERROR(req, GetLastError());
uv__insert_pending_req(loop, (uv_req_t*)req);
handle->reqs_pending++;
closesocket(accept_socket);
return;
}
/* Prepare the overlapped structure. */
memset(&(req->u.io.overlapped), 0, sizeof(req->u.io.overlapped));
if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
@ -1103,6 +1093,8 @@ void uv__process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle,
break;
}
assert(buf.base != NULL);
if (buf.len > UV__IO_MAX_BYTES)
buf.len = UV__IO_MAX_BYTES;
flags = 0;
if (WSARecv(handle->socket,
@ -1347,7 +1339,7 @@ int uv__tcp_xfer_import(uv_tcp_t* tcp,
FROM_PROTOCOL_INFO,
&xfer_info->socket_info,
0,
WSA_FLAG_OVERLAPPED);
WSA_FLAG_OVERLAPPED | WSA_FLAG_NO_HANDLE_INHERIT);
if (socket == INVALID_SOCKET) {
return WSAGetLastError();
@ -1652,8 +1644,6 @@ int uv_socketpair(int type, int protocol, uv_os_sock_t fds[2], int flags0, int f
WSA_FLAG_OVERLAPPED | WSA_FLAG_NO_HANDLE_INHERIT);
if (server == INVALID_SOCKET)
goto wsaerror;
if (!SetHandleInformation((HANDLE) server, HANDLE_FLAG_INHERIT, 0))
goto error;
name.sin_family = AF_INET;
name.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
name.sin_port = 0;
@ -1667,15 +1657,11 @@ int uv_socketpair(int type, int protocol, uv_os_sock_t fds[2], int flags0, int f
client0 = WSASocketW(AF_INET, type, protocol, NULL, 0, client0_flags);
if (client0 == INVALID_SOCKET)
goto wsaerror;
if (!SetHandleInformation((HANDLE) client0, HANDLE_FLAG_INHERIT, 0))
goto error;
if (connect(client0, (SOCKADDR*) &name, sizeof(name)) != 0)
goto wsaerror;
client1 = WSASocketW(AF_INET, type, protocol, NULL, 0, client1_flags);
if (client1 == INVALID_SOCKET)
goto wsaerror;
if (!SetHandleInformation((HANDLE) client1, HANDLE_FLAG_INHERIT, 0))
goto error;
if (!uv__get_acceptex_function(server, &func_acceptex)) {
err = WSAEAFNOSUPPORT;
goto cleanup;
@ -1721,10 +1707,6 @@ int uv_socketpair(int type, int protocol, uv_os_sock_t fds[2], int flags0, int f
err = WSAGetLastError();
goto cleanup;
error:
err = GetLastError();
goto cleanup;
cleanup:
if (server != INVALID_SOCKET)
closesocket(server);

View File

@ -69,11 +69,6 @@ static int uv__udp_set_socket(uv_loop_t* loop, uv_udp_t* handle, SOCKET socket,
return WSAGetLastError();
}
/* Make the socket non-inheritable */
if (!SetHandleInformation((HANDLE)socket, HANDLE_FLAG_INHERIT, 0)) {
return GetLastError();
}
/* Associate it with the I/O completion port. Use uv_handle_t pointer as
* completion key. */
if (CreateIoCompletionPort((HANDLE)socket,
@ -143,7 +138,8 @@ int uv__udp_init_ex(uv_loop_t* loop,
SOCKET sock;
DWORD err;
sock = socket(domain, SOCK_DGRAM, 0);
sock = WSASocketW(domain, SOCK_DGRAM, 0, NULL, 0,
WSA_FLAG_OVERLAPPED | WSA_FLAG_NO_HANDLE_INHERIT);
if (sock == INVALID_SOCKET) {
err = WSAGetLastError();
uv__queue_remove(&handle->handle_queue);
@ -212,7 +208,8 @@ static int uv__udp_maybe_bind(uv_udp_t* handle,
}
if (handle->socket == INVALID_SOCKET) {
SOCKET sock = socket(addr->sa_family, SOCK_DGRAM, 0);
SOCKET sock = WSASocketW(addr->sa_family, SOCK_DGRAM, 0, NULL, 0,
WSA_FLAG_OVERLAPPED | WSA_FLAG_NO_HANDLE_INHERIT);
if (sock == INVALID_SOCKET) {
return WSAGetLastError();
}
@ -1129,9 +1126,6 @@ int uv__udp_try_send(uv_udp_t* handle,
struct sockaddr_storage converted;
int err;
if (nbufs < 1)
return UV_EINVAL;
if (addr != NULL) {
err = uv__convert_to_localhost_if_unspecified(addr, &converted);
if (err)

View File

@ -102,7 +102,8 @@ void uv__winsock_init(void) {
/* Try to detect non-IFS LSPs */
uv_tcp_non_ifs_lsp_ipv4 = 1;
dummy = socket(AF_INET, SOCK_STREAM, IPPROTO_IP);
dummy = WSASocketW(AF_INET, SOCK_STREAM, IPPROTO_IP, NULL, 0,
WSA_FLAG_NO_HANDLE_INHERIT);
if (dummy != INVALID_SOCKET) {
opt_len = (int) sizeof protocol_info;
if (getsockopt(dummy,
@ -118,7 +119,8 @@ void uv__winsock_init(void) {
/* Try to detect IPV6 support and non-IFS LSPs */
uv_tcp_non_ifs_lsp_ipv6 = 1;
dummy = socket(AF_INET6, SOCK_STREAM, IPPROTO_IP);
dummy = WSASocketW(AF_INET6, SOCK_STREAM, IPPROTO_IP, NULL, 0,
WSA_FLAG_NO_HANDLE_INHERIT);
if (dummy != INVALID_SOCKET) {
opt_len = (int) sizeof protocol_info;
if (getsockopt(dummy,

View File

@ -28,7 +28,9 @@
#include "uv.h"
/* Refs: https://github.com/libuv/libuv/issues/4369 */
#if defined(__ANDROID__)
/* Refs: https://github.com/libuv/libuv/issues/5092 */
#if defined(__ANDROID__) && __ANDROID_API__ >= __ANDROID_API_Q__
#define USE_FDSAN
#include <android/fdsan.h>
#endif
@ -149,7 +151,7 @@ void log_tap_result(int test_count,
void enable_fdsan(void) {
/* Refs: https://github.com/libuv/libuv/issues/4369 */
#if defined(__ANDROID__)
#if defined(USE_FDSAN)
android_fdsan_set_error_level(ANDROID_FDSAN_ERROR_LEVEL_WARN_ALWAYS);
#endif
}

View File

@ -519,6 +519,51 @@ TEST_IMPL(fs_event_watch_delete_dir) {
return 0;
}
#ifdef _WIN32
static int fs_event_cb_del_dir_perm_got_enoent;
/* fs_event callback for the delete-dir test: when the watcher reports that
 * the watched directory is gone (UV_ENOENT), record the fact and tear the
 * watcher down. All other events are ignored. */
static void fs_event_cb_del_dir_perm(uv_fs_event_t* handle,
const char* filename,
int events,
int status) {
if (status != UV_ENOENT)
return;
fs_event_cb_del_dir_perm_got_enoent = 1;
uv_close((uv_handle_t*) handle, close_cb);
}
/* Windows-only regression test: deleting a watched directory must surface
 * UV_ENOENT through the fs_event callback (the handle would otherwise keep
 * pointing at a zombie \$Extend\$Deleted\ path). A timer deletes the
 * directory shortly after the watch starts. */
TEST_IMPL(fs_event_watch_delete_dir_win) {
uv_loop_t* loop = uv_default_loop();
int r;
/* Setup: reset the flag and start from a clean, freshly created dir. */
fs_event_cb_del_dir_perm_got_enoent = 0;
fs_event_unlink_files(NULL);
delete_dir("watch_del_dir/");
create_dir("watch_del_dir");
r = uv_fs_event_init(loop, &fs_event);
ASSERT_OK(r);
r = uv_fs_event_start(&fs_event, fs_event_cb_del_dir_perm, "watch_del_dir", 0);
ASSERT_OK(r);
/* Delete the watched directory from a timer so the event loop is running
 * when the deletion happens. */
r = uv_timer_init(loop, &timer);
ASSERT_OK(r);
r = uv_timer_start(&timer, fs_event_del_dir, 100, 0);
ASSERT_OK(r);
uv_run(loop, UV_RUN_DEFAULT);
/* The callback must have seen UV_ENOENT, and both the watcher and the
 * timer must have been closed (2 close callbacks). */
ASSERT_EQ(1, fs_event_cb_del_dir_perm_got_enoent);
ASSERT_EQ(2, close_cb_called);
/* Cleanup */
fs_event_unlink_files(NULL);
MAKE_VALGRIND_HAPPY(loop);
return 0;
}
#endif
TEST_IMPL(fs_event_watch_dir_recursive) {
#if defined(__APPLE__) && defined(__TSAN__)

182
test/test-io-64-safe.c Normal file
View File

@ -0,0 +1,182 @@
/* Copyright libuv contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/* Verify that passing INT32_MAX as a buffer length is rejected with UV_EINVAL
* at the various I/O entry points that enforce UV__IO_MAX_BYTES.
*/
#include "uv.h"
#include "task.h"
#include <fcntl.h>
#include <stdint.h>
#include <sys/stat.h>
#define TEST_FILE "tmp_io_64_safe"
/* uv_udp_send completion callback that must never fire: the oversized send
 * is expected to be rejected synchronously, before any request is queued. */
static void on_udp_send(uv_udp_send_t* req, int status) {
(void) req;
(void) status;
ASSERT(0 && "on_udp_send callback must not be invoked");
}
/* Verify that every I/O entry point that enforces UV__IO_MAX_BYTES
 * (0x7ffff000) rejects oversized requests with UV_EINVAL before doing any
 * work.  Two shapes are exercised at each entry point: a single buffer
 * whose length (INT32_MAX) exceeds the limit, and two buffers whose
 * individual lengths are fine but whose sum overflows the limit.
 *
 * Fix over the previous revision: the synchronous results of uv_fs_close()
 * and uv_fs_unlink() were silently discarded; they are now asserted like
 * every other synchronous fs call in this test, so a failing teardown is
 * caught instead of masked.
 */
TEST_IMPL(io_64_safe) {
  uv_loop_t* loop;
  uv_fs_t open_req;
  uv_fs_t fs_req;
  uv_write_t write_req;
  uv_udp_t udp;
  uv_tcp_t tcp;
  uv_udp_send_t send_req;
  struct sockaddr_in addr;
  uv_buf_t* t2_bufs[1];
  unsigned int t2_nbufs[1];
  struct sockaddr* t2_addrs[1];
  uv_buf_t buf;
  uv_buf_t bufs2[2];
  uv_file fd;
  uv_file in_fd;
  uv_file out_fd;
  loop = uv_default_loop();
  /* A buf whose length just exceeds UV__IO_MAX_BYTES (0x7ffff000).  The
   * base pointer is never dereferenced because the length check must fire
   * first, so NULL is fine.
   */
  buf = uv_buf_init(NULL, INT32_MAX);
  /* Two buffers whose individual sizes are reasonable but whose sum exceeds
   * UV__IO_MAX_BYTES (0x7ffff000). Each is 1 GiB + 1 byte.
   */
  bufs2[0] = uv_buf_init(NULL, 0x40000001u);
  bufs2[1] = uv_buf_init(NULL, 0x40000001u);
  /* ------------------------------------------------------------------ */
  /* uv_fs_write: reject synchronous filesystem write > UV__IO_MAX_BYTES. */
  /* ------------------------------------------------------------------ */
  {
  fd = uv_fs_open(NULL, &open_req, TEST_FILE,
      UV_FS_O_WRONLY | UV_FS_O_CREAT | UV_FS_O_TRUNC, S_IRUSR | S_IWUSR, NULL);
  ASSERT_GE(fd, 0);
  uv_fs_req_cleanup(&open_req);
  ASSERT_EQ(UV_EINVAL, uv_fs_write(NULL, &fs_req, fd, &buf, 1, 0, NULL));
  uv_fs_req_cleanup(&fs_req);
  /* nbufs > 1 where sum > UV__IO_MAX_BYTES */
  ASSERT_EQ(UV_EINVAL, uv_fs_write(NULL, &fs_req, fd, bufs2, 2, 0, NULL));
  uv_fs_req_cleanup(&fs_req);
  ASSERT_OK(uv_fs_close(NULL, &fs_req, fd, NULL));
  uv_fs_req_cleanup(&fs_req);
  }
  /* ------------------------------------------------------------------ */
  /* uv_fs_sendfile: reject len > UV__IO_MAX_BYTES. */
  /* ------------------------------------------------------------------ */
  {
  in_fd = uv_fs_open(NULL, &open_req, TEST_FILE,
      UV_FS_O_RDONLY | UV_FS_O_CREAT, S_IRUSR | S_IWUSR, NULL);
  ASSERT_GE(in_fd, 0);
  uv_fs_req_cleanup(&open_req);
  out_fd = uv_fs_open(NULL, &open_req, TEST_FILE,
      UV_FS_O_WRONLY | UV_FS_O_CREAT | UV_FS_O_APPEND, S_IRUSR | S_IWUSR,
      NULL);
  ASSERT_GE(out_fd, 0);
  uv_fs_req_cleanup(&open_req);
  ASSERT_EQ(UV_EINVAL,
            uv_fs_sendfile(NULL, &fs_req, out_fd, in_fd, 0,
                           (size_t) INT32_MAX, NULL));
  uv_fs_req_cleanup(&fs_req);
  ASSERT_OK(uv_fs_close(NULL, &fs_req, in_fd, NULL));
  uv_fs_req_cleanup(&fs_req);
  ASSERT_OK(uv_fs_close(NULL, &fs_req, out_fd, NULL));
  uv_fs_req_cleanup(&fs_req);
  }
  /* The file was created above, so a failing unlink indicates a real bug. */
  ASSERT_OK(uv_fs_unlink(NULL, &fs_req, TEST_FILE, NULL));
  uv_fs_req_cleanup(&fs_req);
  {
  /* uv_write: reject stream write > UV__IO_MAX_BYTES before queuing. */
  ASSERT_OK(uv_tcp_init(loop, &tcp));
  ASSERT_EQ(UV_EINVAL,
            uv_write(&write_req, (uv_stream_t*) &tcp, &buf, 1, NULL));
  /* nbufs > 1 where sum > UV__IO_MAX_BYTES */
  ASSERT_EQ(UV_EINVAL,
            uv_write(&write_req, (uv_stream_t*) &tcp, bufs2, 2, NULL));
  /* uv_try_write: same check via the synchronous path. */
  ASSERT_EQ(UV_EINVAL, uv_try_write((uv_stream_t*) &tcp, &buf, 1));
  /* nbufs > 1 via try_write */
  ASSERT_EQ(UV_EINVAL, uv_try_write((uv_stream_t*) &tcp, bufs2, 2));
  uv_close((uv_handle_t*) &tcp, NULL);
  }
  {
  ASSERT_OK(uv_udp_init(loop, &udp));
  ASSERT_OK(uv_ip4_addr("127.0.0.1", TEST_PORT, &addr));
  /* uv_udp_try_send: reject UDP send > UV__IO_MAX_BYTES. */
  ASSERT_EQ(UV_EINVAL,
            uv_udp_try_send(&udp, &buf, 1,
                            (const struct sockaddr*) &addr));
  /* nbufs > 1 via try_send */
  ASSERT_EQ(UV_EINVAL,
            uv_udp_try_send(&udp, bufs2, 2,
                            (const struct sockaddr*) &addr));
  /* uv_udp_send (async): reject synchronously before queuing; on_udp_send
   * asserts if a request ever slips through to the callback.
   */
  ASSERT_EQ(UV_EINVAL,
            uv_udp_send(&send_req, &udp, &buf, 1,
                        (const struct sockaddr*) &addr, on_udp_send));
  /* nbufs > 1 via async send */
  ASSERT_EQ(UV_EINVAL,
            uv_udp_send(&send_req, &udp, bufs2, 2,
                        (const struct sockaddr*) &addr, on_udp_send));
  /* uv_udp_try_send2: reject per-batch size > UV__IO_MAX_BYTES. */
  t2_bufs[0] = &buf;
  t2_nbufs[0] = 1;
  t2_addrs[0] = (struct sockaddr*) &addr;
  ASSERT_EQ(UV_EINVAL,
            uv_udp_try_send2(&udp, 1, t2_bufs, t2_nbufs, t2_addrs, 0));
  /* nbufs > 1 per batch via try_send2 */
  t2_bufs[0] = bufs2;
  t2_nbufs[0] = 2;
  ASSERT_EQ(UV_EINVAL,
            uv_udp_try_send2(&udp, 1, t2_bufs, t2_nbufs, t2_addrs, 0));
  uv_close((uv_handle_t*) &udp, NULL);
  }
  /* Drain the pending close callbacks for the tcp and udp handles. */
  uv_run(loop, UV_RUN_DEFAULT);
  MAKE_VALGRIND_HAPPY(loop);
  return 0;
}

View File

@ -181,6 +181,7 @@ TEST_DECLARE (udp_recvmsg_unreachable_error)
TEST_DECLARE (udp_recvmsg_unreachable_error6)
TEST_DECLARE (udp_send_pollerr_no_recv)
TEST_DECLARE (udp_mmsg)
TEST_DECLARE (udp_mmsg_small_buf)
TEST_DECLARE (udp_multicast_join)
TEST_DECLARE (udp_multicast_join6)
TEST_DECLARE (udp_multicast_ttl)
@ -409,6 +410,9 @@ TEST_FS_DECLARE (fs_read_bufs)
TEST_FS_DECLARE (fs_read_file_eof)
TEST_DECLARE (fs_event_watch_dir)
TEST_DECLARE (fs_event_watch_delete_dir)
#ifdef _WIN32
TEST_DECLARE (fs_event_watch_delete_dir_win)
#endif
TEST_DECLARE (fs_event_watch_dir_recursive)
#ifdef _WIN32
TEST_DECLARE (fs_event_watch_dir_short_path)
@ -465,6 +469,7 @@ TEST_FS_DECLARE (fs_invalid_mkdir_name)
TEST_FS_DECLARE (fs_wtf)
#endif
TEST_FS_DECLARE (fs_get_system_error)
TEST_DECLARE (io_64_safe)
TEST_DECLARE (strscpy)
TEST_DECLARE (strtok)
TEST_DECLARE (threadpool_queue_work_simple)
@ -826,6 +831,7 @@ TASK_LIST_START
TEST_ENTRY (udp_options6)
TEST_ENTRY (udp_no_autobind)
TEST_ENTRY (udp_mmsg)
TEST_ENTRY (udp_mmsg_small_buf)
TEST_ENTRY (udp_multicast_interface)
TEST_ENTRY (udp_multicast_interface6)
TEST_ENTRY (udp_multicast_join)
@ -1140,6 +1146,9 @@ TASK_LIST_START
TEST_FS_ENTRY (fs_file_open_append)
TEST_ENTRY (fs_event_watch_dir)
TEST_ENTRY (fs_event_watch_delete_dir)
#ifdef _WIN32
TEST_ENTRY (fs_event_watch_delete_dir_win)
#endif
TEST_ENTRY (fs_event_watch_dir_recursive)
#ifdef _WIN32
TEST_ENTRY (fs_event_watch_dir_short_path)
@ -1196,6 +1205,7 @@ TASK_LIST_START
TEST_FS_ENTRY (fs_get_system_error)
TEST_ENTRY (get_osfhandle_valid_handle)
TEST_ENTRY (open_osfhandle_valid_handle)
TEST_ENTRY (io_64_safe)
TEST_ENTRY (strscpy)
TEST_ENTRY (strtok)
TEST_ENTRY (threadpool_queue_work_simple)

View File

@ -371,11 +371,12 @@ TEST_IMPL(pipe_getsockname_blocking) {
return 0;
}
#ifdef SOCK_MAXADDRLEN
/* Connect callback for the long-socket-path test: the connection is
 * required to succeed, after which the client handle is torn down.
 */
static void long_path_connect_cb(uv_connect_t* req, int status) {
  uv_stream_t* stream;

  ASSERT_OK(status);
  stream = req->handle;
  uv_close((uv_handle_t*) stream, NULL);
}
#endif
TEST_IMPL(pipe_getsockname_long_path) {
#ifndef SOCK_MAXADDRLEN

View File

@ -68,7 +68,7 @@ static void idle_cb(uv_idle_t* idle) {
static void poll_cb(uv_poll_t* handle, int status, int events) {
char buffer[5];
int n;
int fd;
uv_os_fd_t fd;
ASSERT_OK(uv_fileno((uv_handle_t*)handle, &fd));
memset(buffer, 0, 5);

View File

@ -52,11 +52,6 @@ static void read_cb(uv_stream_t* t, ssize_t nread, const uv_buf_t* buf) {
ASSERT_PTR_EQ((uv_tcp_t*) t, &tcp);
ASSERT_EQ(nread, UV_ECONNRESET);
int fd;
ASSERT_OK(uv_fileno((uv_handle_t*) t, &fd));
uv_handle_type type = uv_guess_handle(fd);
ASSERT_EQ(type, UV_TCP);
uv_close((uv_handle_t *) t, close_cb);
free(buf->base);
}

View File

@ -106,6 +106,31 @@ static void recv_cb(uv_udp_t* handle,
}
/* Allocation callback that deliberately hands out a fixed 64-byte buffer,
 * ignoring the suggested size, to provoke the undersized-buffer error path
 * in the recvmmsg-based receive code.
 */
static void small_alloc_cb(uv_handle_t* handle,
                           size_t suggested_size,
                           uv_buf_t* buf) {
  enum { SMALL_BUF_LEN = 64 };

  CHECK_HANDLE(handle);
  alloc_cb_called++;
  buf->len = SMALL_BUF_LEN;
  buf->base = malloc(SMALL_BUF_LEN);
  ASSERT_NOT_NULL(buf->base);
}
/* Receive callback for the undersized-buffer test: when the allocated
 * buffer is too small for the recvmmsg path, nread must be UV_EINVAL.
 * Once the error is observed, both handles are shut down.
 */
static void small_recv_cb(uv_udp_t* handle,
                          ssize_t nread,
                          const uv_buf_t* rcvbuf,
                          const struct sockaddr* addr,
                          unsigned flags) {
  CHECK_HANDLE(handle);
  ASSERT_EQ(UV_EINVAL, nread);

  recv_cb_called++;
  free(rcvbuf->base);

  uv_close((uv_handle_t*) &recver, close_cb);
  uv_close((uv_handle_t*) &sender, close_cb);
}
TEST_IMPL(udp_mmsg) {
struct sockaddr_in addr;
uv_buf_t buf;
@ -148,3 +173,31 @@ TEST_IMPL(udp_mmsg) {
MAKE_VALGRIND_HAPPY(uv_default_loop());
return 0;
}
/* Regression test: when the receive buffer handed out by the alloc
 * callback is too small for the recvmmsg chunking scheme, the receive
 * callback must observe UV_EINVAL rather than a short read.  On platforms
 * where recvmmsg is unavailable the test degenerates to a clean teardown.
 */
TEST_IMPL(udp_mmsg_small_buf) {
  struct sockaddr_in addr;
  uv_loop_t* loop;
  uv_buf_t buf;

  loop = uv_default_loop();
  ASSERT_OK(uv_udp_init_ex(loop, &recver, AF_UNSPEC | UV_UDP_RECVMMSG));

  if (!uv_udp_using_recvmmsg(&recver)) {
    /* Nothing to exercise: just close the handle and drain the loop. */
    uv_close((uv_handle_t*) &recver, close_cb);
    ASSERT_OK(uv_run(loop, UV_RUN_DEFAULT));
    ASSERT_EQ(1, close_cb_called);
    MAKE_VALGRIND_HAPPY(loop);
    return 0;
  }

  ASSERT_OK(uv_ip4_addr("0.0.0.0", TEST_PORT, &addr));
  ASSERT_OK(uv_udp_bind(&recver, (const struct sockaddr*) &addr, 0));
  ASSERT_OK(uv_udp_recv_start(&recver, small_alloc_cb, small_recv_cb));

  ASSERT_OK(uv_ip4_addr("127.0.0.1", TEST_PORT, &addr));
  ASSERT_OK(uv_udp_init(loop, &sender));
  buf = uv_buf_init("PING", 4);
  ASSERT_EQ(4,
            uv_udp_try_send(&sender, &buf, 1, (const struct sockaddr*) &addr));

  ASSERT_OK(uv_run(loop, UV_RUN_DEFAULT));
  ASSERT_EQ(1, recv_cb_called);
  ASSERT_EQ(2, close_cb_called);

  MAKE_VALGRIND_HAPPY(loop);
  return 0;
}