io: make libuv 64-bit safe (#5076)

Because libuv truncates the result of every call to INT32_MAX, it needs
to internally limit each operation to INT32_MAX bytes to be safe to use.
This isn't an API change, since these operations weren't guaranteed to
work, and in fact usually failed in bizarre ways already. This is very
long in coming, since we've had a lot of compiler warnings about this
and several PRs to fix this open for a decade, but the main consumers
that usually fix things didn't care (nodejs is 32-bit and julia patched
this downstream more than a decade ago, though it did run into this
again recently by mistake with sendfile).

Replaces #1501
Fixes #3360
This commit is contained in:
Jameson Nash 2026-03-24 10:32:27 -04:00 committed by GitHub
parent e3a27e0728
commit fa0ac9ec0c
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
15 changed files with 343 additions and 83 deletions

View File

@ -634,6 +634,7 @@ if(LIBUV_BUILD_TESTS)
test/test-socket-buffer-size.c test/test-socket-buffer-size.c
test/test-spawn.c test/test-spawn.c
test/test-stdio-over-pipes.c test/test-stdio-over-pipes.c
test/test-io-64-safe.c
test/test-strscpy.c test/test-strscpy.c
test/test-strtok.c test/test-strtok.c
test/test-tcp-alloc-cb-fail.c test/test-tcp-alloc-cb-fail.c

View File

@ -259,6 +259,7 @@ test_run_tests_SOURCES = test/blackhole-server.c \
test/test-socket-buffer-size.c \ test/test-socket-buffer-size.c \
test/test-spawn.c \ test/test-spawn.c \
test/test-stdio-over-pipes.c \ test/test-stdio-over-pipes.c \
test/test-io-64-safe.c \
test/test-strscpy.c \ test/test-strscpy.c \
test/test-strtok.c \ test/test-strtok.c \
test/test-tcp-alloc-cb-fail.c \ test/test-tcp-alloc-cb-fail.c \

View File

@ -297,6 +297,13 @@ API
`base` and `len` members of the uv_buf_t struct. The user is responsible for `base` and `len` members of the uv_buf_t struct. The user is responsible for
freeing `base` after the uv_buf_t is done. Return struct passed by value. freeing `base` after the uv_buf_t is done. Return struct passed by value.
.. warning:: It is discouraged to set `len` to a large value as that may
result in spurious failures. Specifically, Windows may fail on
writes larger than about 511 MB, and various Unices may fail
on I/O larger than about 2 GB (0x7ffff000 bytes). Instead it is
generally better to split the data into multiple `uv_write`
calls (attach the `write_cb` to the last one).
.. c:function:: char** uv_setup_args(int argc, char** argv) .. c:function:: char** uv_setup_args(int argc, char** argv)
Store the program arguments. Required for getting / setting the process title Store the program arguments. Required for getting / setting the process title

View File

@ -167,7 +167,7 @@ static int uv__fs_close(int fd) {
} }
static ssize_t uv__fs_fsync(uv_fs_t* req) { static int uv__fs_fsync(uv_fs_t* req) {
#if defined(__APPLE__) #if defined(__APPLE__)
/* Apple's fdatasync and fsync explicitly do NOT flush the drive write cache /* Apple's fdatasync and fsync explicitly do NOT flush the drive write cache
* to the drive platters. This is in contrast to Linux's fdatasync and fsync * to the drive platters. This is in contrast to Linux's fdatasync and fsync
@ -191,7 +191,7 @@ static ssize_t uv__fs_fsync(uv_fs_t* req) {
} }
static ssize_t uv__fs_fdatasync(uv_fs_t* req) { static int uv__fs_fdatasync(uv_fs_t* req) {
#if defined(__linux__) || defined(__sun) || defined(__NetBSD__) #if defined(__linux__) || defined(__sun) || defined(__NetBSD__)
return fdatasync(req->file); return fdatasync(req->file);
#elif defined(__APPLE__) #elif defined(__APPLE__)
@ -233,7 +233,7 @@ static struct timespec uv__fs_to_timespec(double time) {
#endif #endif
static ssize_t uv__fs_futime(uv_fs_t* req) { static int uv__fs_futime(uv_fs_t* req) {
#if defined(__APPLE__) \ #if defined(__APPLE__) \
|| defined(_AIX71) \ || defined(_AIX71) \
|| defined(__DragonFly__) \ || defined(__DragonFly__) \
@ -263,7 +263,7 @@ static ssize_t uv__fs_futime(uv_fs_t* req) {
} }
static ssize_t uv__fs_mkdtemp(uv_fs_t* req) { static int uv__fs_mkdtemp(uv_fs_t* req) {
return mkdtemp((char*) req->path) ? 0 : -1; return mkdtemp((char*) req->path) ? 0 : -1;
} }
@ -359,7 +359,7 @@ clobber:
} }
static ssize_t uv__fs_open(uv_fs_t* req) { static int uv__fs_open(uv_fs_t* req) {
#ifdef O_CLOEXEC #ifdef O_CLOEXEC
return open(req->path, req->flags | O_CLOEXEC, req->mode); return open(req->path, req->flags | O_CLOEXEC, req->mode);
#else /* O_CLOEXEC */ #else /* O_CLOEXEC */
@ -388,11 +388,11 @@ static ssize_t uv__fs_open(uv_fs_t* req) {
} }
static ssize_t uv__preadv_or_pwritev_emul(int fd, static int uv__preadv_or_pwritev_emul(int fd,
const struct iovec* bufs, const struct iovec* bufs,
size_t nbufs, size_t nbufs,
off_t off, off_t off,
int is_pread) { int is_pread) {
ssize_t total; ssize_t total;
ssize_t r; ssize_t r;
size_t i; size_t i;
@ -435,18 +435,18 @@ typedef size_t uv__iovcnt;
#endif #endif
static ssize_t uv__preadv_emul(int fd, static int uv__preadv_emul(int fd,
const struct iovec* bufs, const struct iovec* bufs,
uv__iovcnt nbufs, uv__iovcnt nbufs,
off_t off) { off_t off) {
return uv__preadv_or_pwritev_emul(fd, bufs, nbufs, off, /*is_pread*/1); return uv__preadv_or_pwritev_emul(fd, bufs, nbufs, off, /*is_pread*/1);
} }
static ssize_t uv__pwritev_emul(int fd, static int uv__pwritev_emul(int fd,
const struct iovec* bufs, const struct iovec* bufs,
uv__iovcnt nbufs, uv__iovcnt nbufs,
off_t off) { off_t off) {
return uv__preadv_or_pwritev_emul(fd, bufs, nbufs, off, /*is_pread*/0); return uv__preadv_or_pwritev_emul(fd, bufs, nbufs, off, /*is_pread*/0);
} }
@ -454,14 +454,14 @@ static ssize_t uv__pwritev_emul(int fd,
/* The function pointer cache is an uintptr_t because _Atomic void* /* The function pointer cache is an uintptr_t because _Atomic void*
* doesn't work on macos/ios/etc... * doesn't work on macos/ios/etc...
*/ */
static ssize_t uv__preadv_or_pwritev(int fd, static int uv__preadv_or_pwritev(int fd,
const struct iovec* bufs, const struct iovec* bufs,
size_t nbufs, size_t nbufs,
off_t off, off_t off,
_Atomic uintptr_t* cache, _Atomic uintptr_t* cache,
int is_pread) { int is_pread) {
union { union {
ssize_t (*f)(int, const struct iovec*, uv__iovcnt, off_t); int (*f)(int, const struct iovec*, uv__iovcnt, off_t);
void* p; void* p;
} u; } u;
@ -485,7 +485,7 @@ static ssize_t uv__preadv_or_pwritev(int fd,
} }
static ssize_t uv__preadv(int fd, static int uv__preadv(int fd,
const struct iovec* bufs, const struct iovec* bufs,
size_t nbufs, size_t nbufs,
off_t off) { off_t off) {
@ -494,16 +494,16 @@ static ssize_t uv__preadv(int fd,
} }
static ssize_t uv__pwritev(int fd, static int uv__pwritev(int fd,
const struct iovec* bufs, const struct iovec* bufs,
size_t nbufs, size_t nbufs,
off_t off) { off_t off) {
static _Atomic uintptr_t cache; static _Atomic uintptr_t cache;
return uv__preadv_or_pwritev(fd, bufs, nbufs, off, &cache, /*is_pread*/0); return uv__preadv_or_pwritev(fd, bufs, nbufs, off, &cache, /*is_pread*/0);
} }
static ssize_t uv__fs_read(uv_fs_t* req) { static int uv__fs_read(uv_fs_t* req) {
const struct iovec* bufs; const struct iovec* bufs;
unsigned int iovmax; unsigned int iovmax;
size_t nbufs; size_t nbufs;
@ -520,17 +520,35 @@ static ssize_t uv__fs_read(uv_fs_t* req) {
if (nbufs > iovmax) if (nbufs > iovmax)
nbufs = iovmax; nbufs = iovmax;
/* Truncate multi-buf reads to UV__IO_MAX_BYTES total, dropping trailing bufs. */
if (nbufs > 1) {
size_t total;
size_t n;
for (total = 0, n = 0; n < nbufs; n++) {
if (bufs[n].iov_len > UV__IO_MAX_BYTES - total)
break;
total += bufs[n].iov_len;
}
nbufs = n > 0 ? n : 1;
}
r = 0; r = 0;
if (off < 0) { if (off < 0) {
if (nbufs == 1) if (nbufs == 1) {
r = read(fd, bufs->iov_base, bufs->iov_len); r = read(fd, bufs->iov_base,
else if (nbufs > 1) bufs->iov_len > UV__IO_MAX_BYTES ? UV__IO_MAX_BYTES : bufs->iov_len);
} else if (nbufs > 1) {
r = readv(fd, bufs, nbufs); r = readv(fd, bufs, nbufs);
}
} else { } else {
if (nbufs == 1) if (nbufs == 1) {
r = pread(fd, bufs->iov_base, bufs->iov_len, off); r = pread(fd, bufs->iov_base,
else if (nbufs > 1) bufs->iov_len > UV__IO_MAX_BYTES ? UV__IO_MAX_BYTES : bufs->iov_len,
off);
}
else if (nbufs > 1) {
r = uv__preadv(fd, bufs, nbufs, off); r = uv__preadv(fd, bufs, nbufs, off);
}
} }
#ifdef __PASE__ #ifdef __PASE__
@ -567,7 +585,7 @@ static int uv__fs_scandir_sort(const uv__dirent_t** a, const uv__dirent_t** b) {
} }
static ssize_t uv__fs_scandir(uv_fs_t* req) { static int uv__fs_scandir(uv_fs_t* req) {
uv__dirent_t** dents; uv__dirent_t** dents;
int n; int n;
@ -732,7 +750,7 @@ static ssize_t uv__fs_pathmax_size(const char* path) {
return pathmax; return pathmax;
} }
static ssize_t uv__fs_readlink(uv_fs_t* req) { static int uv__fs_readlink(uv_fs_t* req) {
ssize_t maxlen; ssize_t maxlen;
ssize_t len; ssize_t len;
char* buf; char* buf;
@ -791,7 +809,7 @@ static ssize_t uv__fs_readlink(uv_fs_t* req) {
return 0; return 0;
} }
static ssize_t uv__fs_realpath(uv_fs_t* req) { static int uv__fs_realpath(uv_fs_t* req) {
char* buf; char* buf;
char* tmp; char* tmp;
@ -829,7 +847,7 @@ static ssize_t uv__fs_realpath(uv_fs_t* req) {
return 0; return 0;
} }
static ssize_t uv__fs_sendfile_emul(uv_fs_t* req) { static int uv__fs_sendfile_emul(uv_fs_t* req) {
struct pollfd pfd; struct pollfd pfd;
int use_pread; int use_pread;
off_t offset; off_t offset;
@ -1027,7 +1045,7 @@ static ssize_t uv__fs_try_copy_file_range(int in_fd, off_t* off,
#endif /* __linux__ */ #endif /* __linux__ */
static ssize_t uv__fs_sendfile(uv_fs_t* req) { static int uv__fs_sendfile(uv_fs_t* req) {
int in_fd; int in_fd;
int out_fd; int out_fd;
@ -1116,7 +1134,7 @@ static ssize_t uv__fs_sendfile(uv_fs_t* req) {
*/ */
if (r == 0 || ((errno == EAGAIN || errno == EINTR) && len != 0)) { if (r == 0 || ((errno == EAGAIN || errno == EINTR) && len != 0)) {
req->off += len; req->off += len;
return (ssize_t) len; return len;
} }
if (errno == EINVAL || if (errno == EINVAL ||
@ -1139,7 +1157,7 @@ static ssize_t uv__fs_sendfile(uv_fs_t* req) {
} }
static ssize_t uv__fs_utime(uv_fs_t* req) { static int uv__fs_utime(uv_fs_t* req) {
#if defined(__APPLE__) \ #if defined(__APPLE__) \
|| defined(_AIX71) \ || defined(_AIX71) \
|| defined(__DragonFly__) \ || defined(__DragonFly__) \
@ -1174,7 +1192,7 @@ static ssize_t uv__fs_utime(uv_fs_t* req) {
} }
static ssize_t uv__fs_lutime(uv_fs_t* req) { static int uv__fs_lutime(uv_fs_t* req) {
#if defined(__APPLE__) \ #if defined(__APPLE__) \
|| defined(_AIX71) \ || defined(_AIX71) \
|| defined(__DragonFly__) \ || defined(__DragonFly__) \
@ -1196,7 +1214,7 @@ static ssize_t uv__fs_lutime(uv_fs_t* req) {
} }
static ssize_t uv__fs_write(uv_fs_t* req) { static int uv__fs_write(uv_fs_t* req) {
const struct iovec* bufs; const struct iovec* bufs;
size_t nbufs; size_t nbufs;
ssize_t r; ssize_t r;
@ -1225,7 +1243,7 @@ static ssize_t uv__fs_write(uv_fs_t* req) {
} }
static ssize_t uv__fs_copyfile(uv_fs_t* req) { static int uv__fs_copyfile(uv_fs_t* req) {
uv_fs_t fs_req; uv_fs_t fs_req;
uv_file srcfd; uv_file srcfd;
uv_file dstfd; uv_file dstfd;
@ -1633,7 +1651,7 @@ static size_t uv__fs_buf_offset(uv_buf_t* bufs, size_t size) {
return offset; return offset;
} }
static ssize_t uv__fs_write_all(uv_fs_t* req) { static int uv__fs_write_all(uv_fs_t* req) {
unsigned int iovmax; unsigned int iovmax;
unsigned int nbufs; unsigned int nbufs;
uv_buf_t* bufs; uv_buf_t* bufs;
@ -1682,7 +1700,7 @@ static ssize_t uv__fs_write_all(uv_fs_t* req) {
static void uv__fs_work(struct uv__work* w) { static void uv__fs_work(struct uv__work* w) {
int retry_on_eintr; int retry_on_eintr;
uv_fs_t* req; uv_fs_t* req;
ssize_t r; int r;
req = container_of(w, uv_fs_t, work_req); req = container_of(w, uv_fs_t, work_req);
retry_on_eintr = !(req->fs_type == UV_FS_CLOSE || retry_on_eintr = !(req->fs_type == UV_FS_CLOSE ||
@ -2150,6 +2168,8 @@ int uv_fs_sendfile(uv_loop_t* loop,
req->flags = in_fd; /* hack */ req->flags = in_fd; /* hack */
req->file = out_fd; req->file = out_fd;
req->off = off; req->off = off;
if (len > UV__IO_MAX_BYTES)
return UV_EINVAL;
req->bufsml[0].len = len; req->bufsml[0].len = len;
POST; POST;
} }
@ -2217,6 +2237,9 @@ int uv_fs_write(uv_loop_t* loop,
if (bufs == NULL || nbufs == 0) if (bufs == NULL || nbufs == 0)
return UV_EINVAL; return UV_EINVAL;
if (uv__count_bufs(bufs, nbufs) > UV__IO_MAX_BYTES)
return UV_EINVAL;
req->file = file; req->file = file;
req->nbufs = nbufs; req->nbufs = nbufs;

View File

@ -1058,11 +1058,14 @@ static void uv__read(uv_stream_t* stream) {
if (!is_ipc) { if (!is_ipc) {
do { do {
nread = read(uv__stream_fd(stream), buf.base, buf.len); nread = read(uv__stream_fd(stream),
} buf.base,
while (nread < 0 && errno == EINTR); buf.len > UV__IO_MAX_BYTES ? UV__IO_MAX_BYTES : buf.len);
} while (nread < 0 && errno == EINTR);
} else { } else {
/* ipc uses recvmsg */ /* ipc uses recvmsg */
if (buf.len > UV__IO_MAX_BYTES)
buf.len = UV__IO_MAX_BYTES;
msg.msg_flags = 0; msg.msg_flags = 0;
msg.msg_iov = (struct iovec*) &buf; msg.msg_iov = (struct iovec*) &buf;
msg.msg_iovlen = 1; msg.msg_iovlen = 1;
@ -1074,8 +1077,7 @@ static void uv__read(uv_stream_t* stream) {
do { do {
nread = uv__recvmsg(uv__stream_fd(stream), &msg, 0); nread = uv__recvmsg(uv__stream_fd(stream), &msg, 0);
} } while (nread < 0 && errno == EINTR);
while (nread < 0 && errno == EINTR);
} }
if (nread < 0) { if (nread < 0) {
@ -1294,6 +1296,7 @@ static void uv__stream_connect(uv_stream_t* stream) {
static int uv__check_before_write(uv_stream_t* stream, static int uv__check_before_write(uv_stream_t* stream,
const uv_buf_t bufs[],
unsigned int nbufs, unsigned int nbufs,
uv_stream_t* send_handle) { uv_stream_t* send_handle) {
assert((stream->type == UV_TCP || assert((stream->type == UV_TCP ||
@ -1308,6 +1311,13 @@ static int uv__check_before_write(uv_stream_t* stream,
if (nbufs < 1 || nbufs > 1024*1024) if (nbufs < 1 || nbufs > 1024*1024)
return UV_EINVAL; return UV_EINVAL;
/* Reject writes above UV__IO_MAX_BYTES to be consistent with EINVAL on platforms
* such as macOS that fail when the total size of the iov exceeds 2GB,
* and catch/prevent sign-extension bugs.
*/
if (uv__count_bufs(bufs, nbufs) > UV__IO_MAX_BYTES)
return UV_EINVAL;
if (uv__stream_fd(stream) < 0) if (uv__stream_fd(stream) < 0)
return UV_EBADF; return UV_EBADF;
@ -1346,7 +1356,7 @@ int uv_write2(uv_write_t* req,
int empty_queue; int empty_queue;
int err; int err;
err = uv__check_before_write(stream, nbufs, send_handle); err = uv__check_before_write(stream, bufs, nbufs, send_handle);
if (err < 0) if (err < 0)
return err; return err;
@ -1435,7 +1445,7 @@ int uv_try_write2(uv_stream_t* stream,
if (stream->connect_req != NULL || stream->write_queue_size != 0) if (stream->connect_req != NULL || stream->write_queue_size != 0)
return UV_EAGAIN; return UV_EAGAIN;
err = uv__check_before_write(stream, nbufs, send_handle); err = uv__check_before_write(stream, bufs, nbufs, send_handle);
if (err < 0) if (err < 0)
return err; return err;

View File

@ -730,9 +730,6 @@ int uv__udp_try_send(uv_udp_t* handle,
unsigned int addrlen) { unsigned int addrlen) {
int err; int err;
if (nbufs < 1)
return UV_EINVAL;
/* already sending a message */ /* already sending a message */
if (handle->send_queue_count != 0) if (handle->send_queue_count != 0)
return UV_EAGAIN; return UV_EAGAIN;

View File

@ -453,7 +453,10 @@ int uv__udp_is_connected(uv_udp_t* handle) {
} }
int uv__udp_check_before_send(uv_udp_t* handle, const struct sockaddr* addr) { int uv__udp_check_before_send(uv_udp_t* handle,
const uv_buf_t bufs[],
unsigned int nbufs,
const struct sockaddr* addr) {
unsigned int addrlen; unsigned int addrlen;
if (handle->type != UV_UDP) if (handle->type != UV_UDP)
@ -480,6 +483,12 @@ int uv__udp_check_before_send(uv_udp_t* handle, const struct sockaddr* addr) {
addrlen = 0; addrlen = 0;
} }
if (nbufs < 1 || nbufs > 1024 * 1024)
return UV_EINVAL;
if (uv__count_bufs(bufs, nbufs) > UV__IO_MAX_BYTES)
return UV_EINVAL;
return addrlen; return addrlen;
} }
@ -492,10 +501,7 @@ int uv_udp_send(uv_udp_send_t* req,
uv_udp_send_cb send_cb) { uv_udp_send_cb send_cb) {
int addrlen; int addrlen;
if (nbufs < 1 || nbufs > 1024 * 1024) addrlen = uv__udp_check_before_send(handle, bufs, nbufs, addr);
return UV_EINVAL;
addrlen = uv__udp_check_before_send(handle, addr);
if (addrlen < 0) if (addrlen < 0)
return addrlen; return addrlen;
@ -509,10 +515,7 @@ int uv_udp_try_send(uv_udp_t* handle,
const struct sockaddr* addr) { const struct sockaddr* addr) {
int addrlen; int addrlen;
if (nbufs < 1 || nbufs > 1024 * 1024) addrlen = uv__udp_check_before_send(handle, bufs, nbufs, addr);
return UV_EINVAL;
addrlen = uv__udp_check_before_send(handle, addr);
if (addrlen < 0) if (addrlen < 0)
return addrlen; return addrlen;
@ -527,6 +530,7 @@ int uv_udp_try_send2(uv_udp_t* handle,
struct sockaddr* addrs[/*count*/], struct sockaddr* addrs[/*count*/],
unsigned int flags) { unsigned int flags) {
unsigned int i; unsigned int i;
int addrlen;
if (count < 1) if (count < 1)
return UV_EINVAL; return UV_EINVAL;
@ -534,9 +538,11 @@ int uv_udp_try_send2(uv_udp_t* handle,
if (flags != 0) if (flags != 0)
return UV_EINVAL; return UV_EINVAL;
for (i = 0; i < count; i++) for (i = 0; i < count; i++) {
if (nbufs[i] < 1 || nbufs[i] > 1024 * 1024) addrlen = uv__udp_check_before_send(handle, bufs[i], nbufs[i], addrs[i]);
return UV_EINVAL; if (addrlen < 0)
return addrlen;
}
if (handle->send_queue_count > 0) if (handle->send_queue_count > 0)
return UV_EAGAIN; return UV_EAGAIN;
@ -663,8 +669,11 @@ size_t uv__count_bufs(const uv_buf_t bufs[], unsigned int nbufs) {
size_t bytes; size_t bytes;
bytes = 0; bytes = 0;
for (i = 0; i < nbufs; i++) for (i = 0; i < nbufs; i++) {
bytes += (size_t) bufs[i].len; if (bufs[i].len > (size_t) INT32_MAX - bytes)
return INT32_MAX;
bytes += bufs[i].len;
}
return bytes; return bytes;
} }

View File

@ -227,6 +227,12 @@ void uv__work_done(uv_async_t* handle);
size_t uv__count_bufs(const uv_buf_t bufs[], unsigned int nbufs); size_t uv__count_bufs(const uv_buf_t bufs[], unsigned int nbufs);
/* On some platforms, notably macOS, attempting a read or write > 2GB returns
* an EINVAL. On Linux, IO syscalls will transfer at most this many bytes.
* Use this limit everywhere to avoid platform-specific failures.
*/
#define UV__IO_MAX_BYTES 0x7ffff000
int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value); int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value);
void uv__fs_scandir_cleanup(uv_fs_t* req); void uv__fs_scandir_cleanup(uv_fs_t* req);

View File

@ -889,6 +889,7 @@ void fs__read(uv_fs_t* req) {
bytes = 0; bytes = 0;
do { do {
DWORD incremental_bytes; DWORD incremental_bytes;
DWORD to_read;
if (offset != -1) { if (offset != -1) {
offset_.QuadPart = offset + bytes; offset_.QuadPart = offset + bytes;
@ -896,9 +897,12 @@ void fs__read(uv_fs_t* req) {
overlapped.OffsetHigh = offset_.HighPart; overlapped.OffsetHigh = offset_.HighPart;
} }
to_read = req->fs.info.bufs[index].len;
if (to_read > UV__IO_MAX_BYTES)
to_read = UV__IO_MAX_BYTES;
result = ReadFile(handle, result = ReadFile(handle,
req->fs.info.bufs[index].base, req->fs.info.bufs[index].base,
req->fs.info.bufs[index].len, to_read,
&incremental_bytes, &incremental_bytes,
overlapped_ptr); overlapped_ptr);
bytes += incremental_bytes; bytes += incremental_bytes;
@ -1103,7 +1107,7 @@ void fs__write(uv_fs_t* req) {
result = WriteFile(handle, result = WriteFile(handle,
req->fs.info.bufs[index].base, req->fs.info.bufs[index].base,
req->fs.info.bufs[index].len, (DWORD) req->fs.info.bufs[index].len,
&incremental_bytes, &incremental_bytes,
overlapped_ptr); overlapped_ptr);
bytes += incremental_bytes; bytes += incremental_bytes;
@ -3329,6 +3333,11 @@ int uv_fs_write(uv_loop_t* loop,
return UV_EINVAL; return UV_EINVAL;
} }
if (uv__count_bufs(bufs, nbufs) > UV__IO_MAX_BYTES) {
SET_REQ_UV_ERROR(req, UV_EINVAL, ERROR_INVALID_PARAMETER);
return UV_EINVAL;
}
req->file.fd = fd; req->file.fd = fd;
req->fs.info.nbufs = nbufs; req->fs.info.nbufs = nbufs;
@ -3698,6 +3707,8 @@ int uv_fs_sendfile(uv_loop_t* loop, uv_fs_t* req, uv_file fd_out,
req->file.fd = fd_in; req->file.fd = fd_in;
req->fs.info.fd_out = fd_out; req->fs.info.fd_out = fd_out;
req->fs.info.offset = in_offset; req->fs.info.offset = in_offset;
if (length > UV__IO_MAX_BYTES)
return UV_EINVAL;
req->fs.info.bufsml[0].len = length; req->fs.info.bufsml[0].len = length;
POST; POST;
} }

View File

@ -1622,6 +1622,9 @@ static int uv__pipe_write_data(uv_loop_t* loop,
return err; return err;
} }
if (write_buf.len > UV__IO_MAX_BYTES)
return ERROR_INVALID_PARAMETER; /* Maps to UV_EINVAL. */
if ((handle->flags & if ((handle->flags &
(UV_HANDLE_BLOCKING_WRITES | UV_HANDLE_NON_OVERLAPPED_PIPE)) == (UV_HANDLE_BLOCKING_WRITES | UV_HANDLE_NON_OVERLAPPED_PIPE)) ==
(UV_HANDLE_BLOCKING_WRITES | UV_HANDLE_NON_OVERLAPPED_PIPE)) { (UV_HANDLE_BLOCKING_WRITES | UV_HANDLE_NON_OVERLAPPED_PIPE)) {
@ -1968,8 +1971,11 @@ static int uv__pipe_read_data(uv_loop_t* loop,
/* Ensure we read at most the smaller of: /* Ensure we read at most the smaller of:
* (a) the length of the user-allocated buffer. * (a) the length of the user-allocated buffer.
* (b) the maximum data length as specified by the `max_bytes` argument. * (b) the maximum data length as specified by the `max_bytes` argument.
* (c) the amount of data that can be read non-blocking * (c) the amount of data that can be read non-blocking.
* (d) UV__IO_MAX_BYTES.
*/ */
if (buf.len > UV__IO_MAX_BYTES)
buf.len = UV__IO_MAX_BYTES;
if (max_bytes > buf.len) if (max_bytes > buf.len)
max_bytes = buf.len; max_bytes = buf.len;

View File

@ -111,7 +111,9 @@ int uv_read_stop(uv_stream_t* handle) {
} }
static int uv__check_before_write(uv_stream_t* handle, unsigned int nbufs) { static int uv__check_before_write(uv_stream_t* handle,
const uv_buf_t bufs[],
unsigned int nbufs) {
/* We're not beholden to IOV_MAX but limit the buffer count to catch sign /* We're not beholden to IOV_MAX but limit the buffer count to catch sign
* conversion bugs where a caller passes in a signed negative number that * conversion bugs where a caller passes in a signed negative number that
* then gets converted to a really large unsigned number. * then gets converted to a really large unsigned number.
@ -120,6 +122,10 @@ static int uv__check_before_write(uv_stream_t* handle, unsigned int nbufs) {
return UV_EINVAL; return UV_EINVAL;
} }
if (uv__count_bufs(bufs, nbufs) > UV__IO_MAX_BYTES) {
return UV_EINVAL;
}
if (!(handle->flags & UV_HANDLE_WRITABLE)) { if (!(handle->flags & UV_HANDLE_WRITABLE)) {
return UV_EPIPE; return UV_EPIPE;
} }
@ -136,7 +142,7 @@ int uv_write(uv_write_t* req,
uv_loop_t* loop = handle->loop; uv_loop_t* loop = handle->loop;
int err; int err;
err = uv__check_before_write(handle, nbufs); err = uv__check_before_write(handle, bufs, nbufs);
if (err != 0) { if (err != 0) {
return err; return err;
} }
@ -174,7 +180,7 @@ int uv_write2(uv_write_t* req,
return uv_write(req, handle, bufs, nbufs, cb); return uv_write(req, handle, bufs, nbufs, cb);
} }
err = uv__check_before_write(handle, nbufs); err = uv__check_before_write(handle, bufs, nbufs);
if (err != 0) { if (err != 0) {
return err; return err;
} }
@ -194,7 +200,7 @@ int uv_try_write(uv_stream_t* stream,
unsigned int nbufs) { unsigned int nbufs) {
int err; int err;
err = uv__check_before_write(stream, nbufs); err = uv__check_before_write(stream, bufs, nbufs);
if (err != 0) { if (err != 0) {
return err; return err;
} }

View File

@ -1103,6 +1103,8 @@ void uv__process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle,
break; break;
} }
assert(buf.base != NULL); assert(buf.base != NULL);
if (buf.len > UV__IO_MAX_BYTES)
buf.len = UV__IO_MAX_BYTES;
flags = 0; flags = 0;
if (WSARecv(handle->socket, if (WSARecv(handle->socket,

View File

@ -1129,9 +1129,6 @@ int uv__udp_try_send(uv_udp_t* handle,
struct sockaddr_storage converted; struct sockaddr_storage converted;
int err; int err;
if (nbufs < 1)
return UV_EINVAL;
if (addr != NULL) { if (addr != NULL) {
err = uv__convert_to_localhost_if_unspecified(addr, &converted); err = uv__convert_to_localhost_if_unspecified(addr, &converted);
if (err) if (err)

182
test/test-io-64-safe.c Normal file
View File

@ -0,0 +1,182 @@
/* Copyright libuv contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/* Verify that passing INT32_MAX as a buffer length is rejected with UV_EINVAL
* at the various I/O entry points that enforce UV__IO_MAX_BYTES.
*/
#include "uv.h"
#include "task.h"
#include <fcntl.h>
#include <stdint.h>
#include <sys/stat.h>
#define TEST_FILE "tmp_io_64_safe"
static void on_udp_send(uv_udp_send_t* req, int status) {
  (void) req;
  (void) status;
  /* The oversized send must be rejected synchronously with UV_EINVAL, so
   * libuv should never invoke this completion callback. */
  ASSERT(0 && "on_udp_send callback must not be invoked");
}
/* End-to-end check that every UV__IO_MAX_BYTES (0x7ffff000) guard added for
 * 64-bit safety rejects oversized buffers with UV_EINVAL synchronously,
 * before any I/O is attempted or queued: uv_fs_write, uv_fs_sendfile,
 * uv_write, uv_try_write, uv_udp_send, uv_udp_try_send and uv_udp_try_send2.
 * Buffer bases are NULL on purpose: the size checks must fire before the
 * memory is ever touched. */
TEST_IMPL(io_64_safe) {
  uv_loop_t* loop;
  uv_fs_t open_req;
  uv_fs_t fs_req;
  uv_write_t write_req;
  uv_udp_t udp;
  uv_tcp_t tcp;
  uv_udp_send_t send_req;
  struct sockaddr_in addr;
  uv_buf_t* t2_bufs[1];
  unsigned int t2_nbufs[1];
  struct sockaddr* t2_addrs[1];
  uv_buf_t buf;
  uv_buf_t bufs2[2];
  uv_file fd;
  uv_file in_fd;
  uv_file out_fd;
  loop = uv_default_loop();
  /* A buf whose length just exceeds UV__IO_MAX_BYTES (0x7ffff000). */
  buf = uv_buf_init(NULL, INT32_MAX);
  /* Two buffers whose individual sizes are reasonable but whose sum exceeds
   * UV__IO_MAX_BYTES (0x7ffff000). Each is 1 GiB + 1 byte.
   */
  bufs2[0] = uv_buf_init(NULL, 0x40000001u);
  bufs2[1] = uv_buf_init(NULL, 0x40000001u);
  /* ------------------------------------------------------------------ */
  /* uv_fs_write: reject synchronous filesystem write > UV__IO_MAX_BYTES. */
  /* ------------------------------------------------------------------ */
  {
    fd = uv_fs_open(NULL, &open_req, TEST_FILE,
        UV_FS_O_WRONLY | UV_FS_O_CREAT | UV_FS_O_TRUNC, S_IRUSR | S_IWUSR, NULL);
    ASSERT_GE(fd, 0);
    uv_fs_req_cleanup(&open_req);
    /* Single buffer whose own length exceeds the limit. */
    ASSERT_EQ(UV_EINVAL, uv_fs_write(NULL, &fs_req, fd, &buf, 1, 0, NULL));
    uv_fs_req_cleanup(&fs_req);
    /* nbufs > 1 where sum > UV__IO_MAX_BYTES */
    ASSERT_EQ(UV_EINVAL, uv_fs_write(NULL, &fs_req, fd, bufs2, 2, 0, NULL));
    uv_fs_req_cleanup(&fs_req);
    uv_fs_close(NULL, &fs_req, fd, NULL);
    uv_fs_req_cleanup(&fs_req);
  }
  /* ------------------------------------------------------------------ */
  /* uv_fs_sendfile: reject len > UV__IO_MAX_BYTES. */
  /* ------------------------------------------------------------------ */
  {
    in_fd = uv_fs_open(NULL, &open_req, TEST_FILE,
        UV_FS_O_RDONLY | UV_FS_O_CREAT, S_IRUSR | S_IWUSR, NULL);
    ASSERT_GE(in_fd, 0);
    uv_fs_req_cleanup(&open_req);
    out_fd = uv_fs_open(NULL, &open_req, TEST_FILE,
        UV_FS_O_WRONLY | UV_FS_O_CREAT | UV_FS_O_APPEND, S_IRUSR | S_IWUSR,
        NULL);
    ASSERT_GE(out_fd, 0);
    uv_fs_req_cleanup(&open_req);
    /* INT32_MAX > UV__IO_MAX_BYTES, so the length check must trip. */
    ASSERT_EQ(UV_EINVAL,
              uv_fs_sendfile(NULL, &fs_req, out_fd, in_fd, 0,
                             (size_t) INT32_MAX, NULL));
    uv_fs_req_cleanup(&fs_req);
    uv_fs_close(NULL, &fs_req, in_fd, NULL);
    uv_fs_req_cleanup(&fs_req);
    uv_fs_close(NULL, &fs_req, out_fd, NULL);
    uv_fs_req_cleanup(&fs_req);
  }
  uv_fs_unlink(NULL, &fs_req, TEST_FILE, NULL);
  uv_fs_req_cleanup(&fs_req);
  {
    /* uv_write: reject stream write > UV__IO_MAX_BYTES before queuing. */
    /* NOTE(review): the handle is never connected — the size check is
     * expected to fire before any fd/connect state is consulted. */
    ASSERT_OK(uv_tcp_init(loop, &tcp));
    ASSERT_EQ(UV_EINVAL,
              uv_write(&write_req, (uv_stream_t*) &tcp, &buf, 1, NULL));
    /* nbufs > 1 where sum > UV__IO_MAX_BYTES */
    ASSERT_EQ(UV_EINVAL,
              uv_write(&write_req, (uv_stream_t*) &tcp, bufs2, 2, NULL));
    /* uv_try_write: same check via the synchronous path. */
    ASSERT_EQ(UV_EINVAL, uv_try_write((uv_stream_t*) &tcp, &buf, 1));
    /* nbufs > 1 via try_write */
    ASSERT_EQ(UV_EINVAL, uv_try_write((uv_stream_t*) &tcp, bufs2, 2));
    uv_close((uv_handle_t*) &tcp, NULL);
  }
  {
    ASSERT_OK(uv_udp_init(loop, &udp));
    ASSERT_OK(uv_ip4_addr("127.0.0.1", TEST_PORT, &addr));
    /* uv_udp_try_send: reject UDP send > UV__IO_MAX_BYTES. */
    ASSERT_EQ(UV_EINVAL,
              uv_udp_try_send(&udp, &buf, 1,
                              (const struct sockaddr*) &addr));
    /* nbufs > 1 via try_send */
    ASSERT_EQ(UV_EINVAL,
              uv_udp_try_send(&udp, bufs2, 2,
                              (const struct sockaddr*) &addr));
    /* uv_udp_send (async): reject synchronously before queuing.
     * on_udp_send asserts if it is ever invoked. */
    ASSERT_EQ(UV_EINVAL,
              uv_udp_send(&send_req, &udp, &buf, 1,
                          (const struct sockaddr*) &addr, on_udp_send));
    /* nbufs > 1 via async send */
    ASSERT_EQ(UV_EINVAL,
              uv_udp_send(&send_req, &udp, bufs2, 2,
                          (const struct sockaddr*) &addr, on_udp_send));
    /* uv_udp_try_send2: reject per-batch size > UV__IO_MAX_BYTES. */
    t2_bufs[0] = &buf;
    t2_nbufs[0] = 1;
    t2_addrs[0] = (struct sockaddr*) &addr;
    ASSERT_EQ(UV_EINVAL,
              uv_udp_try_send2(&udp, 1, t2_bufs, t2_nbufs, t2_addrs, 0));
    /* nbufs > 1 per batch via try_send2 */
    t2_bufs[0] = bufs2;
    t2_nbufs[0] = 2;
    ASSERT_EQ(UV_EINVAL,
              uv_udp_try_send2(&udp, 1, t2_bufs, t2_nbufs, t2_addrs, 0));
    uv_close((uv_handle_t*) &udp, NULL);
  }
  /* Drain the pending close callbacks for the tcp and udp handles. */
  uv_run(loop, UV_RUN_DEFAULT);
  MAKE_VALGRIND_HAPPY(loop);
  return 0;
}

View File

@ -466,6 +466,7 @@ TEST_FS_DECLARE (fs_invalid_mkdir_name)
TEST_FS_DECLARE (fs_wtf) TEST_FS_DECLARE (fs_wtf)
#endif #endif
TEST_FS_DECLARE (fs_get_system_error) TEST_FS_DECLARE (fs_get_system_error)
TEST_DECLARE (io_64_safe)
TEST_DECLARE (strscpy) TEST_DECLARE (strscpy)
TEST_DECLARE (strtok) TEST_DECLARE (strtok)
TEST_DECLARE (threadpool_queue_work_simple) TEST_DECLARE (threadpool_queue_work_simple)
@ -1198,6 +1199,7 @@ TASK_LIST_START
TEST_FS_ENTRY (fs_get_system_error) TEST_FS_ENTRY (fs_get_system_error)
TEST_ENTRY (get_osfhandle_valid_handle) TEST_ENTRY (get_osfhandle_valid_handle)
TEST_ENTRY (open_osfhandle_valid_handle) TEST_ENTRY (open_osfhandle_valid_handle)
TEST_ENTRY (io_64_safe)
TEST_ENTRY (strscpy) TEST_ENTRY (strscpy)
TEST_ENTRY (strtok) TEST_ENTRY (strtok)
TEST_ENTRY (threadpool_queue_work_simple) TEST_ENTRY (threadpool_queue_work_simple)