io: make libuv 64-bit safe
Because libuv truncates the result of every call to INT32_MAX, it needs to internally limit operations to INT32_MAX for libuv to be safe to use. This isn't an API change, since these operations weren't guaranteed to work, and in fact usually failed in bizarre ways already. This fix is very long in coming: we have had compiler warnings about this for years, and several PRs to fix it have been open for a decade, but the main consumers that usually fix things didn't care (nodejs effectively operates 32-bit here, and julia patched this downstream more than a decade ago, though it recently ran into the problem again by mistake with sendfile). Replaces #1501. Fixes #3360.
This commit is contained in:
parent
ea4667b68e
commit
533a669ac6
@ -634,6 +634,7 @@ if(LIBUV_BUILD_TESTS)
|
||||
test/test-socket-buffer-size.c
|
||||
test/test-spawn.c
|
||||
test/test-stdio-over-pipes.c
|
||||
test/test-io-64-safe.c
|
||||
test/test-strscpy.c
|
||||
test/test-strtok.c
|
||||
test/test-tcp-alloc-cb-fail.c
|
||||
|
||||
@ -259,6 +259,7 @@ test_run_tests_SOURCES = test/blackhole-server.c \
|
||||
test/test-socket-buffer-size.c \
|
||||
test/test-spawn.c \
|
||||
test/test-stdio-over-pipes.c \
|
||||
test/test-io-64-safe.c \
|
||||
test/test-strscpy.c \
|
||||
test/test-strtok.c \
|
||||
test/test-tcp-alloc-cb-fail.c \
|
||||
|
||||
@ -297,6 +297,13 @@ API
|
||||
`base` and `len` members of the uv_buf_t struct. The user is responsible for
|
||||
freeing `base` after the uv_buf_t is done. Return struct passed by value.
|
||||
|
||||
.. warning:: It is discouraged to set `len` to a large value as that may
|
||||
result in spurious failures. Specifically, Windows may fail on
|
||||
writes larger than about 511 MB, and various Unicies may fail
|
||||
on I/O larger than about 2 GB (0x7ffff000 bytes). Instead it is
|
||||
generally better to split the data into multiple `uv_write`
|
||||
calls (attach the `write_cb` to the last one).
|
||||
|
||||
.. c:function:: char** uv_setup_args(int argc, char** argv)
|
||||
|
||||
Store the program arguments. Required for getting / setting the process title
|
||||
|
||||
111
src/unix/fs.c
111
src/unix/fs.c
@ -167,7 +167,7 @@ static int uv__fs_close(int fd) {
|
||||
}
|
||||
|
||||
|
||||
static ssize_t uv__fs_fsync(uv_fs_t* req) {
|
||||
static int uv__fs_fsync(uv_fs_t* req) {
|
||||
#if defined(__APPLE__)
|
||||
/* Apple's fdatasync and fsync explicitly do NOT flush the drive write cache
|
||||
* to the drive platters. This is in contrast to Linux's fdatasync and fsync
|
||||
@ -191,7 +191,7 @@ static ssize_t uv__fs_fsync(uv_fs_t* req) {
|
||||
}
|
||||
|
||||
|
||||
static ssize_t uv__fs_fdatasync(uv_fs_t* req) {
|
||||
static int uv__fs_fdatasync(uv_fs_t* req) {
|
||||
#if defined(__linux__) || defined(__sun) || defined(__NetBSD__)
|
||||
return fdatasync(req->file);
|
||||
#elif defined(__APPLE__)
|
||||
@ -233,7 +233,7 @@ static struct timespec uv__fs_to_timespec(double time) {
|
||||
#endif
|
||||
|
||||
|
||||
static ssize_t uv__fs_futime(uv_fs_t* req) {
|
||||
static int uv__fs_futime(uv_fs_t* req) {
|
||||
#if defined(__APPLE__) \
|
||||
|| defined(_AIX71) \
|
||||
|| defined(__DragonFly__) \
|
||||
@ -263,7 +263,7 @@ static ssize_t uv__fs_futime(uv_fs_t* req) {
|
||||
}
|
||||
|
||||
|
||||
static ssize_t uv__fs_mkdtemp(uv_fs_t* req) {
|
||||
static int uv__fs_mkdtemp(uv_fs_t* req) {
|
||||
return mkdtemp((char*) req->path) ? 0 : -1;
|
||||
}
|
||||
|
||||
@ -359,7 +359,7 @@ clobber:
|
||||
}
|
||||
|
||||
|
||||
static ssize_t uv__fs_open(uv_fs_t* req) {
|
||||
static int uv__fs_open(uv_fs_t* req) {
|
||||
#ifdef O_CLOEXEC
|
||||
return open(req->path, req->flags | O_CLOEXEC, req->mode);
|
||||
#else /* O_CLOEXEC */
|
||||
@ -388,11 +388,11 @@ static ssize_t uv__fs_open(uv_fs_t* req) {
|
||||
}
|
||||
|
||||
|
||||
static ssize_t uv__preadv_or_pwritev_emul(int fd,
|
||||
const struct iovec* bufs,
|
||||
size_t nbufs,
|
||||
off_t off,
|
||||
int is_pread) {
|
||||
static int uv__preadv_or_pwritev_emul(int fd,
|
||||
const struct iovec* bufs,
|
||||
size_t nbufs,
|
||||
off_t off,
|
||||
int is_pread) {
|
||||
ssize_t total;
|
||||
ssize_t r;
|
||||
size_t i;
|
||||
@ -435,18 +435,18 @@ typedef size_t uv__iovcnt;
|
||||
#endif
|
||||
|
||||
|
||||
static ssize_t uv__preadv_emul(int fd,
|
||||
const struct iovec* bufs,
|
||||
uv__iovcnt nbufs,
|
||||
off_t off) {
|
||||
static int uv__preadv_emul(int fd,
|
||||
const struct iovec* bufs,
|
||||
uv__iovcnt nbufs,
|
||||
off_t off) {
|
||||
return uv__preadv_or_pwritev_emul(fd, bufs, nbufs, off, /*is_pread*/1);
|
||||
}
|
||||
|
||||
|
||||
static ssize_t uv__pwritev_emul(int fd,
|
||||
const struct iovec* bufs,
|
||||
uv__iovcnt nbufs,
|
||||
off_t off) {
|
||||
static int uv__pwritev_emul(int fd,
|
||||
const struct iovec* bufs,
|
||||
uv__iovcnt nbufs,
|
||||
off_t off) {
|
||||
return uv__preadv_or_pwritev_emul(fd, bufs, nbufs, off, /*is_pread*/0);
|
||||
}
|
||||
|
||||
@ -454,14 +454,14 @@ static ssize_t uv__pwritev_emul(int fd,
|
||||
/* The function pointer cache is an uintptr_t because _Atomic void*
|
||||
* doesn't work on macos/ios/etc...
|
||||
*/
|
||||
static ssize_t uv__preadv_or_pwritev(int fd,
|
||||
const struct iovec* bufs,
|
||||
size_t nbufs,
|
||||
off_t off,
|
||||
_Atomic uintptr_t* cache,
|
||||
int is_pread) {
|
||||
static int uv__preadv_or_pwritev(int fd,
|
||||
const struct iovec* bufs,
|
||||
size_t nbufs,
|
||||
off_t off,
|
||||
_Atomic uintptr_t* cache,
|
||||
int is_pread) {
|
||||
union {
|
||||
ssize_t (*f)(int, const struct iovec*, uv__iovcnt, off_t);
|
||||
int (*f)(int, const struct iovec*, uv__iovcnt, off_t);
|
||||
void* p;
|
||||
} u;
|
||||
|
||||
@ -485,7 +485,7 @@ static ssize_t uv__preadv_or_pwritev(int fd,
|
||||
}
|
||||
|
||||
|
||||
static ssize_t uv__preadv(int fd,
|
||||
static int uv__preadv(int fd,
|
||||
const struct iovec* bufs,
|
||||
size_t nbufs,
|
||||
off_t off) {
|
||||
@ -494,16 +494,16 @@ static ssize_t uv__preadv(int fd,
|
||||
}
|
||||
|
||||
|
||||
static ssize_t uv__pwritev(int fd,
|
||||
const struct iovec* bufs,
|
||||
size_t nbufs,
|
||||
off_t off) {
|
||||
static int uv__pwritev(int fd,
|
||||
const struct iovec* bufs,
|
||||
size_t nbufs,
|
||||
off_t off) {
|
||||
static _Atomic uintptr_t cache;
|
||||
return uv__preadv_or_pwritev(fd, bufs, nbufs, off, &cache, /*is_pread*/0);
|
||||
}
|
||||
|
||||
|
||||
static ssize_t uv__fs_read(uv_fs_t* req) {
|
||||
static int uv__fs_read(uv_fs_t* req) {
|
||||
const struct iovec* bufs;
|
||||
unsigned int iovmax;
|
||||
size_t nbufs;
|
||||
@ -520,15 +520,31 @@ static ssize_t uv__fs_read(uv_fs_t* req) {
|
||||
if (nbufs > iovmax)
|
||||
nbufs = iovmax;
|
||||
|
||||
/* Truncate multi-buf reads to IO_MAX_BYTES total, dropping trailing bufs. */
|
||||
if (nbufs > 1) {
|
||||
size_t total;
|
||||
size_t n;
|
||||
for (total = 0, n = 0; n < nbufs; n++) {
|
||||
if (bufs[n].iov_len > IO_MAX_BYTES - total)
|
||||
break;
|
||||
total += bufs[n].iov_len;
|
||||
}
|
||||
if (n < nbufs)
|
||||
nbufs = n > 0 ? n : 1;
|
||||
}
|
||||
|
||||
r = 0;
|
||||
if (off < 0) {
|
||||
if (nbufs == 1)
|
||||
r = read(fd, bufs->iov_base, bufs->iov_len);
|
||||
r = read(fd, bufs->iov_base,
|
||||
bufs->iov_len > IO_MAX_BYTES ? IO_MAX_BYTES : bufs->iov_len);
|
||||
else if (nbufs > 1)
|
||||
r = readv(fd, bufs, nbufs);
|
||||
} else {
|
||||
if (nbufs == 1)
|
||||
r = pread(fd, bufs->iov_base, bufs->iov_len, off);
|
||||
r = pread(fd, bufs->iov_base,
|
||||
bufs->iov_len > IO_MAX_BYTES ? IO_MAX_BYTES : bufs->iov_len,
|
||||
off);
|
||||
else if (nbufs > 1)
|
||||
r = uv__preadv(fd, bufs, nbufs, off);
|
||||
}
|
||||
@ -567,7 +583,7 @@ static int uv__fs_scandir_sort(const uv__dirent_t** a, const uv__dirent_t** b) {
|
||||
}
|
||||
|
||||
|
||||
static ssize_t uv__fs_scandir(uv_fs_t* req) {
|
||||
static int uv__fs_scandir(uv_fs_t* req) {
|
||||
uv__dirent_t** dents;
|
||||
int n;
|
||||
|
||||
@ -732,7 +748,7 @@ static ssize_t uv__fs_pathmax_size(const char* path) {
|
||||
return pathmax;
|
||||
}
|
||||
|
||||
static ssize_t uv__fs_readlink(uv_fs_t* req) {
|
||||
static int uv__fs_readlink(uv_fs_t* req) {
|
||||
ssize_t maxlen;
|
||||
ssize_t len;
|
||||
char* buf;
|
||||
@ -791,7 +807,7 @@ static ssize_t uv__fs_readlink(uv_fs_t* req) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
static ssize_t uv__fs_realpath(uv_fs_t* req) {
|
||||
static int uv__fs_realpath(uv_fs_t* req) {
|
||||
char* buf;
|
||||
char* tmp;
|
||||
|
||||
@ -829,7 +845,7 @@ static ssize_t uv__fs_realpath(uv_fs_t* req) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
static ssize_t uv__fs_sendfile_emul(uv_fs_t* req) {
|
||||
static int uv__fs_sendfile_emul(uv_fs_t* req) {
|
||||
struct pollfd pfd;
|
||||
int use_pread;
|
||||
off_t offset;
|
||||
@ -1027,7 +1043,7 @@ static ssize_t uv__fs_try_copy_file_range(int in_fd, off_t* off,
|
||||
#endif /* __linux__ */
|
||||
|
||||
|
||||
static ssize_t uv__fs_sendfile(uv_fs_t* req) {
|
||||
static int uv__fs_sendfile(uv_fs_t* req) {
|
||||
int in_fd;
|
||||
int out_fd;
|
||||
|
||||
@ -1116,7 +1132,7 @@ static ssize_t uv__fs_sendfile(uv_fs_t* req) {
|
||||
*/
|
||||
if (r == 0 || ((errno == EAGAIN || errno == EINTR) && len != 0)) {
|
||||
req->off += len;
|
||||
return (ssize_t) len;
|
||||
return len;
|
||||
}
|
||||
|
||||
if (errno == EINVAL ||
|
||||
@ -1139,7 +1155,7 @@ static ssize_t uv__fs_sendfile(uv_fs_t* req) {
|
||||
}
|
||||
|
||||
|
||||
static ssize_t uv__fs_utime(uv_fs_t* req) {
|
||||
static int uv__fs_utime(uv_fs_t* req) {
|
||||
#if defined(__APPLE__) \
|
||||
|| defined(_AIX71) \
|
||||
|| defined(__DragonFly__) \
|
||||
@ -1174,7 +1190,7 @@ static ssize_t uv__fs_utime(uv_fs_t* req) {
|
||||
}
|
||||
|
||||
|
||||
static ssize_t uv__fs_lutime(uv_fs_t* req) {
|
||||
static int uv__fs_lutime(uv_fs_t* req) {
|
||||
#if defined(__APPLE__) \
|
||||
|| defined(_AIX71) \
|
||||
|| defined(__DragonFly__) \
|
||||
@ -1196,7 +1212,7 @@ static ssize_t uv__fs_lutime(uv_fs_t* req) {
|
||||
}
|
||||
|
||||
|
||||
static ssize_t uv__fs_write(uv_fs_t* req) {
|
||||
static int uv__fs_write(uv_fs_t* req) {
|
||||
const struct iovec* bufs;
|
||||
size_t nbufs;
|
||||
ssize_t r;
|
||||
@ -1225,7 +1241,7 @@ static ssize_t uv__fs_write(uv_fs_t* req) {
|
||||
}
|
||||
|
||||
|
||||
static ssize_t uv__fs_copyfile(uv_fs_t* req) {
|
||||
static int uv__fs_copyfile(uv_fs_t* req) {
|
||||
uv_fs_t fs_req;
|
||||
uv_file srcfd;
|
||||
uv_file dstfd;
|
||||
@ -1633,7 +1649,7 @@ static size_t uv__fs_buf_offset(uv_buf_t* bufs, size_t size) {
|
||||
return offset;
|
||||
}
|
||||
|
||||
static ssize_t uv__fs_write_all(uv_fs_t* req) {
|
||||
static int uv__fs_write_all(uv_fs_t* req) {
|
||||
unsigned int iovmax;
|
||||
unsigned int nbufs;
|
||||
uv_buf_t* bufs;
|
||||
@ -1682,7 +1698,7 @@ static ssize_t uv__fs_write_all(uv_fs_t* req) {
|
||||
static void uv__fs_work(struct uv__work* w) {
|
||||
int retry_on_eintr;
|
||||
uv_fs_t* req;
|
||||
ssize_t r;
|
||||
int r;
|
||||
|
||||
req = container_of(w, uv_fs_t, work_req);
|
||||
retry_on_eintr = !(req->fs_type == UV_FS_CLOSE ||
|
||||
@ -2150,6 +2166,8 @@ int uv_fs_sendfile(uv_loop_t* loop,
|
||||
req->flags = in_fd; /* hack */
|
||||
req->file = out_fd;
|
||||
req->off = off;
|
||||
if (len > IO_MAX_BYTES)
|
||||
return UV_EINVAL;
|
||||
req->bufsml[0].len = len;
|
||||
POST;
|
||||
}
|
||||
@ -2217,6 +2235,9 @@ int uv_fs_write(uv_loop_t* loop,
|
||||
if (bufs == NULL || nbufs == 0)
|
||||
return UV_EINVAL;
|
||||
|
||||
if (uv__count_bufs(bufs, nbufs) > IO_MAX_BYTES)
|
||||
return UV_EINVAL;
|
||||
|
||||
req->file = file;
|
||||
|
||||
req->nbufs = nbufs;
|
||||
|
||||
@ -78,6 +78,7 @@ static size_t uv__write_req_size(uv_write_t* req);
|
||||
static void uv__drain(uv_stream_t* stream);
|
||||
|
||||
|
||||
|
||||
void uv__stream_init(uv_loop_t* loop,
|
||||
uv_stream_t* stream,
|
||||
uv_handle_type type) {
|
||||
@ -1057,12 +1058,15 @@ static void uv__read(uv_stream_t* stream) {
|
||||
assert(uv__stream_fd(stream) >= 0);
|
||||
|
||||
if (!is_ipc) {
|
||||
do {
|
||||
nread = read(uv__stream_fd(stream), buf.base, buf.len);
|
||||
}
|
||||
do
|
||||
nread = read(uv__stream_fd(stream),
|
||||
buf.base,
|
||||
buf.len > IO_MAX_BYTES ? IO_MAX_BYTES : buf.len);
|
||||
while (nread < 0 && errno == EINTR);
|
||||
} else {
|
||||
/* ipc uses recvmsg */
|
||||
if (buf.len > IO_MAX_BYTES)
|
||||
buf.len = IO_MAX_BYTES;
|
||||
msg.msg_flags = 0;
|
||||
msg.msg_iov = (struct iovec*) &buf;
|
||||
msg.msg_iovlen = 1;
|
||||
@ -1295,6 +1299,7 @@ static void uv__stream_connect(uv_stream_t* stream) {
|
||||
|
||||
|
||||
static int uv__check_before_write(uv_stream_t* stream,
|
||||
const uv_buf_t bufs[],
|
||||
unsigned int nbufs,
|
||||
uv_stream_t* send_handle) {
|
||||
assert((stream->type == UV_TCP ||
|
||||
@ -1309,6 +1314,13 @@ static int uv__check_before_write(uv_stream_t* stream,
|
||||
if (nbufs < 1 || nbufs > 1024*1024)
|
||||
return UV_EINVAL;
|
||||
|
||||
/* Reject writes above IO_MAX_BYTES to be consistent with EINVAL on platforms
|
||||
* such as macOS that fail when the total size of the iov exceeds 2GB,
|
||||
* and catch/prevent sign-extension bugs.
|
||||
*/
|
||||
if (uv__count_bufs(bufs, nbufs) > IO_MAX_BYTES)
|
||||
return UV_EINVAL;
|
||||
|
||||
if (uv__stream_fd(stream) < 0)
|
||||
return UV_EBADF;
|
||||
|
||||
@ -1347,7 +1359,7 @@ int uv_write2(uv_write_t* req,
|
||||
int empty_queue;
|
||||
int err;
|
||||
|
||||
err = uv__check_before_write(stream, nbufs, send_handle);
|
||||
err = uv__check_before_write(stream, bufs, nbufs, send_handle);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
@ -1436,7 +1448,7 @@ int uv_try_write2(uv_stream_t* stream,
|
||||
if (stream->connect_req != NULL || stream->write_queue_size != 0)
|
||||
return UV_EAGAIN;
|
||||
|
||||
err = uv__check_before_write(stream, nbufs, send_handle);
|
||||
err = uv__check_before_write(stream, bufs, nbufs, send_handle);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
|
||||
@ -721,9 +721,6 @@ int uv__udp_try_send(uv_udp_t* handle,
|
||||
unsigned int addrlen) {
|
||||
int err;
|
||||
|
||||
if (nbufs < 1)
|
||||
return UV_EINVAL;
|
||||
|
||||
/* already sending a message */
|
||||
if (handle->send_queue_count != 0)
|
||||
return UV_EAGAIN;
|
||||
|
||||
@ -453,7 +453,10 @@ int uv__udp_is_connected(uv_udp_t* handle) {
|
||||
}
|
||||
|
||||
|
||||
int uv__udp_check_before_send(uv_udp_t* handle, const struct sockaddr* addr) {
|
||||
int uv__udp_check_before_send(uv_udp_t* handle,
|
||||
const uv_buf_t bufs[],
|
||||
unsigned int nbufs,
|
||||
const struct sockaddr* addr) {
|
||||
unsigned int addrlen;
|
||||
|
||||
if (handle->type != UV_UDP)
|
||||
@ -480,6 +483,12 @@ int uv__udp_check_before_send(uv_udp_t* handle, const struct sockaddr* addr) {
|
||||
addrlen = 0;
|
||||
}
|
||||
|
||||
if (nbufs < 1 || nbufs > 1024 * 1024)
|
||||
return UV_EINVAL;
|
||||
|
||||
if (uv__count_bufs(bufs, nbufs) > IO_MAX_BYTES)
|
||||
return UV_EINVAL;
|
||||
|
||||
return addrlen;
|
||||
}
|
||||
|
||||
@ -492,10 +501,7 @@ int uv_udp_send(uv_udp_send_t* req,
|
||||
uv_udp_send_cb send_cb) {
|
||||
int addrlen;
|
||||
|
||||
if (nbufs < 1 || nbufs > 1024 * 1024)
|
||||
return UV_EINVAL;
|
||||
|
||||
addrlen = uv__udp_check_before_send(handle, addr);
|
||||
addrlen = uv__udp_check_before_send(handle, bufs, nbufs, addr);
|
||||
if (addrlen < 0)
|
||||
return addrlen;
|
||||
|
||||
@ -509,10 +515,7 @@ int uv_udp_try_send(uv_udp_t* handle,
|
||||
const struct sockaddr* addr) {
|
||||
int addrlen;
|
||||
|
||||
if (nbufs < 1 || nbufs > 1024 * 1024)
|
||||
return UV_EINVAL;
|
||||
|
||||
addrlen = uv__udp_check_before_send(handle, addr);
|
||||
addrlen = uv__udp_check_before_send(handle, bufs, nbufs, addr);
|
||||
if (addrlen < 0)
|
||||
return addrlen;
|
||||
|
||||
@ -527,6 +530,7 @@ int uv_udp_try_send2(uv_udp_t* handle,
|
||||
struct sockaddr* addrs[/*count*/],
|
||||
unsigned int flags) {
|
||||
unsigned int i;
|
||||
int addrlen;
|
||||
|
||||
if (count < 1)
|
||||
return UV_EINVAL;
|
||||
@ -534,9 +538,11 @@ int uv_udp_try_send2(uv_udp_t* handle,
|
||||
if (flags != 0)
|
||||
return UV_EINVAL;
|
||||
|
||||
for (i = 0; i < count; i++)
|
||||
if (nbufs[i] < 1 || nbufs[i] > 1024 * 1024)
|
||||
return UV_EINVAL;
|
||||
for (i = 0; i < count; i++) {
|
||||
addrlen = uv__udp_check_before_send(handle, bufs[i], nbufs[i], addrs[i]);
|
||||
if (addrlen < 0)
|
||||
return addrlen;
|
||||
}
|
||||
|
||||
if (handle->send_queue_count > 0)
|
||||
return UV_EAGAIN;
|
||||
@ -663,8 +669,11 @@ size_t uv__count_bufs(const uv_buf_t bufs[], unsigned int nbufs) {
|
||||
size_t bytes;
|
||||
|
||||
bytes = 0;
|
||||
for (i = 0; i < nbufs; i++)
|
||||
bytes += (size_t) bufs[i].len;
|
||||
for (i = 0; i < nbufs; i++) {
|
||||
if (bufs[i].len > (size_t) INT32_MAX - bytes)
|
||||
return INT32_MAX;
|
||||
bytes += bufs[i].len;
|
||||
}
|
||||
|
||||
return bytes;
|
||||
}
|
||||
|
||||
@ -226,6 +226,12 @@ void uv__work_done(uv_async_t* handle);
|
||||
|
||||
size_t uv__count_bufs(const uv_buf_t bufs[], unsigned int nbufs);
|
||||
|
||||
/* On some platforms, notably macOS, attempting a read or write > 2GB returns
|
||||
* an EINVAL. On Linux, IO syscalls will transfer at most this many bytes.
|
||||
* Use this limit everywhere to avoid platform-specific failures.
|
||||
*/
|
||||
#define IO_MAX_BYTES 0x7ffff000
|
||||
|
||||
int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value);
|
||||
|
||||
void uv__fs_scandir_cleanup(uv_fs_t* req);
|
||||
|
||||
15
src/win/fs.c
15
src/win/fs.c
@ -889,6 +889,7 @@ void fs__read(uv_fs_t* req) {
|
||||
bytes = 0;
|
||||
do {
|
||||
DWORD incremental_bytes;
|
||||
DWORD to_read;
|
||||
|
||||
if (offset != -1) {
|
||||
offset_.QuadPart = offset + bytes;
|
||||
@ -896,9 +897,12 @@ void fs__read(uv_fs_t* req) {
|
||||
overlapped.OffsetHigh = offset_.HighPart;
|
||||
}
|
||||
|
||||
to_read = req->fs.info.bufs[index].len;
|
||||
if (to_read > IO_MAX_BYTES)
|
||||
to_read = IO_MAX_BYTES;
|
||||
result = ReadFile(handle,
|
||||
req->fs.info.bufs[index].base,
|
||||
req->fs.info.bufs[index].len,
|
||||
to_read,
|
||||
&incremental_bytes,
|
||||
overlapped_ptr);
|
||||
bytes += incremental_bytes;
|
||||
@ -1103,7 +1107,7 @@ void fs__write(uv_fs_t* req) {
|
||||
|
||||
result = WriteFile(handle,
|
||||
req->fs.info.bufs[index].base,
|
||||
req->fs.info.bufs[index].len,
|
||||
(DWORD) req->fs.info.bufs[index].len,
|
||||
&incremental_bytes,
|
||||
overlapped_ptr);
|
||||
bytes += incremental_bytes;
|
||||
@ -3329,6 +3333,11 @@ int uv_fs_write(uv_loop_t* loop,
|
||||
return UV_EINVAL;
|
||||
}
|
||||
|
||||
if (uv__count_bufs(bufs, nbufs) > IO_MAX_BYTES) {
|
||||
SET_REQ_UV_ERROR(req, UV_EINVAL, ERROR_INVALID_PARAMETER);
|
||||
return UV_EINVAL;
|
||||
}
|
||||
|
||||
req->file.fd = fd;
|
||||
|
||||
req->fs.info.nbufs = nbufs;
|
||||
@ -3698,6 +3707,8 @@ int uv_fs_sendfile(uv_loop_t* loop, uv_fs_t* req, uv_file fd_out,
|
||||
req->file.fd = fd_in;
|
||||
req->fs.info.fd_out = fd_out;
|
||||
req->fs.info.offset = in_offset;
|
||||
if (length > IO_MAX_BYTES)
|
||||
return UV_EINVAL;
|
||||
req->fs.info.bufsml[0].len = length;
|
||||
POST;
|
||||
}
|
||||
|
||||
@ -1622,6 +1622,9 @@ static int uv__pipe_write_data(uv_loop_t* loop,
|
||||
return err;
|
||||
}
|
||||
|
||||
if (write_buf.len > IO_MAX_BYTES)
|
||||
return ERROR_INVALID_PARAMETER; /* Maps to UV_EINVAL. */
|
||||
|
||||
if ((handle->flags &
|
||||
(UV_HANDLE_BLOCKING_WRITES | UV_HANDLE_NON_OVERLAPPED_PIPE)) ==
|
||||
(UV_HANDLE_BLOCKING_WRITES | UV_HANDLE_NON_OVERLAPPED_PIPE)) {
|
||||
@ -1968,8 +1971,11 @@ static int uv__pipe_read_data(uv_loop_t* loop,
|
||||
/* Ensure we read at most the smaller of:
|
||||
* (a) the length of the user-allocated buffer.
|
||||
* (b) the maximum data length as specified by the `max_bytes` argument.
|
||||
* (c) the amount of data that can be read non-blocking
|
||||
* (c) the amount of data that can be read non-blocking.
|
||||
* (d) IO_MAX_BYTES.
|
||||
*/
|
||||
if (buf.len > IO_MAX_BYTES)
|
||||
buf.len = IO_MAX_BYTES;
|
||||
if (max_bytes > buf.len)
|
||||
max_bytes = buf.len;
|
||||
|
||||
|
||||
@ -111,7 +111,9 @@ int uv_read_stop(uv_stream_t* handle) {
|
||||
}
|
||||
|
||||
|
||||
static int uv__check_before_write(uv_stream_t* handle, unsigned int nbufs) {
|
||||
static int uv__check_before_write(uv_stream_t* handle,
|
||||
const uv_buf_t bufs[],
|
||||
unsigned int nbufs) {
|
||||
/* We're not beholden to IOV_MAX but limit the buffer count to catch sign
|
||||
* conversion bugs where a caller passes in a signed negative number that
|
||||
* then gets converted to a really large unsigned number.
|
||||
@ -120,6 +122,10 @@ static int uv__check_before_write(uv_stream_t* handle, unsigned int nbufs) {
|
||||
return UV_EINVAL;
|
||||
}
|
||||
|
||||
if (uv__count_bufs(bufs, nbufs) > IO_MAX_BYTES) {
|
||||
return UV_EINVAL;
|
||||
}
|
||||
|
||||
if (!(handle->flags & UV_HANDLE_WRITABLE)) {
|
||||
return UV_EPIPE;
|
||||
}
|
||||
@ -136,7 +142,7 @@ int uv_write(uv_write_t* req,
|
||||
uv_loop_t* loop = handle->loop;
|
||||
int err;
|
||||
|
||||
err = uv__check_before_write(handle, nbufs);
|
||||
err = uv__check_before_write(handle, bufs, nbufs);
|
||||
if (err != 0) {
|
||||
return err;
|
||||
}
|
||||
@ -174,7 +180,7 @@ int uv_write2(uv_write_t* req,
|
||||
return uv_write(req, handle, bufs, nbufs, cb);
|
||||
}
|
||||
|
||||
err = uv__check_before_write(handle, nbufs);
|
||||
err = uv__check_before_write(handle, bufs, nbufs);
|
||||
if (err != 0) {
|
||||
return err;
|
||||
}
|
||||
@ -194,7 +200,7 @@ int uv_try_write(uv_stream_t* stream,
|
||||
unsigned int nbufs) {
|
||||
int err;
|
||||
|
||||
err = uv__check_before_write(stream, nbufs);
|
||||
err = uv__check_before_write(stream, bufs, nbufs);
|
||||
if (err != 0) {
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -1103,6 +1103,8 @@ void uv__process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle,
|
||||
break;
|
||||
}
|
||||
assert(buf.base != NULL);
|
||||
if (buf.len > IO_MAX_BYTES)
|
||||
buf.len = IO_MAX_BYTES;
|
||||
|
||||
flags = 0;
|
||||
if (WSARecv(handle->socket,
|
||||
|
||||
@ -1129,9 +1129,6 @@ int uv__udp_try_send(uv_udp_t* handle,
|
||||
struct sockaddr_storage converted;
|
||||
int err;
|
||||
|
||||
if (nbufs < 1)
|
||||
return UV_EINVAL;
|
||||
|
||||
if (addr != NULL) {
|
||||
err = uv__convert_to_localhost_if_unspecified(addr, &converted);
|
||||
if (err)
|
||||
|
||||
182
test/test-io-64-safe.c
Normal file
182
test/test-io-64-safe.c
Normal file
@ -0,0 +1,182 @@
|
||||
/* Copyright libuv contributors. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to
|
||||
* deal in the Software without restriction, including without limitation the
|
||||
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
||||
* sell copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/* Verify that passing INT32_MAX as a buffer length is rejected with UV_EINVAL
|
||||
* at the various I/O entry points that enforce IO_MAX_BYTES.
|
||||
*/
|
||||
|
||||
#include "uv.h"
|
||||
#include "task.h"
|
||||
|
||||
#include <fcntl.h>
|
||||
#include <stdint.h>
|
||||
#include <sys/stat.h>
|
||||
|
||||
#define TEST_FILE "tmp_io_64_safe"
|
||||
|
||||
static void on_udp_send(uv_udp_send_t* req, int status) {
|
||||
/* Should never be called: uv_udp_send must reject synchronously. */
|
||||
ASSERT(0 && "on_udp_send callback must not be invoked");
|
||||
(void) req;
|
||||
(void) status;
|
||||
}
|
||||
|
||||
TEST_IMPL(io_64_safe) {
|
||||
uv_loop_t* loop;
|
||||
uv_fs_t open_req;
|
||||
uv_fs_t fs_req;
|
||||
uv_write_t write_req;
|
||||
uv_udp_t udp;
|
||||
uv_tcp_t tcp;
|
||||
uv_udp_send_t send_req;
|
||||
struct sockaddr_in addr;
|
||||
uv_buf_t* t2_bufs[1];
|
||||
unsigned int t2_nbufs[1];
|
||||
struct sockaddr* t2_addrs[1];
|
||||
uv_buf_t buf;
|
||||
uv_buf_t bufs2[2];
|
||||
uv_file fd;
|
||||
uv_file in_fd;
|
||||
uv_file out_fd;
|
||||
|
||||
loop = uv_default_loop();
|
||||
|
||||
/* A buf whose length just exceeds IO_MAX_BYTES (0x7ffff000). */
|
||||
buf = uv_buf_init(NULL, INT32_MAX);
|
||||
|
||||
/* Two buffers whose individual sizes are reasonable but whose sum exceeds
|
||||
* IO_MAX_BYTES (0x7ffff000). Each is 1 GiB + 1 byte.
|
||||
*/
|
||||
bufs2[0] = uv_buf_init(NULL, 0x40000001u);
|
||||
bufs2[1] = uv_buf_init(NULL, 0x40000001u);
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* uv_fs_write: reject synchronous filesystem write > IO_MAX_BYTES. */
|
||||
/* ------------------------------------------------------------------ */
|
||||
{
|
||||
fd = uv_fs_open(NULL, &open_req, TEST_FILE,
|
||||
UV_FS_O_WRONLY | UV_FS_O_CREAT | UV_FS_O_TRUNC, S_IRUSR | S_IWUSR, NULL);
|
||||
ASSERT_GE(fd, 0);
|
||||
uv_fs_req_cleanup(&open_req);
|
||||
|
||||
ASSERT_EQ(UV_EINVAL, uv_fs_write(NULL, &fs_req, fd, &buf, 1, 0, NULL));
|
||||
uv_fs_req_cleanup(&fs_req);
|
||||
|
||||
/* nbufs > 1 where sum > IO_MAX_BYTES */
|
||||
ASSERT_EQ(UV_EINVAL, uv_fs_write(NULL, &fs_req, fd, bufs2, 2, 0, NULL));
|
||||
uv_fs_req_cleanup(&fs_req);
|
||||
|
||||
uv_fs_close(NULL, &fs_req, fd, NULL);
|
||||
uv_fs_req_cleanup(&fs_req);
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* uv_fs_sendfile: reject len > IO_MAX_BYTES. */
|
||||
/* ------------------------------------------------------------------ */
|
||||
{
|
||||
in_fd = uv_fs_open(NULL, &open_req, TEST_FILE,
|
||||
UV_FS_O_RDONLY | UV_FS_O_CREAT, S_IRUSR | S_IWUSR, NULL);
|
||||
ASSERT_GE(in_fd, 0);
|
||||
uv_fs_req_cleanup(&open_req);
|
||||
|
||||
out_fd = uv_fs_open(NULL, &open_req, TEST_FILE,
|
||||
UV_FS_O_WRONLY | UV_FS_O_CREAT | UV_FS_O_APPEND, S_IRUSR | S_IWUSR,
|
||||
NULL);
|
||||
ASSERT_GE(out_fd, 0);
|
||||
uv_fs_req_cleanup(&open_req);
|
||||
|
||||
ASSERT_EQ(UV_EINVAL,
|
||||
uv_fs_sendfile(NULL, &fs_req, out_fd, in_fd, 0,
|
||||
(size_t) INT32_MAX, NULL));
|
||||
uv_fs_req_cleanup(&fs_req);
|
||||
|
||||
uv_fs_close(NULL, &fs_req, in_fd, NULL);
|
||||
uv_fs_req_cleanup(&fs_req);
|
||||
uv_fs_close(NULL, &fs_req, out_fd, NULL);
|
||||
uv_fs_req_cleanup(&fs_req);
|
||||
}
|
||||
|
||||
uv_fs_unlink(NULL, &fs_req, TEST_FILE, NULL);
|
||||
uv_fs_req_cleanup(&fs_req);
|
||||
|
||||
{
|
||||
/* uv_write: reject stream write > IO_MAX_BYTES before queuing. */
|
||||
ASSERT_OK(uv_tcp_init(loop, &tcp));
|
||||
ASSERT_EQ(UV_EINVAL,
|
||||
uv_write(&write_req, (uv_stream_t*) &tcp, &buf, 1, NULL));
|
||||
|
||||
/* nbufs > 1 where sum > IO_MAX_BYTES */
|
||||
ASSERT_EQ(UV_EINVAL,
|
||||
uv_write(&write_req, (uv_stream_t*) &tcp, bufs2, 2, NULL));
|
||||
|
||||
/* uv_try_write: same check via the synchronous path. */
|
||||
ASSERT_EQ(UV_EINVAL, uv_try_write((uv_stream_t*) &tcp, &buf, 1));
|
||||
|
||||
/* nbufs > 1 via try_write */
|
||||
ASSERT_EQ(UV_EINVAL, uv_try_write((uv_stream_t*) &tcp, bufs2, 2));
|
||||
|
||||
uv_close((uv_handle_t*) &tcp, NULL);
|
||||
}
|
||||
|
||||
{
|
||||
ASSERT_OK(uv_udp_init(loop, &udp));
|
||||
ASSERT_OK(uv_ip4_addr("127.0.0.1", TEST_PORT, &addr));
|
||||
|
||||
/* uv_udp_try_send: reject UDP send > IO_MAX_BYTES. */
|
||||
ASSERT_EQ(UV_EINVAL,
|
||||
uv_udp_try_send(&udp, &buf, 1,
|
||||
(const struct sockaddr*) &addr));
|
||||
|
||||
/* nbufs > 1 via try_send */
|
||||
ASSERT_EQ(UV_EINVAL,
|
||||
uv_udp_try_send(&udp, bufs2, 2,
|
||||
(const struct sockaddr*) &addr));
|
||||
|
||||
/* uv_udp_send (async): reject synchronously before queuing. */
|
||||
ASSERT_EQ(UV_EINVAL,
|
||||
uv_udp_send(&send_req, &udp, &buf, 1,
|
||||
(const struct sockaddr*) &addr, on_udp_send));
|
||||
|
||||
/* nbufs > 1 via async send */
|
||||
ASSERT_EQ(UV_EINVAL,
|
||||
uv_udp_send(&send_req, &udp, bufs2, 2,
|
||||
(const struct sockaddr*) &addr, on_udp_send));
|
||||
|
||||
/* uv_udp_try_send2: reject per-batch size > IO_MAX_BYTES. */
|
||||
t2_bufs[0] = &buf;
|
||||
t2_nbufs[0] = 1;
|
||||
t2_addrs[0] = (struct sockaddr*) &addr;
|
||||
ASSERT_EQ(UV_EINVAL,
|
||||
uv_udp_try_send2(&udp, 1, t2_bufs, t2_nbufs, t2_addrs, 0));
|
||||
|
||||
/* nbufs > 1 per batch via try_send2 */
|
||||
t2_bufs[0] = bufs2;
|
||||
t2_nbufs[0] = 2;
|
||||
ASSERT_EQ(UV_EINVAL,
|
||||
uv_udp_try_send2(&udp, 1, t2_bufs, t2_nbufs, t2_addrs, 0));
|
||||
|
||||
uv_close((uv_handle_t*) &udp, NULL);
|
||||
}
|
||||
|
||||
uv_run(loop, UV_RUN_DEFAULT);
|
||||
MAKE_VALGRIND_HAPPY(loop);
|
||||
return 0;
|
||||
}
|
||||
@ -465,6 +465,7 @@ TEST_FS_DECLARE (fs_invalid_mkdir_name)
|
||||
TEST_FS_DECLARE (fs_wtf)
|
||||
#endif
|
||||
TEST_FS_DECLARE (fs_get_system_error)
|
||||
TEST_DECLARE (io_64_safe)
|
||||
TEST_DECLARE (strscpy)
|
||||
TEST_DECLARE (strtok)
|
||||
TEST_DECLARE (threadpool_queue_work_simple)
|
||||
@ -1196,6 +1197,7 @@ TASK_LIST_START
|
||||
TEST_FS_ENTRY (fs_get_system_error)
|
||||
TEST_ENTRY (get_osfhandle_valid_handle)
|
||||
TEST_ENTRY (open_osfhandle_valid_handle)
|
||||
TEST_ENTRY (io_64_safe)
|
||||
TEST_ENTRY (strscpy)
|
||||
TEST_ENTRY (strtok)
|
||||
TEST_ENTRY (threadpool_queue_work_simple)
|
||||
|
||||
Loading…
Reference in New Issue
Block a user