win: remove pipe shutdown logic, return UV_ENOTSOCK from uv_shutdown
Named pipes do not actually support `shutdown`, and this hack to fake it was unsound. Return `UV_ENOTSOCK` from `uv_shutdown` immediately for `UV_NAMED_PIPE` handles, matching the behavior on unix where `shutdown` on a pipe fd fails with `ENOTSOCK` (as opposed to a unix domain socket, which is also a `uv_pipe_t` but does support `shutdown`). Fixes tests to work correctly whether or not shutdown works. Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
parent
e5f79e487b
commit
4c1bd6f109
@ -349,7 +349,6 @@ typedef struct {
|
||||
uv_pipe_accept_t* pending_accepts;
|
||||
|
||||
#define uv_pipe_connection_fields \
|
||||
uv_timer_t* eof_timer; \
|
||||
DWORD ipc_remote_pid; \
|
||||
struct { \
|
||||
uint32_t payload_remaining; \
|
||||
|
||||
@ -118,8 +118,6 @@ int uv__pipe_write(uv_loop_t* loop,
|
||||
size_t nbufs,
|
||||
uv_stream_t* send_handle,
|
||||
uv_write_cb cb);
|
||||
void uv__pipe_shutdown(uv_loop_t* loop, uv_pipe_t* handle, uv_shutdown_t* req);
|
||||
|
||||
void uv__process_pipe_read_req(uv_loop_t* loop, uv_pipe_t* handle,
|
||||
uv_req_t* req);
|
||||
void uv__process_pipe_write_req(uv_loop_t* loop, uv_pipe_t* handle,
|
||||
|
||||
225
src/win/pipe.c
225
src/win/pipe.c
@ -40,10 +40,6 @@ static char uv_zero_[] = "";
|
||||
/* Null uv_buf_t */
|
||||
static const uv_buf_t uv_null_buf_ = { 0, NULL };
|
||||
|
||||
/* The timeout that the pipe will wait for the remote end to write data when
|
||||
* the local ends wants to shut it down. */
|
||||
static const int64_t eof_timeout = 50; /* ms */
|
||||
|
||||
static const int default_pending_pipe_instances = 4;
|
||||
|
||||
/* Pipe prefix */
|
||||
@ -89,13 +85,6 @@ typedef struct {
|
||||
} uv__coalesced_write_t;
|
||||
|
||||
|
||||
static void eof_timer_init(uv_pipe_t* pipe);
|
||||
static void eof_timer_start(uv_pipe_t* pipe);
|
||||
static void eof_timer_stop(uv_pipe_t* pipe);
|
||||
static void eof_timer_cb(uv_timer_t* timer);
|
||||
static void eof_timer_destroy(uv_pipe_t* pipe);
|
||||
static void eof_timer_close_cb(uv_handle_t* handle);
|
||||
|
||||
|
||||
/* Does the file path contain embedded nul bytes? */
|
||||
static int includes_nul(const char *s, size_t n) {
|
||||
@ -131,7 +120,6 @@ static void uv__pipe_connection_init(uv_pipe_t* handle) {
|
||||
assert(!(handle->flags & UV_HANDLE_PIPESERVER));
|
||||
uv__connection_init((uv_stream_t*) handle);
|
||||
handle->read_req.data = handle;
|
||||
handle->pipe.conn.eof_timer = NULL;
|
||||
}
|
||||
|
||||
|
||||
@ -531,76 +519,6 @@ static int pipe_alloc_accept(uv_loop_t* loop, uv_pipe_t* handle,
|
||||
}
|
||||
|
||||
|
||||
static DWORD WINAPI pipe_shutdown_thread_proc(void* parameter) {
|
||||
uv_loop_t* loop;
|
||||
uv_pipe_t* handle;
|
||||
uv_shutdown_t* req;
|
||||
|
||||
req = (uv_shutdown_t*) parameter;
|
||||
assert(req);
|
||||
handle = (uv_pipe_t*) req->handle;
|
||||
assert(handle);
|
||||
loop = handle->loop;
|
||||
assert(loop);
|
||||
|
||||
FlushFileBuffers(handle->handle);
|
||||
|
||||
/* Post completed */
|
||||
POST_COMPLETION_FOR_REQ(loop, req);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
void uv__pipe_shutdown(uv_loop_t* loop, uv_pipe_t* handle, uv_shutdown_t *req) {
|
||||
DWORD result;
|
||||
NTSTATUS nt_status;
|
||||
IO_STATUS_BLOCK io_status;
|
||||
FILE_PIPE_LOCAL_INFORMATION pipe_info;
|
||||
|
||||
assert(handle->flags & UV_HANDLE_CONNECTION);
|
||||
assert(req != NULL);
|
||||
assert(handle->stream.conn.write_reqs_pending == 0);
|
||||
SET_REQ_SUCCESS(req);
|
||||
|
||||
if (handle->flags & UV_HANDLE_CLOSING) {
|
||||
uv__insert_pending_req(loop, (uv_req_t*) req);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Try to avoid flushing the pipe buffer in the thread pool. */
|
||||
nt_status = pNtQueryInformationFile(handle->handle,
|
||||
&io_status,
|
||||
&pipe_info,
|
||||
sizeof pipe_info,
|
||||
FilePipeLocalInformation);
|
||||
|
||||
if (nt_status != STATUS_SUCCESS) {
|
||||
SET_REQ_ERROR(req, pRtlNtStatusToDosError(nt_status));
|
||||
handle->flags |= UV_HANDLE_WRITABLE; /* Questionable. */
|
||||
uv__insert_pending_req(loop, (uv_req_t*) req);
|
||||
return;
|
||||
}
|
||||
|
||||
if (pipe_info.OutboundQuota == pipe_info.WriteQuotaAvailable) {
|
||||
/* Short-circuit, no need to call FlushFileBuffers:
|
||||
* all writes have been read. */
|
||||
uv__insert_pending_req(loop, (uv_req_t*) req);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Run FlushFileBuffers in the thread pool. */
|
||||
result = QueueUserWorkItem(pipe_shutdown_thread_proc,
|
||||
req,
|
||||
WT_EXECUTELONGFUNCTION);
|
||||
if (!result) {
|
||||
SET_REQ_ERROR(req, GetLastError());
|
||||
handle->flags |= UV_HANDLE_WRITABLE; /* Questionable. */
|
||||
uv__insert_pending_req(loop, (uv_req_t*) req);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void uv__pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle) {
|
||||
uv__ipc_xfer_queue_item_t* xfer_queue_item;
|
||||
@ -1050,10 +968,6 @@ void uv__pipe_close(uv_loop_t* loop, uv_pipe_t* handle) {
|
||||
handle->handle = INVALID_HANDLE_VALUE;
|
||||
}
|
||||
|
||||
if (handle->flags & UV_HANDLE_CONNECTION) {
|
||||
eof_timer_destroy(handle);
|
||||
}
|
||||
|
||||
if ((handle->flags & UV_HANDLE_CONNECTION)
|
||||
&& handle->handle != INVALID_HANDLE_VALUE) {
|
||||
/* This will eventually destroy the write queue for us too. */
|
||||
@ -1388,8 +1302,6 @@ static void uv__pipe_queue_read(uv_loop_t* loop, uv_pipe_t* handle) {
|
||||
}
|
||||
}
|
||||
|
||||
/* Start the eof timer if there is one */
|
||||
eof_timer_start(handle);
|
||||
handle->flags |= UV_HANDLE_READ_PENDING;
|
||||
handle->reqs_pending++;
|
||||
return;
|
||||
@ -1826,10 +1738,6 @@ int uv__pipe_write(uv_loop_t* loop,
|
||||
|
||||
static void uv__pipe_read_eof(uv_loop_t* loop, uv_pipe_t* handle,
|
||||
uv_buf_t buf) {
|
||||
/* If there is an eof timer running, we don't need it any more, so discard
|
||||
* it. */
|
||||
eof_timer_destroy(handle);
|
||||
|
||||
uv_read_stop((uv_stream_t*) handle);
|
||||
|
||||
handle->read_cb((uv_stream_t*) handle, UV_EOF, &buf);
|
||||
@ -1838,10 +1746,6 @@ static void uv__pipe_read_eof(uv_loop_t* loop, uv_pipe_t* handle,
|
||||
|
||||
static void uv__pipe_read_error(uv_loop_t* loop, uv_pipe_t* handle, int error,
|
||||
uv_buf_t buf) {
|
||||
/* If there is an eof timer running, we don't need it any more, so discard
|
||||
* it. */
|
||||
eof_timer_destroy(handle);
|
||||
|
||||
uv_read_stop((uv_stream_t*) handle);
|
||||
|
||||
handle->read_cb((uv_stream_t*)handle, uv_translate_sys_error(error), &buf);
|
||||
@ -2084,7 +1988,6 @@ void uv__process_pipe_read_req(uv_loop_t* loop,
|
||||
|
||||
handle->flags &= ~(UV_HANDLE_READ_PENDING | UV_HANDLE_CANCELLATION_PENDING);
|
||||
DECREASE_PENDING_REQ_COUNT(handle);
|
||||
eof_timer_stop(handle);
|
||||
|
||||
if (handle->read_req.wait_handle != INVALID_HANDLE_VALUE) {
|
||||
UnregisterWait(handle->read_req.wait_handle);
|
||||
@ -2176,10 +2079,6 @@ void uv__process_pipe_write_req(uv_loop_t* loop, uv_pipe_t* handle,
|
||||
uv__queue_non_overlapped_write(handle);
|
||||
}
|
||||
|
||||
if (handle->stream.conn.write_reqs_pending == 0 &&
|
||||
uv__is_stream_shutting(handle))
|
||||
uv__pipe_shutdown(loop, handle, handle->stream.conn.shutdown_req);
|
||||
|
||||
DECREASE_PENDING_REQ_COUNT(handle);
|
||||
}
|
||||
|
||||
@ -2253,128 +2152,8 @@ void uv__process_pipe_connect_req(uv_loop_t* loop, uv_pipe_t* handle,
|
||||
|
||||
void uv__process_pipe_shutdown_req(uv_loop_t* loop, uv_pipe_t* handle,
|
||||
uv_shutdown_t* req) {
|
||||
int err;
|
||||
|
||||
assert(handle->type == UV_NAMED_PIPE);
|
||||
|
||||
/* Clear the shutdown_req field so we don't go here again. */
|
||||
handle->stream.conn.shutdown_req = NULL;
|
||||
UNREGISTER_HANDLE_REQ(loop, handle);
|
||||
|
||||
if (handle->flags & UV_HANDLE_CLOSING) {
|
||||
/* Already closing. Cancel the shutdown. */
|
||||
err = UV_ECANCELED;
|
||||
} else if (!REQ_SUCCESS(req)) {
|
||||
/* An error occurred in trying to shutdown gracefully. */
|
||||
err = uv_translate_sys_error(GET_REQ_ERROR(req));
|
||||
} else {
|
||||
if (handle->flags & UV_HANDLE_READABLE) {
|
||||
/* Initialize and optionally start the eof timer. Only do this if the pipe
|
||||
* is readable and we haven't seen EOF come in ourselves. */
|
||||
eof_timer_init(handle);
|
||||
|
||||
/* If reading start the timer right now. Otherwise uv__pipe_queue_read will
|
||||
* start it. */
|
||||
if (handle->flags & UV_HANDLE_READ_PENDING) {
|
||||
eof_timer_start(handle);
|
||||
}
|
||||
|
||||
} else {
|
||||
/* This pipe is not readable. We can just close it to let the other end
|
||||
* know that we're done writing. */
|
||||
close_pipe(handle);
|
||||
}
|
||||
err = 0;
|
||||
}
|
||||
|
||||
if (req->cb)
|
||||
req->cb(req, err);
|
||||
|
||||
DECREASE_PENDING_REQ_COUNT(handle);
|
||||
}
|
||||
|
||||
|
||||
static void eof_timer_init(uv_pipe_t* pipe) {
|
||||
int r;
|
||||
|
||||
assert(pipe->pipe.conn.eof_timer == NULL);
|
||||
assert(pipe->flags & UV_HANDLE_CONNECTION);
|
||||
|
||||
pipe->pipe.conn.eof_timer = (uv_timer_t*) uv__malloc(sizeof *pipe->pipe.conn.eof_timer);
|
||||
|
||||
r = uv_timer_init(pipe->loop, pipe->pipe.conn.eof_timer);
|
||||
assert(r == 0); /* timers can't fail */
|
||||
(void) r;
|
||||
pipe->pipe.conn.eof_timer->data = pipe;
|
||||
uv_unref((uv_handle_t*) pipe->pipe.conn.eof_timer);
|
||||
}
|
||||
|
||||
|
||||
static void eof_timer_start(uv_pipe_t* pipe) {
|
||||
assert(pipe->flags & UV_HANDLE_CONNECTION);
|
||||
|
||||
if (pipe->pipe.conn.eof_timer != NULL) {
|
||||
uv_timer_start(pipe->pipe.conn.eof_timer, eof_timer_cb, eof_timeout, 0);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static void eof_timer_stop(uv_pipe_t* pipe) {
|
||||
assert(pipe->flags & UV_HANDLE_CONNECTION);
|
||||
|
||||
if (pipe->pipe.conn.eof_timer != NULL) {
|
||||
uv_timer_stop(pipe->pipe.conn.eof_timer);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static void eof_timer_cb(uv_timer_t* timer) {
|
||||
uv_pipe_t* pipe = (uv_pipe_t*) timer->data;
|
||||
uv_loop_t* loop = timer->loop;
|
||||
|
||||
assert(pipe->type == UV_NAMED_PIPE);
|
||||
|
||||
/* This should always be true, since we start the timer only in
|
||||
* uv__pipe_queue_read after successfully calling ReadFile, or in
|
||||
* uv__process_pipe_shutdown_req if a read is pending, and we always
|
||||
* immediately stop the timer in uv__process_pipe_read_req. */
|
||||
assert(pipe->flags & UV_HANDLE_READ_PENDING);
|
||||
|
||||
/* If there are many packets coming off the iocp then the timer callback may
|
||||
* be called before the read request is coming off the queue. Therefore we
|
||||
* check here if the read request has completed but will be processed later.
|
||||
*/
|
||||
if ((pipe->flags & UV_HANDLE_READ_PENDING) &&
|
||||
HasOverlappedIoCompleted(&pipe->read_req.u.io.overlapped)) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* Force both ends off the pipe. */
|
||||
close_pipe(pipe);
|
||||
|
||||
/* Stop reading, so the pending read that is going to fail will not be
|
||||
* reported to the user. */
|
||||
uv_read_stop((uv_stream_t*) pipe);
|
||||
|
||||
/* Report the eof and update flags. This will get reported even if the user
|
||||
* stopped reading in the meantime. TODO: is that okay? */
|
||||
uv__pipe_read_eof(loop, pipe, uv_null_buf_);
|
||||
}
|
||||
|
||||
|
||||
static void eof_timer_destroy(uv_pipe_t* pipe) {
|
||||
assert(pipe->flags & UV_HANDLE_CONNECTION);
|
||||
|
||||
if (pipe->pipe.conn.eof_timer) {
|
||||
uv_close((uv_handle_t*) pipe->pipe.conn.eof_timer, eof_timer_close_cb);
|
||||
pipe->pipe.conn.eof_timer = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static void eof_timer_close_cb(uv_handle_t* handle) {
|
||||
assert(handle->type == UV_TIMER);
|
||||
uv__free(handle);
|
||||
/* uv_shutdown() returns UV_ENOTSOCK for named pipes; this is unreachable. */
|
||||
abort();
|
||||
}
|
||||
|
||||
|
||||
|
||||
@ -203,6 +203,9 @@ int uv_try_write2(uv_stream_t* stream,
|
||||
int uv_shutdown(uv_shutdown_t* req, uv_stream_t* handle, uv_shutdown_cb cb) {
|
||||
uv_loop_t* loop = handle->loop;
|
||||
|
||||
if (handle->type == UV_NAMED_PIPE)
|
||||
return UV_ENOTSOCK;
|
||||
|
||||
if (!(handle->flags & UV_HANDLE_WRITABLE) ||
|
||||
uv__is_stream_shutting(handle) ||
|
||||
uv__is_closing(handle)) {
|
||||
@ -218,12 +221,8 @@ int uv_shutdown(uv_shutdown_t* req, uv_stream_t* handle, uv_shutdown_cb cb) {
|
||||
handle->reqs_pending++;
|
||||
REGISTER_HANDLE_REQ(loop, handle);
|
||||
|
||||
if (handle->stream.conn.write_reqs_pending == 0) {
|
||||
if (handle->type == UV_NAMED_PIPE)
|
||||
uv__pipe_shutdown(loop, (uv_pipe_t*) handle, req);
|
||||
else
|
||||
uv__insert_pending_req(loop, (uv_req_t*) req);
|
||||
}
|
||||
if (handle->stream.conn.write_reqs_pending == 0)
|
||||
uv__insert_pending_req(loop, (uv_req_t*) req);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -81,6 +81,7 @@ static void after_read(uv_stream_t* handle,
|
||||
ssize_t nread,
|
||||
const uv_buf_t* buf) {
|
||||
int i;
|
||||
int r;
|
||||
write_req_t *wr;
|
||||
uv_shutdown_t* sreq;
|
||||
int shutdown = 0;
|
||||
@ -92,7 +93,13 @@ static void after_read(uv_stream_t* handle,
|
||||
free(buf->base);
|
||||
sreq = malloc(sizeof* sreq);
|
||||
if (uv_is_writable(handle)) {
|
||||
ASSERT_OK(uv_shutdown(sreq, handle, after_shutdown));
|
||||
r = uv_shutdown(sreq, handle, after_shutdown);
|
||||
if (r != 0) {
|
||||
/* Cancel pending writes. */
|
||||
ASSERT_EQ(r, UV_ENOTSOCK);
|
||||
sreq->handle = handle;
|
||||
after_shutdown(sreq, 0);
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
@ -132,7 +139,7 @@ static void after_read(uv_stream_t* handle,
|
||||
}
|
||||
}
|
||||
|
||||
wr = (write_req_t*) malloc(sizeof *wr);
|
||||
wr = malloc(sizeof *wr);
|
||||
ASSERT_NOT_NULL(wr);
|
||||
wr->buf = uv_buf_init(buf->base, nread);
|
||||
|
||||
@ -140,8 +147,14 @@ static void after_read(uv_stream_t* handle,
|
||||
FATAL("uv_write failed");
|
||||
}
|
||||
|
||||
if (shutdown)
|
||||
ASSERT_OK(uv_shutdown(malloc(sizeof* sreq), handle, on_shutdown));
|
||||
if (shutdown) {
|
||||
sreq = malloc(sizeof* sreq);
|
||||
r = uv_shutdown(sreq, handle, on_shutdown);
|
||||
if (r != 0) {
|
||||
ASSERT_EQ(r, UV_ENOTSOCK);
|
||||
free(sreq);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
@ -45,6 +45,13 @@ static uv_shutdown_t shutdown_req;
|
||||
|
||||
static size_t bytes_written;
|
||||
static size_t bytes_read;
|
||||
static int shutdown_notsup;
|
||||
|
||||
static void shutdown_cb(uv_shutdown_t* req, int status) {
|
||||
if (status != UV_ENOTCONN)
|
||||
ASSERT_OK(status);
|
||||
uv_close((uv_handle_t*) req->handle, NULL);
|
||||
}
|
||||
|
||||
static void write_cb(uv_write_t* req, int status) {
|
||||
struct write_info* write_info =
|
||||
@ -52,11 +59,13 @@ static void write_cb(uv_write_t* req, int status) {
|
||||
ASSERT_OK(status);
|
||||
bytes_written += BUFFERS_PER_WRITE * BUFFER_SIZE;
|
||||
free(write_info);
|
||||
}
|
||||
|
||||
static void shutdown_cb(uv_shutdown_t* req, int status) {
|
||||
ASSERT(status == 0 || status == UV_ENOTCONN);
|
||||
uv_close((uv_handle_t*) req->handle, NULL);
|
||||
if (bytes_written >= XFER_SIZE) {
|
||||
ASSERT_EQ(bytes_written, XFER_SIZE);
|
||||
if (shutdown_notsup == 1)
|
||||
shutdown_cb(&shutdown_req, 0);
|
||||
shutdown_notsup = -1;
|
||||
}
|
||||
}
|
||||
|
||||
static void do_write(uv_stream_t* handle) {
|
||||
@ -102,10 +111,17 @@ static void read_cb(uv_stream_t* handle, ssize_t nread, const uv_buf_t* buf) {
|
||||
free(buf->base);
|
||||
|
||||
if (bytes_read >= XFER_SIZE) {
|
||||
ASSERT_EQ(bytes_read, XFER_SIZE);
|
||||
r = uv_read_stop(handle);
|
||||
ASSERT_OK(r);
|
||||
r = uv_shutdown(&shutdown_req, handle, shutdown_cb);
|
||||
ASSERT_OK(r);
|
||||
if (r != 0) {
|
||||
ASSERT_EQ(r, UV_ENOTSOCK);
|
||||
shutdown_req.handle = handle;
|
||||
if (shutdown_notsup == -1)
|
||||
shutdown_cb(&shutdown_req, 0);
|
||||
shutdown_notsup = 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -69,14 +69,20 @@ static void req_cb(uv_udp_send_t* req, int status) {
|
||||
|
||||
|
||||
static void shutdown_cb(uv_shutdown_t* req, int status) {
|
||||
ASSERT_OK(status);
|
||||
ASSERT_PTR_EQ(req, &shutdown_req);
|
||||
shutdown_cb_called++;
|
||||
}
|
||||
|
||||
|
||||
static void write_cb(uv_write_t* req, int status) {
|
||||
int r;
|
||||
ASSERT_PTR_EQ(req, &write_req);
|
||||
uv_shutdown(&shutdown_req, req->handle, shutdown_cb);
|
||||
r = uv_shutdown(&shutdown_req, req->handle, shutdown_cb);
|
||||
if (r != 0) {
|
||||
ASSERT_EQ(r, UV_ENOTSOCK);
|
||||
shutdown_cb(&shutdown_req, 0);
|
||||
}
|
||||
write_cb_called++;
|
||||
}
|
||||
|
||||
@ -92,9 +98,14 @@ static void connect_and_write(uv_connect_t* req, int status) {
|
||||
|
||||
|
||||
static void connect_and_shutdown(uv_connect_t* req, int status) {
|
||||
int r;
|
||||
ASSERT_PTR_EQ(req, &connect_req);
|
||||
ASSERT_OK(status);
|
||||
uv_shutdown(&shutdown_req, req->handle, shutdown_cb);
|
||||
r = uv_shutdown(&shutdown_req, req->handle, shutdown_cb);
|
||||
if (r != 0) {
|
||||
ASSERT_EQ(r, UV_ENOTSOCK);
|
||||
shutdown_cb(&shutdown_req, 0);
|
||||
}
|
||||
connect_cb_called++;
|
||||
}
|
||||
|
||||
|
||||
@ -38,7 +38,8 @@ static int close_cb_called = 0;
|
||||
|
||||
static void shutdown_cb(uv_shutdown_t* req, int status) {
|
||||
ASSERT_PTR_EQ(req, &shutdown_req);
|
||||
ASSERT(status == 0 || status == UV_ECANCELED);
|
||||
if (status != UV_ECANCELED)
|
||||
ASSERT_OK(status);
|
||||
shutdown_cb_called++;
|
||||
}
|
||||
|
||||
@ -55,7 +56,10 @@ static void connect_cb(uv_connect_t* req, int status) {
|
||||
ASSERT_OK(status);
|
||||
|
||||
r = uv_shutdown(&shutdown_req, req->handle, shutdown_cb);
|
||||
ASSERT_OK(r);
|
||||
if (r != 0) {
|
||||
ASSERT_EQ(r, UV_ENOTSOCK);
|
||||
shutdown_cb(&shutdown_req, 0);
|
||||
}
|
||||
ASSERT_OK(uv_is_closing((uv_handle_t*) req->handle));
|
||||
uv_close((uv_handle_t*) req->handle, close_cb);
|
||||
ASSERT_EQ(1, uv_is_closing((uv_handle_t*) req->handle));
|
||||
|
||||
Loading…
Reference in New Issue
Block a user