Merge remote-tracking branch 'origin/master' into jn/win-write-queue

This commit is contained in:
Jameson Nash 2026-03-16 13:22:37 -04:00
commit 9cfec349aa
21 changed files with 74 additions and 981 deletions

View File

@ -21,13 +21,15 @@ jobs:
fail-fast: false
matrix:
config:
- {toolchain: Visual Studio 16 2019, arch: Win32, server: 2019}
- {toolchain: Visual Studio 16 2019, arch: x64, server: 2019}
- {toolchain: Visual Studio 17 2022, arch: Win32, server: 2022}
- {toolchain: Visual Studio 17 2022, arch: x64, server: 2022}
- {toolchain: Visual Studio 17 2022, arch: x64, server: 2022, config: ASAN}
- {toolchain: Visual Studio 17 2022, arch: x64, server: 2022, config: UBSAN}
- {toolchain: Visual Studio 17 2022, arch: arm64, server: 2022}
- {toolchain: Visual Studio 17 2022, arch: x64, server: 2025}
- {toolchain: Visual Studio 17 2022, arch: x64, server: 2025, config: ASAN}
- {toolchain: Visual Studio 17 2022, arch: x64, server: 2025, config: UBSAN}
- {toolchain: Visual Studio 17 2022, arch: arm64, server: 2025}
steps:
- uses: actions/checkout@v4
- name: Build

View File

@ -172,7 +172,6 @@ if (MSVC)
endif()
set(uv_sources
src/fs-poll.c
src/idna.c
src/inet.c
src/loop-watcher.c
@ -555,7 +554,6 @@ if(LIBUV_BUILD_TESTS)
test/test-fs-event.c
test/test-fs-fd-hash.c
test/test-fs-open-flags.c
test/test-fs-poll.c
test/test-fs-readdir.c
test/test-fs.c
test/test-get-currentexe.c

View File

@ -29,8 +29,7 @@ CLEANFILES =
lib_LTLIBRARIES = libuv.la
libuv_la_CFLAGS = $(AM_CFLAGS)
libuv_la_LDFLAGS = $(AM_LDFLAGS) -no-undefined -version-info 2:0:0
libuv_la_SOURCES = src/fs-poll.c \
src/heap-inl.h \
libuv_la_SOURCES = src/heap-inl.h \
src/idna.c \
src/idna.h \
src/inet.c \
@ -177,7 +176,6 @@ test_run_tests_SOURCES = test/blackhole-server.c \
test/test-fail-always.c \
test/test-fs-copyfile.c \
test/test-fs-event.c \
test/test-fs-poll.c \
test/test-fs.c \
test/test-fs-readdir.c \
test/test-fs-fd-hash.c \

View File

@ -2,76 +2,10 @@
.. _fs_poll:
:c:type:`uv_fs_poll_t` --- FS Poll handle
=========================================
==========================================
FS Poll handles allow the user to monitor a given path for changes. Unlike
:c:type:`uv_fs_event_t`, fs poll handles use `stat` to detect when a file has
changed so they can work on file systems where fs event handles can't.
.. deprecated::
Data types
----------
.. c:type:: uv_fs_poll_t
FS Poll handle type.
.. c:type:: void (*uv_fs_poll_cb)(uv_fs_poll_t* handle, int status, const uv_stat_t* prev, const uv_stat_t* curr)
Callback passed to :c:func:`uv_fs_poll_start` which will be called repeatedly
after the handle is started, when any change happens to the monitored path.
The callback is invoked with `status < 0` if `path` does not exist
or is inaccessible. The watcher is *not* stopped but your callback is
not called again until something changes (e.g. when the file is created
or the error reason changes).
When `status == 0`, the callback receives pointers to the old and new
:c:type:`uv_stat_t` structs. They are valid for the duration of the
callback only.
Public members
^^^^^^^^^^^^^^
N/A
.. seealso:: The :c:type:`uv_handle_t` members also apply.
API
---
.. c:function:: int uv_fs_poll_init(uv_loop_t* loop, uv_fs_poll_t* handle)
Initialize the handle.
.. c:function:: int uv_fs_poll_start(uv_fs_poll_t* handle, uv_fs_poll_cb poll_cb, const char* path, unsigned int interval)
Check the file at `path` for changes every `interval` milliseconds.
.. note::
For maximum portability, use multi-second intervals. Sub-second intervals will not detect
all changes on many file systems.
.. c:function:: int uv_fs_poll_stop(uv_fs_poll_t* handle)
Stop the handle, the callback will no longer be called.
.. c:function:: int uv_fs_poll_getpath(uv_fs_poll_t* handle, char* buffer, size_t* size)
Get the path being monitored by the handle. The buffer must be preallocated
by the user. Returns 0 on success or an error code < 0 in case of failure.
On success, `buffer` will contain the path and `size` its length. If the buffer
is not big enough `UV_ENOBUFS` will be returned and `size` will be set to
the required size.
.. versionchanged:: 1.3.0 the returned length no longer includes the terminating null byte,
and the buffer is not null terminated.
.. versionchanged:: 1.9.0 the returned length includes the terminating null
byte on `UV_ENOBUFS`, and the buffer is null terminated
on success.
.. seealso:: The :c:type:`uv_handle_t` API functions also apply.
``uv_fs_poll_t`` has been removed. Use :c:func:`uv_fs_stat` together with a
manually scheduled :c:type:`uv_timer_t` instead. See `issue #4543
<https://github.com/libuv/libuv/issues/4543>`_ for details.

View File

@ -165,7 +165,6 @@ struct uv__queue {
XX(ASYNC, async) \
XX(CHECK, check) \
XX(FS_EVENT, fs_event) \
XX(FS_POLL, fs_poll) \
XX(HANDLE, handle) \
XX(IDLE, idle) \
XX(NAMED_PIPE, pipe) \
@ -234,7 +233,6 @@ typedef struct uv_idle_s uv_idle_t;
typedef struct uv_async_s uv_async_t;
typedef struct uv_process_s uv_process_t;
typedef struct uv_fs_event_s uv_fs_event_t;
typedef struct uv_fs_poll_s uv_fs_poll_t;
typedef struct uv_signal_s uv_signal_t;
/* Request types. */
@ -397,10 +395,6 @@ typedef void (*uv_fs_event_cb)(uv_fs_event_t* handle,
int events,
int status);
typedef void (*uv_fs_poll_cb)(uv_fs_poll_t* handle,
int status,
const uv_stat_t* prev,
const uv_stat_t* curr);
typedef void (*uv_signal_cb)(uv_signal_t* handle, int signum);
@ -1774,25 +1768,6 @@ struct uv_fs_event_s {
};
/*
* uv_fs_stat() based polling file watcher.
*/
struct uv_fs_poll_s {
UV_HANDLE_FIELDS
/* Private, don't touch. */
void* poll_ctx;
};
UV_EXTERN int uv_fs_poll_init(uv_loop_t* loop, uv_fs_poll_t* handle);
UV_EXTERN int uv_fs_poll_start(uv_fs_poll_t* handle,
uv_fs_poll_cb poll_cb,
const char* path,
unsigned int interval);
UV_EXTERN int uv_fs_poll_stop(uv_fs_poll_t* handle);
UV_EXTERN int uv_fs_poll_getpath(uv_fs_poll_t* handle,
char* buffer,
size_t* size);
struct uv_signal_s {
UV_HANDLE_FIELDS

View File

@ -350,7 +350,6 @@ typedef struct {
uv_pipe_accept_t* pending_accepts;
#define uv_pipe_connection_fields \
uv_timer_t* eof_timer; \
DWORD ipc_remote_pid; \
struct { \
uint32_t payload_remaining; \

View File

@ -1,290 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "uv-common.h"
#ifdef _WIN32
#include "win/internal.h"
#include "win/handle-inl.h"
#define uv__make_close_pending(h) uv__want_endgame((h)->loop, (h))
#else
#include "unix/internal.h"
#endif
#include <assert.h>
#include <stdlib.h>
#include <string.h>
/* Per-start() polling state. A fresh context is allocated on every
 * uv_fs_poll_start(); contexts from earlier start()/stop() cycles are kept
 * on the `previous` chain until their timer's close callback frees them. */
struct poll_ctx {
uv_fs_poll_t* parent_handle;
int busy_polling; /* 0 = first stat pending; 1 = statbuf valid; <0 = last error reported */
unsigned int interval; /* poll period in ms; never 0 (start() clamps to 1) */
uint64_t start_time;
uv_loop_t* loop;
uv_fs_poll_cb poll_cb;
uv_timer_t timer_handle;
uv_fs_t fs_req; /* TODO(bnoordhuis) mark fs_req internal */
uv_stat_t statbuf;
struct poll_ctx* previous; /* context from previous start()..stop() period */
char path[1]; /* variable length */
};
static int statbuf_eq(const uv_stat_t* a, const uv_stat_t* b);
static void poll_cb(uv_fs_t* req);
static void timer_cb(uv_timer_t* timer);
static void timer_close_cb(uv_handle_t* handle);
static uv_stat_t zero_statbuf;
/* Initialize `handle` on `loop`. No resources are acquired here; the
 * polling context is created lazily by uv_fs_poll_start(). Always returns 0. */
int uv_fs_poll_init(uv_loop_t* loop, uv_fs_poll_t* handle) {
uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_POLL);
handle->poll_ctx = NULL;
return 0;
}
/* Begin stat()-polling `path` every `interval` ms, invoking `cb` on changes.
 * A no-op (returns 0) if the handle is already active. Returns UV_ENOMEM on
 * allocation failure or a negative error from timer/stat setup. */
int uv_fs_poll_start(uv_fs_poll_t* handle,
uv_fs_poll_cb cb,
const char* path,
unsigned int interval) {
struct poll_ctx* ctx;
uv_loop_t* loop;
size_t len;
int err;
if (uv_is_active((uv_handle_t*)handle))
return 0;
loop = handle->loop;
len = strlen(path);
/* ctx->path is declared char[1], so sizeof(*ctx) already includes room
 * for the NUL terminator; only `len` extra bytes are needed. */
ctx = uv__calloc(1, sizeof(*ctx) + len);
if (ctx == NULL)
return UV_ENOMEM;
ctx->loop = loop;
ctx->poll_cb = cb;
ctx->interval = interval ? interval : 1;
ctx->start_time = uv_now(loop);
ctx->parent_handle = handle;
memcpy(ctx->path, path, len + 1);
err = uv_timer_init(loop, &ctx->timer_handle);
if (err < 0)
goto error;
/* The timer is an implementation detail; keep it out of the user-visible
 * handle set and don't let it keep the loop alive. */
ctx->timer_handle.flags |= UV_HANDLE_INTERNAL;
uv__handle_unref(&ctx->timer_handle);
/* Kick off the first stat immediately; poll_cb reschedules via the timer. */
err = uv_fs_stat(loop, &ctx->fs_req, ctx->path, poll_cb);
if (err < 0)
goto error;
/* Chain any context left over from a previous start()/stop() cycle so
 * timer_close_cb can still find and free it. */
if (handle->poll_ctx != NULL)
ctx->previous = handle->poll_ctx;
handle->poll_ctx = ctx;
uv__handle_start(handle);
return 0;
error:
uv__free(ctx);
return err;
}
/* Stop polling. A no-op (returns 0) if the handle is not active. The
 * context is not freed here; it is reclaimed by timer_close_cb, either
 * now (timer active) or after the in-flight stat completes (poll_cb). */
int uv_fs_poll_stop(uv_fs_poll_t* handle) {
struct poll_ctx* ctx;
if (!uv_is_active((uv_handle_t*)handle))
return 0;
ctx = handle->poll_ctx;
assert(ctx != NULL);
assert(ctx->parent_handle == handle);
/* Close the timer if it's active. If it's inactive, there's a stat request
 * in progress and poll_cb will take care of the cleanup.
 */
if (uv_is_active((uv_handle_t*)&ctx->timer_handle))
uv_close((uv_handle_t*)&ctx->timer_handle, timer_close_cb);
uv__handle_stop(handle);
return 0;
}
/* Copy the monitored path into `buffer`. On success returns 0, stores the
 * length (excluding NUL) in *size, and NUL-terminates the buffer. Returns
 * UV_EINVAL for bad arguments or an inactive handle; UV_ENOBUFS (with *size
 * set to the required size, including NUL) when the buffer is too small. */
int uv_fs_poll_getpath(uv_fs_poll_t* handle, char* buffer, size_t* size) {
struct poll_ctx* ctx;
size_t required_len;
if (buffer == NULL || size == NULL || *size == 0)
return UV_EINVAL;
if (!uv_is_active((uv_handle_t*)handle)) {
*size = 0;
return UV_EINVAL;
}
ctx = handle->poll_ctx;
assert(ctx != NULL);
required_len = strlen(ctx->path);
if (required_len >= *size) {
*size = required_len + 1;
return UV_ENOBUFS;
}
memcpy(buffer, ctx->path, required_len);
*size = required_len;
buffer[required_len] = '\0';
return 0;
}
/* Internal close hook called from uv_close(). If no context remains, the
 * handle can be finalized immediately; otherwise timer_close_cb signals
 * close-pending once the last context is torn down. */
void uv__fs_poll_close(uv_fs_poll_t* handle) {
uv_fs_poll_stop(handle);
if (handle->poll_ctx == NULL)
uv__make_close_pending((uv_handle_t*)handle);
}
/* Poll-interval timer fired: issue the next stat request. A stat failure
 * at submission time is fatal (abort) -- errors are normally delivered
 * through poll_cb instead. */
static void timer_cb(uv_timer_t* timer) {
struct poll_ctx* ctx;
ctx = container_of(timer, struct poll_ctx, timer_handle);
assert(ctx->parent_handle != NULL);
assert(ctx->parent_handle->poll_ctx == ctx);
ctx->start_time = uv_now(ctx->loop);
if (uv_fs_stat(ctx->loop, &ctx->fs_req, ctx->path, poll_cb))
abort();
}
/* Completion callback for the stat request. Reports a change (or a new
 * error) to the user, then reschedules the timer; if the handle was stopped
 * or closed while the stat was in flight, tears the context down instead. */
static void poll_cb(uv_fs_t* req) {
uv_stat_t* statbuf;
struct poll_ctx* ctx;
uint64_t interval;
uv_fs_poll_t* handle;
ctx = container_of(req, struct poll_ctx, fs_req);
handle = ctx->parent_handle;
if (!uv_is_active((uv_handle_t*)handle) || uv__is_closing(handle))
goto out;
if (req->result != 0) {
/* Only report an error once until it changes (busy_polling caches the
 * last reported error code). */
if (ctx->busy_polling != req->result) {
ctx->poll_cb(ctx->parent_handle,
req->result,
&ctx->statbuf,
&zero_statbuf);
ctx->busy_polling = req->result;
}
goto out;
}
statbuf = &req->statbuf;
/* Skip the user callback on the very first successful stat
 * (busy_polling == 0): there is no previous statbuf to compare against. */
if (ctx->busy_polling != 0)
if (ctx->busy_polling < 0 || !statbuf_eq(&ctx->statbuf, statbuf))
ctx->poll_cb(ctx->parent_handle, 0, &ctx->statbuf, statbuf);
ctx->statbuf = *statbuf;
ctx->busy_polling = 1;
out:
uv_fs_req_cleanup(req);
if (!uv_is_active((uv_handle_t*)handle) || uv__is_closing(handle)) {
uv_close((uv_handle_t*)&ctx->timer_handle, timer_close_cb);
return;
}
/* Reschedule timer, subtract the delay from doing the stat(). */
interval = ctx->interval;
interval -= (uv_now(ctx->loop) - ctx->start_time) % interval;
if (uv_timer_start(&ctx->timer_handle, timer_cb, interval, 0))
abort();
}
/* Final teardown for one polling context: unlink it from the handle's
 * `previous` chain and free it. If this was the current context and the
 * handle is closing with no contexts left, finish the handle close. */
static void timer_close_cb(uv_handle_t* timer) {
struct poll_ctx* ctx;
struct poll_ctx* it;
struct poll_ctx* last;
uv_fs_poll_t* handle;
ctx = container_of(timer, struct poll_ctx, timer_handle);
handle = ctx->parent_handle;
if (ctx == handle->poll_ctx) {
handle->poll_ctx = ctx->previous;
if (handle->poll_ctx == NULL && uv__is_closing(handle))
uv__make_close_pending((uv_handle_t*)handle);
} else {
/* Not the head: walk the chain to find the predecessor and splice
 * this context out. */
for (last = handle->poll_ctx, it = last->previous;
it != ctx;
last = it, it = it->previous) {
assert(last->previous != NULL);
}
last->previous = ctx->previous;
}
uv__free(ctx);
}
/* Field-wise equality of two stat results, covering the fields whose change
 * indicates a modification (timestamps, size, mode, ownership, identity).
 * Returns non-zero when equal. Nanosecond fields are compared first. */
static int statbuf_eq(const uv_stat_t* a, const uv_stat_t* b) {
return a->st_ctim.tv_nsec == b->st_ctim.tv_nsec
&& a->st_mtim.tv_nsec == b->st_mtim.tv_nsec
&& a->st_birthtim.tv_nsec == b->st_birthtim.tv_nsec
&& a->st_ctim.tv_sec == b->st_ctim.tv_sec
&& a->st_mtim.tv_sec == b->st_mtim.tv_sec
&& a->st_birthtim.tv_sec == b->st_birthtim.tv_sec
&& a->st_size == b->st_size
&& a->st_mode == b->st_mode
&& a->st_uid == b->st_uid
&& a->st_gid == b->st_gid
&& a->st_ino == b->st_ino
&& a->st_dev == b->st_dev
&& a->st_flags == b->st_flags
&& a->st_gen == b->st_gen;
}
#if defined(_WIN32)
#include "win/internal.h"
#include "win/handle-inl.h"
/* Windows-only endgame: mark the handle fully closed once the loop
 * processes its pending-close. Must only run for a closing, not-yet-closed
 * handle. */
void uv__fs_poll_endgame(uv_loop_t* loop, uv_fs_poll_t* handle) {
assert(handle->flags & UV_HANDLE_CLOSING);
assert(!(handle->flags & UV_HANDLE_CLOSED));
uv__handle_close(handle);
}
#endif /* _WIN32 */

View File

@ -211,12 +211,6 @@ void uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
uv__poll_close((uv_poll_t*)handle);
break;
case UV_FS_POLL:
uv__fs_poll_close((uv_fs_poll_t*)handle);
/* Poll handles use file system requests, and one of them may still be
* running. The poll code will call uv__make_close_pending() for us. */
return;
case UV_SIGNAL:
uv__signal_close((uv_signal_t*) handle);
break;
@ -314,7 +308,6 @@ static void uv__finish_close(uv_handle_t* handle) {
case UV_TIMER:
case UV_PROCESS:
case UV_FS_EVENT:
case UV_FS_POLL:
case UV_POLL:
break;

View File

@ -198,8 +198,6 @@ int uv__udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloccb,
int uv__udp_recv_stop(uv_udp_t* handle);
void uv__fs_poll_close(uv_fs_poll_t* handle);
int uv__getaddrinfo_translate_error(int sys_err); /* EAI_* error. */
enum uv__work_kind {

View File

@ -149,10 +149,6 @@ INLINE static void uv__process_endgames(uv_loop_t* loop) {
uv__fs_event_endgame(loop, (uv_fs_event_t*) handle);
break;
case UV_FS_POLL:
uv__fs_poll_endgame(loop, (uv_fs_poll_t*) handle);
break;
default:
assert(0);
break;

View File

@ -132,11 +132,6 @@ void uv_close(uv_handle_t* handle, uv_close_cb cb) {
uv__fs_event_close(loop, (uv_fs_event_t*) handle);
return;
case UV_FS_POLL:
uv__fs_poll_close((uv_fs_poll_t*) handle);
uv__handle_closing(handle);
return;
default:
/* Not supported */
abort();

View File

@ -118,8 +118,6 @@ int uv__pipe_write(uv_loop_t* loop,
size_t nbufs,
uv_stream_t* send_handle,
uv_write_cb cb);
void uv__pipe_shutdown(uv_loop_t* loop, uv_pipe_t* handle, uv_shutdown_t* req);
void uv__process_pipe_read_req(uv_loop_t* loop, uv_pipe_t* handle,
uv_req_t* req);
void uv__process_pipe_write_req(uv_loop_t* loop, uv_pipe_t* handle,
@ -225,12 +223,6 @@ void uv__fs_event_close(uv_loop_t* loop, uv_fs_event_t* handle);
void uv__fs_event_endgame(uv_loop_t* loop, uv_fs_event_t* handle);
/*
* Stat poller.
*/
void uv__fs_poll_endgame(uv_loop_t* loop, uv_fs_poll_t* handle);
/*
* Utilities.
*/

View File

@ -40,10 +40,6 @@ static char uv_zero_[] = "";
/* Null uv_buf_t */
static const uv_buf_t uv_null_buf_ = { 0, NULL };
/* The timeout that the pipe will wait for the remote end to write data when
* the local end wants to shut it down. */
static const int64_t eof_timeout = 50; /* ms */
static const int default_pending_pipe_instances = 4;
/* Pipe prefix */
@ -89,13 +85,6 @@ typedef struct {
} uv__coalesced_write_t;
static void eof_timer_init(uv_pipe_t* pipe);
static void eof_timer_start(uv_pipe_t* pipe);
static void eof_timer_stop(uv_pipe_t* pipe);
static void eof_timer_cb(uv_timer_t* timer);
static void eof_timer_destroy(uv_pipe_t* pipe);
static void eof_timer_close_cb(uv_handle_t* handle);
/* Does the file path contain embedded nul bytes? */
static int includes_nul(const char *s, size_t n) {
@ -131,7 +120,6 @@ static void uv__pipe_connection_init(uv_pipe_t* handle) {
assert(!(handle->flags & UV_HANDLE_PIPESERVER));
uv__connection_init((uv_stream_t*) handle);
handle->read_req.data = handle;
handle->pipe.conn.eof_timer = NULL;
}
@ -531,76 +519,6 @@ static int pipe_alloc_accept(uv_loop_t* loop, uv_pipe_t* handle,
}
/* Thread-pool worker: block on FlushFileBuffers() for the pipe, then post
 * the shutdown request back to the loop's completion port. `parameter` is
 * the uv_shutdown_t* queued by uv__pipe_shutdown(). */
static DWORD WINAPI pipe_shutdown_thread_proc(void* parameter) {
uv_loop_t* loop;
uv_pipe_t* handle;
uv_shutdown_t* req;
req = (uv_shutdown_t*) parameter;
assert(req);
handle = (uv_pipe_t*) req->handle;
assert(handle);
loop = handle->loop;
assert(loop);
/* May block until the reader has consumed all written data. */
FlushFileBuffers(handle->handle);
/* Post completed */
POST_COMPLETION_FOR_REQ(loop, req);
return 0;
}
/* Begin graceful shutdown of a connected pipe once its write queue has
 * drained. Completion (success or error) is delivered asynchronously as a
 * pending request, handled by uv__process_pipe_shutdown_req(). */
void uv__pipe_shutdown(uv_loop_t* loop, uv_pipe_t* handle, uv_shutdown_t *req) {
DWORD result;
NTSTATUS nt_status;
IO_STATUS_BLOCK io_status;
FILE_PIPE_LOCAL_INFORMATION pipe_info;
assert(handle->flags & UV_HANDLE_CONNECTION);
assert(req != NULL);
assert(uv__queue_empty(&handle->stream.conn.write_queue));
SET_REQ_SUCCESS(req);
if (handle->flags & UV_HANDLE_CLOSING) {
/* Handle already closing: just complete the request; the close path
 * will report cancellation. */
uv__insert_pending_req(loop, (uv_req_t*) req);
return;
}
/* Try to avoid flushing the pipe buffer in the thread pool. */
nt_status = pNtQueryInformationFile(handle->handle,
&io_status,
&pipe_info,
sizeof pipe_info,
FilePipeLocalInformation);
if (nt_status != STATUS_SUCCESS) {
SET_REQ_ERROR(req, pRtlNtStatusToDosError(nt_status));
handle->flags |= UV_HANDLE_WRITABLE; /* Questionable. */
uv__insert_pending_req(loop, (uv_req_t*) req);
return;
}
if (pipe_info.OutboundQuota == pipe_info.WriteQuotaAvailable) {
/* Short-circuit, no need to call FlushFileBuffers:
* all writes have been read. */
uv__insert_pending_req(loop, (uv_req_t*) req);
return;
}
/* Run FlushFileBuffers in the thread pool. */
result = QueueUserWorkItem(pipe_shutdown_thread_proc,
req,
WT_EXECUTELONGFUNCTION);
if (!result) {
SET_REQ_ERROR(req, GetLastError());
handle->flags |= UV_HANDLE_WRITABLE; /* Questionable. */
uv__insert_pending_req(loop, (uv_req_t*) req);
return;
}
}
void uv__pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle) {
uv__ipc_xfer_queue_item_t* xfer_queue_item;
@ -1050,10 +968,6 @@ void uv__pipe_close(uv_loop_t* loop, uv_pipe_t* handle) {
handle->handle = INVALID_HANDLE_VALUE;
}
if (handle->flags & UV_HANDLE_CONNECTION) {
eof_timer_destroy(handle);
}
if ((handle->flags & UV_HANDLE_CONNECTION)
&& handle->handle != INVALID_HANDLE_VALUE) {
/* This will eventually destroy the write queue for us too. */
@ -1388,8 +1302,6 @@ static void uv__pipe_queue_read(uv_loop_t* loop, uv_pipe_t* handle) {
}
}
/* Start the eof timer if there is one */
eof_timer_start(handle);
handle->flags |= UV_HANDLE_READ_PENDING;
handle->reqs_pending++;
return;
@ -1826,10 +1738,6 @@ int uv__pipe_write(uv_loop_t* loop,
static void uv__pipe_read_eof(uv_loop_t* loop, uv_pipe_t* handle,
uv_buf_t buf) {
/* If there is an eof timer running, we don't need it any more, so discard
* it. */
eof_timer_destroy(handle);
uv_read_stop((uv_stream_t*) handle);
handle->read_cb((uv_stream_t*) handle, UV_EOF, &buf);
@ -1838,10 +1746,6 @@ static void uv__pipe_read_eof(uv_loop_t* loop, uv_pipe_t* handle,
static void uv__pipe_read_error(uv_loop_t* loop, uv_pipe_t* handle, int error,
uv_buf_t buf) {
/* If there is an eof timer running, we don't need it any more, so discard
* it. */
eof_timer_destroy(handle);
uv_read_stop((uv_stream_t*) handle);
handle->read_cb((uv_stream_t*)handle, uv_translate_sys_error(error), &buf);
@ -2084,7 +1988,6 @@ void uv__process_pipe_read_req(uv_loop_t* loop,
handle->flags &= ~(UV_HANDLE_READ_PENDING | UV_HANDLE_CANCELLATION_PENDING);
DECREASE_PENDING_REQ_COUNT(handle);
eof_timer_stop(handle);
if (handle->read_req.wait_handle != INVALID_HANDLE_VALUE) {
UnregisterWait(handle->read_req.wait_handle);
@ -2177,10 +2080,6 @@ void uv__process_pipe_write_req(uv_loop_t* loop, uv_pipe_t* handle,
uv__queue_non_overlapped_write(handle);
}
if (uv__queue_empty(&handle->stream.conn.write_queue) &&
uv__is_stream_shutting(handle))
uv__pipe_shutdown(loop, handle, handle->stream.conn.shutdown_req);
DECREASE_PENDING_REQ_COUNT(handle);
}
@ -2254,128 +2153,8 @@ void uv__process_pipe_connect_req(uv_loop_t* loop, uv_pipe_t* handle,
/* Complete a shutdown request posted by uv__pipe_shutdown(): translate its
 * outcome into an error code, arm the EOF timer (or close the pipe outright
 * if it isn't readable), and invoke the user's shutdown callback. */
void uv__process_pipe_shutdown_req(uv_loop_t* loop, uv_pipe_t* handle,
uv_shutdown_t* req) {
int err;
assert(handle->type == UV_NAMED_PIPE);
/* Clear the shutdown_req field so we don't go here again. */
handle->stream.conn.shutdown_req = NULL;
UNREGISTER_HANDLE_REQ(loop, handle);
if (handle->flags & UV_HANDLE_CLOSING) {
/* Already closing. Cancel the shutdown. */
err = UV_ECANCELED;
} else if (!REQ_SUCCESS(req)) {
/* An error occurred in trying to shutdown gracefully. */
err = uv_translate_sys_error(GET_REQ_ERROR(req));
} else {
if (handle->flags & UV_HANDLE_READABLE) {
/* Initialize and optionally start the eof timer. Only do this if the pipe
* is readable and we haven't seen EOF come in ourselves. */
eof_timer_init(handle);
/* If reading start the timer right now. Otherwise uv__pipe_queue_read will
* start it. */
if (handle->flags & UV_HANDLE_READ_PENDING) {
eof_timer_start(handle);
}
} else {
/* This pipe is not readable. We can just close it to let the other end
* know that we're done writing. */
close_pipe(handle);
}
err = 0;
}
if (req->cb)
req->cb(req, err);
DECREASE_PENDING_REQ_COUNT(handle);
}
/* Allocate and initialize the per-connection EOF timer (unref'd so it does
 * not keep the loop alive). Must not be called twice without an intervening
 * eof_timer_destroy(). NOTE(review): the uv__malloc result is not
 * NULL-checked before use. */
static void eof_timer_init(uv_pipe_t* pipe) {
int r;
assert(pipe->pipe.conn.eof_timer == NULL);
assert(pipe->flags & UV_HANDLE_CONNECTION);
pipe->pipe.conn.eof_timer = (uv_timer_t*) uv__malloc(sizeof *pipe->pipe.conn.eof_timer);
r = uv_timer_init(pipe->loop, pipe->pipe.conn.eof_timer);
assert(r == 0); /* timers can't fail */
(void) r;
pipe->pipe.conn.eof_timer->data = pipe;
uv_unref((uv_handle_t*) pipe->pipe.conn.eof_timer);
}
/* (Re)arm the EOF timer for one `eof_timeout` period; no-op when the timer
 * was never initialized (pipe not in a shutdown-while-readable state). */
static void eof_timer_start(uv_pipe_t* pipe) {
assert(pipe->flags & UV_HANDLE_CONNECTION);
if (pipe->pipe.conn.eof_timer != NULL) {
uv_timer_start(pipe->pipe.conn.eof_timer, eof_timer_cb, eof_timeout, 0);
}
}
/* Disarm the EOF timer without freeing it; no-op when it doesn't exist. */
static void eof_timer_stop(uv_pipe_t* pipe) {
assert(pipe->flags & UV_HANDLE_CONNECTION);
if (pipe->pipe.conn.eof_timer != NULL) {
uv_timer_stop(pipe->pipe.conn.eof_timer);
}
}
/* EOF timer expired: the remote end never delivered data after our
 * shutdown, so force the pipe closed and report EOF to the reader --
 * unless the pending read has in fact already completed on the IOCP. */
static void eof_timer_cb(uv_timer_t* timer) {
uv_pipe_t* pipe = (uv_pipe_t*) timer->data;
uv_loop_t* loop = timer->loop;
assert(pipe->type == UV_NAMED_PIPE);
/* This should always be true, since we start the timer only in
* uv__pipe_queue_read after successfully calling ReadFile, or in
* uv__process_pipe_shutdown_req if a read is pending, and we always
* immediately stop the timer in uv__process_pipe_read_req. */
assert(pipe->flags & UV_HANDLE_READ_PENDING);
/* If there are many packets coming off the iocp then the timer callback may
* be called before the read request is coming off the queue. Therefore we
* check here if the read request has completed but will be processed later.
*/
if ((pipe->flags & UV_HANDLE_READ_PENDING) &&
HasOverlappedIoCompleted(&pipe->read_req.u.io.overlapped)) {
return;
}
/* Force both ends off the pipe. */
close_pipe(pipe);
/* Stop reading, so the pending read that is going to fail will not be
* reported to the user. */
uv_read_stop((uv_stream_t*) pipe);
/* Report the eof and update flags. This will get reported even if the user
* stopped reading in the meantime. TODO: is that okay? */
uv__pipe_read_eof(loop, pipe, uv_null_buf_);
}
/* Close and release the EOF timer, if any; the timer's close callback frees
 * the allocation. Clears the pointer so later start/stop calls are no-ops. */
static void eof_timer_destroy(uv_pipe_t* pipe) {
assert(pipe->flags & UV_HANDLE_CONNECTION);
if (pipe->pipe.conn.eof_timer) {
uv_close((uv_handle_t*) pipe->pipe.conn.eof_timer, eof_timer_close_cb);
pipe->pipe.conn.eof_timer = NULL;
}
}
static void eof_timer_close_cb(uv_handle_t* handle) {
assert(handle->type == UV_TIMER);
uv__free(handle);
/* uv_shutdown() returns UV_ENOTSOCK for named pipes; this is unreachable. */
abort();
}

View File

@ -203,6 +203,9 @@ int uv_try_write2(uv_stream_t* stream,
int uv_shutdown(uv_shutdown_t* req, uv_stream_t* handle, uv_shutdown_cb cb) {
uv_loop_t* loop = handle->loop;
if (handle->type == UV_NAMED_PIPE)
return UV_ENOTSOCK;
if (!(handle->flags & UV_HANDLE_WRITABLE) ||
uv__is_stream_shutting(handle) ||
uv__is_closing(handle)) {
@ -219,12 +222,8 @@ int uv_shutdown(uv_shutdown_t* req, uv_stream_t* handle, uv_shutdown_cb cb) {
REGISTER_HANDLE_REQ(loop, handle);
if (!(handle->flags & UV_HANDLE_IN_WRITE_CB) &&
uv__queue_empty(&handle->stream.conn.write_queue)) {
if (handle->type == UV_NAMED_PIPE)
uv__pipe_shutdown(loop, (uv_pipe_t*) handle, req);
else
uv__insert_pending_req(loop, (uv_req_t*) req);
}
uv__queue_empty(&handle->stream.conn.write_queue))
uv__insert_pending_req(loop, (uv_req_t*) req);
return 0;
}

View File

@ -36,7 +36,6 @@ BENCHMARK_IMPL(sizes) {
fprintf(stderr, "uv_idle_t: %u bytes\n", (unsigned int) sizeof(uv_idle_t));
fprintf(stderr, "uv_async_t: %u bytes\n", (unsigned int) sizeof(uv_async_t));
fprintf(stderr, "uv_timer_t: %u bytes\n", (unsigned int) sizeof(uv_timer_t));
fprintf(stderr, "uv_fs_poll_t: %u bytes\n", (unsigned int) sizeof(uv_fs_poll_t));
fprintf(stderr, "uv_fs_event_t: %u bytes\n", (unsigned int) sizeof(uv_fs_event_t));
fprintf(stderr, "uv_process_t: %u bytes\n", (unsigned int) sizeof(uv_process_t));
fprintf(stderr, "uv_poll_t: %u bytes\n", (unsigned int) sizeof(uv_poll_t));

View File

@ -81,6 +81,7 @@ static void after_read(uv_stream_t* handle,
ssize_t nread,
const uv_buf_t* buf) {
int i;
int r;
write_req_t *wr;
uv_shutdown_t* sreq;
int shutdown = 0;
@ -92,7 +93,13 @@ static void after_read(uv_stream_t* handle,
free(buf->base);
sreq = malloc(sizeof* sreq);
if (uv_is_writable(handle)) {
ASSERT_OK(uv_shutdown(sreq, handle, after_shutdown));
r = uv_shutdown(sreq, handle, after_shutdown);
if (r != 0) {
/* Cancel pending writes. */
ASSERT_EQ(r, UV_ENOTSOCK);
sreq->handle = handle;
after_shutdown(sreq, 0);
}
}
return;
}
@ -132,14 +139,20 @@ static void after_read(uv_stream_t* handle,
}
}
wr = (write_req_t*) malloc(sizeof *wr);
wr = malloc(sizeof *wr);
ASSERT_NOT_NULL(wr);
wr->buf = uv_buf_init(buf->base, nread);
ASSERT_OK(uv_write(&wr->req, handle, &wr->buf, 1, after_write));
if (shutdown)
ASSERT_OK(uv_shutdown(malloc(sizeof* sreq), handle, on_shutdown));
if (shutdown) {
sreq = malloc(sizeof* sreq);
r = uv_shutdown(sreq, handle, on_shutdown);
if (r != 0) {
ASSERT_EQ(r, UV_ENOTSOCK);
free(sreq);
}
}
}

View File

@ -1,292 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "task.h"
#include <string.h>
#define FIXTURE "testfile"
static void timer_cb(uv_timer_t* handle);
static void close_cb(uv_handle_t* handle);
static void poll_cb(uv_fs_poll_t* handle,
int status,
const uv_stat_t* prev,
const uv_stat_t* curr);
static void poll_cb_fail(uv_fs_poll_t* handle,
int status,
const uv_stat_t* prev,
const uv_stat_t* curr);
static void poll_cb_noop(uv_fs_poll_t* handle,
int status,
const uv_stat_t* prev,
const uv_stat_t* curr);
static uv_fs_poll_t poll_handle;
static uv_timer_t timer_handle;
static uv_loop_t* loop;
static int poll_cb_called;
static int timer_cb_called;
static int close_cb_called;
/* Modify the fixture file by appending one more '*' per call so its size
 * changes each time. NOTE(review): the `path` parameter is ignored -- the
 * function always writes FIXTURE; callers happen to pass FIXTURE anyway. */
static void touch_file(const char* path) {
static int count;
FILE* fp;
int i;
ASSERT((fp = fopen(FIXTURE, "w+")));
/* Need to change the file size because the poller may not pick up
* sub-second mtime changes.
*/
i = ++count;
while (i--)
fputc('*', fp);
fclose(fp);
}
/* Close callback shared by all tests; counts invocations. */
static void close_cb(uv_handle_t* handle) {
close_cb_called++;
}
/* Timer callback: touch the fixture so the next poll sees a change. */
static void timer_cb(uv_timer_t* handle) {
touch_file(FIXTURE);
timer_cb_called++;
}
/* Poll callback that must never fire; used by tests that stop/close the
 * handle before any change can be reported. */
static void poll_cb_fail(uv_fs_poll_t* handle,
int status,
const uv_stat_t* prev,
const uv_stat_t* curr) {
ASSERT(0 && "fail_cb called");
}
/* Poll callback that deliberately does nothing. */
static void poll_cb_noop(uv_fs_poll_t* handle,
int status,
const uv_stat_t* prev,
const uv_stat_t* curr) {
}
/* Scripted poll callback driving the fs_poll test through five phases:
 * 0: initial ENOENT -> create the file;
 * 1: file appeared -> schedule a quick touch;
 * 2: change seen -> schedule a slower touch;
 * 3: change seen -> delete the file;
 * 4: ENOENT again -> close the handle. */
static void poll_cb(uv_fs_poll_t* handle,
int status,
const uv_stat_t* prev,
const uv_stat_t* curr) {
uv_stat_t zero_statbuf;
memset(&zero_statbuf, 0, sizeof(zero_statbuf));
ASSERT_PTR_EQ(handle, &poll_handle);
ASSERT_EQ(1, uv_is_active((uv_handle_t*) handle));
ASSERT_NOT_NULL(prev);
ASSERT_NOT_NULL(curr);
switch (poll_cb_called++) {
case 0:
ASSERT_EQ(status, UV_ENOENT);
ASSERT_OK(memcmp(prev, &zero_statbuf, sizeof(zero_statbuf)));
ASSERT_OK(memcmp(curr, &zero_statbuf, sizeof(zero_statbuf)));
touch_file(FIXTURE);
break;
case 1:
ASSERT_OK(status);
ASSERT_OK(memcmp(prev, &zero_statbuf, sizeof(zero_statbuf)));
ASSERT_NE(0, memcmp(curr, &zero_statbuf, sizeof(zero_statbuf)));
ASSERT_OK(uv_timer_start(&timer_handle, timer_cb, 20, 0));
break;
case 2:
ASSERT_OK(status);
ASSERT_NE(0, memcmp(prev, &zero_statbuf, sizeof(zero_statbuf)));
ASSERT_NE(0, memcmp(curr, &zero_statbuf, sizeof(zero_statbuf)));
ASSERT_OK(uv_timer_start(&timer_handle, timer_cb, 200, 0));
break;
case 3:
ASSERT_OK(status);
ASSERT_NE(0, memcmp(prev, &zero_statbuf, sizeof(zero_statbuf)));
ASSERT_NE(0, memcmp(curr, &zero_statbuf, sizeof(zero_statbuf)));
remove(FIXTURE);
break;
case 4:
ASSERT_EQ(status, UV_ENOENT);
ASSERT_NE(0, memcmp(prev, &zero_statbuf, sizeof(zero_statbuf)));
ASSERT_OK(memcmp(curr, &zero_statbuf, sizeof(zero_statbuf)));
uv_close((uv_handle_t*)handle, close_cb);
break;
default:
ASSERT(0);
}
}
/* End-to-end lifecycle test: poll a missing file, create/touch/delete it,
 * and verify all five poll_cb phases plus both timer touches ran. */
TEST_IMPL(fs_poll) {
loop = uv_default_loop();
remove(FIXTURE);
ASSERT_OK(uv_timer_init(loop, &timer_handle));
ASSERT_OK(uv_fs_poll_init(loop, &poll_handle));
ASSERT_OK(uv_fs_poll_start(&poll_handle, poll_cb, FIXTURE, 100));
ASSERT_OK(uv_run(loop, UV_RUN_DEFAULT));
ASSERT_EQ(5, poll_cb_called);
ASSERT_EQ(2, timer_cb_called);
ASSERT_EQ(1, close_cb_called);
MAKE_VALGRIND_HAPPY(loop);
return 0;
}
/* uv_fs_poll_getpath(): UV_EINVAL before start; after start, returns the
 * monitored path NUL-terminated with *size excluding the terminator. */
TEST_IMPL(fs_poll_getpath) {
char buf[1024];
size_t len;
loop = uv_default_loop();
remove(FIXTURE);
ASSERT_OK(uv_fs_poll_init(loop, &poll_handle));
len = sizeof buf;
ASSERT_EQ(UV_EINVAL, uv_fs_poll_getpath(&poll_handle, buf, &len));
ASSERT_OK(uv_fs_poll_start(&poll_handle, poll_cb_fail, FIXTURE, 100));
len = sizeof buf;
ASSERT_OK(uv_fs_poll_getpath(&poll_handle, buf, &len));
ASSERT_NE(0, buf[len - 1]);
ASSERT_EQ(buf[len], '\0');
ASSERT_OK(memcmp(buf, FIXTURE, len));
uv_close((uv_handle_t*) &poll_handle, close_cb);
ASSERT_OK(uv_run(loop, UV_RUN_DEFAULT));
ASSERT_EQ(1, close_cb_called);
MAKE_VALGRIND_HAPPY(loop);
return 0;
}
/* Closing an active handle while its first stat request is still in flight
 * must complete cleanly without the poll callback ever firing. */
TEST_IMPL(fs_poll_close_request) {
uv_loop_t loop;
uv_fs_poll_t poll_handle;
remove(FIXTURE);
ASSERT_OK(uv_loop_init(&loop));
ASSERT_OK(uv_fs_poll_init(&loop, &poll_handle));
ASSERT_OK(uv_fs_poll_start(&poll_handle, poll_cb_fail, FIXTURE, 100));
uv_close((uv_handle_t*) &poll_handle, close_cb);
while (close_cb_called == 0)
uv_run(&loop, UV_RUN_ONCE);
ASSERT_EQ(1, close_cb_called);
MAKE_VALGRIND_HAPPY(&loop);
return 0;
}
/* Repeated start/stop cycles accumulate contexts on the `previous` chain;
 * closing afterwards must reclaim them all exactly once. */
TEST_IMPL(fs_poll_close_request_multi_start_stop) {
uv_loop_t loop;
uv_fs_poll_t poll_handle;
int i;
remove(FIXTURE);
ASSERT_OK(uv_loop_init(&loop));
ASSERT_OK(uv_fs_poll_init(&loop, &poll_handle));
for (i = 0; i < 10; ++i) {
ASSERT_OK(uv_fs_poll_start(&poll_handle, poll_cb_fail, FIXTURE, 100));
ASSERT_OK(uv_fs_poll_stop(&poll_handle));
}
uv_close((uv_handle_t*) &poll_handle, close_cb);
while (close_cb_called == 0)
uv_run(&loop, UV_RUN_ONCE);
ASSERT_EQ(1, close_cb_called);
MAKE_VALGRIND_HAPPY(&loop);
return 0;
}
/* Same as multi_start_stop but with stop() first each cycle (stop on an
 * inactive handle is a documented no-op returning 0). */
TEST_IMPL(fs_poll_close_request_multi_stop_start) {
uv_loop_t loop;
uv_fs_poll_t poll_handle;
int i;
remove(FIXTURE);
ASSERT_OK(uv_loop_init(&loop));
ASSERT_OK(uv_fs_poll_init(&loop, &poll_handle));
for (i = 0; i < 10; ++i) {
ASSERT_OK(uv_fs_poll_stop(&poll_handle));
ASSERT_OK(uv_fs_poll_start(&poll_handle, poll_cb_fail, FIXTURE, 100));
}
uv_close((uv_handle_t*) &poll_handle, close_cb);
while (close_cb_called == 0)
uv_run(&loop, UV_RUN_ONCE);
ASSERT_EQ(1, close_cb_called);
MAKE_VALGRIND_HAPPY(&loop);
return 0;
}
TEST_IMPL(fs_poll_close_request_stop_when_active) {
  /* Regression test for https://github.com/libuv/libuv/issues/2287. */
  uv_loop_t loop;
  uv_fs_poll_t poll_handle;

  remove(FIXTURE);

  ASSERT_OK(uv_loop_init(&loop));

  /* Set up all handles. */
  ASSERT_OK(uv_fs_poll_init(&loop, &poll_handle));
  ASSERT_OK(uv_fs_poll_start(&poll_handle, poll_cb_noop, FIXTURE, 100));
  /* Run one loop iteration so the handle's poll machinery becomes active. */
  uv_run(&loop, UV_RUN_ONCE);

  /* Stop the poll handle while it is active, and do not crash. */
  ASSERT_OK(uv_fs_poll_stop(&poll_handle));
  uv_run(&loop, UV_RUN_ONCE);

  /* Clean up after the test. */
  uv_close((uv_handle_t*) &poll_handle, close_cb);
  uv_run(&loop, UV_RUN_ONCE);
  ASSERT_EQ(1, close_cb_called);

  MAKE_VALGRIND_HAPPY(&loop);
  return 0;
}

View File

@ -45,6 +45,13 @@ static uv_shutdown_t shutdown_req;
static size_t bytes_written;
static size_t bytes_read;
static int shutdown_notsup;
static void shutdown_cb(uv_shutdown_t* req, int status) {
if (status != UV_ENOTCONN)
ASSERT_OK(status);
uv_close((uv_handle_t*) req->handle, NULL);
}
static void write_cb(uv_write_t* req, int status) {
struct write_info* write_info =
@ -52,11 +59,13 @@ static void write_cb(uv_write_t* req, int status) {
ASSERT_OK(status);
bytes_written += BUFFERS_PER_WRITE * BUFFER_SIZE;
free(write_info);
}
static void shutdown_cb(uv_shutdown_t* req, int status) {
ASSERT(status == 0 || status == UV_ENOTCONN);
uv_close((uv_handle_t*) req->handle, NULL);
if (bytes_written >= XFER_SIZE) {
ASSERT_EQ(bytes_written, XFER_SIZE);
if (shutdown_notsup == 1)
shutdown_cb(&shutdown_req, 0);
shutdown_notsup = -1;
}
}
static void do_write(uv_stream_t* handle) {
@ -102,10 +111,17 @@ static void read_cb(uv_stream_t* handle, ssize_t nread, const uv_buf_t* buf) {
free(buf->base);
if (bytes_read >= XFER_SIZE) {
ASSERT_EQ(bytes_read, XFER_SIZE);
r = uv_read_stop(handle);
ASSERT_OK(r);
r = uv_shutdown(&shutdown_req, handle, shutdown_cb);
ASSERT_OK(r);
if (r != 0) {
ASSERT_EQ(r, UV_ENOTSOCK);
shutdown_req.handle = handle;
if (shutdown_notsup == -1)
shutdown_cb(&shutdown_req, 0);
shutdown_notsup = 1;
}
}
}

View File

@ -266,7 +266,6 @@ TEST_DECLARE (unref_in_prepare_cb)
TEST_DECLARE (timer_ref)
TEST_DECLARE (timer_ref2)
TEST_DECLARE (fs_event_ref)
TEST_DECLARE (fs_poll_ref)
TEST_DECLARE (tcp_ref)
TEST_DECLARE (tcp_ref2)
TEST_DECLARE (tcp_ref2b)
@ -356,12 +355,6 @@ TEST_DECLARE (spawn_quoted_path)
TEST_DECLARE (spawn_tcp_server)
TEST_DECLARE (spawn_exercise_sigchld_issue)
TEST_DECLARE (spawn_relative_path)
TEST_DECLARE (fs_poll)
TEST_DECLARE (fs_poll_getpath)
TEST_DECLARE (fs_poll_close_request)
TEST_DECLARE (fs_poll_close_request_multi_start_stop)
TEST_DECLARE (fs_poll_close_request_multi_stop_start)
TEST_DECLARE (fs_poll_close_request_stop_when_active)
TEST_DECLARE (kill)
TEST_DECLARE (kill_invalid_signum)
TEST_DECLARE (fs_file_noent)
@ -915,7 +908,6 @@ TASK_LIST_START
TEST_ENTRY (ref)
TEST_ENTRY (idle_ref)
TEST_ENTRY (fs_poll_ref)
TEST_ENTRY (async_ref)
TEST_ENTRY (prepare_ref)
TEST_ENTRY (check_ref)
@ -1054,12 +1046,6 @@ TASK_LIST_START
TEST_ENTRY (spawn_tcp_server)
TEST_ENTRY (spawn_exercise_sigchld_issue)
TEST_ENTRY (spawn_relative_path)
TEST_ENTRY (fs_poll)
TEST_ENTRY (fs_poll_getpath)
TEST_ENTRY (fs_poll_close_request)
TEST_ENTRY (fs_poll_close_request_multi_start_stop)
TEST_ENTRY (fs_poll_close_request_multi_stop_start)
TEST_ENTRY (fs_poll_close_request_stop_when_active)
TEST_ENTRY (kill)
TEST_ENTRY (kill_invalid_signum)

View File

@ -69,14 +69,20 @@ static void req_cb(uv_udp_send_t* req, int status) {
static void shutdown_cb(uv_shutdown_t* req, int status) {
ASSERT_OK(status);
ASSERT_PTR_EQ(req, &shutdown_req);
shutdown_cb_called++;
}
static void write_cb(uv_write_t* req, int status) {
int r;
ASSERT_PTR_EQ(req, &write_req);
uv_shutdown(&shutdown_req, req->handle, shutdown_cb);
r = uv_shutdown(&shutdown_req, req->handle, shutdown_cb);
if (r != 0) {
ASSERT_EQ(r, UV_ENOTSOCK);
shutdown_cb(&shutdown_req, 0);
}
write_cb_called++;
}
@ -92,9 +98,14 @@ static void connect_and_write(uv_connect_t* req, int status) {
static void connect_and_shutdown(uv_connect_t* req, int status) {
int r;
ASSERT_PTR_EQ(req, &connect_req);
ASSERT_OK(status);
uv_shutdown(&shutdown_req, req->handle, shutdown_cb);
r = uv_shutdown(&shutdown_req, req->handle, shutdown_cb);
if (r != 0) {
ASSERT_EQ(r, UV_ENOTSOCK);
shutdown_cb(&shutdown_req, 0);
}
connect_cb_called++;
}
@ -208,18 +219,6 @@ TEST_IMPL(fs_event_ref) {
}
TEST_IMPL(fs_poll_ref) {
uv_fs_poll_t h;
uv_fs_poll_init(uv_default_loop(), &h);
uv_fs_poll_start(&h, NULL, ".", 999);
uv_unref((uv_handle_t*)&h);
uv_run(uv_default_loop(), UV_RUN_DEFAULT);
do_close(&h);
MAKE_VALGRIND_HAPPY(uv_default_loop());
return 0;
}
TEST_IMPL(tcp_ref) {
uv_tcp_t h;
uv_tcp_init(uv_default_loop(), &h);

View File

@ -38,7 +38,8 @@ static int close_cb_called = 0;
static void shutdown_cb(uv_shutdown_t* req, int status) {
ASSERT_PTR_EQ(req, &shutdown_req);
ASSERT(status == 0 || status == UV_ECANCELED);
if (status != UV_ECANCELED)
ASSERT_OK(status);
shutdown_cb_called++;
}
@ -55,7 +56,10 @@ static void connect_cb(uv_connect_t* req, int status) {
ASSERT_OK(status);
r = uv_shutdown(&shutdown_req, req->handle, shutdown_cb);
ASSERT_OK(r);
if (r != 0) {
ASSERT_EQ(r, UV_ENOTSOCK);
shutdown_cb(&shutdown_req, 0);
}
ASSERT_OK(uv_is_closing((uv_handle_t*) req->handle));
uv_close((uv_handle_t*) req->handle, close_cb);
ASSERT_EQ(1, uv_is_closing((uv_handle_t*) req->handle));