Diffstat (limited to 'Utilities/cmlibuv/src/win/tcp.c')
-rw-r--r--  Utilities/cmlibuv/src/win/tcp.c  |  213
1 file changed, 106 insertions(+), 107 deletions(-)
diff --git a/Utilities/cmlibuv/src/win/tcp.c b/Utilities/cmlibuv/src/win/tcp.c
index 39c1ff0..3ce5548 100644
--- a/Utilities/cmlibuv/src/win/tcp.c
+++ b/Utilities/cmlibuv/src/win/tcp.c
@@ -99,8 +99,8 @@ static int uv_tcp_set_socket(uv_loop_t* loop,
if (!SetHandleInformation((HANDLE) socket, HANDLE_FLAG_INHERIT, 0))
return GetLastError();
- /* Associate it with the I/O completion port. */
- /* Use uv_handle_t pointer as completion key. */
+ /* Associate it with the I/O completion port. Use uv_handle_t pointer as
+ * completion key. */
if (CreateIoCompletionPort((HANDLE)socket,
loop->iocp,
(ULONG_PTR)socket,
@@ -118,15 +118,12 @@ static int uv_tcp_set_socket(uv_loop_t* loop,
non_ifs_lsp = uv_tcp_non_ifs_lsp_ipv4;
}
- if (pSetFileCompletionNotificationModes &&
- !(handle->flags & UV_HANDLE_EMULATE_IOCP) && !non_ifs_lsp) {
- if (pSetFileCompletionNotificationModes((HANDLE) socket,
- FILE_SKIP_SET_EVENT_ON_HANDLE |
- FILE_SKIP_COMPLETION_PORT_ON_SUCCESS)) {
- handle->flags |= UV_HANDLE_SYNC_BYPASS_IOCP;
- } else if (GetLastError() != ERROR_INVALID_FUNCTION) {
+ if (!(handle->flags & UV_HANDLE_EMULATE_IOCP) && !non_ifs_lsp) {
+ UCHAR sfcnm_flags =
+ FILE_SKIP_SET_EVENT_ON_HANDLE | FILE_SKIP_COMPLETION_PORT_ON_SUCCESS;
+ if (!SetFileCompletionNotificationModes((HANDLE) socket, sfcnm_flags))
return GetLastError();
- }
+ handle->flags |= UV_HANDLE_SYNC_BYPASS_IOCP;
}
if (handle->flags & UV_HANDLE_TCP_NODELAY) {
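The hunk above now calls SetFileCompletionNotificationModes() directly (instead of through the old pSetFileCompletionNotificationModes pointer) so that overlapped operations which complete synchronously neither signal the handle's event nor post a packet to the completion port. A minimal standalone sketch of that setup, with a hypothetical helper name and error handling reduced to returning the Win32 error code:

#define _WIN32_WINNT 0x0600   /* SetFileCompletionNotificationModes needs Vista+ */
#include <winsock2.h>
#include <windows.h>

/* Hypothetical helper, not part of the patch. */
static int attach_socket_to_iocp(HANDLE iocp, SOCKET sock) {
  /* Register the socket with the completion port; the socket value doubles
   * as the completion key, mirroring the hunk above. */
  if (CreateIoCompletionPort((HANDLE) sock, iocp, (ULONG_PTR) sock, 0) == NULL)
    return (int) GetLastError();

  /* Skip the event signal and the completion packet when an overlapped
   * operation succeeds immediately; the result is then consumed inline
   * instead of via GetQueuedCompletionStatus. */
  if (!SetFileCompletionNotificationModes((HANDLE) sock,
                                          FILE_SKIP_SET_EVENT_ON_HANDLE |
                                          FILE_SKIP_COMPLETION_PORT_ON_SUCCESS))
    return (int) GetLastError();

  return 0;
}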
@@ -220,7 +217,7 @@ void uv_tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle) {
UNREGISTER_HANDLE_REQ(loop, handle, handle->stream.conn.shutdown_req);
err = 0;
- if (handle->flags & UV__HANDLE_CLOSING) {
+ if (handle->flags & UV_HANDLE_CLOSING) {
err = ERROR_OPERATION_ABORTED;
} else if (shutdown(handle->socket, SD_SEND) == SOCKET_ERROR) {
err = WSAGetLastError();
@@ -236,7 +233,7 @@ void uv_tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle) {
return;
}
- if (handle->flags & UV__HANDLE_CLOSING &&
+ if (handle->flags & UV_HANDLE_CLOSING &&
handle->reqs_pending == 0) {
assert(!(handle->flags & UV_HANDLE_CLOSED));
@@ -326,9 +323,9 @@ static int uv_tcp_try_bind(uv_tcp_t* handle,
on = (flags & UV_TCP_IPV6ONLY) != 0;
- /* TODO: how to handle errors? This may fail if there is no ipv4 stack */
- /* available, or when run on XP/2003 which have no support for dualstack */
- /* sockets. For now we're silently ignoring the error. */
+ /* TODO: how to handle errors? This may fail if there is no ipv4 stack
+ * available, or when run on XP/2003 which have no support for dualstack
+ * sockets. For now we're silently ignoring the error. */
setsockopt(handle->socket,
IPPROTO_IPV6,
IPV6_V6ONLY,
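The reflowed comment above concerns dual-stack sockets: clearing IPV6_V6ONLY on an AF_INET6 socket lets it also carry IPv4 traffic, and the call can fail where no dual-stack support exists, which is why the error is silently ignored. A small sketch of that pattern, assuming a hypothetical helper outside libuv:

#include <winsock2.h>
#include <ws2tcpip.h>

/* Hypothetical helper, not part of the patch. */
static SOCKET open_dualstack_socket(void) {
  DWORD off = 0;
  SOCKET s = socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP);
  if (s == INVALID_SOCKET)
    return INVALID_SOCKET;

  /* 0 = accept IPv4 peers as well (they appear as ::ffff:a.b.c.d). */
  if (setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY,
                 (const char*) &off, (int) sizeof off) == SOCKET_ERROR) {
    /* No dual-stack support; the socket still works as IPv6-only, which is
     * why the code above ignores the error. */
  }
  return s;
}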
@@ -626,9 +623,9 @@ int uv_tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb) {
uv_tcp_queue_accept(handle, req);
}
- /* Initialize other unused requests too, because uv_tcp_endgame */
- /* doesn't know how how many requests were initialized, so it will */
- /* try to clean up {uv_simultaneous_server_accepts} requests. */
+ /* Initialize other unused requests too, because uv_tcp_endgame doesn't
+ * know how many requests were initialized, so it will try to clean up
+ * {uv_simultaneous_server_accepts} requests. */
for (i = simultaneous_accepts; i < uv_simultaneous_server_accepts; i++) {
req = &handle->tcp.serv.accept_reqs[i];
UV_REQ_INIT(req, UV_ACCEPT);
@@ -683,7 +680,7 @@ int uv_tcp_accept(uv_tcp_t* server, uv_tcp_t* client) {
req->next_pending = NULL;
req->accept_socket = INVALID_SOCKET;
- if (!(server->flags & UV__HANDLE_CLOSING)) {
+ if (!(server->flags & UV_HANDLE_CLOSING)) {
/* Check if we're in a middle of changing the number of pending accepts. */
if (!(server->flags & UV_HANDLE_TCP_ACCEPT_STATE_CHANGING)) {
uv_tcp_queue_accept(server, req);
@@ -721,8 +718,8 @@ int uv_tcp_read_start(uv_tcp_t* handle, uv_alloc_cb alloc_cb,
handle->alloc_cb = alloc_cb;
INCREASE_ACTIVE_COUNT(loop, handle);
- /* If reading was stopped and then started again, there could still be a */
- /* read request pending. */
+ /* If reading was stopped and then started again, there could still be a read
+ * request pending. */
if (!(handle->flags & UV_HANDLE_READ_PENDING)) {
if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
!handle->read_req.event_handle) {
@@ -948,6 +945,7 @@ void uv_process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle,
uv_req_t* req) {
DWORD bytes, flags, err;
uv_buf_t buf;
+ int count;
assert(handle->type == UV_TCP);
@@ -965,8 +963,7 @@ void uv_process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle,
err = GET_REQ_SOCK_ERROR(req);
if (err == WSAECONNABORTED) {
- /*
- * Turn WSAECONNABORTED into UV_ECONNRESET to be consistent with Unix.
+ /* Turn WSAECONNABORTED into UV_ECONNRESET to be consistent with Unix.
*/
err = WSAECONNRESET;
}
@@ -1003,7 +1000,8 @@ void uv_process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle,
}
/* Do nonblocking reads until the buffer is empty */
- while (handle->flags & UV_HANDLE_READING) {
+ count = 32;
+ while ((handle->flags & UV_HANDLE_READING) && (count-- > 0)) {
buf = uv_buf_init(NULL, 0);
handle->alloc_cb((uv_handle_t*) handle, 65536, &buf);
if (buf.base == NULL || buf.len == 0) {
@@ -1046,8 +1044,8 @@ void uv_process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle,
DECREASE_ACTIVE_COUNT(loop, handle);
if (err == WSAECONNABORTED) {
- /* Turn WSAECONNABORTED into UV_ECONNRESET to be consistent with */
- /* Unix. */
+ /* Turn WSAECONNABORTED into UV_ECONNRESET to be consistent with
+ * Unix. */
err = WSAECONNRESET;
}
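The count = 32 change above bounds the nonblocking read loop so that a single busy connection cannot monopolize the event loop after one completion. A rough sketch of that bounded-drain idea with plain Winsock calls (the helper and callback names are hypothetical, and the socket is assumed to be nonblocking):

#include <winsock2.h>

/* Hypothetical helper and callback; the socket is assumed nonblocking. */
static void drain_socket(SOCKET s, void (*on_data)(const char* data, int len)) {
  char buf[65536];
  int budget = 32;   /* same per-completion cap the patch introduces */

  while (budget-- > 0) {
    int n = recv(s, buf, (int) sizeof buf, 0);
    if (n > 0) {
      on_data(buf, n);                       /* hand bytes to the consumer */
    } else if (n == 0) {
      break;                                 /* peer closed its sending side */
    } else {
      /* WSAEWOULDBLOCK means the kernel buffer is drained; anything else is
       * a real error for the caller to inspect. Either way, stop here. */
      break;
    }
  }
}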
@@ -1119,10 +1117,9 @@ void uv_process_tcp_accept_req(uv_loop_t* loop, uv_tcp_t* handle,
assert(handle->type == UV_TCP);
- /* If handle->accepted_socket is not a valid socket, then */
- /* uv_queue_accept must have failed. This is a serious error. We stop */
- /* accepting connections and report this error to the connection */
- /* callback. */
+ /* If handle->accepted_socket is not a valid socket, then uv_queue_accept
+ * must have failed. This is a serious error. We stop accepting connections
+ * and report this error to the connection callback. */
if (req->accept_socket == INVALID_SOCKET) {
if (handle->flags & UV_HANDLE_LISTENING) {
handle->flags &= ~UV_HANDLE_LISTENING;
@@ -1147,9 +1144,9 @@ void uv_process_tcp_accept_req(uv_loop_t* loop, uv_tcp_t* handle,
handle->stream.serv.connection_cb((uv_stream_t*)handle, 0);
}
} else {
- /* Error related to accepted socket is ignored because the server */
- /* socket may still be healthy. If the server socket is broken */
- /* uv_queue_accept will detect it. */
+ /* Error related to accepted socket is ignored because the server socket
+ * may still be healthy. If the server socket is broken uv_queue_accept
+ * will detect it. */
closesocket(req->accept_socket);
req->accept_socket = INVALID_SOCKET;
if (handle->flags & UV_HANDLE_LISTENING) {
@@ -1171,7 +1168,7 @@ void uv_process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle,
err = 0;
if (REQ_SUCCESS(req)) {
- if (handle->flags & UV__HANDLE_CLOSING) {
+ if (handle->flags & UV_HANDLE_CLOSING) {
/* use UV_ECANCELED for consistency with Unix */
err = ERROR_OPERATION_ABORTED;
} else if (setsockopt(handle->socket,
@@ -1194,40 +1191,76 @@ void uv_process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle,
}
-int uv_tcp_import(uv_tcp_t* tcp, uv__ipc_socket_info_ex* socket_info_ex,
- int tcp_connection) {
+int uv__tcp_xfer_export(uv_tcp_t* handle,
+ int target_pid,
+ uv__ipc_socket_xfer_type_t* xfer_type,
+ uv__ipc_socket_xfer_info_t* xfer_info) {
+ if (handle->flags & UV_HANDLE_CONNECTION) {
+ *xfer_type = UV__IPC_SOCKET_XFER_TCP_CONNECTION;
+ } else {
+ *xfer_type = UV__IPC_SOCKET_XFER_TCP_SERVER;
+ /* We're about to share the socket with another process. Because this is a
+ * listening socket, we assume that the other process will be accepting
+ * connections on it. Thus, before sharing the socket with another process,
+ * we call listen here in the parent process. */
+ if (!(handle->flags & UV_HANDLE_LISTENING)) {
+ if (!(handle->flags & UV_HANDLE_BOUND)) {
+ return ERROR_NOT_SUPPORTED;
+ }
+ if (handle->delayed_error == 0 &&
+ listen(handle->socket, SOMAXCONN) == SOCKET_ERROR) {
+ handle->delayed_error = WSAGetLastError();
+ }
+ }
+ }
+
+ if (WSADuplicateSocketW(handle->socket, target_pid, &xfer_info->socket_info))
+ return WSAGetLastError();
+ xfer_info->delayed_error = handle->delayed_error;
+
+ /* Mark the local copy of the handle as 'shared' so we behave in a way that's
+ * friendly to the process(es) that we share the socket with. */
+ handle->flags |= UV_HANDLE_SHARED_TCP_SOCKET;
+
+ return 0;
+}
+
+
+int uv__tcp_xfer_import(uv_tcp_t* tcp,
+ uv__ipc_socket_xfer_type_t xfer_type,
+ uv__ipc_socket_xfer_info_t* xfer_info) {
int err;
- SOCKET socket = WSASocketW(FROM_PROTOCOL_INFO,
- FROM_PROTOCOL_INFO,
- FROM_PROTOCOL_INFO,
- &socket_info_ex->socket_info,
- 0,
- WSA_FLAG_OVERLAPPED);
+ SOCKET socket;
+
+ assert(xfer_type == UV__IPC_SOCKET_XFER_TCP_SERVER ||
+ xfer_type == UV__IPC_SOCKET_XFER_TCP_CONNECTION);
+
+ socket = WSASocketW(FROM_PROTOCOL_INFO,
+ FROM_PROTOCOL_INFO,
+ FROM_PROTOCOL_INFO,
+ &xfer_info->socket_info,
+ 0,
+ WSA_FLAG_OVERLAPPED);
if (socket == INVALID_SOCKET) {
return WSAGetLastError();
}
- err = uv_tcp_set_socket(tcp->loop,
- tcp,
- socket,
- socket_info_ex->socket_info.iAddressFamily,
- 1);
+ err = uv_tcp_set_socket(
+ tcp->loop, tcp, socket, xfer_info->socket_info.iAddressFamily, 1);
if (err) {
closesocket(socket);
return err;
}
- if (tcp_connection) {
+ tcp->delayed_error = xfer_info->delayed_error;
+ tcp->flags |= UV_HANDLE_BOUND | UV_HANDLE_SHARED_TCP_SOCKET;
+
+ if (xfer_type == UV__IPC_SOCKET_XFER_TCP_CONNECTION) {
uv_connection_init((uv_stream_t*)tcp);
tcp->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
}
- tcp->flags |= UV_HANDLE_BOUND;
- tcp->flags |= UV_HANDLE_SHARED_TCP_SOCKET;
-
- tcp->delayed_error = socket_info_ex->delayed_error;
-
tcp->loop->active_tcp_streams++;
return 0;
}
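The new uv__tcp_xfer_export()/uv__tcp_xfer_import() pair moves a socket between processes by serializing it with WSADuplicateSocketW() and re-creating it with WSASocketW(FROM_PROTOCOL_INFO, ...); for a listening handle the exporter calls listen() first so the receiver can accept on it right away. Reduced to raw Winsock, the handshake looks roughly like the sketch below (transport of the WSAPROTOCOL_INFOW blob between the processes, e.g. over a pipe, is left out; function names are hypothetical):

#include <winsock2.h>

/* Exporting side: describe the socket for target_pid. The resulting
 * WSAPROTOCOL_INFOW must be delivered to that process out of band. */
static int export_socket(SOCKET s, DWORD target_pid, WSAPROTOCOL_INFOW* info) {
  if (WSADuplicateSocketW(s, target_pid, info) == SOCKET_ERROR)
    return WSAGetLastError();
  return 0;
}

/* Importing side: rebuild an overlapped socket from the received blob.
 * Returns INVALID_SOCKET on failure. */
static SOCKET import_socket(WSAPROTOCOL_INFOW* info) {
  return WSASocketW(FROM_PROTOCOL_INFO,
                    FROM_PROTOCOL_INFO,
                    FROM_PROTOCOL_INFO,
                    info,
                    0,
                    WSA_FLAG_OVERLAPPED);
}

What the rewritten functions add on top of this is bookkeeping: the exporter carries delayed_error across, and both sides mark the handle UV_HANDLE_SHARED_TCP_SOCKET so the close path knows another process still references the socket.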
@@ -1273,39 +1306,6 @@ int uv_tcp_keepalive(uv_tcp_t* handle, int enable, unsigned int delay) {
}
-int uv_tcp_duplicate_socket(uv_tcp_t* handle, int pid,
- LPWSAPROTOCOL_INFOW protocol_info) {
- if (!(handle->flags & UV_HANDLE_CONNECTION)) {
- /*
- * We're about to share the socket with another process. Because
- * this is a listening socket, we assume that the other process will
- * be accepting connections on it. So, before sharing the socket
- * with another process, we call listen here in the parent process.
- */
-
- if (!(handle->flags & UV_HANDLE_LISTENING)) {
- if (!(handle->flags & UV_HANDLE_BOUND)) {
- return ERROR_INVALID_PARAMETER;
- }
-
- if (!(handle->delayed_error)) {
- if (listen(handle->socket, SOMAXCONN) == SOCKET_ERROR) {
- handle->delayed_error = WSAGetLastError();
- }
- }
- }
- }
-
- if (WSADuplicateSocketW(handle->socket, pid, protocol_info)) {
- return WSAGetLastError();
- }
-
- handle->flags |= UV_HANDLE_SHARED_TCP_SOCKET;
-
- return 0;
-}
-
-
int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) {
if (handle->flags & UV_HANDLE_CONNECTION) {
return UV_EINVAL;
@@ -1346,8 +1346,8 @@ static int uv_tcp_try_cancel_io(uv_tcp_t* tcp) {
non_ifs_lsp = (tcp->flags & UV_HANDLE_IPV6) ? uv_tcp_non_ifs_lsp_ipv6 :
uv_tcp_non_ifs_lsp_ipv4;
- /* If there are non-ifs LSPs then try to obtain a base handle for the */
- /* socket. This will always fail on Windows XP/3k. */
+ /* If there are non-ifs LSPs then try to obtain a base handle for the socket.
+ * This will always fail on Windows XP/3k. */
if (non_ifs_lsp) {
DWORD bytes;
if (WSAIoctl(socket,
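The comment above refers to obtaining the base handle when non-IFS LSPs are layered over the socket: cancellation has to target the provider's base socket, which the SIO_BASE_HANDLE ioctl returns, and the lookup always fails on XP/Server 2003. A hedged sketch of that lookup plus cancellation (helper name hypothetical; SIO_BASE_HANDLE defined locally for SDKs that lack it):

#include <winsock2.h>
#include <windows.h>

#ifndef SIO_BASE_HANDLE
# define SIO_BASE_HANDLE 0x48000022
#endif

/* Hypothetical helper: cancel this thread's pending overlapped I/O on the
 * provider's base socket rather than on the LSP-wrapped one. */
static int cancel_socket_io(SOCKET s) {
  SOCKET base = s;
  DWORD bytes;

  /* Ask the layered provider chain for the underlying base socket; this is
   * the lookup that fails on old Windows versions. */
  if (WSAIoctl(s, SIO_BASE_HANDLE, NULL, 0,
               &base, (DWORD) sizeof base, &bytes, NULL, NULL) != 0)
    return WSAGetLastError();

  if (!CancelIo((HANDLE) base))
    return (int) GetLastError();
  return 0;
}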
@@ -1379,38 +1379,37 @@ void uv_tcp_close(uv_loop_t* loop, uv_tcp_t* tcp) {
int close_socket = 1;
if (tcp->flags & UV_HANDLE_READ_PENDING) {
- /* In order for winsock to do a graceful close there must not be any */
- /* any pending reads, or the socket must be shut down for writing */
+ /* In order for winsock to do a graceful close there must not be any
+ * pending reads, or the socket must be shut down for writing */
if (!(tcp->flags & UV_HANDLE_SHARED_TCP_SOCKET)) {
/* Just do shutdown on non-shared sockets, which ensures graceful close. */
shutdown(tcp->socket, SD_SEND);
} else if (uv_tcp_try_cancel_io(tcp) == 0) {
- /* In case of a shared socket, we try to cancel all outstanding I/O, */
- /* If that works, don't close the socket yet - wait for the read req to */
- /* return and close the socket in uv_tcp_endgame. */
+ /* In case of a shared socket, we try to cancel all outstanding I/O. If
+ * that works, don't close the socket yet - wait for the read req to
+ * return and close the socket in uv_tcp_endgame. */
close_socket = 0;
} else {
- /* When cancelling isn't possible - which could happen when an LSP is */
- /* present on an old Windows version, we will have to close the socket */
- /* with a read pending. That is not nice because trailing sent bytes */
- /* may not make it to the other side. */
+ /* When cancelling isn't possible - which could happen when an LSP is
+ * present on an old Windows version, we will have to close the socket
+ * with a read pending. That is not nice because trailing sent bytes may
+ * not make it to the other side. */
}
} else if ((tcp->flags & UV_HANDLE_SHARED_TCP_SOCKET) &&
tcp->tcp.serv.accept_reqs != NULL) {
- /* Under normal circumstances closesocket() will ensure that all pending */
- /* accept reqs are canceled. However, when the socket is shared the */
- /* presence of another reference to the socket in another process will */
- /* keep the accept reqs going, so we have to ensure that these are */
- /* canceled. */
+ /* Under normal circumstances closesocket() will ensure that all pending
+ * accept reqs are canceled. However, when the socket is shared the
+ * presence of another reference to the socket in another process will keep
+ * the accept reqs going, so we have to ensure that these are canceled. */
if (uv_tcp_try_cancel_io(tcp) != 0) {
- /* When cancellation is not possible, there is another option: we can */
- /* close the incoming sockets, which will also cancel the accept */
- /* operations. However this is not cool because we might inadvertently */
- /* close a socket that just accepted a new connection, which will */
- /* cause the connection to be aborted. */
+ /* When cancellation is not possible, there is another option: we can
+ * close the incoming sockets, which will also cancel the accept
+ * operations. However this is not cool because we might inadvertently
+ * close a socket that just accepted a new connection, which will cause
+ * the connection to be aborted. */
unsigned int i;
for (i = 0; i < uv_simultaneous_server_accepts; i++) {
uv_tcp_accept_t* req = &tcp->tcp.serv.accept_reqs[i];
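The close path in this last hunk is all about preserving a graceful close: shut down the sending side first, keep pending reads alive (or cancel I/O on shared sockets), and only then close, so bytes already sent still reach the peer. For reference, a blocking, standalone sketch of the classic graceful-close sequence (hypothetical helper, not libuv code):

#include <winsock2.h>

/* Hypothetical helper, blocking for simplicity. */
static int close_gracefully(SOCKET s) {
  char buf[4096];
  int n;

  /* Half-close: already-queued outgoing bytes are still delivered, and the
   * peer sees end-of-stream once it has read them. */
  if (shutdown(s, SD_SEND) == SOCKET_ERROR)
    return WSAGetLastError();

  /* Drain anything the peer still sends until it closes its side; closing
   * with unread data pending can produce an abortive close and lose the
   * trailing bytes the comments above worry about. */
  do {
    n = recv(s, buf, (int) sizeof buf, 0);
  } while (n > 0);

  if (closesocket(s) == SOCKET_ERROR)
    return WSAGetLastError();
  return 0;
}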