Diffstat (limited to 'Utilities/cmlibuv')
-rw-r--r--  Utilities/cmlibuv/.gitattributes | 1
-rw-r--r--  Utilities/cmlibuv/CMakeLists.txt | 347
-rw-r--r--  Utilities/cmlibuv/LICENSE | 70
-rw-r--r--  Utilities/cmlibuv/include/uv.h | 1842
-rw-r--r--  Utilities/cmlibuv/include/uv/aix.h | 32
-rw-r--r--  Utilities/cmlibuv/include/uv/android-ifaddrs.h | 54
-rw-r--r--  Utilities/cmlibuv/include/uv/bsd.h | 34
-rw-r--r--  Utilities/cmlibuv/include/uv/darwin.h | 61
-rw-r--r--  Utilities/cmlibuv/include/uv/errno.h | 448
-rw-r--r--  Utilities/cmlibuv/include/uv/linux.h | 34
-rw-r--r--  Utilities/cmlibuv/include/uv/os390.h | 33
-rw-r--r--  Utilities/cmlibuv/include/uv/posix.h | 31
-rw-r--r--  Utilities/cmlibuv/include/uv/stdint-msvc2008.h | 247
-rw-r--r--  Utilities/cmlibuv/include/uv/sunos.h | 44
-rw-r--r--  Utilities/cmlibuv/include/uv/threadpool.h | 37
-rw-r--r--  Utilities/cmlibuv/include/uv/tree.h | 768
-rw-r--r--  Utilities/cmlibuv/include/uv/unix.h | 527
-rw-r--r--  Utilities/cmlibuv/include/uv/version.h | 43
-rw-r--r--  Utilities/cmlibuv/include/uv/win.h | 702
-rw-r--r--  Utilities/cmlibuv/src/fs-poll.c | 287
-rw-r--r--  Utilities/cmlibuv/src/heap-inl.h | 245
-rw-r--r--  Utilities/cmlibuv/src/idna.c | 291
-rw-r--r--  Utilities/cmlibuv/src/idna.h | 31
-rw-r--r--  Utilities/cmlibuv/src/inet.c | 302
-rw-r--r--  Utilities/cmlibuv/src/queue.h | 108
-rw-r--r--  Utilities/cmlibuv/src/random.c | 123
-rw-r--r--  Utilities/cmlibuv/src/strscpy.c | 38
-rw-r--r--  Utilities/cmlibuv/src/strscpy.h | 39
-rw-r--r--  Utilities/cmlibuv/src/threadpool.c | 388
-rw-r--r--  Utilities/cmlibuv/src/timer.c | 184
-rw-r--r--  Utilities/cmlibuv/src/unix/aix-common.c | 90
-rw-r--r--  Utilities/cmlibuv/src/unix/aix.c | 1304
-rw-r--r--  Utilities/cmlibuv/src/unix/android-ifaddrs.c | 713
-rw-r--r--  Utilities/cmlibuv/src/unix/async.c | 253
-rw-r--r--  Utilities/cmlibuv/src/unix/atomic-ops.h | 67
-rw-r--r--  Utilities/cmlibuv/src/unix/bsd-ifaddrs.c | 163
-rw-r--r--  Utilities/cmlibuv/src/unix/bsd-proctitle.c | 100
-rw-r--r--  Utilities/cmlibuv/src/unix/cmake-bootstrap.c | 155
-rw-r--r--  Utilities/cmlibuv/src/unix/core.c | 1625
-rw-r--r--  Utilities/cmlibuv/src/unix/cygwin.c | 53
-rw-r--r--  Utilities/cmlibuv/src/unix/darwin-proctitle.c | 192
-rw-r--r--  Utilities/cmlibuv/src/unix/darwin-stub.h | 113
-rw-r--r--  Utilities/cmlibuv/src/unix/darwin.c | 371
-rw-r--r--  Utilities/cmlibuv/src/unix/dl.c | 80
-rw-r--r--  Utilities/cmlibuv/src/unix/freebsd.c | 282
-rw-r--r--  Utilities/cmlibuv/src/unix/fs.c | 2144
-rw-r--r--  Utilities/cmlibuv/src/unix/fsevents.c | 923
-rw-r--r--  Utilities/cmlibuv/src/unix/getaddrinfo.c | 255
-rw-r--r--  Utilities/cmlibuv/src/unix/getnameinfo.c | 121
-rw-r--r--  Utilities/cmlibuv/src/unix/haiku.c | 167
-rw-r--r--  Utilities/cmlibuv/src/unix/hpux.c | 30
-rw-r--r--  Utilities/cmlibuv/src/unix/ibmi.c | 501
-rw-r--r--  Utilities/cmlibuv/src/unix/internal.h | 354
-rw-r--r--  Utilities/cmlibuv/src/unix/kqueue.c | 585
-rw-r--r--  Utilities/cmlibuv/src/unix/linux-core.c | 1146
-rw-r--r--  Utilities/cmlibuv/src/unix/linux-inotify.c | 327
-rw-r--r--  Utilities/cmlibuv/src/unix/linux-syscalls.c | 267
-rw-r--r--  Utilities/cmlibuv/src/unix/linux-syscalls.h | 81
-rw-r--r--  Utilities/cmlibuv/src/unix/loop-watcher.c | 68
-rw-r--r--  Utilities/cmlibuv/src/unix/loop.c | 228
-rw-r--r--  Utilities/cmlibuv/src/unix/netbsd.c | 259
-rw-r--r--  Utilities/cmlibuv/src/unix/no-fsevents.c | 42
-rw-r--r--  Utilities/cmlibuv/src/unix/no-proctitle.c | 45
-rw-r--r--  Utilities/cmlibuv/src/unix/openbsd.c | 240
-rw-r--r--  Utilities/cmlibuv/src/unix/os390-syscalls.c | 584
-rw-r--r--  Utilities/cmlibuv/src/unix/os390-syscalls.h | 74
-rw-r--r--  Utilities/cmlibuv/src/unix/os390.c | 975
-rw-r--r--  Utilities/cmlibuv/src/unix/pipe.c | 379
-rw-r--r--  Utilities/cmlibuv/src/unix/poll.c | 150
-rw-r--r--  Utilities/cmlibuv/src/unix/posix-hrtime.c | 74
-rw-r--r--  Utilities/cmlibuv/src/unix/posix-poll.c | 374
-rw-r--r--  Utilities/cmlibuv/src/unix/process.c | 650
-rw-r--r--  Utilities/cmlibuv/src/unix/procfs-exepath.c | 45
-rw-r--r--  Utilities/cmlibuv/src/unix/proctitle.c | 159
-rw-r--r--  Utilities/cmlibuv/src/unix/pthread-fixes.c | 58
-rw-r--r--  Utilities/cmlibuv/src/unix/qnx.c | 137
-rw-r--r--  Utilities/cmlibuv/src/unix/random-devurandom.c | 93
-rw-r--r--  Utilities/cmlibuv/src/unix/random-getentropy.c | 57
-rw-r--r--  Utilities/cmlibuv/src/unix/random-getrandom.c | 88
-rw-r--r--  Utilities/cmlibuv/src/unix/random-sysctl-linux.c | 99
-rw-r--r--  Utilities/cmlibuv/src/unix/signal.c | 558
-rw-r--r--  Utilities/cmlibuv/src/unix/spinlock.h | 53
-rw-r--r--  Utilities/cmlibuv/src/unix/stream.c | 1693
-rw-r--r--  Utilities/cmlibuv/src/unix/sunos.c | 871
-rw-r--r--  Utilities/cmlibuv/src/unix/sysinfo-loadavg.c | 36
-rw-r--r--  Utilities/cmlibuv/src/unix/sysinfo-memory.c | 42
-rw-r--r--  Utilities/cmlibuv/src/unix/tcp.c | 461
-rw-r--r--  Utilities/cmlibuv/src/unix/thread.c | 844
-rw-r--r--  Utilities/cmlibuv/src/unix/tty.c | 399
-rw-r--r--  Utilities/cmlibuv/src/unix/udp.c | 1332
-rw-r--r--  Utilities/cmlibuv/src/uv-common.c | 930
-rw-r--r--  Utilities/cmlibuv/src/uv-common.h | 368
-rw-r--r--  Utilities/cmlibuv/src/uv-data-getter-setters.c | 119
-rw-r--r--  Utilities/cmlibuv/src/version.c | 45
-rw-r--r--  Utilities/cmlibuv/src/win/async.c | 98
-rw-r--r--  Utilities/cmlibuv/src/win/atomicops-inl.h | 57
-rw-r--r--  Utilities/cmlibuv/src/win/core.c | 748
-rw-r--r--  Utilities/cmlibuv/src/win/detect-wakeup.c | 56
-rw-r--r--  Utilities/cmlibuv/src/win/dl.c | 136
-rw-r--r--  Utilities/cmlibuv/src/win/error.c | 173
-rw-r--r--  Utilities/cmlibuv/src/win/fs-event.c | 608
-rw-r--r--  Utilities/cmlibuv/src/win/fs-fd-hash-inl.h | 200
-rw-r--r--  Utilities/cmlibuv/src/win/fs.c | 3428
-rw-r--r--  Utilities/cmlibuv/src/win/getaddrinfo.c | 463
-rw-r--r--  Utilities/cmlibuv/src/win/getnameinfo.c | 157
-rw-r--r--  Utilities/cmlibuv/src/win/handle-inl.h | 180
-rw-r--r--  Utilities/cmlibuv/src/win/handle.c | 162
-rw-r--r--  Utilities/cmlibuv/src/win/internal.h | 348
-rw-r--r--  Utilities/cmlibuv/src/win/loop-watcher.c | 122
-rw-r--r--  Utilities/cmlibuv/src/win/pipe.c | 2393
-rw-r--r--  Utilities/cmlibuv/src/win/poll.c | 584
-rw-r--r--  Utilities/cmlibuv/src/win/process-stdio.c | 512
-rw-r--r--  Utilities/cmlibuv/src/win/process.c | 1337
-rw-r--r--  Utilities/cmlibuv/src/win/req-inl.h | 221
-rw-r--r--  Utilities/cmlibuv/src/win/signal.c | 282
-rw-r--r--  Utilities/cmlibuv/src/win/snprintf.c | 42
-rw-r--r--  Utilities/cmlibuv/src/win/stream-inl.h | 54
-rw-r--r--  Utilities/cmlibuv/src/win/stream.c | 243
-rw-r--r--  Utilities/cmlibuv/src/win/tcp.c | 1573
-rw-r--r--  Utilities/cmlibuv/src/win/thread.c | 520
-rw-r--r--  Utilities/cmlibuv/src/win/tty.c | 2452
-rw-r--r--  Utilities/cmlibuv/src/win/udp.c | 1181
-rw-r--r--  Utilities/cmlibuv/src/win/util.c | 1977
-rw-r--r--  Utilities/cmlibuv/src/win/winapi.c | 137
-rw-r--r--  Utilities/cmlibuv/src/win/winapi.h | 4771
-rw-r--r--  Utilities/cmlibuv/src/win/winsock.c | 575
-rw-r--r--  Utilities/cmlibuv/src/win/winsock.h | 202
127 files changed, 58474 insertions, 0 deletions
diff --git a/Utilities/cmlibuv/.gitattributes b/Utilities/cmlibuv/.gitattributes
new file mode 100644
index 0000000..562b12e
--- /dev/null
+++ b/Utilities/cmlibuv/.gitattributes
@@ -0,0 +1 @@
+* -whitespace
diff --git a/Utilities/cmlibuv/CMakeLists.txt b/Utilities/cmlibuv/CMakeLists.txt
new file mode 100644
index 0000000..7625cf6
--- /dev/null
+++ b/Utilities/cmlibuv/CMakeLists.txt
@@ -0,0 +1,347 @@
+project(libuv C)
+
+# Disable warnings to avoid changing 3rd party code.
+if(CMAKE_C_COMPILER_ID MATCHES
+ "^(GNU|Clang|AppleClang|XLClang|XL|VisualAge|SunPro|HP|Intel)$")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w")
+elseif(CMAKE_C_COMPILER_ID STREQUAL "PathScale")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -woffall")
+endif()
+
+find_package(Threads)
+
+set(uv_libraries ${CMAKE_THREAD_LIBS_INIT})
+set(uv_includes include src)
+set(uv_headers
+ include/uv.h
+ include/uv/errno.h
+ include/uv/threadpool.h
+ include/uv/version.h
+ )
+set(uv_sources
+ src/fs-poll.c
+ src/heap-inl.h
+ src/idna.c
+ src/idna.h
+ src/inet.c
+ src/queue.h
+ src/strscpy.c
+ src/strscpy.h
+ src/threadpool.c
+ src/timer.c
+ src/uv-common.c
+ src/uv-common.h
+ src/uv-data-getter-setters.c
+ src/version.c
+ )
+if(WIN32)
+ list(APPEND uv_libraries
+ ws2_32
+ psapi
+ iphlpapi
+ shell32
+ userenv
+ )
+ list(APPEND uv_includes
+ src/win
+ )
+ list(APPEND uv_defines
+ WIN32_LEAN_AND_MEAN
+ _WIN32_WINNT=0x0600
+ )
+ list(APPEND uv_headers
+ include/uv/win.h
+    include/uv/tree.h
+ )
+ list(APPEND uv_sources
+ src/win/async.c
+ src/win/atomicops-inl.h
+ src/win/core.c
+ src/win/detect-wakeup.c
+ src/win/dl.c
+ src/win/error.c
+ src/win/fs-event.c
+ src/win/fs.c
+ src/win/getaddrinfo.c
+ src/win/getnameinfo.c
+ src/win/handle.c
+ src/win/handle-inl.h
+ src/win/internal.h
+ src/win/loop-watcher.c
+ src/win/pipe.c
+ src/win/poll.c
+ src/win/process-stdio.c
+ src/win/process.c
+ src/win/req-inl.h
+ src/win/signal.c
+ src/win/snprintf.c
+ src/win/stream.c
+ src/win/stream-inl.h
+ src/win/tcp.c
+ src/win/thread.c
+ src/win/tty.c
+ src/win/udp.c
+ src/win/util.c
+ src/win/winapi.c
+ src/win/winapi.h
+ src/win/winsock.c
+ src/win/winsock.h
+ )
+else()
+ list(APPEND uv_includes
+ src/unix
+ )
+ list(APPEND uv_headers
+ include/uv/unix.h
+ )
+ list(APPEND uv_sources
+ src/unix/async.c
+ src/unix/atomic-ops.h
+ src/unix/core.c
+ src/unix/dl.c
+ src/unix/fs.c
+ src/unix/getaddrinfo.c
+ src/unix/getnameinfo.c
+ src/unix/internal.h
+ src/unix/loop-watcher.c
+ src/unix/loop.c
+ src/unix/pipe.c
+ src/unix/poll.c
+ src/unix/process.c
+ src/unix/signal.c
+ src/unix/spinlock.h
+ src/unix/stream.c
+ src/unix/tcp.c
+ src/unix/thread.c
+ src/unix/tty.c
+ src/unix/udp.c
+ )
+endif()
+
+if(CMAKE_SYSTEM_NAME STREQUAL "AIX")
+ list(APPEND uv_libraries
+ perfstat
+ )
+ list(APPEND uv_headers
+ include/uv/aix.h
+ )
+ list(APPEND uv_defines
+ _ALL_SOURCE
+ _XOPEN_SOURCE=500
+ _LINUX_SOURCE_COMPAT
+ _THREAD_SAFE
+ )
+ list(APPEND uv_sources
+ src/unix/aix.c
+ src/unix/aix-common.c
+ )
+endif()
+
+if(CMAKE_SYSTEM_NAME STREQUAL "OS400")
+ list(APPEND uv_headers
+ include/uv/posix.h
+ )
+ list(APPEND uv_defines
+ _ALL_SOURCE
+ _XOPEN_SOURCE=500
+ _LINUX_SOURCE_COMPAT
+ _THREAD_SAFE
+ )
+ list(APPEND uv_sources
+ src/unix/aix-common.c
+ src/unix/ibmi.c
+ src/unix/posix-poll.c
+ src/unix/no-fsevents.c
+ src/unix/no-proctitle.c
+ )
+endif()
+
+if(CMAKE_SYSTEM_NAME MATCHES "CYGWIN")
+ list(APPEND uv_libraries
+ )
+ list(APPEND uv_headers
+ include/uv/posix.h
+ )
+ list(APPEND uv_defines
+ )
+ list(APPEND uv_sources
+ src/unix/cygwin.c
+ src/unix/bsd-ifaddrs.c
+ src/unix/no-fsevents.c
+ src/unix/no-proctitle.c
+ src/unix/posix-hrtime.c
+ src/unix/posix-poll.c
+ src/unix/procfs-exepath.c
+ src/unix/sysinfo-loadavg.c
+ src/unix/sysinfo-memory.c
+ )
+endif()
+
+if(CMAKE_SYSTEM_NAME STREQUAL "Darwin")
+ list(APPEND uv_headers
+ include/uv/darwin.h
+ )
+ list(APPEND uv_defines
+ _DARWIN_USE_64_BIT_INODE=1
+ _DARWIN_UNLIMITED_SELECT=1
+ )
+ list(APPEND uv_sources
+ src/unix/bsd-ifaddrs.c
+ src/unix/darwin.c
+ src/unix/darwin-proctitle.c
+ src/unix/fsevents.c
+ src/unix/kqueue.c
+ src/unix/proctitle.c
+ )
+endif()
+
+if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
+ list(APPEND uv_libraries dl rt)
+ list(APPEND uv_headers
+ include/uv/linux.h
+ )
+ list(APPEND uv_defines _GNU_SOURCE)
+ list(APPEND uv_sources
+ src/unix/linux-core.c
+ src/unix/linux-inotify.c
+ src/unix/linux-syscalls.c
+ src/unix/linux-syscalls.h
+ src/unix/procfs-exepath.c
+ src/unix/proctitle.c
+ src/unix/sysinfo-loadavg.c
+ src/unix/sysinfo-memory.c
+ )
+endif()
+
+if(CMAKE_SYSTEM_NAME STREQUAL "FreeBSD")
+ list(APPEND uv_libraries
+ kvm
+ )
+ list(APPEND uv_headers
+ include/uv/bsd.h
+ )
+ list(APPEND uv_sources
+ src/unix/bsd-ifaddrs.c
+ src/unix/bsd-proctitle.c
+ src/unix/freebsd.c
+ src/unix/kqueue.c
+ src/unix/posix-hrtime.c
+ )
+endif()
+
+if(CMAKE_SYSTEM_NAME STREQUAL "kFreeBSD")
+ list(APPEND uv_libraries
+ freebsd-glue
+ kvm
+ )
+ list(APPEND uv_headers
+ include/uv/bsd.h
+ )
+ list(APPEND uv_sources
+ src/unix/bsd-ifaddrs.c
+ src/unix/bsd-proctitle.c
+ src/unix/freebsd.c
+ src/unix/kqueue.c
+ src/unix/posix-hrtime.c
+ )
+endif()
+
+if(CMAKE_SYSTEM_NAME STREQUAL "NetBSD")
+ list(APPEND uv_libraries
+ kvm
+ )
+ list(APPEND uv_headers
+ include/uv/bsd.h
+ )
+ list(APPEND uv_sources
+ src/unix/bsd-ifaddrs.c
+ src/unix/bsd-proctitle.c
+ src/unix/netbsd.c
+ src/unix/kqueue.c
+ src/unix/posix-hrtime.c
+ )
+endif()
+
+if(CMAKE_SYSTEM_NAME STREQUAL "OpenBSD")
+ list(APPEND uv_libraries
+ kvm
+ )
+ list(APPEND uv_headers
+ include/uv/bsd.h
+ )
+ list(APPEND uv_sources
+ src/unix/bsd-ifaddrs.c
+ src/unix/bsd-proctitle.c
+ src/unix/openbsd.c
+ src/unix/kqueue.c
+ src/unix/posix-hrtime.c
+ )
+endif()
+
+if(CMAKE_SYSTEM_NAME STREQUAL "SunOS")
+ list(APPEND uv_libraries
+ kstat
+ nsl
+ sendfile
+ socket
+ rt
+ )
+ list(APPEND uv_headers
+ include/uv/sunos.h
+ )
+ list(APPEND uv_defines
+ __EXTENSIONS__
+ )
+ if(CMAKE_SYSTEM_VERSION STREQUAL "5.10")
+ set(CMAKE_C_STANDARD 90)
+ if(CMAKE_VERSION VERSION_LESS 3.8.20170504 AND CMAKE_C_COMPILER_ID STREQUAL "SunPro" AND NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 5.14)
+ # The running version of CMake does not know how to add this flag.
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c90")
+ endif()
+ list(APPEND uv_defines
+ _XOPEN_SOURCE=500
+ )
+ else()
+ if(NOT CMAKE_C_STANDARD OR CMAKE_C_STANDARD EQUAL 90)
+ set(CMAKE_C_STANDARD 11)
+ endif()
+ if(CMAKE_VERSION VERSION_LESS 3.8.20170505 AND CMAKE_C_COMPILER_ID STREQUAL "SunPro" AND CMAKE_C_COMPILER_VERSION VERSION_LESS 5.14)
+ # The running version of CMake does not know how to add this flag.
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -xc99")
+ endif()
+ list(APPEND uv_defines
+ _XOPEN_SOURCE=600
+ )
+ endif()
+ list(APPEND uv_sources
+ src/unix/no-proctitle.c
+ src/unix/sunos.c
+ )
+endif()
+
+if(CMAKE_SYSTEM_NAME STREQUAL "HP-UX")
+ list(APPEND uv_libraries
+ rt
+ )
+ list(APPEND uv_headers
+ include/uv/posix.h
+ )
+ list(APPEND uv_defines
+ _XOPEN_SOURCE_EXTENDED
+ )
+ list(APPEND uv_sources
+ src/unix/hpux.c
+ src/unix/no-fsevents.c
+ src/unix/posix-poll.c
+ )
+endif()
+
+include_directories(
+ ${uv_includes}
+ ${KWSYS_HEADER_ROOT}
+ )
+add_library(cmlibuv STATIC ${uv_sources})
+target_link_libraries(cmlibuv ${uv_libraries})
+set_property(TARGET cmlibuv PROPERTY COMPILE_DEFINITIONS ${uv_defines})
+
+install(FILES LICENSE DESTINATION ${CMAKE_DOC_DIR}/cmlibuv)
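[Editor's note: an illustrative sketch only, not part of the imported sources. It shows what a consumer of the resulting cmlibuv static target can do with the API declared in include/uv.h: run a one-shot timer on the default loop. It assumes the translation unit is built with the include directories configured above (this uv.h also pulls in cmsys/Configure.h) and is linked against the cmlibuv target.]

/* Minimal libuv usage sketch: one-shot timer on the default loop. */
#include <stdio.h>
#include <uv.h>

static void on_timeout(uv_timer_t* handle) {
  printf("timer fired\n");
  uv_stop(handle->loop); /* make uv_run() return */
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_timer_t timer;

  uv_timer_init(loop, &timer);
  uv_timer_start(&timer, on_timeout, 100, 0); /* 100 ms timeout, no repeat */

  uv_run(loop, UV_RUN_DEFAULT); /* runs until uv_stop() or no active handles */

  uv_close((uv_handle_t*) &timer, NULL);
  uv_run(loop, UV_RUN_NOWAIT);  /* let the pending close callback run */
  return uv_loop_close(loop);
}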
diff --git a/Utilities/cmlibuv/LICENSE b/Utilities/cmlibuv/LICENSE
new file mode 100644
index 0000000..28f1733
--- /dev/null
+++ b/Utilities/cmlibuv/LICENSE
@@ -0,0 +1,70 @@
+libuv is licensed for use as follows:
+
+====
+Copyright (c) 2015-present libuv project contributors.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to
+deal in the Software without restriction, including without limitation the
+rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+sell copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+IN THE SOFTWARE.
+====
+
+This license applies to parts of libuv originating from the
+https://github.com/joyent/libuv repository:
+
+====
+
+Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to
+deal in the Software without restriction, including without limitation the
+rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+sell copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+IN THE SOFTWARE.
+
+====
+
+This license applies to all parts of libuv that are not externally
+maintained libraries.
+
+The externally maintained libraries used by libuv are:
+
+ - tree.h (from FreeBSD), copyright Niels Provos. Two clause BSD license.
+
+ - inet_pton and inet_ntop implementations, contained in src/inet.c, are
+ copyright the Internet Systems Consortium, Inc., and licensed under the ISC
+ license.
+
+ - stdint-msvc2008.h (from msinttypes), copyright Alexander Chemeris. Three
+ clause BSD license.
+
+ - pthread-fixes.c, copyright Google Inc. and Sony Mobile Communications AB.
+ Three clause BSD license.
+
+ - android-ifaddrs.h, android-ifaddrs.c, copyright Berkeley Software Design
+ Inc, Kenneth MacKay and Emergya (Cloud4all, FP7/2007-2013, grant agreement
+ n° 289016). Three clause BSD license.
diff --git a/Utilities/cmlibuv/include/uv.h b/Utilities/cmlibuv/include/uv.h
new file mode 100644
index 0000000..11891df
--- /dev/null
+++ b/Utilities/cmlibuv/include/uv.h
@@ -0,0 +1,1842 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/* See https://github.com/libuv/libuv#documentation for documentation. */
+
+#ifndef UV_H
+#define UV_H
+
+/* Include KWSys Large File Support configuration. */
+#include <cmsys/Configure.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(BUILDING_UV_SHARED) && defined(USING_UV_SHARED)
+#error "Define either BUILDING_UV_SHARED or USING_UV_SHARED, not both."
+#endif
+
+#ifdef _WIN32
+ /* Windows - set up dll import/export decorators. */
+# if defined(BUILDING_UV_SHARED)
+ /* Building shared library. */
+# define UV_EXTERN __declspec(dllexport)
+# elif defined(USING_UV_SHARED)
+ /* Using shared library. */
+# define UV_EXTERN __declspec(dllimport)
+# else
+ /* Building static library. */
+# define UV_EXTERN /* nothing */
+# endif
+#elif __GNUC__ >= 4
+# define UV_EXTERN __attribute__((visibility("default")))
+#else
+# define UV_EXTERN /* nothing */
+#endif
+
+#include "uv/errno.h"
+#include "uv/version.h"
+#include <stddef.h>
+#include <stdio.h>
+
+#if defined(_MSC_VER) && _MSC_VER < 1600
+# include "uv/stdint-msvc2008.h"
+#else
+# include <stdint.h>
+#endif
+
+#if defined(_WIN32)
+# include "uv/win.h"
+#else
+# include "uv/unix.h"
+#endif
+
+/* Expand this list if necessary. */
+#define UV_ERRNO_MAP(XX) \
+ XX(E2BIG, "argument list too long") \
+ XX(EACCES, "permission denied") \
+ XX(EADDRINUSE, "address already in use") \
+ XX(EADDRNOTAVAIL, "address not available") \
+ XX(EAFNOSUPPORT, "address family not supported") \
+ XX(EAGAIN, "resource temporarily unavailable") \
+ XX(EAI_ADDRFAMILY, "address family not supported") \
+ XX(EAI_AGAIN, "temporary failure") \
+ XX(EAI_BADFLAGS, "bad ai_flags value") \
+ XX(EAI_BADHINTS, "invalid value for hints") \
+ XX(EAI_CANCELED, "request canceled") \
+ XX(EAI_FAIL, "permanent failure") \
+ XX(EAI_FAMILY, "ai_family not supported") \
+ XX(EAI_MEMORY, "out of memory") \
+ XX(EAI_NODATA, "no address") \
+ XX(EAI_NONAME, "unknown node or service") \
+ XX(EAI_OVERFLOW, "argument buffer overflow") \
+ XX(EAI_PROTOCOL, "resolved protocol is unknown") \
+ XX(EAI_SERVICE, "service not available for socket type") \
+ XX(EAI_SOCKTYPE, "socket type not supported") \
+ XX(EALREADY, "connection already in progress") \
+ XX(EBADF, "bad file descriptor") \
+ XX(EBUSY, "resource busy or locked") \
+ XX(ECANCELED, "operation canceled") \
+ XX(ECHARSET, "invalid Unicode character") \
+ XX(ECONNABORTED, "software caused connection abort") \
+ XX(ECONNREFUSED, "connection refused") \
+ XX(ECONNRESET, "connection reset by peer") \
+ XX(EDESTADDRREQ, "destination address required") \
+ XX(EEXIST, "file already exists") \
+ XX(EFAULT, "bad address in system call argument") \
+ XX(EFBIG, "file too large") \
+ XX(EHOSTUNREACH, "host is unreachable") \
+ XX(EINTR, "interrupted system call") \
+ XX(EINVAL, "invalid argument") \
+ XX(EIO, "i/o error") \
+ XX(EISCONN, "socket is already connected") \
+ XX(EISDIR, "illegal operation on a directory") \
+ XX(ELOOP, "too many symbolic links encountered") \
+ XX(EMFILE, "too many open files") \
+ XX(EMSGSIZE, "message too long") \
+ XX(ENAMETOOLONG, "name too long") \
+ XX(ENETDOWN, "network is down") \
+ XX(ENETUNREACH, "network is unreachable") \
+ XX(ENFILE, "file table overflow") \
+ XX(ENOBUFS, "no buffer space available") \
+ XX(ENODEV, "no such device") \
+ XX(ENOENT, "no such file or directory") \
+ XX(ENOMEM, "not enough memory") \
+ XX(ENONET, "machine is not on the network") \
+ XX(ENOPROTOOPT, "protocol not available") \
+ XX(ENOSPC, "no space left on device") \
+ XX(ENOSYS, "function not implemented") \
+ XX(ENOTCONN, "socket is not connected") \
+ XX(ENOTDIR, "not a directory") \
+ XX(ENOTEMPTY, "directory not empty") \
+ XX(ENOTSOCK, "socket operation on non-socket") \
+ XX(ENOTSUP, "operation not supported on socket") \
+ XX(EPERM, "operation not permitted") \
+ XX(EPIPE, "broken pipe") \
+ XX(EPROTO, "protocol error") \
+ XX(EPROTONOSUPPORT, "protocol not supported") \
+ XX(EPROTOTYPE, "protocol wrong type for socket") \
+ XX(ERANGE, "result too large") \
+ XX(EROFS, "read-only file system") \
+ XX(ESHUTDOWN, "cannot send after transport endpoint shutdown") \
+ XX(ESPIPE, "invalid seek") \
+ XX(ESRCH, "no such process") \
+ XX(ETIMEDOUT, "connection timed out") \
+ XX(ETXTBSY, "text file is busy") \
+ XX(EXDEV, "cross-device link not permitted") \
+ XX(UNKNOWN, "unknown error") \
+ XX(EOF, "end of file") \
+ XX(ENXIO, "no such device or address") \
+ XX(EMLINK, "too many links") \
+ XX(EHOSTDOWN, "host is down") \
+ XX(EREMOTEIO, "remote I/O error") \
+ XX(ENOTTY, "inappropriate ioctl for device") \
+ XX(EFTYPE, "inappropriate file type or format") \
+ XX(EILSEQ, "illegal byte sequence") \
+
+#define UV_HANDLE_TYPE_MAP(XX) \
+ XX(ASYNC, async) \
+ XX(CHECK, check) \
+ XX(FS_EVENT, fs_event) \
+ XX(FS_POLL, fs_poll) \
+ XX(HANDLE, handle) \
+ XX(IDLE, idle) \
+ XX(NAMED_PIPE, pipe) \
+ XX(POLL, poll) \
+ XX(PREPARE, prepare) \
+ XX(PROCESS, process) \
+ XX(STREAM, stream) \
+ XX(TCP, tcp) \
+ XX(TIMER, timer) \
+ XX(TTY, tty) \
+ XX(UDP, udp) \
+ XX(SIGNAL, signal) \
+
+#define UV_REQ_TYPE_MAP(XX) \
+ XX(REQ, req) \
+ XX(CONNECT, connect) \
+ XX(WRITE, write) \
+ XX(SHUTDOWN, shutdown) \
+ XX(UDP_SEND, udp_send) \
+ XX(FS, fs) \
+ XX(WORK, work) \
+ XX(GETADDRINFO, getaddrinfo) \
+ XX(GETNAMEINFO, getnameinfo) \
+ XX(RANDOM, random) \
+
+typedef enum {
+#define XX(code, _) UV_ ## code = UV__ ## code,
+ UV_ERRNO_MAP(XX)
+#undef XX
+ UV_ERRNO_MAX = UV__EOF - 1
+} uv_errno_t;
+
+typedef enum {
+ UV_UNKNOWN_HANDLE = 0,
+#define XX(uc, lc) UV_##uc,
+ UV_HANDLE_TYPE_MAP(XX)
+#undef XX
+ UV_FILE,
+ UV_HANDLE_TYPE_MAX
+} uv_handle_type;
+
+typedef enum {
+ UV_UNKNOWN_REQ = 0,
+#define XX(uc, lc) UV_##uc,
+ UV_REQ_TYPE_MAP(XX)
+#undef XX
+  UV_REQ_TYPE_PRIVATE,
+ UV_REQ_TYPE_MAX
+} uv_req_type;
+
+
+/* Handle types. */
+typedef struct uv_loop_s uv_loop_t;
+typedef struct uv_handle_s uv_handle_t;
+typedef struct uv_dir_s uv_dir_t;
+typedef struct uv_stream_s uv_stream_t;
+typedef struct uv_tcp_s uv_tcp_t;
+typedef struct uv_udp_s uv_udp_t;
+typedef struct uv_pipe_s uv_pipe_t;
+typedef struct uv_tty_s uv_tty_t;
+typedef struct uv_poll_s uv_poll_t;
+typedef struct uv_timer_s uv_timer_t;
+typedef struct uv_prepare_s uv_prepare_t;
+typedef struct uv_check_s uv_check_t;
+typedef struct uv_idle_s uv_idle_t;
+typedef struct uv_async_s uv_async_t;
+typedef struct uv_process_s uv_process_t;
+typedef struct uv_fs_event_s uv_fs_event_t;
+typedef struct uv_fs_poll_s uv_fs_poll_t;
+typedef struct uv_signal_s uv_signal_t;
+
+/* Request types. */
+typedef struct uv_req_s uv_req_t;
+typedef struct uv_getaddrinfo_s uv_getaddrinfo_t;
+typedef struct uv_getnameinfo_s uv_getnameinfo_t;
+typedef struct uv_shutdown_s uv_shutdown_t;
+typedef struct uv_write_s uv_write_t;
+typedef struct uv_connect_s uv_connect_t;
+typedef struct uv_udp_send_s uv_udp_send_t;
+typedef struct uv_fs_s uv_fs_t;
+typedef struct uv_work_s uv_work_t;
+typedef struct uv_random_s uv_random_t;
+
+/* None of the above. */
+typedef struct uv_env_item_s uv_env_item_t;
+typedef struct uv_cpu_info_s uv_cpu_info_t;
+typedef struct uv_interface_address_s uv_interface_address_t;
+typedef struct uv_dirent_s uv_dirent_t;
+typedef struct uv_passwd_s uv_passwd_t;
+typedef struct uv_utsname_s uv_utsname_t;
+typedef struct uv_statfs_s uv_statfs_t;
+
+typedef enum {
+ UV_LOOP_BLOCK_SIGNAL = 0,
+ UV_METRICS_IDLE_TIME
+} uv_loop_option;
+
+typedef enum {
+ UV_RUN_DEFAULT = 0,
+ UV_RUN_ONCE,
+ UV_RUN_NOWAIT
+} uv_run_mode;
+
+
+UV_EXTERN unsigned int uv_version(void);
+UV_EXTERN const char* uv_version_string(void);
+
+typedef void* (*uv_malloc_func)(size_t size);
+typedef void* (*uv_realloc_func)(void* ptr, size_t size);
+typedef void* (*uv_calloc_func)(size_t count, size_t size);
+typedef void (*uv_free_func)(void* ptr);
+
+UV_EXTERN void uv_library_shutdown(void);
+
+UV_EXTERN int uv_replace_allocator(uv_malloc_func malloc_func,
+ uv_realloc_func realloc_func,
+ uv_calloc_func calloc_func,
+ uv_free_func free_func);
+
+UV_EXTERN uv_loop_t* uv_default_loop(void);
+UV_EXTERN int uv_loop_init(uv_loop_t* loop);
+UV_EXTERN int uv_loop_close(uv_loop_t* loop);
+/*
+ * NOTE:
+ * This function is DEPRECATED (to be removed after 0.12), users should
+ * allocate the loop manually and use uv_loop_init instead.
+ */
+UV_EXTERN uv_loop_t* uv_loop_new(void);
+/*
+ * NOTE:
+ * This function is DEPRECATED (to be removed after 0.12). Users should use
+ * uv_loop_close and free the memory manually instead.
+ */
+UV_EXTERN void uv_loop_delete(uv_loop_t*);
+UV_EXTERN size_t uv_loop_size(void);
+UV_EXTERN int uv_loop_alive(const uv_loop_t* loop);
+UV_EXTERN int uv_loop_configure(uv_loop_t* loop, uv_loop_option option, ...);
+UV_EXTERN int uv_loop_fork(uv_loop_t* loop);
+
+UV_EXTERN int uv_run(uv_loop_t*, uv_run_mode mode);
+UV_EXTERN void uv_stop(uv_loop_t*);
+
+UV_EXTERN void uv_ref(uv_handle_t*);
+UV_EXTERN void uv_unref(uv_handle_t*);
+UV_EXTERN int uv_has_ref(const uv_handle_t*);
+
+UV_EXTERN void uv_update_time(uv_loop_t*);
+UV_EXTERN uint64_t uv_now(const uv_loop_t*);
+
+UV_EXTERN int uv_backend_fd(const uv_loop_t*);
+UV_EXTERN int uv_backend_timeout(const uv_loop_t*);
+
+typedef void (*uv_alloc_cb)(uv_handle_t* handle,
+ size_t suggested_size,
+ uv_buf_t* buf);
+typedef void (*uv_read_cb)(uv_stream_t* stream,
+ ssize_t nread,
+ const uv_buf_t* buf);
+typedef void (*uv_write_cb)(uv_write_t* req, int status);
+typedef void (*uv_connect_cb)(uv_connect_t* req, int status);
+typedef void (*uv_shutdown_cb)(uv_shutdown_t* req, int status);
+typedef void (*uv_connection_cb)(uv_stream_t* server, int status);
+typedef void (*uv_close_cb)(uv_handle_t* handle);
+typedef void (*uv_poll_cb)(uv_poll_t* handle, int status, int events);
+typedef void (*uv_timer_cb)(uv_timer_t* handle);
+typedef void (*uv_async_cb)(uv_async_t* handle);
+typedef void (*uv_prepare_cb)(uv_prepare_t* handle);
+typedef void (*uv_check_cb)(uv_check_t* handle);
+typedef void (*uv_idle_cb)(uv_idle_t* handle);
+typedef void (*uv_exit_cb)(uv_process_t*, int64_t exit_status, int term_signal);
+typedef void (*uv_walk_cb)(uv_handle_t* handle, void* arg);
+typedef void (*uv_fs_cb)(uv_fs_t* req);
+typedef void (*uv_work_cb)(uv_work_t* req);
+typedef void (*uv_after_work_cb)(uv_work_t* req, int status);
+typedef void (*uv_getaddrinfo_cb)(uv_getaddrinfo_t* req,
+ int status,
+ struct addrinfo* res);
+typedef void (*uv_getnameinfo_cb)(uv_getnameinfo_t* req,
+ int status,
+ const char* hostname,
+ const char* service);
+typedef void (*uv_random_cb)(uv_random_t* req,
+ int status,
+ void* buf,
+ size_t buflen);
+
+typedef struct {
+ long tv_sec;
+ long tv_nsec;
+} uv_timespec_t;
+
+
+typedef struct {
+ uint64_t st_dev;
+ uint64_t st_mode;
+ uint64_t st_nlink;
+ uint64_t st_uid;
+ uint64_t st_gid;
+ uint64_t st_rdev;
+ uint64_t st_ino;
+ uint64_t st_size;
+ uint64_t st_blksize;
+ uint64_t st_blocks;
+ uint64_t st_flags;
+ uint64_t st_gen;
+ uv_timespec_t st_atim;
+ uv_timespec_t st_mtim;
+ uv_timespec_t st_ctim;
+ uv_timespec_t st_birthtim;
+} uv_stat_t;
+
+
+typedef void (*uv_fs_event_cb)(uv_fs_event_t* handle,
+ const char* filename,
+ int events,
+ int status);
+
+typedef void (*uv_fs_poll_cb)(uv_fs_poll_t* handle,
+ int status,
+ const uv_stat_t* prev,
+ const uv_stat_t* curr);
+
+typedef void (*uv_signal_cb)(uv_signal_t* handle, int signum);
+
+
+typedef enum {
+ UV_LEAVE_GROUP = 0,
+ UV_JOIN_GROUP
+} uv_membership;
+
+
+UV_EXTERN int uv_translate_sys_error(int sys_errno);
+
+UV_EXTERN const char* uv_strerror(int err);
+UV_EXTERN char* uv_strerror_r(int err, char* buf, size_t buflen);
+
+UV_EXTERN const char* uv_err_name(int err);
+UV_EXTERN char* uv_err_name_r(int err, char* buf, size_t buflen);
+
+
+#define UV_REQ_FIELDS \
+ /* public */ \
+ void* data; \
+ /* read-only */ \
+ uv_req_type type; \
+ /* private */ \
+ void* reserved[6]; \
+ UV_REQ_PRIVATE_FIELDS \
+
+/* Abstract base class of all requests. */
+struct uv_req_s {
+ UV_REQ_FIELDS
+};
+
+
+/* Platform-specific request types. */
+UV_PRIVATE_REQ_TYPES
+
+
+UV_EXTERN int uv_shutdown(uv_shutdown_t* req,
+ uv_stream_t* handle,
+ uv_shutdown_cb cb);
+
+struct uv_shutdown_s {
+ UV_REQ_FIELDS
+ uv_stream_t* handle;
+ uv_shutdown_cb cb;
+ UV_SHUTDOWN_PRIVATE_FIELDS
+};
+
+
+#define UV_HANDLE_FIELDS \
+ /* public */ \
+ void* data; \
+ /* read-only */ \
+ uv_loop_t* loop; \
+ uv_handle_type type; \
+ /* private */ \
+ uv_close_cb close_cb; \
+ void* handle_queue[2]; \
+ union { \
+ int fd; \
+ void* reserved[4]; \
+ } u; \
+ UV_HANDLE_PRIVATE_FIELDS \
+
+/* The abstract base class of all handles. */
+struct uv_handle_s {
+ UV_HANDLE_FIELDS
+};
+
+UV_EXTERN size_t uv_handle_size(uv_handle_type type);
+UV_EXTERN uv_handle_type uv_handle_get_type(const uv_handle_t* handle);
+UV_EXTERN const char* uv_handle_type_name(uv_handle_type type);
+UV_EXTERN void* uv_handle_get_data(const uv_handle_t* handle);
+UV_EXTERN uv_loop_t* uv_handle_get_loop(const uv_handle_t* handle);
+UV_EXTERN void uv_handle_set_data(uv_handle_t* handle, void* data);
+
+UV_EXTERN size_t uv_req_size(uv_req_type type);
+UV_EXTERN void* uv_req_get_data(const uv_req_t* req);
+UV_EXTERN void uv_req_set_data(uv_req_t* req, void* data);
+UV_EXTERN uv_req_type uv_req_get_type(const uv_req_t* req);
+UV_EXTERN const char* uv_req_type_name(uv_req_type type);
+
+UV_EXTERN int uv_is_active(const uv_handle_t* handle);
+
+UV_EXTERN void uv_walk(uv_loop_t* loop, uv_walk_cb walk_cb, void* arg);
+
+/* Helpers for ad hoc debugging, no API/ABI stability guaranteed. */
+UV_EXTERN void uv_print_all_handles(uv_loop_t* loop, FILE* stream);
+UV_EXTERN void uv_print_active_handles(uv_loop_t* loop, FILE* stream);
+
+UV_EXTERN void uv_close(uv_handle_t* handle, uv_close_cb close_cb);
+
+UV_EXTERN int uv_send_buffer_size(uv_handle_t* handle, int* value);
+UV_EXTERN int uv_recv_buffer_size(uv_handle_t* handle, int* value);
+
+UV_EXTERN int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd);
+
+UV_EXTERN uv_buf_t uv_buf_init(char* base, unsigned int len);
+
+
+#define UV_STREAM_FIELDS \
+ /* number of bytes queued for writing */ \
+ size_t write_queue_size; \
+ uv_alloc_cb alloc_cb; \
+ uv_read_cb read_cb; \
+ /* private */ \
+ UV_STREAM_PRIVATE_FIELDS
+
+/*
+ * uv_stream_t is a subclass of uv_handle_t.
+ *
+ * uv_stream is an abstract class.
+ *
+ * uv_stream_t is the parent class of uv_tcp_t, uv_pipe_t and uv_tty_t.
+ */
+struct uv_stream_s {
+ UV_HANDLE_FIELDS
+ UV_STREAM_FIELDS
+};
+
+UV_EXTERN size_t uv_stream_get_write_queue_size(const uv_stream_t* stream);
+
+UV_EXTERN int uv_listen(uv_stream_t* stream, int backlog, uv_connection_cb cb);
+UV_EXTERN int uv_accept(uv_stream_t* server, uv_stream_t* client);
+
+UV_EXTERN int uv_read_start(uv_stream_t*,
+ uv_alloc_cb alloc_cb,
+ uv_read_cb read_cb);
+UV_EXTERN int uv_read_stop(uv_stream_t*);
+
+UV_EXTERN int uv_write(uv_write_t* req,
+ uv_stream_t* handle,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ uv_write_cb cb);
+UV_EXTERN int uv_write2(uv_write_t* req,
+ uv_stream_t* handle,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ uv_stream_t* send_handle,
+ uv_write_cb cb);
+UV_EXTERN int uv_try_write(uv_stream_t* handle,
+ const uv_buf_t bufs[],
+ unsigned int nbufs);
+
+/* uv_write_t is a subclass of uv_req_t. */
+struct uv_write_s {
+ UV_REQ_FIELDS
+ uv_write_cb cb;
+ uv_stream_t* send_handle; /* TODO: make private and unix-only in v2.x. */
+ uv_stream_t* handle;
+ UV_WRITE_PRIVATE_FIELDS
+};
+
+
+UV_EXTERN int uv_is_readable(const uv_stream_t* handle);
+UV_EXTERN int uv_is_writable(const uv_stream_t* handle);
+
+UV_EXTERN int uv_stream_set_blocking(uv_stream_t* handle, int blocking);
+
+UV_EXTERN int uv_is_closing(const uv_handle_t* handle);
+
+
+/*
+ * uv_tcp_t is a subclass of uv_stream_t.
+ *
+ * Represents a TCP stream or TCP server.
+ */
+struct uv_tcp_s {
+ UV_HANDLE_FIELDS
+ UV_STREAM_FIELDS
+ UV_TCP_PRIVATE_FIELDS
+};
+
+UV_EXTERN int uv_tcp_init(uv_loop_t*, uv_tcp_t* handle);
+UV_EXTERN int uv_tcp_init_ex(uv_loop_t*, uv_tcp_t* handle, unsigned int flags);
+UV_EXTERN int uv_tcp_open(uv_tcp_t* handle, uv_os_sock_t sock);
+UV_EXTERN int uv_tcp_nodelay(uv_tcp_t* handle, int enable);
+UV_EXTERN int uv_tcp_keepalive(uv_tcp_t* handle,
+ int enable,
+ unsigned int delay);
+UV_EXTERN int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable);
+
+enum uv_tcp_flags {
+ /* Used with uv_tcp_bind, when an IPv6 address is used. */
+ UV_TCP_IPV6ONLY = 1
+};
+
+UV_EXTERN int uv_tcp_bind(uv_tcp_t* handle,
+ const struct sockaddr* addr,
+ unsigned int flags);
+UV_EXTERN int uv_tcp_getsockname(const uv_tcp_t* handle,
+ struct sockaddr* name,
+ int* namelen);
+UV_EXTERN int uv_tcp_getpeername(const uv_tcp_t* handle,
+ struct sockaddr* name,
+ int* namelen);
+UV_EXTERN int uv_tcp_close_reset(uv_tcp_t* handle, uv_close_cb close_cb);
+UV_EXTERN int uv_tcp_connect(uv_connect_t* req,
+ uv_tcp_t* handle,
+ const struct sockaddr* addr,
+ uv_connect_cb cb);
+
+/* uv_connect_t is a subclass of uv_req_t. */
+struct uv_connect_s {
+ UV_REQ_FIELDS
+ uv_connect_cb cb;
+ uv_stream_t* handle;
+ UV_CONNECT_PRIVATE_FIELDS
+};
+
+
+/*
+ * UDP support.
+ */
+
+enum uv_udp_flags {
+ /* Disables dual stack mode. */
+ UV_UDP_IPV6ONLY = 1,
+ /*
+ * Indicates message was truncated because read buffer was too small. The
+ * remainder was discarded by the OS. Used in uv_udp_recv_cb.
+ */
+ UV_UDP_PARTIAL = 2,
+ /*
+ * Indicates if SO_REUSEADDR will be set when binding the handle.
+ * This sets the SO_REUSEPORT socket flag on the BSDs and OS X. On other
+ * Unix platforms, it sets the SO_REUSEADDR flag. What that means is that
+ * multiple threads or processes can bind to the same address without error
+ * (provided they all set the flag) but only the last one to bind will receive
+ * any traffic, in effect "stealing" the port from the previous listener.
+ */
+ UV_UDP_REUSEADDR = 4,
+ /*
+ * Indicates that the message was received by recvmmsg, so the buffer provided
+ * must not be freed by the recv_cb callback.
+ */
+ UV_UDP_MMSG_CHUNK = 8,
+ /*
+ * Indicates that the buffer provided has been fully utilized by recvmmsg and
+ * that it should now be freed by the recv_cb callback. When this flag is set
+ * in uv_udp_recv_cb, nread will always be 0 and addr will always be NULL.
+ */
+ UV_UDP_MMSG_FREE = 16,
+
+ /*
+ * Indicates that recvmmsg should be used, if available.
+ */
+ UV_UDP_RECVMMSG = 256
+};
+
+typedef void (*uv_udp_send_cb)(uv_udp_send_t* req, int status);
+typedef void (*uv_udp_recv_cb)(uv_udp_t* handle,
+ ssize_t nread,
+ const uv_buf_t* buf,
+ const struct sockaddr* addr,
+ unsigned flags);
+
+/* uv_udp_t is a subclass of uv_handle_t. */
+struct uv_udp_s {
+ UV_HANDLE_FIELDS
+ /* read-only */
+ /*
+ * Number of bytes queued for sending. This field strictly shows how much
+ * information is currently queued.
+ */
+ size_t send_queue_size;
+ /*
+ * Number of send requests currently in the queue awaiting to be processed.
+ */
+ size_t send_queue_count;
+ UV_UDP_PRIVATE_FIELDS
+};
+
+/* uv_udp_send_t is a subclass of uv_req_t. */
+struct uv_udp_send_s {
+ UV_REQ_FIELDS
+ uv_udp_t* handle;
+ uv_udp_send_cb cb;
+ UV_UDP_SEND_PRIVATE_FIELDS
+};
+
+UV_EXTERN int uv_udp_init(uv_loop_t*, uv_udp_t* handle);
+UV_EXTERN int uv_udp_init_ex(uv_loop_t*, uv_udp_t* handle, unsigned int flags);
+UV_EXTERN int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock);
+UV_EXTERN int uv_udp_bind(uv_udp_t* handle,
+ const struct sockaddr* addr,
+ unsigned int flags);
+UV_EXTERN int uv_udp_connect(uv_udp_t* handle, const struct sockaddr* addr);
+
+UV_EXTERN int uv_udp_getpeername(const uv_udp_t* handle,
+ struct sockaddr* name,
+ int* namelen);
+UV_EXTERN int uv_udp_getsockname(const uv_udp_t* handle,
+ struct sockaddr* name,
+ int* namelen);
+UV_EXTERN int uv_udp_set_membership(uv_udp_t* handle,
+ const char* multicast_addr,
+ const char* interface_addr,
+ uv_membership membership);
+UV_EXTERN int uv_udp_set_source_membership(uv_udp_t* handle,
+ const char* multicast_addr,
+ const char* interface_addr,
+ const char* source_addr,
+ uv_membership membership);
+UV_EXTERN int uv_udp_set_multicast_loop(uv_udp_t* handle, int on);
+UV_EXTERN int uv_udp_set_multicast_ttl(uv_udp_t* handle, int ttl);
+UV_EXTERN int uv_udp_set_multicast_interface(uv_udp_t* handle,
+ const char* interface_addr);
+UV_EXTERN int uv_udp_set_broadcast(uv_udp_t* handle, int on);
+UV_EXTERN int uv_udp_set_ttl(uv_udp_t* handle, int ttl);
+UV_EXTERN int uv_udp_send(uv_udp_send_t* req,
+ uv_udp_t* handle,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ const struct sockaddr* addr,
+ uv_udp_send_cb send_cb);
+UV_EXTERN int uv_udp_try_send(uv_udp_t* handle,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ const struct sockaddr* addr);
+UV_EXTERN int uv_udp_recv_start(uv_udp_t* handle,
+ uv_alloc_cb alloc_cb,
+ uv_udp_recv_cb recv_cb);
+UV_EXTERN int uv_udp_using_recvmmsg(const uv_udp_t* handle);
+UV_EXTERN int uv_udp_recv_stop(uv_udp_t* handle);
+UV_EXTERN size_t uv_udp_get_send_queue_size(const uv_udp_t* handle);
+UV_EXTERN size_t uv_udp_get_send_queue_count(const uv_udp_t* handle);
+
+
+/*
+ * uv_tty_t is a subclass of uv_stream_t.
+ *
+ * Representing a stream for the console.
+ */
+struct uv_tty_s {
+ UV_HANDLE_FIELDS
+ UV_STREAM_FIELDS
+ UV_TTY_PRIVATE_FIELDS
+};
+
+typedef enum {
+ /* Initial/normal terminal mode */
+ UV_TTY_MODE_NORMAL,
+ /* Raw input mode (On Windows, ENABLE_WINDOW_INPUT is also enabled) */
+ UV_TTY_MODE_RAW,
+ /* Binary-safe I/O mode for IPC (Unix-only) */
+ UV_TTY_MODE_IO
+} uv_tty_mode_t;
+
+typedef enum {
+ /*
+ * The console supports handling of virtual terminal sequences
+ * (Windows10 new console, ConEmu)
+ */
+ UV_TTY_SUPPORTED,
+ /* The console cannot process the virtual terminal sequence. (Legacy
+ * console)
+ */
+ UV_TTY_UNSUPPORTED
+} uv_tty_vtermstate_t;
+
+
+UV_EXTERN int uv_tty_init(uv_loop_t*, uv_tty_t*, uv_file fd, int readable);
+UV_EXTERN int uv_tty_set_mode(uv_tty_t*, uv_tty_mode_t mode);
+UV_EXTERN int uv_tty_reset_mode(void);
+UV_EXTERN int uv_tty_get_winsize(uv_tty_t*, int* width, int* height);
+UV_EXTERN void uv_tty_set_vterm_state(uv_tty_vtermstate_t state);
+UV_EXTERN int uv_tty_get_vterm_state(uv_tty_vtermstate_t* state);
+
+#ifdef __cplusplus
+extern "C++" {
+
+inline int uv_tty_set_mode(uv_tty_t* handle, int mode) {
+ return uv_tty_set_mode(handle, static_cast<uv_tty_mode_t>(mode));
+}
+
+}
+#endif
+
+UV_EXTERN uv_handle_type uv_guess_handle(uv_file file);
+
+/*
+ * uv_pipe_t is a subclass of uv_stream_t.
+ *
+ * Representing a pipe stream or pipe server. On Windows this is a Named
+ * Pipe. On Unix this is a Unix domain socket.
+ */
+struct uv_pipe_s {
+ UV_HANDLE_FIELDS
+ UV_STREAM_FIELDS
+ int ipc; /* non-zero if this pipe is used for passing handles */
+ UV_PIPE_PRIVATE_FIELDS
+};
+
+UV_EXTERN int uv_pipe_init(uv_loop_t*, uv_pipe_t* handle, int ipc);
+UV_EXTERN int uv_pipe_open(uv_pipe_t*, uv_file file);
+UV_EXTERN int uv_pipe_bind(uv_pipe_t* handle, const char* name);
+UV_EXTERN void uv_pipe_connect(uv_connect_t* req,
+ uv_pipe_t* handle,
+ const char* name,
+ uv_connect_cb cb);
+UV_EXTERN int uv_pipe_getsockname(const uv_pipe_t* handle,
+ char* buffer,
+ size_t* size);
+UV_EXTERN int uv_pipe_getpeername(const uv_pipe_t* handle,
+ char* buffer,
+ size_t* size);
+UV_EXTERN void uv_pipe_pending_instances(uv_pipe_t* handle, int count);
+UV_EXTERN int uv_pipe_pending_count(uv_pipe_t* handle);
+UV_EXTERN uv_handle_type uv_pipe_pending_type(uv_pipe_t* handle);
+UV_EXTERN int uv_pipe_chmod(uv_pipe_t* handle, int flags);
+
+
+struct uv_poll_s {
+ UV_HANDLE_FIELDS
+ uv_poll_cb poll_cb;
+ UV_POLL_PRIVATE_FIELDS
+};
+
+enum uv_poll_event {
+ UV_READABLE = 1,
+ UV_WRITABLE = 2,
+ UV_DISCONNECT = 4,
+ UV_PRIORITIZED = 8
+};
+
+UV_EXTERN int uv_poll_init(uv_loop_t* loop, uv_poll_t* handle, int fd);
+UV_EXTERN int uv_poll_init_socket(uv_loop_t* loop,
+ uv_poll_t* handle,
+ uv_os_sock_t socket);
+UV_EXTERN int uv_poll_start(uv_poll_t* handle, int events, uv_poll_cb cb);
+UV_EXTERN int uv_poll_stop(uv_poll_t* handle);
+
+
+struct uv_prepare_s {
+ UV_HANDLE_FIELDS
+ UV_PREPARE_PRIVATE_FIELDS
+};
+
+UV_EXTERN int uv_prepare_init(uv_loop_t*, uv_prepare_t* prepare);
+UV_EXTERN int uv_prepare_start(uv_prepare_t* prepare, uv_prepare_cb cb);
+UV_EXTERN int uv_prepare_stop(uv_prepare_t* prepare);
+
+
+struct uv_check_s {
+ UV_HANDLE_FIELDS
+ UV_CHECK_PRIVATE_FIELDS
+};
+
+UV_EXTERN int uv_check_init(uv_loop_t*, uv_check_t* check);
+UV_EXTERN int uv_check_start(uv_check_t* check, uv_check_cb cb);
+UV_EXTERN int uv_check_stop(uv_check_t* check);
+
+
+struct uv_idle_s {
+ UV_HANDLE_FIELDS
+ UV_IDLE_PRIVATE_FIELDS
+};
+
+UV_EXTERN int uv_idle_init(uv_loop_t*, uv_idle_t* idle);
+UV_EXTERN int uv_idle_start(uv_idle_t* idle, uv_idle_cb cb);
+UV_EXTERN int uv_idle_stop(uv_idle_t* idle);
+
+
+struct uv_async_s {
+ UV_HANDLE_FIELDS
+ UV_ASYNC_PRIVATE_FIELDS
+};
+
+UV_EXTERN int uv_async_init(uv_loop_t*,
+ uv_async_t* async,
+ uv_async_cb async_cb);
+UV_EXTERN int uv_async_send(uv_async_t* async);
+
+
+/*
+ * uv_timer_t is a subclass of uv_handle_t.
+ *
+ * Used to get woken up at a specified time in the future.
+ */
+struct uv_timer_s {
+ UV_HANDLE_FIELDS
+ UV_TIMER_PRIVATE_FIELDS
+};
+
+UV_EXTERN int uv_timer_init(uv_loop_t*, uv_timer_t* handle);
+UV_EXTERN int uv_timer_start(uv_timer_t* handle,
+ uv_timer_cb cb,
+ uint64_t timeout,
+ uint64_t repeat);
+UV_EXTERN int uv_timer_stop(uv_timer_t* handle);
+UV_EXTERN int uv_timer_again(uv_timer_t* handle);
+UV_EXTERN void uv_timer_set_repeat(uv_timer_t* handle, uint64_t repeat);
+UV_EXTERN uint64_t uv_timer_get_repeat(const uv_timer_t* handle);
+UV_EXTERN uint64_t uv_timer_get_due_in(const uv_timer_t* handle);
+
+
+/*
+ * uv_getaddrinfo_t is a subclass of uv_req_t.
+ *
+ * Request object for uv_getaddrinfo.
+ */
+struct uv_getaddrinfo_s {
+ UV_REQ_FIELDS
+ /* read-only */
+ uv_loop_t* loop;
+ /* struct addrinfo* addrinfo is marked as private, but it really isn't. */
+ UV_GETADDRINFO_PRIVATE_FIELDS
+};
+
+
+UV_EXTERN int uv_getaddrinfo(uv_loop_t* loop,
+ uv_getaddrinfo_t* req,
+ uv_getaddrinfo_cb getaddrinfo_cb,
+ const char* node,
+ const char* service,
+ const struct addrinfo* hints);
+UV_EXTERN void uv_freeaddrinfo(struct addrinfo* ai);
+
+
+/*
+* uv_getnameinfo_t is a subclass of uv_req_t.
+*
+* Request object for uv_getnameinfo.
+*/
+struct uv_getnameinfo_s {
+ UV_REQ_FIELDS
+ /* read-only */
+ uv_loop_t* loop;
+ /* host and service are marked as private, but they really aren't. */
+ UV_GETNAMEINFO_PRIVATE_FIELDS
+};
+
+UV_EXTERN int uv_getnameinfo(uv_loop_t* loop,
+ uv_getnameinfo_t* req,
+ uv_getnameinfo_cb getnameinfo_cb,
+ const struct sockaddr* addr,
+ int flags);
+
+
+/* uv_spawn() options. */
+typedef enum {
+ UV_IGNORE = 0x00,
+ UV_CREATE_PIPE = 0x01,
+ UV_INHERIT_FD = 0x02,
+ UV_INHERIT_STREAM = 0x04,
+
+ /*
+ * When UV_CREATE_PIPE is specified, UV_READABLE_PIPE and UV_WRITABLE_PIPE
+ * determine the direction of flow, from the child process' perspective. Both
+ * flags may be specified to create a duplex data stream.
+ */
+ UV_READABLE_PIPE = 0x10,
+ UV_WRITABLE_PIPE = 0x20,
+
+ /*
+ * Open the child pipe handle in overlapped mode on Windows.
+ * On Unix it is silently ignored.
+ */
+ UV_OVERLAPPED_PIPE = 0x40
+} uv_stdio_flags;
+
+typedef struct uv_stdio_container_s {
+ uv_stdio_flags flags;
+
+ union {
+ uv_stream_t* stream;
+ int fd;
+ } data;
+} uv_stdio_container_t;
+
+typedef struct uv_process_options_s {
+ uv_exit_cb exit_cb; /* Called after the process exits. */
+ const char* file; /* Path to program to execute. */
+ /*
+ * Command line arguments. args[0] should be the path to the program. On
+   * Windows this uses CreateProcess, which concatenates the arguments into a
+   * string; this can cause some strange errors. See the note at
+ * windows_verbatim_arguments.
+ */
+ char** args;
+ /*
+ * This will be set as the environ variable in the subprocess. If this is
+   * NULL then the parent's environ will be used.
+ */
+ char** env;
+ /*
+ * If non-null this represents a directory the subprocess should execute
+ * in. Stands for current working directory.
+ */
+ const char* cwd;
+ /*
+ * Various flags that control how uv_spawn() behaves. See the definition of
+ * `enum uv_process_flags` below.
+ */
+ unsigned int flags;
+ /*
+ * The `stdio` field points to an array of uv_stdio_container_t structs that
+ * describe the file descriptors that will be made available to the child
+ * process. The convention is that stdio[0] points to stdin, fd 1 is used for
+ * stdout, and fd 2 is stderr.
+ *
+ * Note that on windows file descriptors greater than 2 are available to the
+   * child process only if the child process uses the MSVCRT runtime.
+ */
+ int stdio_count;
+ uv_stdio_container_t* stdio;
+ /*
+ * Libuv can change the child process' user/group id. This happens only when
+ * the appropriate bits are set in the flags fields. This is not supported on
+ * windows; uv_spawn() will fail and set the error to UV_ENOTSUP.
+ */
+ uv_uid_t uid;
+ uv_gid_t gid;
+ /*
+ Libuv can set the child process' CPU affinity mask. This happens when
+ `cpumask` is non-NULL. It must point to an array of char values
+ of length `cpumask_size`, whose value must be at least that returned by
+ uv_cpumask_size(). Each byte in the mask can be either zero (false)
+ or non-zero (true) to indicate whether the corresponding processor at
+ that index is included.
+
+ If enabled on an unsupported platform, uv_spawn() will fail with
+ UV_ENOTSUP.
+ */
+ char* cpumask;
+ size_t cpumask_size;
+} uv_process_options_t;
+
+/*
+ * These are the flags that can be used for the uv_process_options.flags field.
+ */
+enum uv_process_flags {
+ /*
+ * Set the child process' user id. The user id is supplied in the `uid` field
+ * of the options struct. This does not work on windows; setting this flag
+ * will cause uv_spawn() to fail.
+ */
+ UV_PROCESS_SETUID = (1 << 0),
+ /*
+ * Set the child process' group id. The user id is supplied in the `gid`
+ * field of the options struct. This does not work on windows; setting this
+ * flag will cause uv_spawn() to fail.
+ */
+ UV_PROCESS_SETGID = (1 << 1),
+ /*
+ * Do not wrap any arguments in quotes, or perform any other escaping, when
+ * converting the argument list into a command line string. This option is
+ * only meaningful on Windows systems. On Unix it is silently ignored.
+ */
+ UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS = (1 << 2),
+ /*
+ * Spawn the child process in a detached state - this will make it a process
+ * group leader, and will effectively enable the child to keep running after
+ * the parent exits. Note that the child process will still keep the
+ * parent's event loop alive unless the parent process calls uv_unref() on
+ * the child's process handle.
+ */
+ UV_PROCESS_DETACHED = (1 << 3),
+ /*
+ * Hide the subprocess window that would normally be created. This option is
+ * only meaningful on Windows systems. On Unix it is silently ignored.
+ */
+ UV_PROCESS_WINDOWS_HIDE = (1 << 4),
+ /*
+ * Hide the subprocess console window that would normally be created. This
+ * option is only meaningful on Windows systems. On Unix it is silently
+ * ignored.
+ */
+ UV_PROCESS_WINDOWS_HIDE_CONSOLE = (1 << 5),
+ /*
+ * Hide the subprocess GUI window that would normally be created. This
+ * option is only meaningful on Windows systems. On Unix it is silently
+ * ignored.
+ */
+ UV_PROCESS_WINDOWS_HIDE_GUI = (1 << 6)
+};
+
+/*
+ * uv_process_t is a subclass of uv_handle_t.
+ */
+struct uv_process_s {
+ UV_HANDLE_FIELDS
+ uv_exit_cb exit_cb;
+ int pid;
+ UV_PROCESS_PRIVATE_FIELDS
+};
+
+UV_EXTERN int uv_spawn(uv_loop_t* loop,
+ uv_process_t* handle,
+ const uv_process_options_t* options);
+UV_EXTERN int uv_process_kill(uv_process_t*, int signum);
+UV_EXTERN int uv_kill(int pid, int signum);
+UV_EXTERN uv_pid_t uv_process_get_pid(const uv_process_t*);
+
+
+/*
+ * uv_work_t is a subclass of uv_req_t.
+ */
+struct uv_work_s {
+ UV_REQ_FIELDS
+ uv_loop_t* loop;
+ uv_work_cb work_cb;
+ uv_after_work_cb after_work_cb;
+ UV_WORK_PRIVATE_FIELDS
+};
+
+UV_EXTERN int uv_queue_work(uv_loop_t* loop,
+ uv_work_t* req,
+ uv_work_cb work_cb,
+ uv_after_work_cb after_work_cb);
+
+UV_EXTERN int uv_cancel(uv_req_t* req);
+
+
+struct uv_cpu_times_s {
+ uint64_t user; /* milliseconds */
+ uint64_t nice; /* milliseconds */
+ uint64_t sys; /* milliseconds */
+ uint64_t idle; /* milliseconds */
+ uint64_t irq; /* milliseconds */
+};
+
+struct uv_cpu_info_s {
+ char* model;
+ int speed;
+ struct uv_cpu_times_s cpu_times;
+};
+
+struct uv_interface_address_s {
+ char* name;
+ char phys_addr[6];
+ int is_internal;
+ union {
+ struct sockaddr_in address4;
+ struct sockaddr_in6 address6;
+ } address;
+ union {
+ struct sockaddr_in netmask4;
+ struct sockaddr_in6 netmask6;
+ } netmask;
+};
+
+struct uv_passwd_s {
+ char* username;
+ long uid;
+ long gid;
+ char* shell;
+ char* homedir;
+};
+
+struct uv_utsname_s {
+ char sysname[256];
+ char release[256];
+ char version[256];
+ char machine[256];
+ /* This struct does not contain the nodename and domainname fields present in
+ the utsname type. domainname is a GNU extension. Both fields are referred
+ to as meaningless in the docs. */
+};
+
+struct uv_statfs_s {
+ uint64_t f_type;
+ uint64_t f_bsize;
+ uint64_t f_blocks;
+ uint64_t f_bfree;
+ uint64_t f_bavail;
+ uint64_t f_files;
+ uint64_t f_ffree;
+ uint64_t f_spare[4];
+};
+
+typedef enum {
+ UV_DIRENT_UNKNOWN,
+ UV_DIRENT_FILE,
+ UV_DIRENT_DIR,
+ UV_DIRENT_LINK,
+ UV_DIRENT_FIFO,
+ UV_DIRENT_SOCKET,
+ UV_DIRENT_CHAR,
+ UV_DIRENT_BLOCK
+} uv_dirent_type_t;
+
+struct uv_dirent_s {
+ const char* name;
+ uv_dirent_type_t type;
+};
+
+UV_EXTERN char** uv_setup_args(int argc, char** argv);
+UV_EXTERN int uv_get_process_title(char* buffer, size_t size);
+UV_EXTERN int uv_set_process_title(const char* title);
+UV_EXTERN int uv_resident_set_memory(size_t* rss);
+UV_EXTERN int uv_uptime(double* uptime);
+UV_EXTERN uv_os_fd_t uv_get_osfhandle(int fd);
+UV_EXTERN int uv_open_osfhandle(uv_os_fd_t os_fd);
+
+typedef struct {
+ long tv_sec;
+ long tv_usec;
+} uv_timeval_t;
+
+typedef struct {
+ int64_t tv_sec;
+ int32_t tv_usec;
+} uv_timeval64_t;
+
+typedef struct {
+ uv_timeval_t ru_utime; /* user CPU time used */
+ uv_timeval_t ru_stime; /* system CPU time used */
+ uint64_t ru_maxrss; /* maximum resident set size */
+ uint64_t ru_ixrss; /* integral shared memory size */
+ uint64_t ru_idrss; /* integral unshared data size */
+ uint64_t ru_isrss; /* integral unshared stack size */
+ uint64_t ru_minflt; /* page reclaims (soft page faults) */
+ uint64_t ru_majflt; /* page faults (hard page faults) */
+ uint64_t ru_nswap; /* swaps */
+ uint64_t ru_inblock; /* block input operations */
+ uint64_t ru_oublock; /* block output operations */
+ uint64_t ru_msgsnd; /* IPC messages sent */
+ uint64_t ru_msgrcv; /* IPC messages received */
+ uint64_t ru_nsignals; /* signals received */
+ uint64_t ru_nvcsw; /* voluntary context switches */
+ uint64_t ru_nivcsw; /* involuntary context switches */
+} uv_rusage_t;
+
+UV_EXTERN int uv_getrusage(uv_rusage_t* rusage);
+
+UV_EXTERN int uv_os_homedir(char* buffer, size_t* size);
+UV_EXTERN int uv_os_tmpdir(char* buffer, size_t* size);
+UV_EXTERN int uv_os_get_passwd(uv_passwd_t* pwd);
+UV_EXTERN void uv_os_free_passwd(uv_passwd_t* pwd);
+UV_EXTERN uv_pid_t uv_os_getpid(void);
+UV_EXTERN uv_pid_t uv_os_getppid(void);
+
+#if defined(__PASE__)
+/* On IBM i PASE, the highest process priority is -10 */
+# define UV_PRIORITY_LOW 39 /* RUNPTY(99) */
+# define UV_PRIORITY_BELOW_NORMAL 15 /* RUNPTY(50) */
+# define UV_PRIORITY_NORMAL 0 /* RUNPTY(20) */
+# define UV_PRIORITY_ABOVE_NORMAL -4 /* RUNPTY(12) */
+# define UV_PRIORITY_HIGH -7 /* RUNPTY(6) */
+# define UV_PRIORITY_HIGHEST -10 /* RUNPTY(1) */
+#else
+# define UV_PRIORITY_LOW 19
+# define UV_PRIORITY_BELOW_NORMAL 10
+# define UV_PRIORITY_NORMAL 0
+# define UV_PRIORITY_ABOVE_NORMAL -7
+# define UV_PRIORITY_HIGH -14
+# define UV_PRIORITY_HIGHEST -20
+#endif
+
+UV_EXTERN int uv_os_getpriority(uv_pid_t pid, int* priority);
+UV_EXTERN int uv_os_setpriority(uv_pid_t pid, int priority);
+
+UV_EXTERN int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count);
+UV_EXTERN void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count);
+UV_EXTERN int uv_cpumask_size(void);
+
+UV_EXTERN int uv_interface_addresses(uv_interface_address_t** addresses,
+ int* count);
+UV_EXTERN void uv_free_interface_addresses(uv_interface_address_t* addresses,
+ int count);
+
+struct uv_env_item_s {
+ char* name;
+ char* value;
+};
+
+UV_EXTERN int uv_os_environ(uv_env_item_t** envitems, int* count);
+UV_EXTERN void uv_os_free_environ(uv_env_item_t* envitems, int count);
+UV_EXTERN int uv_os_getenv(const char* name, char* buffer, size_t* size);
+UV_EXTERN int uv_os_setenv(const char* name, const char* value);
+UV_EXTERN int uv_os_unsetenv(const char* name);
+
+#ifdef MAXHOSTNAMELEN
+# define UV_MAXHOSTNAMESIZE (MAXHOSTNAMELEN + 1)
+#else
+ /*
+ Fallback for the maximum hostname size, including the null terminator. The
+ Windows gethostname() documentation states that 256 bytes will always be
+ large enough to hold the null-terminated hostname.
+ */
+# define UV_MAXHOSTNAMESIZE 256
+#endif
+
+UV_EXTERN int uv_os_gethostname(char* buffer, size_t* size);
+
+UV_EXTERN int uv_os_uname(uv_utsname_t* buffer);
+
+UV_EXTERN uint64_t uv_metrics_idle_time(uv_loop_t* loop);
+
+typedef enum {
+ UV_FS_UNKNOWN = -1,
+ UV_FS_CUSTOM,
+ UV_FS_OPEN,
+ UV_FS_CLOSE,
+ UV_FS_READ,
+ UV_FS_WRITE,
+ UV_FS_SENDFILE,
+ UV_FS_STAT,
+ UV_FS_LSTAT,
+ UV_FS_FSTAT,
+ UV_FS_FTRUNCATE,
+ UV_FS_UTIME,
+ UV_FS_FUTIME,
+ UV_FS_ACCESS,
+ UV_FS_CHMOD,
+ UV_FS_FCHMOD,
+ UV_FS_FSYNC,
+ UV_FS_FDATASYNC,
+ UV_FS_UNLINK,
+ UV_FS_RMDIR,
+ UV_FS_MKDIR,
+ UV_FS_MKDTEMP,
+ UV_FS_RENAME,
+ UV_FS_SCANDIR,
+ UV_FS_LINK,
+ UV_FS_SYMLINK,
+ UV_FS_READLINK,
+ UV_FS_CHOWN,
+ UV_FS_FCHOWN,
+ UV_FS_REALPATH,
+ UV_FS_COPYFILE,
+ UV_FS_LCHOWN,
+ UV_FS_OPENDIR,
+ UV_FS_READDIR,
+ UV_FS_CLOSEDIR,
+ UV_FS_STATFS,
+ UV_FS_MKSTEMP,
+ UV_FS_LUTIME
+} uv_fs_type;
+
+struct uv_dir_s {
+ uv_dirent_t* dirents;
+ size_t nentries;
+ void* reserved[4];
+ UV_DIR_PRIVATE_FIELDS
+};
+
+/* uv_fs_t is a subclass of uv_req_t. */
+struct uv_fs_s {
+ UV_REQ_FIELDS
+ uv_fs_type fs_type;
+ uv_loop_t* loop;
+ uv_fs_cb cb;
+ ssize_t result;
+ void* ptr;
+ const char* path;
+ uv_stat_t statbuf; /* Stores the result of uv_fs_stat() and uv_fs_fstat(). */
+ UV_FS_PRIVATE_FIELDS
+};
+
+UV_EXTERN uv_fs_type uv_fs_get_type(const uv_fs_t*);
+UV_EXTERN ssize_t uv_fs_get_result(const uv_fs_t*);
+UV_EXTERN int uv_fs_get_system_error(const uv_fs_t*);
+UV_EXTERN void* uv_fs_get_ptr(const uv_fs_t*);
+UV_EXTERN const char* uv_fs_get_path(const uv_fs_t*);
+UV_EXTERN uv_stat_t* uv_fs_get_statbuf(uv_fs_t*);
+
+UV_EXTERN void uv_fs_req_cleanup(uv_fs_t* req);
+UV_EXTERN int uv_fs_close(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_file file,
+ uv_fs_cb cb);
+UV_EXTERN int uv_fs_open(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ int flags,
+ int mode,
+ uv_fs_cb cb);
+UV_EXTERN int uv_fs_read(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_file file,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ int64_t offset,
+ uv_fs_cb cb);
+UV_EXTERN int uv_fs_unlink(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ uv_fs_cb cb);
+UV_EXTERN int uv_fs_write(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_file file,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ int64_t offset,
+ uv_fs_cb cb);
+/*
+ * This flag can be used with uv_fs_copyfile() to return an error if the
+ * destination already exists.
+ */
+#define UV_FS_COPYFILE_EXCL 0x0001
+
+/*
+ * This flag can be used with uv_fs_copyfile() to attempt to create a reflink.
+ * If copy-on-write is not supported, a fallback copy mechanism is used.
+ */
+#define UV_FS_COPYFILE_FICLONE 0x0002
+
+/*
+ * This flag can be used with uv_fs_copyfile() to attempt to create a reflink.
+ * If copy-on-write is not supported, an error is returned.
+ */
+#define UV_FS_COPYFILE_FICLONE_FORCE 0x0004
+
+UV_EXTERN int uv_fs_copyfile(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ const char* new_path,
+ int flags,
+ uv_fs_cb cb);
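+
+/*
+ * Illustrative sketch (editorial example, not part of the upstream header):
+ * a synchronous copy that fails if `dst` already exists and prefers a
+ * copy-on-write clone where the filesystem supports one. A NULL callback
+ * makes the request run synchronously; it still needs uv_fs_req_cleanup().
+ */
+static int example_copyfile(uv_loop_t* loop, const char* src, const char* dst) {
+  uv_fs_t req;
+  int rc = uv_fs_copyfile(loop, &req, src, dst,
+                          UV_FS_COPYFILE_EXCL | UV_FS_COPYFILE_FICLONE,
+                          NULL /* no callback: synchronous */);
+  uv_fs_req_cleanup(&req);
+  return rc;  /* 0 on success, e.g. UV_EEXIST if dst already exists */
+}
+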
+UV_EXTERN int uv_fs_mkdir(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ int mode,
+ uv_fs_cb cb);
+UV_EXTERN int uv_fs_mkdtemp(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* tpl,
+ uv_fs_cb cb);
+UV_EXTERN int uv_fs_mkstemp(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* tpl,
+ uv_fs_cb cb);
+UV_EXTERN int uv_fs_rmdir(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ uv_fs_cb cb);
+UV_EXTERN int uv_fs_scandir(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ int flags,
+ uv_fs_cb cb);
+UV_EXTERN int uv_fs_scandir_next(uv_fs_t* req,
+ uv_dirent_t* ent);
+UV_EXTERN int uv_fs_opendir(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ uv_fs_cb cb);
+UV_EXTERN int uv_fs_readdir(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_dir_t* dir,
+ uv_fs_cb cb);
+UV_EXTERN int uv_fs_closedir(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_dir_t* dir,
+ uv_fs_cb cb);
+UV_EXTERN int uv_fs_stat(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ uv_fs_cb cb);
+UV_EXTERN int uv_fs_fstat(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_file file,
+ uv_fs_cb cb);
+UV_EXTERN int uv_fs_rename(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ const char* new_path,
+ uv_fs_cb cb);
+UV_EXTERN int uv_fs_fsync(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_file file,
+ uv_fs_cb cb);
+UV_EXTERN int uv_fs_fdatasync(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_file file,
+ uv_fs_cb cb);
+UV_EXTERN int uv_fs_ftruncate(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_file file,
+ int64_t offset,
+ uv_fs_cb cb);
+UV_EXTERN int uv_fs_sendfile(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_file out_fd,
+ uv_file in_fd,
+ int64_t in_offset,
+ size_t length,
+ uv_fs_cb cb);
+UV_EXTERN int uv_fs_access(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ int mode,
+ uv_fs_cb cb);
+UV_EXTERN int uv_fs_chmod(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ int mode,
+ uv_fs_cb cb);
+UV_EXTERN int uv_fs_utime(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ double atime,
+ double mtime,
+ uv_fs_cb cb);
+UV_EXTERN int uv_fs_futime(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_file file,
+ double atime,
+ double mtime,
+ uv_fs_cb cb);
+UV_EXTERN int uv_fs_lutime(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ double atime,
+ double mtime,
+ uv_fs_cb cb);
+UV_EXTERN int uv_fs_lstat(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ uv_fs_cb cb);
+UV_EXTERN int uv_fs_link(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ const char* new_path,
+ uv_fs_cb cb);
+
+/*
+ * This flag can be used with uv_fs_symlink() on Windows to specify whether
+ * the path argument points to a directory.
+ */
+#define UV_FS_SYMLINK_DIR 0x0001
+
+/*
+ * This flag can be used with uv_fs_symlink() on Windows to specify whether
+ * the symlink is to be created using junction points.
+ */
+#define UV_FS_SYMLINK_JUNCTION 0x0002
+
+UV_EXTERN int uv_fs_symlink(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ const char* new_path,
+ int flags,
+ uv_fs_cb cb);
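+
+/*
+ * Illustrative sketch (editorial example, not part of the upstream header):
+ * creating a symlink to a directory. The flags above only have an effect on
+ * Windows; other platforms ignore them.
+ */
+static int example_symlink_dir(uv_loop_t* loop,
+                               const char* target_dir,
+                               const char* link_path) {
+  uv_fs_t req;
+  int rc = uv_fs_symlink(loop, &req, target_dir, link_path,
+                         UV_FS_SYMLINK_DIR, NULL /* synchronous */);
+  uv_fs_req_cleanup(&req);
+  return rc;
+}
+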
+UV_EXTERN int uv_fs_readlink(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ uv_fs_cb cb);
+UV_EXTERN int uv_fs_realpath(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ uv_fs_cb cb);
+UV_EXTERN int uv_fs_fchmod(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_file file,
+ int mode,
+ uv_fs_cb cb);
+UV_EXTERN int uv_fs_chown(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ uv_uid_t uid,
+ uv_gid_t gid,
+ uv_fs_cb cb);
+UV_EXTERN int uv_fs_fchown(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_file file,
+ uv_uid_t uid,
+ uv_gid_t gid,
+ uv_fs_cb cb);
+UV_EXTERN int uv_fs_lchown(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ uv_uid_t uid,
+ uv_gid_t gid,
+ uv_fs_cb cb);
+UV_EXTERN int uv_fs_statfs(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ uv_fs_cb cb);
+
+
+enum uv_fs_event {
+ UV_RENAME = 1,
+ UV_CHANGE = 2
+};
+
+
+struct uv_fs_event_s {
+ UV_HANDLE_FIELDS
+ /* private */
+ char* path;
+ UV_FS_EVENT_PRIVATE_FIELDS
+};
+
+
+/*
+ * uv_fs_stat() based polling file watcher.
+ */
+struct uv_fs_poll_s {
+ UV_HANDLE_FIELDS
+ /* Private, don't touch. */
+ void* poll_ctx;
+};
+
+UV_EXTERN int uv_fs_poll_init(uv_loop_t* loop, uv_fs_poll_t* handle);
+UV_EXTERN int uv_fs_poll_start(uv_fs_poll_t* handle,
+ uv_fs_poll_cb poll_cb,
+ const char* path,
+ unsigned int interval);
+UV_EXTERN int uv_fs_poll_stop(uv_fs_poll_t* handle);
+UV_EXTERN int uv_fs_poll_getpath(uv_fs_poll_t* handle,
+ char* buffer,
+ size_t* size);
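+
+/*
+ * Illustrative sketch (editorial example, not part of the upstream header):
+ * polling a path's stat data every two seconds. The handle must outlive the
+ * watcher; a static handle is used here only to keep the sketch short.
+ */
+static void example_fs_poll_cb(uv_fs_poll_t* handle,
+                               int status,
+                               const uv_stat_t* prev,
+                               const uv_stat_t* curr) {
+  (void) handle;
+  (void) prev;
+  (void) curr;
+  /* status == 0: the stat data changed; status < 0: e.g. UV_ENOENT. */
+}
+
+static uv_fs_poll_t example_fs_poll_handle;
+
+static int example_watch_by_polling(uv_loop_t* loop, const char* path) {
+  int rc = uv_fs_poll_init(loop, &example_fs_poll_handle);
+  if (rc != 0)
+    return rc;
+  return uv_fs_poll_start(&example_fs_poll_handle, example_fs_poll_cb,
+                          path, 2000 /* milliseconds */);
+}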
+
+
+struct uv_signal_s {
+ UV_HANDLE_FIELDS
+ uv_signal_cb signal_cb;
+ int signum;
+ UV_SIGNAL_PRIVATE_FIELDS
+};
+
+UV_EXTERN int uv_signal_init(uv_loop_t* loop, uv_signal_t* handle);
+UV_EXTERN int uv_signal_start(uv_signal_t* handle,
+ uv_signal_cb signal_cb,
+ int signum);
+UV_EXTERN int uv_signal_start_oneshot(uv_signal_t* handle,
+ uv_signal_cb signal_cb,
+ int signum);
+UV_EXTERN int uv_signal_stop(uv_signal_t* handle);
+
+UV_EXTERN void uv_loadavg(double avg[3]);
+
+
+/*
+ * Flags to be passed to uv_fs_event_start().
+ */
+enum uv_fs_event_flags {
+ /*
+ * By default, if the fs event watcher is given a directory name, we will
+   * watch for all events in that directory. This flag overrides that behavior
+   * and makes fs_event report only changes to the directory entry itself. This
+   * flag does not affect the individual files being watched.
+   * This flag is currently not implemented on any backend.
+ */
+ UV_FS_EVENT_WATCH_ENTRY = 1,
+
+ /*
+ * By default uv_fs_event will try to use a kernel interface such as inotify
+ * or kqueue to detect events. This may not work on remote filesystems such
+   * as NFS mounts. This flag makes fs_event fall back to calling stat() at a
+   * regular interval.
+   * This flag is currently not implemented on any backend.
+ */
+ UV_FS_EVENT_STAT = 2,
+
+ /*
+   * By default, when watching a directory, the event watcher does not register
+   * (i.e. ignores) changes in its subdirectories.
+   * This flag overrides that behaviour on platforms that support it.
+ */
+ UV_FS_EVENT_RECURSIVE = 4
+};
+
+
+UV_EXTERN int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle);
+UV_EXTERN int uv_fs_event_start(uv_fs_event_t* handle,
+ uv_fs_event_cb cb,
+ const char* path,
+ unsigned int flags);
+UV_EXTERN int uv_fs_event_stop(uv_fs_event_t* handle);
+UV_EXTERN int uv_fs_event_getpath(uv_fs_event_t* handle,
+ char* buffer,
+ size_t* size);
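+
+/*
+ * Illustrative sketch (editorial example, not part of the upstream header):
+ * watching a directory with the kernel-backed watcher described above.
+ * UV_FS_EVENT_RECURSIVE is honoured only on platforms that support it.
+ */
+static void example_fs_event_cb(uv_fs_event_t* handle,
+                                const char* filename,
+                                int events,
+                                int status) {
+  (void) handle;
+  (void) filename;  /* may be NULL on some platforms */
+  (void) status;
+  if (events & UV_RENAME) { /* entry was created, removed or renamed */ }
+  if (events & UV_CHANGE) { /* entry was modified */ }
+}
+
+static uv_fs_event_t example_fs_event_handle;
+
+static int example_watch_directory(uv_loop_t* loop, const char* path) {
+  int rc = uv_fs_event_init(loop, &example_fs_event_handle);
+  if (rc != 0)
+    return rc;
+  return uv_fs_event_start(&example_fs_event_handle, example_fs_event_cb,
+                           path, UV_FS_EVENT_RECURSIVE);
+}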
+
+UV_EXTERN int uv_ip4_addr(const char* ip, int port, struct sockaddr_in* addr);
+UV_EXTERN int uv_ip6_addr(const char* ip, int port, struct sockaddr_in6* addr);
+
+UV_EXTERN int uv_ip4_name(const struct sockaddr_in* src, char* dst, size_t size);
+UV_EXTERN int uv_ip6_name(const struct sockaddr_in6* src, char* dst, size_t size);
+
+UV_EXTERN int uv_inet_ntop(int af, const void* src, char* dst, size_t size);
+UV_EXTERN int uv_inet_pton(int af, const char* src, void* dst);
+
+
+struct uv_random_s {
+ UV_REQ_FIELDS
+ /* read-only */
+ uv_loop_t* loop;
+ /* private */
+ int status;
+ void* buf;
+ size_t buflen;
+ uv_random_cb cb;
+ struct uv__work work_req;
+};
+
+UV_EXTERN int uv_random(uv_loop_t* loop,
+ uv_random_t* req,
+ void *buf,
+ size_t buflen,
+ unsigned flags, /* For future extension; must be 0. */
+ uv_random_cb cb);
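+
+/*
+ * Illustrative sketch (editorial example, not part of the upstream header):
+ * filling a buffer with random bytes synchronously. With a NULL callback the
+ * call blocks until done and the loop and request arguments go unused, so
+ * they may be NULL; `flags` must currently be 0.
+ */
+static int example_fill_random(void* buf, size_t len) {
+  return uv_random(NULL, NULL, buf, len, 0, NULL);
+}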
+
+#if defined(IF_NAMESIZE)
+# define UV_IF_NAMESIZE (IF_NAMESIZE + 1)
+#elif defined(IFNAMSIZ)
+# define UV_IF_NAMESIZE (IFNAMSIZ + 1)
+#else
+# define UV_IF_NAMESIZE (16 + 1)
+#endif
+
+UV_EXTERN int uv_if_indextoname(unsigned int ifindex,
+ char* buffer,
+ size_t* size);
+UV_EXTERN int uv_if_indextoiid(unsigned int ifindex,
+ char* buffer,
+ size_t* size);
+
+UV_EXTERN int uv_exepath(char* buffer, size_t* size);
+
+UV_EXTERN int uv_cwd(char* buffer, size_t* size);
+
+UV_EXTERN int uv_chdir(const char* dir);
+
+UV_EXTERN uint64_t uv_get_free_memory(void);
+UV_EXTERN uint64_t uv_get_total_memory(void);
+UV_EXTERN uint64_t uv_get_constrained_memory(void);
+
+UV_EXTERN uint64_t uv_hrtime(void);
+UV_EXTERN void uv_sleep(unsigned int msec);
+
+UV_EXTERN void uv_disable_stdio_inheritance(void);
+
+UV_EXTERN int uv_dlopen(const char* filename, uv_lib_t* lib);
+UV_EXTERN void uv_dlclose(uv_lib_t* lib);
+UV_EXTERN int uv_dlsym(uv_lib_t* lib, const char* name, void** ptr);
+UV_EXTERN const char* uv_dlerror(const uv_lib_t* lib);
+
+UV_EXTERN int uv_mutex_init(uv_mutex_t* handle);
+UV_EXTERN int uv_mutex_init_recursive(uv_mutex_t* handle);
+UV_EXTERN void uv_mutex_destroy(uv_mutex_t* handle);
+UV_EXTERN void uv_mutex_lock(uv_mutex_t* handle);
+UV_EXTERN int uv_mutex_trylock(uv_mutex_t* handle);
+UV_EXTERN void uv_mutex_unlock(uv_mutex_t* handle);
+
+UV_EXTERN int uv_rwlock_init(uv_rwlock_t* rwlock);
+UV_EXTERN void uv_rwlock_destroy(uv_rwlock_t* rwlock);
+UV_EXTERN void uv_rwlock_rdlock(uv_rwlock_t* rwlock);
+UV_EXTERN int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock);
+UV_EXTERN void uv_rwlock_rdunlock(uv_rwlock_t* rwlock);
+UV_EXTERN void uv_rwlock_wrlock(uv_rwlock_t* rwlock);
+UV_EXTERN int uv_rwlock_trywrlock(uv_rwlock_t* rwlock);
+UV_EXTERN void uv_rwlock_wrunlock(uv_rwlock_t* rwlock);
+
+UV_EXTERN int uv_sem_init(uv_sem_t* sem, unsigned int value);
+UV_EXTERN void uv_sem_destroy(uv_sem_t* sem);
+UV_EXTERN void uv_sem_post(uv_sem_t* sem);
+UV_EXTERN void uv_sem_wait(uv_sem_t* sem);
+UV_EXTERN int uv_sem_trywait(uv_sem_t* sem);
+
+UV_EXTERN int uv_cond_init(uv_cond_t* cond);
+UV_EXTERN void uv_cond_destroy(uv_cond_t* cond);
+UV_EXTERN void uv_cond_signal(uv_cond_t* cond);
+UV_EXTERN void uv_cond_broadcast(uv_cond_t* cond);
+
+UV_EXTERN int uv_barrier_init(uv_barrier_t* barrier, unsigned int count);
+UV_EXTERN void uv_barrier_destroy(uv_barrier_t* barrier);
+UV_EXTERN int uv_barrier_wait(uv_barrier_t* barrier);
+
+UV_EXTERN void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex);
+UV_EXTERN int uv_cond_timedwait(uv_cond_t* cond,
+ uv_mutex_t* mutex,
+ uint64_t timeout);
+
+UV_EXTERN void uv_once(uv_once_t* guard, void (*callback)(void));
+
+UV_EXTERN int uv_key_create(uv_key_t* key);
+UV_EXTERN void uv_key_delete(uv_key_t* key);
+UV_EXTERN void* uv_key_get(uv_key_t* key);
+UV_EXTERN void uv_key_set(uv_key_t* key, void* value);
+
+UV_EXTERN int uv_gettimeofday(uv_timeval64_t* tv);
+
+typedef void (*uv_thread_cb)(void* arg);
+
+UV_EXTERN int uv_thread_create(uv_thread_t* tid, uv_thread_cb entry, void* arg);
+
+typedef enum {
+ UV_THREAD_NO_FLAGS = 0x00,
+ UV_THREAD_HAS_STACK_SIZE = 0x01
+} uv_thread_create_flags;
+
+struct uv_thread_options_s {
+ unsigned int flags;
+ size_t stack_size;
+ /* More fields may be added at any time. */
+};
+
+typedef struct uv_thread_options_s uv_thread_options_t;
+
+UV_EXTERN int uv_thread_create_ex(uv_thread_t* tid,
+ const uv_thread_options_t* params,
+ uv_thread_cb entry,
+ void* arg);
+UV_EXTERN uv_thread_t uv_thread_self(void);
+UV_EXTERN int uv_thread_join(uv_thread_t *tid);
+UV_EXTERN int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2);
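+
+/*
+ * Illustrative sketch (editorial example, not part of the upstream header):
+ * starting a thread with an explicit 1 MiB stack and waiting for it to
+ * finish. Only the option fields selected via `flags` are consulted; the
+ * stack size is a hint that libuv may round up to platform minimums.
+ */
+static void example_thread_entry(void* arg) {
+  (void) arg;  /* thread body goes here */
+}
+
+static int example_spawn_thread(void) {
+  uv_thread_t tid;
+  uv_thread_options_t options;
+  int rc;
+
+  options.flags = UV_THREAD_HAS_STACK_SIZE;
+  options.stack_size = 1024 * 1024;
+
+  rc = uv_thread_create_ex(&tid, &options, example_thread_entry, NULL);
+  if (rc != 0)
+    return rc;
+  return uv_thread_join(&tid);
+}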
+
+/* The presence of these unions forces a similar struct layout. */
+#define XX(_, name) uv_ ## name ## _t name;
+union uv_any_handle {
+ UV_HANDLE_TYPE_MAP(XX)
+};
+
+union uv_any_req {
+ UV_REQ_TYPE_MAP(XX)
+};
+#undef XX
+
+
+struct uv_loop_s {
+ /* User data - use this for whatever. */
+ void* data;
+ /* Loop reference counting. */
+ unsigned int active_handles;
+ void* handle_queue[2];
+ union {
+ void* unused;
+ unsigned int count;
+ } active_reqs;
+ /* Internal storage for future extensions. */
+ void* internal_fields;
+ /* Internal flag to signal loop stop. */
+ unsigned int stop_flag;
+ UV_LOOP_PRIVATE_FIELDS
+};
+
+UV_EXTERN void* uv_loop_get_data(const uv_loop_t*);
+UV_EXTERN void uv_loop_set_data(uv_loop_t*, void* data);
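+
+/*
+ * Illustrative sketch (editorial example, not part of the upstream header):
+ * the `data` field and the accessors above are the supported way to attach
+ * application state to a loop and recover it inside callbacks.
+ */
+struct example_app_state { int counter; };
+
+static void example_attach_state(uv_loop_t* loop,
+                                 struct example_app_state* state) {
+  uv_loop_set_data(loop, state);
+}
+
+static struct example_app_state* example_state_of(const uv_loop_t* loop) {
+  return (struct example_app_state*) uv_loop_get_data(loop);
+}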
+
+/* Don't export the private CPP symbols. */
+#undef UV_HANDLE_TYPE_PRIVATE
+#undef UV_REQ_TYPE_PRIVATE
+#undef UV_REQ_PRIVATE_FIELDS
+#undef UV_STREAM_PRIVATE_FIELDS
+#undef UV_TCP_PRIVATE_FIELDS
+#undef UV_PREPARE_PRIVATE_FIELDS
+#undef UV_CHECK_PRIVATE_FIELDS
+#undef UV_IDLE_PRIVATE_FIELDS
+#undef UV_ASYNC_PRIVATE_FIELDS
+#undef UV_TIMER_PRIVATE_FIELDS
+#undef UV_GETADDRINFO_PRIVATE_FIELDS
+#undef UV_GETNAMEINFO_PRIVATE_FIELDS
+#undef UV_FS_REQ_PRIVATE_FIELDS
+#undef UV_WORK_PRIVATE_FIELDS
+#undef UV_FS_EVENT_PRIVATE_FIELDS
+#undef UV_SIGNAL_PRIVATE_FIELDS
+#undef UV_LOOP_PRIVATE_FIELDS
+#undef UV_LOOP_PRIVATE_PLATFORM_FIELDS
+#undef UV__ERR
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* UV_H */
diff --git a/Utilities/cmlibuv/include/uv/aix.h b/Utilities/cmlibuv/include/uv/aix.h
new file mode 100644
index 0000000..7dc992f
--- /dev/null
+++ b/Utilities/cmlibuv/include/uv/aix.h
@@ -0,0 +1,32 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_AIX_H
+#define UV_AIX_H
+
+#define UV_PLATFORM_LOOP_FIELDS \
+ int fs_fd; \
+
+#define UV_PLATFORM_FS_EVENT_FIELDS \
+ uv__io_t event_watcher; \
+ char *dir_filename; \
+
+#endif /* UV_AIX_H */
diff --git a/Utilities/cmlibuv/include/uv/android-ifaddrs.h b/Utilities/cmlibuv/include/uv/android-ifaddrs.h
new file mode 100644
index 0000000..9cd19fe
--- /dev/null
+++ b/Utilities/cmlibuv/include/uv/android-ifaddrs.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 1995, 1999
+ * Berkeley Software Design, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Berkeley Software Design, Inc. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL Berkeley Software Design, Inc. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * BSDI ifaddrs.h,v 2.5 2000/02/23 14:51:59 dab Exp
+ */
+
+#ifndef _IFADDRS_H_
+#define _IFADDRS_H_
+
+struct ifaddrs {
+ struct ifaddrs *ifa_next;
+ char *ifa_name;
+ unsigned int ifa_flags;
+ struct sockaddr *ifa_addr;
+ struct sockaddr *ifa_netmask;
+ struct sockaddr *ifa_dstaddr;
+ void *ifa_data;
+};
+
+/*
+ * This may have been defined in <net/if.h>. Note that if <net/if.h> is
+ * to be included it must be included before this header file.
+ */
+#ifndef ifa_broadaddr
+#define ifa_broadaddr ifa_dstaddr /* broadcast address interface */
+#endif
+
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+extern int getifaddrs(struct ifaddrs **ifap);
+extern void freeifaddrs(struct ifaddrs *ifa);
+__END_DECLS
+
+#endif
diff --git a/Utilities/cmlibuv/include/uv/bsd.h b/Utilities/cmlibuv/include/uv/bsd.h
new file mode 100644
index 0000000..2d72b3d
--- /dev/null
+++ b/Utilities/cmlibuv/include/uv/bsd.h
@@ -0,0 +1,34 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_BSD_H
+#define UV_BSD_H
+
+#define UV_PLATFORM_FS_EVENT_FIELDS \
+ uv__io_t event_watcher; \
+
+#define UV_IO_PRIVATE_PLATFORM_FIELDS \
+ int rcount; \
+ int wcount; \
+
+#define UV_HAVE_KQUEUE 1
+
+#endif /* UV_BSD_H */
diff --git a/Utilities/cmlibuv/include/uv/darwin.h b/Utilities/cmlibuv/include/uv/darwin.h
new file mode 100644
index 0000000..d226415
--- /dev/null
+++ b/Utilities/cmlibuv/include/uv/darwin.h
@@ -0,0 +1,61 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_DARWIN_H
+#define UV_DARWIN_H
+
+#if defined(__APPLE__) && defined(__MACH__)
+# include <mach/mach.h>
+# include <mach/task.h>
+# include <mach/semaphore.h>
+# include <TargetConditionals.h>
+# define UV_PLATFORM_SEM_T semaphore_t
+#endif
+
+#define UV_IO_PRIVATE_PLATFORM_FIELDS \
+ int rcount; \
+ int wcount; \
+
+#define UV_PLATFORM_LOOP_FIELDS \
+ uv_thread_t cf_thread; \
+ void* _cf_reserved; \
+ void* cf_state; \
+ uv_mutex_t cf_mutex; \
+ uv_sem_t cf_sem; \
+ void* cf_signals[2]; \
+
+#define UV_PLATFORM_FS_EVENT_FIELDS \
+ uv__io_t event_watcher; \
+ char* realpath; \
+ int realpath_len; \
+ int cf_flags; \
+ uv_async_t* cf_cb; \
+ void* cf_events[2]; \
+ void* cf_member[2]; \
+ int cf_error; \
+ uv_mutex_t cf_mutex; \
+
+#define UV_STREAM_PRIVATE_PLATFORM_FIELDS \
+ void* select; \
+
+#define UV_HAVE_KQUEUE 1
+
+#endif /* UV_DARWIN_H */
diff --git a/Utilities/cmlibuv/include/uv/errno.h b/Utilities/cmlibuv/include/uv/errno.h
new file mode 100644
index 0000000..8d4d768
--- /dev/null
+++ b/Utilities/cmlibuv/include/uv/errno.h
@@ -0,0 +1,448 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_ERRNO_H_
+#define UV_ERRNO_H_
+
+#include <errno.h>
+#if EDOM > 0
+# define UV__ERR(x) (-(x))
+#else
+# define UV__ERR(x) (x)
+#endif
+
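+/*
+ * Editorial note (illustrative, not part of the upstream header): on POSIX
+ * systems errno values are positive, so UV__ERR() negates them; for example
+ * UV__ERR(EACCES) expands to -EACCES (-13 on Linux). libuv functions report
+ * failures as these negative values, e.g.:
+ *
+ *   int rc = uv_os_unsetenv("SOME_VAR");
+ *   if (rc < 0)
+ *     printf("error: %s\n", uv_strerror(rc));   (requires uv.h and <stdio.h>)
+ */
+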
+#define UV__EOF (-4095)
+#define UV__UNKNOWN (-4094)
+
+#define UV__EAI_ADDRFAMILY (-3000)
+#define UV__EAI_AGAIN (-3001)
+#define UV__EAI_BADFLAGS (-3002)
+#define UV__EAI_CANCELED (-3003)
+#define UV__EAI_FAIL (-3004)
+#define UV__EAI_FAMILY (-3005)
+#define UV__EAI_MEMORY (-3006)
+#define UV__EAI_NODATA (-3007)
+#define UV__EAI_NONAME (-3008)
+#define UV__EAI_OVERFLOW (-3009)
+#define UV__EAI_SERVICE (-3010)
+#define UV__EAI_SOCKTYPE (-3011)
+#define UV__EAI_BADHINTS (-3013)
+#define UV__EAI_PROTOCOL (-3014)
+
+/* Only map to the system errno on non-Windows platforms. It's apparently
+ * a fairly common practice for Windows programmers to redefine errno codes.
+ */
+#if defined(E2BIG) && !defined(_WIN32)
+# define UV__E2BIG UV__ERR(E2BIG)
+#else
+# define UV__E2BIG (-4093)
+#endif
+
+#if defined(EACCES) && !defined(_WIN32)
+# define UV__EACCES UV__ERR(EACCES)
+#else
+# define UV__EACCES (-4092)
+#endif
+
+#if defined(EADDRINUSE) && !defined(_WIN32)
+# define UV__EADDRINUSE UV__ERR(EADDRINUSE)
+#else
+# define UV__EADDRINUSE (-4091)
+#endif
+
+#if defined(EADDRNOTAVAIL) && !defined(_WIN32)
+# define UV__EADDRNOTAVAIL UV__ERR(EADDRNOTAVAIL)
+#else
+# define UV__EADDRNOTAVAIL (-4090)
+#endif
+
+#if defined(EAFNOSUPPORT) && !defined(_WIN32)
+# define UV__EAFNOSUPPORT UV__ERR(EAFNOSUPPORT)
+#else
+# define UV__EAFNOSUPPORT (-4089)
+#endif
+
+#if defined(EAGAIN) && !defined(_WIN32)
+# define UV__EAGAIN UV__ERR(EAGAIN)
+#else
+# define UV__EAGAIN (-4088)
+#endif
+
+#if defined(EALREADY) && !defined(_WIN32)
+# define UV__EALREADY UV__ERR(EALREADY)
+#else
+# define UV__EALREADY (-4084)
+#endif
+
+#if defined(EBADF) && !defined(_WIN32)
+# define UV__EBADF UV__ERR(EBADF)
+#else
+# define UV__EBADF (-4083)
+#endif
+
+#if defined(EBUSY) && !defined(_WIN32)
+# define UV__EBUSY UV__ERR(EBUSY)
+#else
+# define UV__EBUSY (-4082)
+#endif
+
+#if defined(ECANCELED) && !defined(_WIN32)
+# define UV__ECANCELED UV__ERR(ECANCELED)
+#else
+# define UV__ECANCELED (-4081)
+#endif
+
+#if defined(ECHARSET) && !defined(_WIN32)
+# define UV__ECHARSET UV__ERR(ECHARSET)
+#else
+# define UV__ECHARSET (-4080)
+#endif
+
+#if defined(ECONNABORTED) && !defined(_WIN32)
+# define UV__ECONNABORTED UV__ERR(ECONNABORTED)
+#else
+# define UV__ECONNABORTED (-4079)
+#endif
+
+#if defined(ECONNREFUSED) && !defined(_WIN32)
+# define UV__ECONNREFUSED UV__ERR(ECONNREFUSED)
+#else
+# define UV__ECONNREFUSED (-4078)
+#endif
+
+#if defined(ECONNRESET) && !defined(_WIN32)
+# define UV__ECONNRESET UV__ERR(ECONNRESET)
+#else
+# define UV__ECONNRESET (-4077)
+#endif
+
+#if defined(EDESTADDRREQ) && !defined(_WIN32)
+# define UV__EDESTADDRREQ UV__ERR(EDESTADDRREQ)
+#else
+# define UV__EDESTADDRREQ (-4076)
+#endif
+
+#if defined(EEXIST) && !defined(_WIN32)
+# define UV__EEXIST UV__ERR(EEXIST)
+#else
+# define UV__EEXIST (-4075)
+#endif
+
+#if defined(EFAULT) && !defined(_WIN32)
+# define UV__EFAULT UV__ERR(EFAULT)
+#else
+# define UV__EFAULT (-4074)
+#endif
+
+#if defined(EHOSTUNREACH) && !defined(_WIN32)
+# define UV__EHOSTUNREACH UV__ERR(EHOSTUNREACH)
+#else
+# define UV__EHOSTUNREACH (-4073)
+#endif
+
+#if defined(EINTR) && !defined(_WIN32)
+# define UV__EINTR UV__ERR(EINTR)
+#else
+# define UV__EINTR (-4072)
+#endif
+
+#if defined(EINVAL) && !defined(_WIN32)
+# define UV__EINVAL UV__ERR(EINVAL)
+#else
+# define UV__EINVAL (-4071)
+#endif
+
+#if defined(EIO) && !defined(_WIN32)
+# define UV__EIO UV__ERR(EIO)
+#else
+# define UV__EIO (-4070)
+#endif
+
+#if defined(EISCONN) && !defined(_WIN32)
+# define UV__EISCONN UV__ERR(EISCONN)
+#else
+# define UV__EISCONN (-4069)
+#endif
+
+#if defined(EISDIR) && !defined(_WIN32)
+# define UV__EISDIR UV__ERR(EISDIR)
+#else
+# define UV__EISDIR (-4068)
+#endif
+
+#if defined(ELOOP) && !defined(_WIN32)
+# define UV__ELOOP UV__ERR(ELOOP)
+#else
+# define UV__ELOOP (-4067)
+#endif
+
+#if defined(EMFILE) && !defined(_WIN32)
+# define UV__EMFILE UV__ERR(EMFILE)
+#else
+# define UV__EMFILE (-4066)
+#endif
+
+#if defined(EMSGSIZE) && !defined(_WIN32)
+# define UV__EMSGSIZE UV__ERR(EMSGSIZE)
+#else
+# define UV__EMSGSIZE (-4065)
+#endif
+
+#if defined(ENAMETOOLONG) && !defined(_WIN32)
+# define UV__ENAMETOOLONG UV__ERR(ENAMETOOLONG)
+#else
+# define UV__ENAMETOOLONG (-4064)
+#endif
+
+#if defined(ENETDOWN) && !defined(_WIN32)
+# define UV__ENETDOWN UV__ERR(ENETDOWN)
+#else
+# define UV__ENETDOWN (-4063)
+#endif
+
+#if defined(ENETUNREACH) && !defined(_WIN32)
+# define UV__ENETUNREACH UV__ERR(ENETUNREACH)
+#else
+# define UV__ENETUNREACH (-4062)
+#endif
+
+#if defined(ENFILE) && !defined(_WIN32)
+# define UV__ENFILE UV__ERR(ENFILE)
+#else
+# define UV__ENFILE (-4061)
+#endif
+
+#if defined(ENOBUFS) && !defined(_WIN32)
+# define UV__ENOBUFS UV__ERR(ENOBUFS)
+#else
+# define UV__ENOBUFS (-4060)
+#endif
+
+#if defined(ENODEV) && !defined(_WIN32)
+# define UV__ENODEV UV__ERR(ENODEV)
+#else
+# define UV__ENODEV (-4059)
+#endif
+
+#if defined(ENOENT) && !defined(_WIN32)
+# define UV__ENOENT UV__ERR(ENOENT)
+#else
+# define UV__ENOENT (-4058)
+#endif
+
+#if defined(ENOMEM) && !defined(_WIN32)
+# define UV__ENOMEM UV__ERR(ENOMEM)
+#else
+# define UV__ENOMEM (-4057)
+#endif
+
+#if defined(ENONET) && !defined(_WIN32)
+# define UV__ENONET UV__ERR(ENONET)
+#else
+# define UV__ENONET (-4056)
+#endif
+
+#if defined(ENOSPC) && !defined(_WIN32)
+# define UV__ENOSPC UV__ERR(ENOSPC)
+#else
+# define UV__ENOSPC (-4055)
+#endif
+
+#if defined(ENOSYS) && !defined(_WIN32)
+# define UV__ENOSYS UV__ERR(ENOSYS)
+#else
+# define UV__ENOSYS (-4054)
+#endif
+
+#if defined(ENOTCONN) && !defined(_WIN32)
+# define UV__ENOTCONN UV__ERR(ENOTCONN)
+#else
+# define UV__ENOTCONN (-4053)
+#endif
+
+#if defined(ENOTDIR) && !defined(_WIN32)
+# define UV__ENOTDIR UV__ERR(ENOTDIR)
+#else
+# define UV__ENOTDIR (-4052)
+#endif
+
+#if defined(ENOTEMPTY) && !defined(_WIN32)
+# define UV__ENOTEMPTY UV__ERR(ENOTEMPTY)
+#else
+# define UV__ENOTEMPTY (-4051)
+#endif
+
+#if defined(ENOTSOCK) && !defined(_WIN32)
+# define UV__ENOTSOCK UV__ERR(ENOTSOCK)
+#else
+# define UV__ENOTSOCK (-4050)
+#endif
+
+#if defined(ENOTSUP) && !defined(_WIN32)
+# define UV__ENOTSUP UV__ERR(ENOTSUP)
+#else
+# define UV__ENOTSUP (-4049)
+#endif
+
+#if defined(EPERM) && !defined(_WIN32)
+# define UV__EPERM UV__ERR(EPERM)
+#else
+# define UV__EPERM (-4048)
+#endif
+
+#if defined(EPIPE) && !defined(_WIN32)
+# define UV__EPIPE UV__ERR(EPIPE)
+#else
+# define UV__EPIPE (-4047)
+#endif
+
+#if defined(EPROTO) && !defined(_WIN32)
+# define UV__EPROTO UV__ERR(EPROTO)
+#else
+# define UV__EPROTO (-4046)
+#endif
+
+#if defined(EPROTONOSUPPORT) && !defined(_WIN32)
+# define UV__EPROTONOSUPPORT UV__ERR(EPROTONOSUPPORT)
+#else
+# define UV__EPROTONOSUPPORT (-4045)
+#endif
+
+#if defined(EPROTOTYPE) && !defined(_WIN32)
+# define UV__EPROTOTYPE UV__ERR(EPROTOTYPE)
+#else
+# define UV__EPROTOTYPE (-4044)
+#endif
+
+#if defined(EROFS) && !defined(_WIN32)
+# define UV__EROFS UV__ERR(EROFS)
+#else
+# define UV__EROFS (-4043)
+#endif
+
+#if defined(ESHUTDOWN) && !defined(_WIN32)
+# define UV__ESHUTDOWN UV__ERR(ESHUTDOWN)
+#else
+# define UV__ESHUTDOWN (-4042)
+#endif
+
+#if defined(ESPIPE) && !defined(_WIN32)
+# define UV__ESPIPE UV__ERR(ESPIPE)
+#else
+# define UV__ESPIPE (-4041)
+#endif
+
+#if defined(ESRCH) && !defined(_WIN32)
+# define UV__ESRCH UV__ERR(ESRCH)
+#else
+# define UV__ESRCH (-4040)
+#endif
+
+#if defined(ETIMEDOUT) && !defined(_WIN32)
+# define UV__ETIMEDOUT UV__ERR(ETIMEDOUT)
+#else
+# define UV__ETIMEDOUT (-4039)
+#endif
+
+#if defined(ETXTBSY) && !defined(_WIN32)
+# define UV__ETXTBSY UV__ERR(ETXTBSY)
+#else
+# define UV__ETXTBSY (-4038)
+#endif
+
+#if defined(EXDEV) && !defined(_WIN32)
+# define UV__EXDEV UV__ERR(EXDEV)
+#else
+# define UV__EXDEV (-4037)
+#endif
+
+#if defined(EFBIG) && !defined(_WIN32)
+# define UV__EFBIG UV__ERR(EFBIG)
+#else
+# define UV__EFBIG (-4036)
+#endif
+
+#if defined(ENOPROTOOPT) && !defined(_WIN32)
+# define UV__ENOPROTOOPT UV__ERR(ENOPROTOOPT)
+#else
+# define UV__ENOPROTOOPT (-4035)
+#endif
+
+#if defined(ERANGE) && !defined(_WIN32)
+# define UV__ERANGE UV__ERR(ERANGE)
+#else
+# define UV__ERANGE (-4034)
+#endif
+
+#if defined(ENXIO) && !defined(_WIN32)
+# define UV__ENXIO UV__ERR(ENXIO)
+#else
+# define UV__ENXIO (-4033)
+#endif
+
+#if defined(EMLINK) && !defined(_WIN32)
+# define UV__EMLINK UV__ERR(EMLINK)
+#else
+# define UV__EMLINK (-4032)
+#endif
+
+/* EHOSTDOWN is not visible on BSD-like systems when _POSIX_C_SOURCE is
+ * defined. Fortunately, its value is always 64 so it's possible albeit
+ * icky to hard-code it.
+ */
+#if defined(EHOSTDOWN) && !defined(_WIN32)
+# define UV__EHOSTDOWN UV__ERR(EHOSTDOWN)
+#elif defined(__APPLE__) || \
+ defined(__DragonFly__) || \
+ defined(__FreeBSD__) || \
+ defined(__FreeBSD_kernel__) || \
+ defined(__NetBSD__) || \
+ defined(__OpenBSD__)
+# define UV__EHOSTDOWN (-64)
+#else
+# define UV__EHOSTDOWN (-4031)
+#endif
+
+#if defined(EREMOTEIO) && !defined(_WIN32)
+# define UV__EREMOTEIO UV__ERR(EREMOTEIO)
+#else
+# define UV__EREMOTEIO (-4030)
+#endif
+
+#if defined(ENOTTY) && !defined(_WIN32)
+# define UV__ENOTTY UV__ERR(ENOTTY)
+#else
+# define UV__ENOTTY (-4029)
+#endif
+
+#if defined(EFTYPE) && !defined(_WIN32)
+# define UV__EFTYPE UV__ERR(EFTYPE)
+#else
+# define UV__EFTYPE (-4028)
+#endif
+
+#if defined(EILSEQ) && !defined(_WIN32)
+# define UV__EILSEQ UV__ERR(EILSEQ)
+#else
+# define UV__EILSEQ (-4027)
+#endif
+
+#endif /* UV_ERRNO_H_ */
diff --git a/Utilities/cmlibuv/include/uv/linux.h b/Utilities/cmlibuv/include/uv/linux.h
new file mode 100644
index 0000000..9b38405
--- /dev/null
+++ b/Utilities/cmlibuv/include/uv/linux.h
@@ -0,0 +1,34 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_LINUX_H
+#define UV_LINUX_H
+
+#define UV_PLATFORM_LOOP_FIELDS \
+ uv__io_t inotify_read_watcher; \
+ void* inotify_watchers; \
+ int inotify_fd; \
+
+#define UV_PLATFORM_FS_EVENT_FIELDS \
+ void* watchers[2]; \
+ int wd; \
+
+#endif /* UV_LINUX_H */
diff --git a/Utilities/cmlibuv/include/uv/os390.h b/Utilities/cmlibuv/include/uv/os390.h
new file mode 100644
index 0000000..0267d74
--- /dev/null
+++ b/Utilities/cmlibuv/include/uv/os390.h
@@ -0,0 +1,33 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_MVS_H
+#define UV_MVS_H
+
+#define UV_PLATFORM_SEM_T long
+
+#define UV_PLATFORM_LOOP_FIELDS \
+ void* ep; \
+
+#define UV_PLATFORM_FS_EVENT_FIELDS \
+ char rfis_rftok[8]; \
+
+#endif /* UV_MVS_H */
diff --git a/Utilities/cmlibuv/include/uv/posix.h b/Utilities/cmlibuv/include/uv/posix.h
new file mode 100644
index 0000000..9a96634
--- /dev/null
+++ b/Utilities/cmlibuv/include/uv/posix.h
@@ -0,0 +1,31 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_POSIX_H
+#define UV_POSIX_H
+
+#define UV_PLATFORM_LOOP_FIELDS \
+ struct pollfd* poll_fds; \
+ size_t poll_fds_used; \
+ size_t poll_fds_size; \
+ unsigned char poll_fds_iterating; \
+
+#endif /* UV_POSIX_H */
diff --git a/Utilities/cmlibuv/include/uv/stdint-msvc2008.h b/Utilities/cmlibuv/include/uv/stdint-msvc2008.h
new file mode 100644
index 0000000..d02608a
--- /dev/null
+++ b/Utilities/cmlibuv/include/uv/stdint-msvc2008.h
@@ -0,0 +1,247 @@
+// ISO C9x compliant stdint.h for Microsoft Visual Studio
+// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
+//
+// Copyright (c) 2006-2008 Alexander Chemeris
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. The name of the author may be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef _MSC_VER // [
+#error "Use this header only with Microsoft Visual C++ compilers!"
+#endif // _MSC_VER ]
+
+#ifndef _MSC_STDINT_H_ // [
+#define _MSC_STDINT_H_
+
+#if _MSC_VER > 1000
+#pragma once
+#endif
+
+#include <limits.h>
+
+// For Visual Studio 6 in C++ mode and for many Visual Studio versions when
+// compiling for ARM we should wrap the <wchar.h> include with 'extern "C++" {}'
+// or the compiler gives many errors like this:
+// error C2733: second C linkage of overloaded function 'wmemchr' not allowed
+#ifdef __cplusplus
+extern "C" {
+#endif
+# include <wchar.h>
+#ifdef __cplusplus
+}
+#endif
+
+// Define _W64 macros to mark types changing their size, like intptr_t.
+#ifndef _W64
+# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
+# define _W64 __w64
+# else
+# define _W64
+# endif
+#endif
+
+
+// 7.18.1 Integer types
+
+// 7.18.1.1 Exact-width integer types
+
+// Visual Studio 6 and Embedded Visual C++ 4 don't
+// realize that, e.g., char has the same size as __int8,
+// so we give up on __intX for them.
+#if (_MSC_VER < 1300)
+ typedef signed char int8_t;
+ typedef signed short int16_t;
+ typedef signed int int32_t;
+ typedef unsigned char uint8_t;
+ typedef unsigned short uint16_t;
+ typedef unsigned int uint32_t;
+#else
+ typedef signed __int8 int8_t;
+ typedef signed __int16 int16_t;
+ typedef signed __int32 int32_t;
+ typedef unsigned __int8 uint8_t;
+ typedef unsigned __int16 uint16_t;
+ typedef unsigned __int32 uint32_t;
+#endif
+typedef signed __int64 int64_t;
+typedef unsigned __int64 uint64_t;
+
+
+// 7.18.1.2 Minimum-width integer types
+typedef int8_t int_least8_t;
+typedef int16_t int_least16_t;
+typedef int32_t int_least32_t;
+typedef int64_t int_least64_t;
+typedef uint8_t uint_least8_t;
+typedef uint16_t uint_least16_t;
+typedef uint32_t uint_least32_t;
+typedef uint64_t uint_least64_t;
+
+// 7.18.1.3 Fastest minimum-width integer types
+typedef int8_t int_fast8_t;
+typedef int16_t int_fast16_t;
+typedef int32_t int_fast32_t;
+typedef int64_t int_fast64_t;
+typedef uint8_t uint_fast8_t;
+typedef uint16_t uint_fast16_t;
+typedef uint32_t uint_fast32_t;
+typedef uint64_t uint_fast64_t;
+
+// 7.18.1.4 Integer types capable of holding object pointers
+#ifdef _WIN64 // [
+ typedef signed __int64 intptr_t;
+ typedef unsigned __int64 uintptr_t;
+#else // _WIN64 ][
+ typedef _W64 signed int intptr_t;
+ typedef _W64 unsigned int uintptr_t;
+#endif // _WIN64 ]
+
+// 7.18.1.5 Greatest-width integer types
+typedef int64_t intmax_t;
+typedef uint64_t uintmax_t;
+
+
+// 7.18.2 Limits of specified-width integer types
+
+#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259
+
+// 7.18.2.1 Limits of exact-width integer types
+#define INT8_MIN ((int8_t)_I8_MIN)
+#define INT8_MAX _I8_MAX
+#define INT16_MIN ((int16_t)_I16_MIN)
+#define INT16_MAX _I16_MAX
+#define INT32_MIN ((int32_t)_I32_MIN)
+#define INT32_MAX _I32_MAX
+#define INT64_MIN ((int64_t)_I64_MIN)
+#define INT64_MAX _I64_MAX
+#define UINT8_MAX _UI8_MAX
+#define UINT16_MAX _UI16_MAX
+#define UINT32_MAX _UI32_MAX
+#define UINT64_MAX _UI64_MAX
+
+// 7.18.2.2 Limits of minimum-width integer types
+#define INT_LEAST8_MIN INT8_MIN
+#define INT_LEAST8_MAX INT8_MAX
+#define INT_LEAST16_MIN INT16_MIN
+#define INT_LEAST16_MAX INT16_MAX
+#define INT_LEAST32_MIN INT32_MIN
+#define INT_LEAST32_MAX INT32_MAX
+#define INT_LEAST64_MIN INT64_MIN
+#define INT_LEAST64_MAX INT64_MAX
+#define UINT_LEAST8_MAX UINT8_MAX
+#define UINT_LEAST16_MAX UINT16_MAX
+#define UINT_LEAST32_MAX UINT32_MAX
+#define UINT_LEAST64_MAX UINT64_MAX
+
+// 7.18.2.3 Limits of fastest minimum-width integer types
+#define INT_FAST8_MIN INT8_MIN
+#define INT_FAST8_MAX INT8_MAX
+#define INT_FAST16_MIN INT16_MIN
+#define INT_FAST16_MAX INT16_MAX
+#define INT_FAST32_MIN INT32_MIN
+#define INT_FAST32_MAX INT32_MAX
+#define INT_FAST64_MIN INT64_MIN
+#define INT_FAST64_MAX INT64_MAX
+#define UINT_FAST8_MAX UINT8_MAX
+#define UINT_FAST16_MAX UINT16_MAX
+#define UINT_FAST32_MAX UINT32_MAX
+#define UINT_FAST64_MAX UINT64_MAX
+
+// 7.18.2.4 Limits of integer types capable of holding object pointers
+#ifdef _WIN64 // [
+# define INTPTR_MIN INT64_MIN
+# define INTPTR_MAX INT64_MAX
+# define UINTPTR_MAX UINT64_MAX
+#else // _WIN64 ][
+# define INTPTR_MIN INT32_MIN
+# define INTPTR_MAX INT32_MAX
+# define UINTPTR_MAX UINT32_MAX
+#endif // _WIN64 ]
+
+// 7.18.2.5 Limits of greatest-width integer types
+#define INTMAX_MIN INT64_MIN
+#define INTMAX_MAX INT64_MAX
+#define UINTMAX_MAX UINT64_MAX
+
+// 7.18.3 Limits of other integer types
+
+#ifdef _WIN64 // [
+# define PTRDIFF_MIN _I64_MIN
+# define PTRDIFF_MAX _I64_MAX
+#else // _WIN64 ][
+# define PTRDIFF_MIN _I32_MIN
+# define PTRDIFF_MAX _I32_MAX
+#endif // _WIN64 ]
+
+#define SIG_ATOMIC_MIN INT_MIN
+#define SIG_ATOMIC_MAX INT_MAX
+
+#ifndef SIZE_MAX // [
+# ifdef _WIN64 // [
+# define SIZE_MAX _UI64_MAX
+# else // _WIN64 ][
+# define SIZE_MAX _UI32_MAX
+# endif // _WIN64 ]
+#endif // SIZE_MAX ]
+
+// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
+#ifndef WCHAR_MIN // [
+# define WCHAR_MIN 0
+#endif // WCHAR_MIN ]
+#ifndef WCHAR_MAX // [
+# define WCHAR_MAX _UI16_MAX
+#endif // WCHAR_MAX ]
+
+#define WINT_MIN 0
+#define WINT_MAX _UI16_MAX
+
+#endif // __STDC_LIMIT_MACROS ]
+
+
+// 7.18.4 Limits of other integer types
+
+#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260
+
+// 7.18.4.1 Macros for minimum-width integer constants
+
+#define INT8_C(val) val##i8
+#define INT16_C(val) val##i16
+#define INT32_C(val) val##i32
+#define INT64_C(val) val##i64
+
+#define UINT8_C(val) val##ui8
+#define UINT16_C(val) val##ui16
+#define UINT32_C(val) val##ui32
+#define UINT64_C(val) val##ui64
+
+// 7.18.4.2 Macros for greatest-width integer constants
+#define INTMAX_C INT64_C
+#define UINTMAX_C UINT64_C
+
+#endif // __STDC_CONSTANT_MACROS ]
+
+
+#endif // _MSC_STDINT_H_ ]
diff --git a/Utilities/cmlibuv/include/uv/sunos.h b/Utilities/cmlibuv/include/uv/sunos.h
new file mode 100644
index 0000000..0421664
--- /dev/null
+++ b/Utilities/cmlibuv/include/uv/sunos.h
@@ -0,0 +1,44 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_SUNOS_H
+#define UV_SUNOS_H
+
+#include <sys/port.h>
+#include <port.h>
+
+/* For the sake of convenience and reduced #ifdef-ery in src/unix/sunos.c,
+ * add the fs_event fields even when this version of SunOS doesn't support
+ * file watching.
+ */
+#define UV_PLATFORM_LOOP_FIELDS \
+ uv__io_t fs_event_watcher; \
+ int fs_fd; \
+
+#if defined(PORT_SOURCE_FILE)
+
+# define UV_PLATFORM_FS_EVENT_FIELDS \
+ file_obj_t fo; \
+ int fd; \
+
+#endif /* defined(PORT_SOURCE_FILE) */
+
+#endif /* UV_SUNOS_H */
diff --git a/Utilities/cmlibuv/include/uv/threadpool.h b/Utilities/cmlibuv/include/uv/threadpool.h
new file mode 100644
index 0000000..9708ebd
--- /dev/null
+++ b/Utilities/cmlibuv/include/uv/threadpool.h
@@ -0,0 +1,37 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/*
+ * This file is private to libuv. It provides common functionality to both
+ * Windows and Unix backends.
+ */
+
+#ifndef UV_THREADPOOL_H_
+#define UV_THREADPOOL_H_
+
+struct uv__work {
+ void (*work)(struct uv__work *w);
+ void (*done)(struct uv__work *w, int status);
+ struct uv_loop_s* loop;
+ void* wq[2];
+};
+
+#endif /* UV_THREADPOOL_H_ */
diff --git a/Utilities/cmlibuv/include/uv/tree.h b/Utilities/cmlibuv/include/uv/tree.h
new file mode 100644
index 0000000..f936416
--- /dev/null
+++ b/Utilities/cmlibuv/include/uv/tree.h
@@ -0,0 +1,768 @@
+/*-
+ * Copyright 2002 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef UV_TREE_H_
+#define UV_TREE_H_
+
+#ifndef UV__UNUSED
+# if __GNUC__
+# define UV__UNUSED __attribute__((unused))
+# else
+# define UV__UNUSED
+# endif
+#endif
+
+/*
+ * This file defines data structures for different types of trees:
+ * splay trees and red-black trees.
+ *
+ * A splay tree is a self-organizing data structure. Every operation
+ * on the tree causes a splay to happen. The splay moves the requested
+ * node to the root of the tree and partly rebalances it.
+ *
+ * This has the benefit that request locality causes faster lookups as
+ * the requested nodes move to the top of the tree. On the other hand,
+ * every lookup causes memory writes.
+ *
+ * The Balance Theorem bounds the total access time for m operations
+ * and n inserts on an initially empty tree as O((m + n)lg n). The
+ * amortized cost for a sequence of m accesses to a splay tree is O(lg n).
+ *
+ * A red-black tree is a binary search tree with the node color as an
+ * extra attribute. It fulfills a set of conditions:
+ * - every search path from the root to a leaf consists of the
+ * same number of black nodes,
+ * - each red node (except for the root) has a black parent,
+ * - each leaf node is black.
+ *
+ * Every operation on a red-black tree is bounded as O(lg n).
+ * The maximum height of a red-black tree is 2lg (n+1).
+ */
+
+#define SPLAY_HEAD(name, type) \
+struct name { \
+ struct type *sph_root; /* root of the tree */ \
+}
+
+#define SPLAY_INITIALIZER(root) \
+ { NULL }
+
+#define SPLAY_INIT(root) do { \
+ (root)->sph_root = NULL; \
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_ENTRY(type) \
+struct { \
+ struct type *spe_left; /* left element */ \
+ struct type *spe_right; /* right element */ \
+}
+
+#define SPLAY_LEFT(elm, field) (elm)->field.spe_left
+#define SPLAY_RIGHT(elm, field) (elm)->field.spe_right
+#define SPLAY_ROOT(head) (head)->sph_root
+#define SPLAY_EMPTY(head) (SPLAY_ROOT(head) == NULL)
+
+/* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */
+#define SPLAY_ROTATE_RIGHT(head, tmp, field) do { \
+ SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \
+ SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
+ (head)->sph_root = tmp; \
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_ROTATE_LEFT(head, tmp, field) do { \
+ SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \
+ SPLAY_LEFT(tmp, field) = (head)->sph_root; \
+ (head)->sph_root = tmp; \
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_LINKLEFT(head, tmp, field) do { \
+ SPLAY_LEFT(tmp, field) = (head)->sph_root; \
+ tmp = (head)->sph_root; \
+ (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_LINKRIGHT(head, tmp, field) do { \
+ SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
+ tmp = (head)->sph_root; \
+ (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_ASSEMBLE(head, node, left, right, field) do { \
+ SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \
+ SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field); \
+ SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \
+ SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \
+} while (/*CONSTCOND*/ 0)
+
+/* Generates prototypes and inline functions */
+
+#define SPLAY_PROTOTYPE(name, type, field, cmp) \
+void name##_SPLAY(struct name *, struct type *); \
+void name##_SPLAY_MINMAX(struct name *, int); \
+struct type *name##_SPLAY_INSERT(struct name *, struct type *); \
+struct type *name##_SPLAY_REMOVE(struct name *, struct type *); \
+ \
+/* Finds the node with the same key as elm */ \
+static __inline struct type * \
+name##_SPLAY_FIND(struct name *head, struct type *elm) \
+{ \
+ if (SPLAY_EMPTY(head)) \
+ return(NULL); \
+ name##_SPLAY(head, elm); \
+ if ((cmp)(elm, (head)->sph_root) == 0) \
+ return (head->sph_root); \
+ return (NULL); \
+} \
+ \
+static __inline struct type * \
+name##_SPLAY_NEXT(struct name *head, struct type *elm) \
+{ \
+ name##_SPLAY(head, elm); \
+ if (SPLAY_RIGHT(elm, field) != NULL) { \
+ elm = SPLAY_RIGHT(elm, field); \
+ while (SPLAY_LEFT(elm, field) != NULL) { \
+ elm = SPLAY_LEFT(elm, field); \
+ } \
+ } else \
+ elm = NULL; \
+ return (elm); \
+} \
+ \
+static __inline struct type * \
+name##_SPLAY_MIN_MAX(struct name *head, int val) \
+{ \
+ name##_SPLAY_MINMAX(head, val); \
+ return (SPLAY_ROOT(head)); \
+}
+
+/* Main splay operation.
+ * Moves the node closest to the key of elm to the top.
+ */
+#define SPLAY_GENERATE(name, type, field, cmp) \
+struct type * \
+name##_SPLAY_INSERT(struct name *head, struct type *elm) \
+{ \
+ if (SPLAY_EMPTY(head)) { \
+ SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL; \
+ } else { \
+ int __comp; \
+ name##_SPLAY(head, elm); \
+ __comp = (cmp)(elm, (head)->sph_root); \
+ if(__comp < 0) { \
+ SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field); \
+ SPLAY_RIGHT(elm, field) = (head)->sph_root; \
+ SPLAY_LEFT((head)->sph_root, field) = NULL; \
+ } else if (__comp > 0) { \
+ SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field); \
+ SPLAY_LEFT(elm, field) = (head)->sph_root; \
+ SPLAY_RIGHT((head)->sph_root, field) = NULL; \
+ } else \
+ return ((head)->sph_root); \
+ } \
+ (head)->sph_root = (elm); \
+ return (NULL); \
+} \
+ \
+struct type * \
+name##_SPLAY_REMOVE(struct name *head, struct type *elm) \
+{ \
+ struct type *__tmp; \
+ if (SPLAY_EMPTY(head)) \
+ return (NULL); \
+ name##_SPLAY(head, elm); \
+ if ((cmp)(elm, (head)->sph_root) == 0) { \
+ if (SPLAY_LEFT((head)->sph_root, field) == NULL) { \
+ (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \
+ } else { \
+ __tmp = SPLAY_RIGHT((head)->sph_root, field); \
+ (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \
+ name##_SPLAY(head, elm); \
+ SPLAY_RIGHT((head)->sph_root, field) = __tmp; \
+ } \
+ return (elm); \
+ } \
+ return (NULL); \
+} \
+ \
+void \
+name##_SPLAY(struct name *head, struct type *elm) \
+{ \
+ struct type __node, *__left, *__right, *__tmp; \
+ int __comp; \
+ \
+ SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL; \
+ __left = __right = &__node; \
+ \
+ while ((__comp = (cmp)(elm, (head)->sph_root)) != 0) { \
+ if (__comp < 0) { \
+ __tmp = SPLAY_LEFT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if ((cmp)(elm, __tmp) < 0){ \
+ SPLAY_ROTATE_RIGHT(head, __tmp, field); \
+ if (SPLAY_LEFT((head)->sph_root, field) == NULL) \
+ break; \
+ } \
+ SPLAY_LINKLEFT(head, __right, field); \
+ } else if (__comp > 0) { \
+ __tmp = SPLAY_RIGHT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if ((cmp)(elm, __tmp) > 0){ \
+ SPLAY_ROTATE_LEFT(head, __tmp, field); \
+ if (SPLAY_RIGHT((head)->sph_root, field) == NULL) \
+ break; \
+ } \
+ SPLAY_LINKRIGHT(head, __left, field); \
+ } \
+ } \
+ SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
+} \
+ \
+/* Splay with either the minimum or the maximum element. \
+ * Used to find the minimum or maximum element in the tree. \
+ */ \
+void name##_SPLAY_MINMAX(struct name *head, int __comp) \
+{ \
+ struct type __node, *__left, *__right, *__tmp; \
+ \
+ SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL; \
+ __left = __right = &__node; \
+ \
+ while (1) { \
+ if (__comp < 0) { \
+ __tmp = SPLAY_LEFT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if (__comp < 0){ \
+ SPLAY_ROTATE_RIGHT(head, __tmp, field); \
+ if (SPLAY_LEFT((head)->sph_root, field) == NULL) \
+ break; \
+ } \
+ SPLAY_LINKLEFT(head, __right, field); \
+ } else if (__comp > 0) { \
+ __tmp = SPLAY_RIGHT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if (__comp > 0) { \
+ SPLAY_ROTATE_LEFT(head, __tmp, field); \
+ if (SPLAY_RIGHT((head)->sph_root, field) == NULL) \
+ break; \
+ } \
+ SPLAY_LINKRIGHT(head, __left, field); \
+ } \
+ } \
+ SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
+}
+
+#define SPLAY_NEGINF -1
+#define SPLAY_INF 1
+
+#define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y)
+#define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y)
+#define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y)
+#define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y)
+#define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? NULL \
+ : name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF))
+#define SPLAY_MAX(name, x) (SPLAY_EMPTY(x) ? NULL \
+ : name##_SPLAY_MIN_MAX(x, SPLAY_INF))
+
+#define SPLAY_FOREACH(x, name, head) \
+ for ((x) = SPLAY_MIN(name, head); \
+ (x) != NULL; \
+ (x) = SPLAY_NEXT(name, head, x))
+
+/* Macros that define a red-black tree */
+#define RB_HEAD(name, type) \
+struct name { \
+ struct type *rbh_root; /* root of the tree */ \
+}
+
+#define RB_INITIALIZER(root) \
+ { NULL }
+
+#define RB_INIT(root) do { \
+ (root)->rbh_root = NULL; \
+} while (/*CONSTCOND*/ 0)
+
+#define RB_BLACK 0
+#define RB_RED 1
+#define RB_ENTRY(type) \
+struct { \
+ struct type *rbe_left; /* left element */ \
+ struct type *rbe_right; /* right element */ \
+ struct type *rbe_parent; /* parent element */ \
+ int rbe_color; /* node color */ \
+}
+
+#define RB_LEFT(elm, field) (elm)->field.rbe_left
+#define RB_RIGHT(elm, field) (elm)->field.rbe_right
+#define RB_PARENT(elm, field) (elm)->field.rbe_parent
+#define RB_COLOR(elm, field) (elm)->field.rbe_color
+#define RB_ROOT(head) (head)->rbh_root
+#define RB_EMPTY(head) (RB_ROOT(head) == NULL)
+
+#define RB_SET(elm, parent, field) do { \
+ RB_PARENT(elm, field) = parent; \
+ RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL; \
+ RB_COLOR(elm, field) = RB_RED; \
+} while (/*CONSTCOND*/ 0)
+
+#define RB_SET_BLACKRED(black, red, field) do { \
+ RB_COLOR(black, field) = RB_BLACK; \
+ RB_COLOR(red, field) = RB_RED; \
+} while (/*CONSTCOND*/ 0)
+
+#ifndef RB_AUGMENT
+#define RB_AUGMENT(x) do {} while (0)
+#endif
+
+#define RB_ROTATE_LEFT(head, elm, tmp, field) do { \
+ (tmp) = RB_RIGHT(elm, field); \
+ if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field)) != NULL) { \
+ RB_PARENT(RB_LEFT(tmp, field), field) = (elm); \
+ } \
+ RB_AUGMENT(elm); \
+ if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \
+ if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
+ RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
+ else \
+ RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
+ } else \
+ (head)->rbh_root = (tmp); \
+ RB_LEFT(tmp, field) = (elm); \
+ RB_PARENT(elm, field) = (tmp); \
+ RB_AUGMENT(tmp); \
+ if ((RB_PARENT(tmp, field))) \
+ RB_AUGMENT(RB_PARENT(tmp, field)); \
+} while (/*CONSTCOND*/ 0)
+
+#define RB_ROTATE_RIGHT(head, elm, tmp, field) do { \
+ (tmp) = RB_LEFT(elm, field); \
+ if ((RB_LEFT(elm, field) = RB_RIGHT(tmp, field)) != NULL) { \
+ RB_PARENT(RB_RIGHT(tmp, field), field) = (elm); \
+ } \
+ RB_AUGMENT(elm); \
+ if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \
+ if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
+ RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
+ else \
+ RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
+ } else \
+ (head)->rbh_root = (tmp); \
+ RB_RIGHT(tmp, field) = (elm); \
+ RB_PARENT(elm, field) = (tmp); \
+ RB_AUGMENT(tmp); \
+ if ((RB_PARENT(tmp, field))) \
+ RB_AUGMENT(RB_PARENT(tmp, field)); \
+} while (/*CONSTCOND*/ 0)
+
+/* Generates prototypes and inline functions */
+#define RB_PROTOTYPE(name, type, field, cmp) \
+ RB_PROTOTYPE_INTERNAL(name, type, field, cmp,)
+#define RB_PROTOTYPE_STATIC(name, type, field, cmp) \
+ RB_PROTOTYPE_INTERNAL(name, type, field, cmp, UV__UNUSED static)
+#define RB_PROTOTYPE_INTERNAL(name, type, field, cmp, attr) \
+attr void name##_RB_INSERT_COLOR(struct name *, struct type *); \
+attr void name##_RB_REMOVE_COLOR(struct name *, struct type *, struct type *);\
+attr struct type *name##_RB_REMOVE(struct name *, struct type *); \
+attr struct type *name##_RB_INSERT(struct name *, struct type *); \
+attr struct type *name##_RB_FIND(struct name *, struct type *); \
+attr struct type *name##_RB_NFIND(struct name *, struct type *); \
+attr struct type *name##_RB_NEXT(struct type *); \
+attr struct type *name##_RB_PREV(struct type *); \
+attr struct type *name##_RB_MINMAX(struct name *, int); \
+ \
+
+/* Main rb operation.
+ * Moves the node closest to the key of elm to the top.
+ */
+#define RB_GENERATE(name, type, field, cmp) \
+ RB_GENERATE_INTERNAL(name, type, field, cmp,)
+#define RB_GENERATE_STATIC(name, type, field, cmp) \
+ RB_GENERATE_INTERNAL(name, type, field, cmp, UV__UNUSED static)
+#define RB_GENERATE_INTERNAL(name, type, field, cmp, attr) \
+attr void \
+name##_RB_INSERT_COLOR(struct name *head, struct type *elm) \
+{ \
+ struct type *parent, *gparent, *tmp; \
+ while ((parent = RB_PARENT(elm, field)) != NULL && \
+ RB_COLOR(parent, field) == RB_RED) { \
+ gparent = RB_PARENT(parent, field); \
+ if (parent == RB_LEFT(gparent, field)) { \
+ tmp = RB_RIGHT(gparent, field); \
+ if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
+ RB_COLOR(tmp, field) = RB_BLACK; \
+ RB_SET_BLACKRED(parent, gparent, field); \
+ elm = gparent; \
+ continue; \
+ } \
+ if (RB_RIGHT(parent, field) == elm) { \
+ RB_ROTATE_LEFT(head, parent, tmp, field); \
+ tmp = parent; \
+ parent = elm; \
+ elm = tmp; \
+ } \
+ RB_SET_BLACKRED(parent, gparent, field); \
+ RB_ROTATE_RIGHT(head, gparent, tmp, field); \
+ } else { \
+ tmp = RB_LEFT(gparent, field); \
+ if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
+ RB_COLOR(tmp, field) = RB_BLACK; \
+ RB_SET_BLACKRED(parent, gparent, field); \
+ elm = gparent; \
+ continue; \
+ } \
+ if (RB_LEFT(parent, field) == elm) { \
+ RB_ROTATE_RIGHT(head, parent, tmp, field); \
+ tmp = parent; \
+ parent = elm; \
+ elm = tmp; \
+ } \
+ RB_SET_BLACKRED(parent, gparent, field); \
+ RB_ROTATE_LEFT(head, gparent, tmp, field); \
+ } \
+ } \
+ RB_COLOR(head->rbh_root, field) = RB_BLACK; \
+} \
+ \
+attr void \
+name##_RB_REMOVE_COLOR(struct name *head, struct type *parent, \
+ struct type *elm) \
+{ \
+ struct type *tmp; \
+ while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) && \
+ elm != RB_ROOT(head)) { \
+ if (RB_LEFT(parent, field) == elm) { \
+ tmp = RB_RIGHT(parent, field); \
+ if (RB_COLOR(tmp, field) == RB_RED) { \
+ RB_SET_BLACKRED(tmp, parent, field); \
+ RB_ROTATE_LEFT(head, parent, tmp, field); \
+ tmp = RB_RIGHT(parent, field); \
+ } \
+ if ((RB_LEFT(tmp, field) == NULL || \
+ RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) && \
+ (RB_RIGHT(tmp, field) == NULL || \
+ RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) { \
+ RB_COLOR(tmp, field) = RB_RED; \
+ elm = parent; \
+ parent = RB_PARENT(elm, field); \
+ } else { \
+ if (RB_RIGHT(tmp, field) == NULL || \
+ RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK) { \
+ struct type *oleft; \
+ if ((oleft = RB_LEFT(tmp, field)) \
+ != NULL) \
+ RB_COLOR(oleft, field) = RB_BLACK; \
+ RB_COLOR(tmp, field) = RB_RED; \
+ RB_ROTATE_RIGHT(head, tmp, oleft, field); \
+ tmp = RB_RIGHT(parent, field); \
+ } \
+ RB_COLOR(tmp, field) = RB_COLOR(parent, field); \
+ RB_COLOR(parent, field) = RB_BLACK; \
+ if (RB_RIGHT(tmp, field)) \
+ RB_COLOR(RB_RIGHT(tmp, field), field) = RB_BLACK; \
+ RB_ROTATE_LEFT(head, parent, tmp, field); \
+ elm = RB_ROOT(head); \
+ break; \
+ } \
+ } else { \
+ tmp = RB_LEFT(parent, field); \
+ if (RB_COLOR(tmp, field) == RB_RED) { \
+ RB_SET_BLACKRED(tmp, parent, field); \
+ RB_ROTATE_RIGHT(head, parent, tmp, field); \
+ tmp = RB_LEFT(parent, field); \
+ } \
+ if ((RB_LEFT(tmp, field) == NULL || \
+ RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) && \
+ (RB_RIGHT(tmp, field) == NULL || \
+ RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) { \
+ RB_COLOR(tmp, field) = RB_RED; \
+ elm = parent; \
+ parent = RB_PARENT(elm, field); \
+ } else { \
+ if (RB_LEFT(tmp, field) == NULL || \
+ RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) { \
+ struct type *oright; \
+ if ((oright = RB_RIGHT(tmp, field)) \
+ != NULL) \
+ RB_COLOR(oright, field) = RB_BLACK; \
+ RB_COLOR(tmp, field) = RB_RED; \
+ RB_ROTATE_LEFT(head, tmp, oright, field); \
+ tmp = RB_LEFT(parent, field); \
+ } \
+ RB_COLOR(tmp, field) = RB_COLOR(parent, field); \
+ RB_COLOR(parent, field) = RB_BLACK; \
+ if (RB_LEFT(tmp, field)) \
+ RB_COLOR(RB_LEFT(tmp, field), field) = RB_BLACK; \
+ RB_ROTATE_RIGHT(head, parent, tmp, field); \
+ elm = RB_ROOT(head); \
+ break; \
+ } \
+ } \
+ } \
+ if (elm) \
+ RB_COLOR(elm, field) = RB_BLACK; \
+} \
+ \
+attr struct type * \
+name##_RB_REMOVE(struct name *head, struct type *elm) \
+{ \
+ struct type *child, *parent, *old = elm; \
+ int color; \
+ if (RB_LEFT(elm, field) == NULL) \
+ child = RB_RIGHT(elm, field); \
+ else if (RB_RIGHT(elm, field) == NULL) \
+ child = RB_LEFT(elm, field); \
+ else { \
+ struct type *left; \
+ elm = RB_RIGHT(elm, field); \
+ while ((left = RB_LEFT(elm, field)) != NULL) \
+ elm = left; \
+ child = RB_RIGHT(elm, field); \
+ parent = RB_PARENT(elm, field); \
+ color = RB_COLOR(elm, field); \
+ if (child) \
+ RB_PARENT(child, field) = parent; \
+ if (parent) { \
+ if (RB_LEFT(parent, field) == elm) \
+ RB_LEFT(parent, field) = child; \
+ else \
+ RB_RIGHT(parent, field) = child; \
+ RB_AUGMENT(parent); \
+ } else \
+ RB_ROOT(head) = child; \
+ if (RB_PARENT(elm, field) == old) \
+ parent = elm; \
+ (elm)->field = (old)->field; \
+ if (RB_PARENT(old, field)) { \
+ if (RB_LEFT(RB_PARENT(old, field), field) == old) \
+ RB_LEFT(RB_PARENT(old, field), field) = elm; \
+ else \
+ RB_RIGHT(RB_PARENT(old, field), field) = elm; \
+ RB_AUGMENT(RB_PARENT(old, field)); \
+ } else \
+ RB_ROOT(head) = elm; \
+ RB_PARENT(RB_LEFT(old, field), field) = elm; \
+ if (RB_RIGHT(old, field)) \
+ RB_PARENT(RB_RIGHT(old, field), field) = elm; \
+ if (parent) { \
+ left = parent; \
+ do { \
+ RB_AUGMENT(left); \
+ } while ((left = RB_PARENT(left, field)) != NULL); \
+ } \
+ goto color; \
+ } \
+ parent = RB_PARENT(elm, field); \
+ color = RB_COLOR(elm, field); \
+ if (child) \
+ RB_PARENT(child, field) = parent; \
+ if (parent) { \
+ if (RB_LEFT(parent, field) == elm) \
+ RB_LEFT(parent, field) = child; \
+ else \
+ RB_RIGHT(parent, field) = child; \
+ RB_AUGMENT(parent); \
+ } else \
+ RB_ROOT(head) = child; \
+color: \
+ if (color == RB_BLACK) \
+ name##_RB_REMOVE_COLOR(head, parent, child); \
+ return (old); \
+} \
+ \
+/* Inserts a node into the RB tree */ \
+attr struct type * \
+name##_RB_INSERT(struct name *head, struct type *elm) \
+{ \
+ struct type *tmp; \
+ struct type *parent = NULL; \
+ int comp = 0; \
+ tmp = RB_ROOT(head); \
+ while (tmp) { \
+ parent = tmp; \
+ comp = (cmp)(elm, parent); \
+ if (comp < 0) \
+ tmp = RB_LEFT(tmp, field); \
+ else if (comp > 0) \
+ tmp = RB_RIGHT(tmp, field); \
+ else \
+ return (tmp); \
+ } \
+ RB_SET(elm, parent, field); \
+ if (parent != NULL) { \
+ if (comp < 0) \
+ RB_LEFT(parent, field) = elm; \
+ else \
+ RB_RIGHT(parent, field) = elm; \
+ RB_AUGMENT(parent); \
+ } else \
+ RB_ROOT(head) = elm; \
+ name##_RB_INSERT_COLOR(head, elm); \
+ return (NULL); \
+} \
+ \
+/* Finds the node with the same key as elm */ \
+attr struct type * \
+name##_RB_FIND(struct name *head, struct type *elm) \
+{ \
+ struct type *tmp = RB_ROOT(head); \
+ int comp; \
+ while (tmp) { \
+ comp = cmp(elm, tmp); \
+ if (comp < 0) \
+ tmp = RB_LEFT(tmp, field); \
+ else if (comp > 0) \
+ tmp = RB_RIGHT(tmp, field); \
+ else \
+ return (tmp); \
+ } \
+ return (NULL); \
+} \
+ \
+/* Finds the first node greater than or equal to the search key */ \
+attr struct type * \
+name##_RB_NFIND(struct name *head, struct type *elm) \
+{ \
+ struct type *tmp = RB_ROOT(head); \
+ struct type *res = NULL; \
+ int comp; \
+ while (tmp) { \
+ comp = cmp(elm, tmp); \
+ if (comp < 0) { \
+ res = tmp; \
+ tmp = RB_LEFT(tmp, field); \
+ } \
+ else if (comp > 0) \
+ tmp = RB_RIGHT(tmp, field); \
+ else \
+ return (tmp); \
+ } \
+ return (res); \
+} \
+ \
+/* ARGSUSED */ \
+attr struct type * \
+name##_RB_NEXT(struct type *elm) \
+{ \
+ if (RB_RIGHT(elm, field)) { \
+ elm = RB_RIGHT(elm, field); \
+ while (RB_LEFT(elm, field)) \
+ elm = RB_LEFT(elm, field); \
+ } else { \
+ if (RB_PARENT(elm, field) && \
+ (elm == RB_LEFT(RB_PARENT(elm, field), field))) \
+ elm = RB_PARENT(elm, field); \
+ else { \
+ while (RB_PARENT(elm, field) && \
+ (elm == RB_RIGHT(RB_PARENT(elm, field), field))) \
+ elm = RB_PARENT(elm, field); \
+ elm = RB_PARENT(elm, field); \
+ } \
+ } \
+ return (elm); \
+} \
+ \
+/* ARGSUSED */ \
+attr struct type * \
+name##_RB_PREV(struct type *elm) \
+{ \
+ if (RB_LEFT(elm, field)) { \
+ elm = RB_LEFT(elm, field); \
+ while (RB_RIGHT(elm, field)) \
+ elm = RB_RIGHT(elm, field); \
+ } else { \
+ if (RB_PARENT(elm, field) && \
+ (elm == RB_RIGHT(RB_PARENT(elm, field), field))) \
+ elm = RB_PARENT(elm, field); \
+ else { \
+ while (RB_PARENT(elm, field) && \
+ (elm == RB_LEFT(RB_PARENT(elm, field), field))) \
+ elm = RB_PARENT(elm, field); \
+ elm = RB_PARENT(elm, field); \
+ } \
+ } \
+ return (elm); \
+} \
+ \
+attr struct type * \
+name##_RB_MINMAX(struct name *head, int val) \
+{ \
+ struct type *tmp = RB_ROOT(head); \
+ struct type *parent = NULL; \
+ while (tmp) { \
+ parent = tmp; \
+ if (val < 0) \
+ tmp = RB_LEFT(tmp, field); \
+ else \
+ tmp = RB_RIGHT(tmp, field); \
+ } \
+ return (parent); \
+}
+
+#define RB_NEGINF -1
+#define RB_INF 1
+
+#define RB_INSERT(name, x, y) name##_RB_INSERT(x, y)
+#define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y)
+#define RB_FIND(name, x, y) name##_RB_FIND(x, y)
+#define RB_NFIND(name, x, y) name##_RB_NFIND(x, y)
+#define RB_NEXT(name, x, y) name##_RB_NEXT(y)
+#define RB_PREV(name, x, y) name##_RB_PREV(y)
+#define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF)
+#define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF)
+
+#define RB_FOREACH(x, name, head) \
+ for ((x) = RB_MIN(name, head); \
+ (x) != NULL; \
+ (x) = name##_RB_NEXT(x))
+
+#define RB_FOREACH_FROM(x, name, y) \
+ for ((x) = (y); \
+ ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \
+ (x) = (y))
+
+#define RB_FOREACH_SAFE(x, name, head, y) \
+ for ((x) = RB_MIN(name, head); \
+ ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \
+ (x) = (y))
+
+#define RB_FOREACH_REVERSE(x, name, head) \
+ for ((x) = RB_MAX(name, head); \
+ (x) != NULL; \
+ (x) = name##_RB_PREV(x))
+
+#define RB_FOREACH_REVERSE_FROM(x, name, y) \
+ for ((x) = (y); \
+ ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \
+ (x) = (y))
+
+#define RB_FOREACH_REVERSE_SAFE(x, name, head, y) \
+ for ((x) = RB_MAX(name, head); \
+ ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \
+ (x) = (y))
+
+#endif /* UV_TREE_H_ */
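For context, a minimal sketch of how the intrusive red-black tree macros above are typically instantiated. The names my_node, my_tree and node_cmp are illustrative only and not part of libuv; libuv itself embeds an equivalent RB_ENTRY-style structure in uv_signal_s (see UV_SIGNAL_PRIVATE_FIELDS further below).

  struct my_node {
    RB_ENTRY(my_node) entry;   /* embeds left/right/parent pointers and color */
    int key;
  };

  static int node_cmp(const struct my_node* a, const struct my_node* b) {
    return a->key < b->key ? -1 : a->key > b->key ? 1 : 0;
  }

  RB_HEAD(my_tree, my_node);
  RB_GENERATE_STATIC(my_tree, my_node, entry, node_cmp)

  static void example(void) {
    struct my_tree tree = RB_INITIALIZER(&tree);
    struct my_node a, b, *it;
    a.key = 1;
    b.key = 2;
    RB_INSERT(my_tree, &tree, &a);   /* returns NULL, or an existing node with the same key */
    RB_INSERT(my_tree, &tree, &b);
    RB_FOREACH(it, my_tree, &tree) {
      /* visits nodes in ascending node_cmp() order */
    }
  }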
diff --git a/Utilities/cmlibuv/include/uv/unix.h b/Utilities/cmlibuv/include/uv/unix.h
new file mode 100644
index 0000000..a59192f
--- /dev/null
+++ b/Utilities/cmlibuv/include/uv/unix.h
@@ -0,0 +1,527 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_UNIX_H
+#define UV_UNIX_H
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <dirent.h>
+
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <arpa/inet.h>
+#include <netdb.h> /* MAXHOSTNAMELEN on Solaris */
+
+#include <termios.h>
+#include <pwd.h>
+
+#if !defined(__MVS__)
+#include <semaphore.h>
+#include <sys/param.h> /* MAXHOSTNAMELEN on Linux and the BSDs */
+#endif
+#include <pthread.h>
+#include <signal.h>
+
+#include "threadpool.h"
+
+#ifdef CMAKE_BOOTSTRAP
+# include "posix.h"
+# if defined(__APPLE__)
+# include <TargetConditionals.h>
+# endif
+#elif defined(__linux__)
+# include "linux.h"
+#elif defined (__MVS__)
+# include "os390.h"
+#elif defined(__PASE__) /* __PASE__ and _AIX are both defined on IBM i */
+# include "posix.h" /* IBM i needs uv/posix.h, not uv/aix.h */
+#elif defined(_AIX)
+# include "aix.h"
+#elif defined(__sun)
+# include "sunos.h"
+#elif defined(__hpux)
+# include "posix.h"
+#elif defined(__APPLE__)
+# include "darwin.h"
+#elif defined(__DragonFly__) || \
+ defined(__FreeBSD__) || \
+ defined(__FreeBSD_kernel__) || \
+ defined(__OpenBSD__) || \
+ defined(__NetBSD__)
+# include "bsd.h"
+#elif defined(__CYGWIN__) || \
+ defined(__MSYS__) || \
+ defined(__GNU__)
+# include "posix.h"
+#elif defined(__HAIKU__)
+# include "posix.h"
+#elif defined(__QNX__)
+# include "posix.h"
+#endif
+
+#ifndef NI_MAXHOST
+# define NI_MAXHOST 1025
+#endif
+
+#ifndef NI_MAXSERV
+# define NI_MAXSERV 32
+#endif
+
+#ifndef UV_IO_PRIVATE_PLATFORM_FIELDS
+# define UV_IO_PRIVATE_PLATFORM_FIELDS /* empty */
+#endif
+
+struct uv__io_s;
+struct uv_loop_s;
+
+typedef void (*uv__io_cb)(struct uv_loop_s* loop,
+ struct uv__io_s* w,
+ unsigned int events);
+typedef struct uv__io_s uv__io_t;
+
+struct uv__io_s {
+ uv__io_cb cb;
+ void* pending_queue[2];
+ void* watcher_queue[2];
+ unsigned int pevents; /* Pending event mask i.e. mask at next tick. */
+ unsigned int events; /* Current event mask. */
+ int fd;
+ UV_IO_PRIVATE_PLATFORM_FIELDS
+};
+
+#ifndef UV_PLATFORM_SEM_T
+# define UV_PLATFORM_SEM_T sem_t
+#endif
+
+#ifndef UV_PLATFORM_LOOP_FIELDS
+# define UV_PLATFORM_LOOP_FIELDS /* empty */
+#endif
+
+#ifndef UV_PLATFORM_FS_EVENT_FIELDS
+# define UV_PLATFORM_FS_EVENT_FIELDS /* empty */
+#endif
+
+#ifndef UV_STREAM_PRIVATE_PLATFORM_FIELDS
+# define UV_STREAM_PRIVATE_PLATFORM_FIELDS /* empty */
+#endif
+
+/* Note: May be cast to struct iovec. See writev(2). */
+typedef struct uv_buf_t {
+ char* base;
+ size_t len;
+} uv_buf_t;
+
+typedef int uv_file;
+typedef int uv_os_sock_t;
+typedef int uv_os_fd_t;
+typedef pid_t uv_pid_t;
+
+#ifdef CMAKE_BOOTSTRAP
+#define UV_ONCE_INIT 0
+typedef int uv_once_t;
+typedef int uv_thread_t;
+typedef int uv_mutex_t;
+typedef int uv_rwlock_t;
+typedef int uv_sem_t;
+typedef int uv_cond_t;
+typedef int uv_key_t;
+typedef int uv_barrier_t;
+#else
+#define UV_ONCE_INIT PTHREAD_ONCE_INIT
+
+typedef pthread_once_t uv_once_t;
+typedef pthread_t uv_thread_t;
+typedef pthread_mutex_t uv_mutex_t;
+typedef pthread_rwlock_t uv_rwlock_t;
+typedef UV_PLATFORM_SEM_T uv_sem_t;
+typedef pthread_cond_t uv_cond_t;
+typedef pthread_key_t uv_key_t;
+
+/* Note: guard clauses should match uv_barrier_init's in src/unix/thread.c. */
+#if defined(_AIX) || \
+ defined(__OpenBSD__) || \
+ !defined(PTHREAD_BARRIER_SERIAL_THREAD)
+/* TODO(bnoordhuis) Merge into uv_barrier_t in v2. */
+struct _uv_barrier {
+ uv_mutex_t mutex;
+ uv_cond_t cond;
+ unsigned threshold;
+ unsigned in;
+ unsigned out;
+};
+
+typedef struct {
+ struct _uv_barrier* b;
+# if defined(PTHREAD_BARRIER_SERIAL_THREAD)
+ /* TODO(bnoordhuis) Remove padding in v2. */
+ char pad[sizeof(pthread_barrier_t) - sizeof(struct _uv_barrier*)];
+# endif
+} uv_barrier_t;
+#else
+typedef pthread_barrier_t uv_barrier_t;
+#endif
+
+#endif
+
+/* Platform-specific definitions for uv_spawn support. */
+typedef gid_t uv_gid_t;
+typedef uid_t uv_uid_t;
+
+typedef struct dirent uv__dirent_t;
+
+#define UV_DIR_PRIVATE_FIELDS \
+ DIR* dir;
+
+#if defined(DT_UNKNOWN)
+# define HAVE_DIRENT_TYPES
+# if defined(DT_REG)
+# define UV__DT_FILE DT_REG
+# else
+# define UV__DT_FILE -1
+# endif
+# if defined(DT_DIR)
+# define UV__DT_DIR DT_DIR
+# else
+# define UV__DT_DIR -2
+# endif
+# if defined(DT_LNK)
+# define UV__DT_LINK DT_LNK
+# else
+# define UV__DT_LINK -3
+# endif
+# if defined(DT_FIFO)
+# define UV__DT_FIFO DT_FIFO
+# else
+# define UV__DT_FIFO -4
+# endif
+# if defined(DT_SOCK)
+# define UV__DT_SOCKET DT_SOCK
+# else
+# define UV__DT_SOCKET -5
+# endif
+# if defined(DT_CHR)
+# define UV__DT_CHAR DT_CHR
+# else
+# define UV__DT_CHAR -6
+# endif
+# if defined(DT_BLK)
+# define UV__DT_BLOCK DT_BLK
+# else
+# define UV__DT_BLOCK -7
+# endif
+#endif
+
+/* Platform-specific definitions for uv_dlopen support. */
+#define UV_DYNAMIC /* empty */
+
+typedef struct {
+ void* handle;
+ char* errmsg;
+} uv_lib_t;
+
+#define UV_LOOP_PRIVATE_FIELDS \
+ unsigned long flags; \
+ int backend_fd; \
+ void* pending_queue[2]; \
+ void* watcher_queue[2]; \
+ uv__io_t** watchers; \
+ unsigned int nwatchers; \
+ unsigned int nfds; \
+ void* wq[2]; \
+ uv_mutex_t wq_mutex; \
+ uv_async_t wq_async; \
+ uv_rwlock_t cloexec_lock; \
+ uv_handle_t* closing_handles; \
+ void* process_handles[2]; \
+ void* prepare_handles[2]; \
+ void* check_handles[2]; \
+ void* idle_handles[2]; \
+ void* async_handles[2]; \
+ void (*async_unused)(void); /* TODO(bnoordhuis) Remove in libuv v2. */ \
+ uv__io_t async_io_watcher; \
+ int async_wfd; \
+ struct { \
+ void* min; \
+ unsigned int nelts; \
+ } timer_heap; \
+ uint64_t timer_counter; \
+ uint64_t time; \
+ int signal_pipefd[2]; \
+ uv__io_t signal_io_watcher; \
+ uv_signal_t child_watcher; \
+ int emfile_fd; \
+ UV_PLATFORM_LOOP_FIELDS \
+
+#define UV_REQ_TYPE_PRIVATE /* empty */
+
+#define UV_REQ_PRIVATE_FIELDS /* empty */
+
+#define UV_PRIVATE_REQ_TYPES /* empty */
+
+#define UV_WRITE_PRIVATE_FIELDS \
+ void* queue[2]; \
+ unsigned int write_index; \
+ uv_buf_t* bufs; \
+ unsigned int nbufs; \
+ int error; \
+ uv_buf_t bufsml[4]; \
+
+#define UV_CONNECT_PRIVATE_FIELDS \
+ void* queue[2]; \
+
+#define UV_SHUTDOWN_PRIVATE_FIELDS /* empty */
+
+#define UV_UDP_SEND_PRIVATE_FIELDS \
+ void* queue[2]; \
+ struct sockaddr_storage addr; \
+ unsigned int nbufs; \
+ uv_buf_t* bufs; \
+ ssize_t status; \
+ uv_udp_send_cb send_cb; \
+ uv_buf_t bufsml[4]; \
+
+#define UV_HANDLE_PRIVATE_FIELDS \
+ uv_handle_t* next_closing; \
+ unsigned int flags; \
+
+#define UV_STREAM_PRIVATE_FIELDS \
+ uv_connect_t *connect_req; \
+ uv_shutdown_t *shutdown_req; \
+ uv__io_t io_watcher; \
+ void* write_queue[2]; \
+ void* write_completed_queue[2]; \
+ uv_connection_cb connection_cb; \
+ int delayed_error; \
+ int accepted_fd; \
+ void* queued_fds; \
+ UV_STREAM_PRIVATE_PLATFORM_FIELDS \
+
+#define UV_TCP_PRIVATE_FIELDS /* empty */
+
+#define UV_UDP_PRIVATE_FIELDS \
+ uv_alloc_cb alloc_cb; \
+ uv_udp_recv_cb recv_cb; \
+ uv__io_t io_watcher; \
+ void* write_queue[2]; \
+ void* write_completed_queue[2]; \
+
+#define UV_PIPE_PRIVATE_FIELDS \
+ const char* pipe_fname; /* strdup'ed */
+
+#define UV_POLL_PRIVATE_FIELDS \
+ uv__io_t io_watcher;
+
+#define UV_PREPARE_PRIVATE_FIELDS \
+ uv_prepare_cb prepare_cb; \
+ void* queue[2]; \
+
+#define UV_CHECK_PRIVATE_FIELDS \
+ uv_check_cb check_cb; \
+ void* queue[2]; \
+
+#define UV_IDLE_PRIVATE_FIELDS \
+ uv_idle_cb idle_cb; \
+ void* queue[2]; \
+
+#define UV_ASYNC_PRIVATE_FIELDS \
+ uv_async_cb async_cb; \
+ void* queue[2]; \
+ int pending; \
+
+#define UV_TIMER_PRIVATE_FIELDS \
+ uv_timer_cb timer_cb; \
+ void* heap_node[3]; \
+ uint64_t timeout; \
+ uint64_t repeat; \
+ uint64_t start_id;
+
+#define UV_GETADDRINFO_PRIVATE_FIELDS \
+ struct uv__work work_req; \
+ uv_getaddrinfo_cb cb; \
+ struct addrinfo* hints; \
+ char* hostname; \
+ char* service; \
+ struct addrinfo* addrinfo; \
+ int retcode;
+
+#define UV_GETNAMEINFO_PRIVATE_FIELDS \
+ struct uv__work work_req; \
+ uv_getnameinfo_cb getnameinfo_cb; \
+ struct sockaddr_storage storage; \
+ int flags; \
+ char host[NI_MAXHOST]; \
+ char service[NI_MAXSERV]; \
+ int retcode;
+
+#define UV_PROCESS_PRIVATE_FIELDS \
+ void* queue[2]; \
+ int status; \
+
+#define UV_FS_PRIVATE_FIELDS \
+ const char *new_path; \
+ uv_file file; \
+ int flags; \
+ mode_t mode; \
+ unsigned int nbufs; \
+ uv_buf_t* bufs; \
+ off_t off; \
+ uv_uid_t uid; \
+ uv_gid_t gid; \
+ double atime; \
+ double mtime; \
+ struct uv__work work_req; \
+ uv_buf_t bufsml[4]; \
+
+#define UV_WORK_PRIVATE_FIELDS \
+ struct uv__work work_req;
+
+#define UV_TTY_PRIVATE_FIELDS \
+ struct termios orig_termios; \
+ int mode;
+
+#define UV_SIGNAL_PRIVATE_FIELDS \
+ /* RB_ENTRY(uv_signal_s) tree_entry; */ \
+ struct { \
+ struct uv_signal_s* rbe_left; \
+ struct uv_signal_s* rbe_right; \
+ struct uv_signal_s* rbe_parent; \
+ int rbe_color; \
+ } tree_entry; \
+  /* Use two counters here so we don't have to fiddle with atomics. */ \
+ unsigned int caught_signals; \
+ unsigned int dispatched_signals;
+
+#define UV_FS_EVENT_PRIVATE_FIELDS \
+ uv_fs_event_cb cb; \
+ UV_PLATFORM_FS_EVENT_FIELDS \
+
+/* fs open() flags supported on this platform: */
+#if defined(O_APPEND)
+# define UV_FS_O_APPEND O_APPEND
+#else
+# define UV_FS_O_APPEND 0
+#endif
+#if defined(O_CREAT)
+# define UV_FS_O_CREAT O_CREAT
+#else
+# define UV_FS_O_CREAT 0
+#endif
+
+#if defined(__linux__) && defined(__arm__)
+# define UV_FS_O_DIRECT 0x10000
+#elif defined(__linux__) && defined(__m68k__)
+# define UV_FS_O_DIRECT 0x10000
+#elif defined(__linux__) && defined(__mips__)
+# define UV_FS_O_DIRECT 0x08000
+#elif defined(__linux__) && defined(__powerpc__)
+# define UV_FS_O_DIRECT 0x20000
+#elif defined(__linux__) && defined(__s390x__)
+# define UV_FS_O_DIRECT 0x04000
+#elif defined(__linux__) && defined(__x86_64__)
+# define UV_FS_O_DIRECT 0x04000
+#elif defined(O_DIRECT)
+# define UV_FS_O_DIRECT O_DIRECT
+#else
+# define UV_FS_O_DIRECT 0
+#endif
+
+#if defined(O_DIRECTORY)
+# define UV_FS_O_DIRECTORY O_DIRECTORY
+#else
+# define UV_FS_O_DIRECTORY 0
+#endif
+#if defined(O_DSYNC)
+# define UV_FS_O_DSYNC O_DSYNC
+#else
+# define UV_FS_O_DSYNC 0
+#endif
+#if defined(O_EXCL)
+# define UV_FS_O_EXCL O_EXCL
+#else
+# define UV_FS_O_EXCL 0
+#endif
+#if defined(O_EXLOCK)
+# define UV_FS_O_EXLOCK O_EXLOCK
+#else
+# define UV_FS_O_EXLOCK 0
+#endif
+#if defined(O_NOATIME)
+# define UV_FS_O_NOATIME O_NOATIME
+#else
+# define UV_FS_O_NOATIME 0
+#endif
+#if defined(O_NOCTTY)
+# define UV_FS_O_NOCTTY O_NOCTTY
+#else
+# define UV_FS_O_NOCTTY 0
+#endif
+#if defined(O_NOFOLLOW)
+# define UV_FS_O_NOFOLLOW O_NOFOLLOW
+#else
+# define UV_FS_O_NOFOLLOW 0
+#endif
+#if defined(O_NONBLOCK)
+# define UV_FS_O_NONBLOCK O_NONBLOCK
+#else
+# define UV_FS_O_NONBLOCK 0
+#endif
+#if defined(O_RDONLY)
+# define UV_FS_O_RDONLY O_RDONLY
+#else
+# define UV_FS_O_RDONLY 0
+#endif
+#if defined(O_RDWR)
+# define UV_FS_O_RDWR O_RDWR
+#else
+# define UV_FS_O_RDWR 0
+#endif
+#if defined(O_SYMLINK)
+# define UV_FS_O_SYMLINK O_SYMLINK
+#else
+# define UV_FS_O_SYMLINK 0
+#endif
+#if defined(O_SYNC)
+# define UV_FS_O_SYNC O_SYNC
+#else
+# define UV_FS_O_SYNC 0
+#endif
+#if defined(O_TRUNC)
+# define UV_FS_O_TRUNC O_TRUNC
+#else
+# define UV_FS_O_TRUNC 0
+#endif
+#if defined(O_WRONLY)
+# define UV_FS_O_WRONLY O_WRONLY
+#else
+# define UV_FS_O_WRONLY 0
+#endif
+
+/* fs open() flags supported on other platforms: */
+#define UV_FS_O_FILEMAP 0
+#define UV_FS_O_RANDOM 0
+#define UV_FS_O_SHORT_LIVED 0
+#define UV_FS_O_SEQUENTIAL 0
+#define UV_FS_O_TEMPORARY 0
+
+#endif /* UV_UNIX_H */
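The note above uv_buf_t ("May be cast to struct iovec") is what allows buffers to be handed to the kernel without copying. A minimal sketch under only that layout assumption; write_bufs is a hypothetical helper, not a libuv API:

  #include <sys/uio.h>   /* writev */
  #include "uv.h"

  /* Forward a uv_buf_t array directly to writev(2); on Unix, uv_buf_t and
   * struct iovec share the same layout, per the note in this header. */
  static ssize_t write_bufs(int fd, uv_buf_t* bufs, unsigned int nbufs) {
    return writev(fd, (struct iovec*) bufs, (int) nbufs);
  }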
diff --git a/Utilities/cmlibuv/include/uv/version.h b/Utilities/cmlibuv/include/uv/version.h
new file mode 100644
index 0000000..96c1c13
--- /dev/null
+++ b/Utilities/cmlibuv/include/uv/version.h
@@ -0,0 +1,43 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_VERSION_H
+#define UV_VERSION_H
+
+ /*
+ * Versions with the same major number are ABI stable. API is allowed to
+ * evolve between minor releases, but only in a backwards compatible way.
+ * Make sure you update the -soname directives in configure.ac
+ * whenever you bump UV_VERSION_MAJOR or UV_VERSION_MINOR (but
+ * not UV_VERSION_PATCH).
+ */
+
+#define UV_VERSION_MAJOR 1
+#define UV_VERSION_MINOR 39
+#define UV_VERSION_PATCH 1
+#define UV_VERSION_IS_RELEASE 0
+#define UV_VERSION_SUFFIX "dev"
+
+#define UV_VERSION_HEX ((UV_VERSION_MAJOR << 16) | \
+ (UV_VERSION_MINOR << 8) | \
+ (UV_VERSION_PATCH))
+
+#endif /* UV_VERSION_H */
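Because UV_VERSION_HEX packs major, minor and patch into one integer (major << 16 | minor << 8 | patch), consumers can gate on a minimum libuv version at compile time. A small sketch; the 1.39 threshold is only an example:

  #include "uv.h"   /* pulls in uv/version.h */

  /* 1.39.0 encodes as (1 << 16) | (39 << 8) == 0x012700. */
  #if UV_VERSION_HEX < ((1 << 16) | (39 << 8))
  # error "this code expects libuv 1.39 or newer"
  #endif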
diff --git a/Utilities/cmlibuv/include/uv/win.h b/Utilities/cmlibuv/include/uv/win.h
new file mode 100644
index 0000000..f86357b
--- /dev/null
+++ b/Utilities/cmlibuv/include/uv/win.h
@@ -0,0 +1,702 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _WIN32_WINNT
+# define _WIN32_WINNT 0x0600
+#endif
+
+#if !defined(_SSIZE_T_) && !defined(_SSIZE_T_DEFINED)
+typedef intptr_t ssize_t;
+# define SSIZE_MAX INTPTR_MAX
+# define _SSIZE_T_
+# define _SSIZE_T_DEFINED
+#endif
+
+#include <winsock2.h>
+
+#if defined(__MINGW32__) && !defined(__MINGW64_VERSION_MAJOR)
+typedef struct pollfd {
+ SOCKET fd;
+ short events;
+ short revents;
+} WSAPOLLFD, *PWSAPOLLFD, *LPWSAPOLLFD;
+#endif
+
+#ifndef LOCALE_INVARIANT
+# define LOCALE_INVARIANT 0x007f
+#endif
+
+#include <mswsock.h>
+#include <ws2tcpip.h>
+#include <windows.h>
+
+#include <process.h>
+#include <signal.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+
+#if defined(_MSC_VER) && _MSC_VER < 1600
+# include "stdint-msvc2008.h"
+#else
+# include <stdint.h>
+#endif
+
+#include "tree.h"
+#include "threadpool.h"
+
+#define MAX_PIPENAME_LEN 256
+
+#ifndef S_IFLNK
+# define S_IFLNK 0xA000
+#endif
+
+/* Additional signals supported by uv_signal and/or uv_kill. The CRT defines
+ * the following signals already:
+ *
+ * #define SIGINT 2
+ * #define SIGILL 4
+ * #define SIGABRT_COMPAT 6
+ * #define SIGFPE 8
+ * #define SIGSEGV 11
+ * #define SIGTERM 15
+ * #define SIGBREAK 21
+ * #define SIGABRT 22
+ *
+ * The additional signals have values that are common on other Unix
+ * variants (Linux and Darwin).
+ */
+#define SIGHUP 1
+#define SIGKILL 9
+#define SIGWINCH 28
+
+/* Redefine NSIG to take SIGWINCH into consideration */
+#if defined(NSIG) && NSIG <= SIGWINCH
+# undef NSIG
+#endif
+#ifndef NSIG
+# define NSIG SIGWINCH + 1
+#endif
+
+/* The CRT defines SIGABRT_COMPAT as 6, which equals SIGABRT on many unix-like
+ * platforms. However, MinGW doesn't define it, so we do. */
+#ifndef SIGABRT_COMPAT
+# define SIGABRT_COMPAT 6
+#endif
+
+/*
+ * Guids and typedefs for winsock extension functions
+ * Mingw32 doesn't have these :-(
+ */
+#ifndef WSAID_ACCEPTEX
+# define WSAID_ACCEPTEX \
+ {0xb5367df1, 0xcbac, 0x11cf, \
+ {0x95, 0xca, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92}}
+#endif
+
+#ifndef WSAID_CONNECTEX
+# define WSAID_CONNECTEX \
+ {0x25a207b9, 0xddf3, 0x4660, \
+ {0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e}}
+#endif
+
+#ifndef WSAID_GETACCEPTEXSOCKADDRS
+# define WSAID_GETACCEPTEXSOCKADDRS \
+ {0xb5367df2, 0xcbac, 0x11cf, \
+ {0x95, 0xca, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92}}
+#endif
+
+#ifndef WSAID_DISCONNECTEX
+# define WSAID_DISCONNECTEX \
+ {0x7fda2e11, 0x8630, 0x436f, \
+ {0xa0, 0x31, 0xf5, 0x36, 0xa6, 0xee, 0xc1, 0x57}}
+#endif
+
+#ifndef WSAID_TRANSMITFILE
+# define WSAID_TRANSMITFILE \
+ {0xb5367df0, 0xcbac, 0x11cf, \
+ {0x95, 0xca, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92}}
+#endif
+
+#if (defined(__MINGW32__) && !defined(__MINGW64_VERSION_MAJOR)) \
+ || (defined(_MSC_VER) && _MSC_VER < 1500)
+ typedef BOOL (PASCAL *LPFN_ACCEPTEX)
+ (SOCKET sListenSocket,
+ SOCKET sAcceptSocket,
+ PVOID lpOutputBuffer,
+ DWORD dwReceiveDataLength,
+ DWORD dwLocalAddressLength,
+ DWORD dwRemoteAddressLength,
+ LPDWORD lpdwBytesReceived,
+ LPOVERLAPPED lpOverlapped);
+
+ typedef BOOL (PASCAL *LPFN_CONNECTEX)
+ (SOCKET s,
+ const struct sockaddr* name,
+ int namelen,
+ PVOID lpSendBuffer,
+ DWORD dwSendDataLength,
+ LPDWORD lpdwBytesSent,
+ LPOVERLAPPED lpOverlapped);
+
+ typedef void (PASCAL *LPFN_GETACCEPTEXSOCKADDRS)
+ (PVOID lpOutputBuffer,
+ DWORD dwReceiveDataLength,
+ DWORD dwLocalAddressLength,
+ DWORD dwRemoteAddressLength,
+ LPSOCKADDR* LocalSockaddr,
+ LPINT LocalSockaddrLength,
+ LPSOCKADDR* RemoteSockaddr,
+ LPINT RemoteSockaddrLength);
+
+ typedef BOOL (PASCAL *LPFN_DISCONNECTEX)
+ (SOCKET hSocket,
+ LPOVERLAPPED lpOverlapped,
+ DWORD dwFlags,
+ DWORD reserved);
+
+ typedef BOOL (PASCAL *LPFN_TRANSMITFILE)
+ (SOCKET hSocket,
+ HANDLE hFile,
+ DWORD nNumberOfBytesToWrite,
+ DWORD nNumberOfBytesPerSend,
+ LPOVERLAPPED lpOverlapped,
+ LPTRANSMIT_FILE_BUFFERS lpTransmitBuffers,
+ DWORD dwFlags);
+
+ typedef PVOID RTL_SRWLOCK;
+ typedef RTL_SRWLOCK SRWLOCK, *PSRWLOCK;
+#endif
+
+typedef int (WSAAPI* LPFN_WSARECV)
+ (SOCKET socket,
+ LPWSABUF buffers,
+ DWORD buffer_count,
+ LPDWORD bytes,
+ LPDWORD flags,
+ LPWSAOVERLAPPED overlapped,
+ LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine);
+
+typedef int (WSAAPI* LPFN_WSARECVFROM)
+ (SOCKET socket,
+ LPWSABUF buffers,
+ DWORD buffer_count,
+ LPDWORD bytes,
+ LPDWORD flags,
+ struct sockaddr* addr,
+ LPINT addr_len,
+ LPWSAOVERLAPPED overlapped,
+ LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine);
+
+#ifndef _NTDEF_
+ typedef LONG NTSTATUS;
+ typedef NTSTATUS *PNTSTATUS;
+#endif
+
+#ifndef RTL_CONDITION_VARIABLE_INIT
+ typedef PVOID CONDITION_VARIABLE, *PCONDITION_VARIABLE;
+#endif
+
+typedef struct _AFD_POLL_HANDLE_INFO {
+ HANDLE Handle;
+ ULONG Events;
+ NTSTATUS Status;
+} AFD_POLL_HANDLE_INFO, *PAFD_POLL_HANDLE_INFO;
+
+typedef struct _AFD_POLL_INFO {
+ LARGE_INTEGER Timeout;
+ ULONG NumberOfHandles;
+ ULONG Exclusive;
+ AFD_POLL_HANDLE_INFO Handles[1];
+} AFD_POLL_INFO, *PAFD_POLL_INFO;
+
+#define UV_MSAFD_PROVIDER_COUNT 3
+
+
+/**
+ * It should be possible to cast uv_buf_t[] to WSABUF[];
+ * see http://msdn.microsoft.com/en-us/library/ms741542(v=vs.85).aspx
+ */
+typedef struct uv_buf_t {
+ ULONG len;
+ char* base;
+} uv_buf_t;
+
+typedef int uv_file;
+typedef SOCKET uv_os_sock_t;
+typedef HANDLE uv_os_fd_t;
+typedef int uv_pid_t;
+
+typedef HANDLE uv_thread_t;
+
+typedef HANDLE uv_sem_t;
+
+typedef CRITICAL_SECTION uv_mutex_t;
+
+/* This condition variable implementation is based on the SetEvent solution
+ * (section 3.2) at http://www.cs.wustl.edu/~schmidt/win32-cv-1.html
+ * We could not use the SignalObjectAndWait solution (section 3.4) because
+ * it wants the 2nd argument (type uv_mutex_t) of uv_cond_wait() and
+ * uv_cond_timedwait() to be HANDLEs, but we use CRITICAL_SECTIONs.
+ */
+
+typedef union {
+ CONDITION_VARIABLE cond_var;
+ struct {
+ unsigned int waiters_count;
+ CRITICAL_SECTION waiters_count_lock;
+ HANDLE signal_event;
+ HANDLE broadcast_event;
+ } unused_; /* TODO: retained for ABI compatibility; remove me in v2.x. */
+} uv_cond_t;
+
+typedef union {
+ struct {
+ unsigned int num_readers_;
+ CRITICAL_SECTION num_readers_lock_;
+ HANDLE write_semaphore_;
+ } state_;
+ /* TODO: remove me in v2.x. */
+ struct {
+ SRWLOCK unused_;
+ } unused1_;
+ /* TODO: remove me in v2.x. */
+ struct {
+ uv_mutex_t unused1_;
+ uv_mutex_t unused2_;
+ } unused2_;
+} uv_rwlock_t;
+
+typedef struct {
+ unsigned int n;
+ unsigned int count;
+ uv_mutex_t mutex;
+ uv_sem_t turnstile1;
+ uv_sem_t turnstile2;
+} uv_barrier_t;
+
+typedef struct {
+ DWORD tls_index;
+} uv_key_t;
+
+#define UV_ONCE_INIT { 0, NULL }
+
+typedef struct uv_once_s {
+ unsigned char ran;
+ HANDLE event;
+} uv_once_t;
+
+/* Platform-specific definitions for uv_spawn support. */
+typedef unsigned char uv_uid_t;
+typedef unsigned char uv_gid_t;
+
+typedef struct uv__dirent_s {
+ int d_type;
+ char d_name[1];
+} uv__dirent_t;
+
+#define UV_DIR_PRIVATE_FIELDS \
+ HANDLE dir_handle; \
+ WIN32_FIND_DATAW find_data; \
+ BOOL need_find_call;
+
+#define HAVE_DIRENT_TYPES
+#define UV__DT_DIR UV_DIRENT_DIR
+#define UV__DT_FILE UV_DIRENT_FILE
+#define UV__DT_LINK UV_DIRENT_LINK
+#define UV__DT_FIFO UV_DIRENT_FIFO
+#define UV__DT_SOCKET UV_DIRENT_SOCKET
+#define UV__DT_CHAR UV_DIRENT_CHAR
+#define UV__DT_BLOCK UV_DIRENT_BLOCK
+
+/* Platform-specific definitions for uv_dlopen support. */
+#define UV_DYNAMIC FAR WINAPI
+typedef struct {
+ HMODULE handle;
+ char* errmsg;
+} uv_lib_t;
+
+#define UV_LOOP_PRIVATE_FIELDS \
+ /* The loop's I/O completion port */ \
+ HANDLE iocp; \
+  /* The current time according to the event loop, in msecs. */ \
+ uint64_t time; \
+ /* Tail of a single-linked circular queue of pending reqs. If the queue */ \
+ /* is empty, tail_ is NULL. If there is only one item, */ \
+ /* tail_->next_req == tail_ */ \
+ uv_req_t* pending_reqs_tail; \
+ /* Head of a single-linked list of closed handles */ \
+ uv_handle_t* endgame_handles; \
+ /* TODO(bnoordhuis) Stop heap-allocating |timer_heap| in libuv v2.x. */ \
+ void* timer_heap; \
+ /* Lists of active loop (prepare / check / idle) watchers */ \
+ uv_prepare_t* prepare_handles; \
+ uv_check_t* check_handles; \
+ uv_idle_t* idle_handles; \
+ /* This pointer will refer to the prepare/check/idle handle whose */ \
+ /* callback is scheduled to be called next. This is needed to allow */ \
+  /* safe removal from one of the lists above while that list is being */ \
+  /* iterated over. */ \
+ uv_prepare_t* next_prepare_handle; \
+ uv_check_t* next_check_handle; \
+ uv_idle_t* next_idle_handle; \
+ /* This handle holds the peer sockets for the fast variant of uv_poll_t */ \
+ SOCKET poll_peer_sockets[UV_MSAFD_PROVIDER_COUNT]; \
+ /* Counter to keep track of active tcp streams */ \
+ unsigned int active_tcp_streams; \
+ /* Counter to keep track of active udp streams */ \
+ unsigned int active_udp_streams; \
+  /* Counter for started timers */ \
+ uint64_t timer_counter; \
+ /* Threadpool */ \
+ void* wq[2]; \
+ uv_mutex_t wq_mutex; \
+ uv_async_t wq_async;
+
+#define UV_REQ_TYPE_PRIVATE \
+ /* TODO: remove the req suffix */ \
+ UV_ACCEPT, \
+ UV_FS_EVENT_REQ, \
+ UV_POLL_REQ, \
+ UV_PROCESS_EXIT, \
+ UV_READ, \
+ UV_UDP_RECV, \
+ UV_WAKEUP, \
+ UV_SIGNAL_REQ,
+
+#define UV_REQ_PRIVATE_FIELDS \
+ union { \
+ /* Used by I/O operations */ \
+ struct { \
+ OVERLAPPED overlapped; \
+ size_t queued_bytes; \
+ } io; \
+ } u; \
+ struct uv_req_s* next_req;
+
+#define UV_WRITE_PRIVATE_FIELDS \
+ int coalesced; \
+ uv_buf_t write_buffer; \
+ HANDLE event_handle; \
+ HANDLE wait_handle;
+
+#define UV_CONNECT_PRIVATE_FIELDS \
+ /* empty */
+
+#define UV_SHUTDOWN_PRIVATE_FIELDS \
+ /* empty */
+
+#define UV_UDP_SEND_PRIVATE_FIELDS \
+ /* empty */
+
+#define UV_PRIVATE_REQ_TYPES \
+ typedef struct uv_pipe_accept_s { \
+ UV_REQ_FIELDS \
+ HANDLE pipeHandle; \
+ struct uv_pipe_accept_s* next_pending; \
+ } uv_pipe_accept_t; \
+ \
+ typedef struct uv_tcp_accept_s { \
+ UV_REQ_FIELDS \
+ SOCKET accept_socket; \
+ char accept_buffer[sizeof(struct sockaddr_storage) * 2 + 32]; \
+ HANDLE event_handle; \
+ HANDLE wait_handle; \
+ struct uv_tcp_accept_s* next_pending; \
+ } uv_tcp_accept_t; \
+ \
+ typedef struct uv_read_s { \
+ UV_REQ_FIELDS \
+ HANDLE event_handle; \
+ HANDLE wait_handle; \
+ } uv_read_t;
+
+#define uv_stream_connection_fields \
+ unsigned int write_reqs_pending; \
+ uv_shutdown_t* shutdown_req;
+
+#define uv_stream_server_fields \
+ uv_connection_cb connection_cb;
+
+#define UV_STREAM_PRIVATE_FIELDS \
+ unsigned int reqs_pending; \
+ int activecnt; \
+ uv_read_t read_req; \
+ union { \
+ struct { uv_stream_connection_fields } conn; \
+ struct { uv_stream_server_fields } serv; \
+ } stream;
+
+#define uv_tcp_server_fields \
+ uv_tcp_accept_t* accept_reqs; \
+ unsigned int processed_accepts; \
+ uv_tcp_accept_t* pending_accepts; \
+ LPFN_ACCEPTEX func_acceptex;
+
+#define uv_tcp_connection_fields \
+ uv_buf_t read_buffer; \
+ LPFN_CONNECTEX func_connectex;
+
+#define UV_TCP_PRIVATE_FIELDS \
+ SOCKET socket; \
+ int delayed_error; \
+ union { \
+ struct { uv_tcp_server_fields } serv; \
+ struct { uv_tcp_connection_fields } conn; \
+ } tcp;
+
+#define UV_UDP_PRIVATE_FIELDS \
+ SOCKET socket; \
+ unsigned int reqs_pending; \
+ int activecnt; \
+ uv_req_t recv_req; \
+ uv_buf_t recv_buffer; \
+ struct sockaddr_storage recv_from; \
+ int recv_from_len; \
+ uv_udp_recv_cb recv_cb; \
+ uv_alloc_cb alloc_cb; \
+ LPFN_WSARECV func_wsarecv; \
+ LPFN_WSARECVFROM func_wsarecvfrom;
+
+#define uv_pipe_server_fields \
+ int pending_instances; \
+ uv_pipe_accept_t* accept_reqs; \
+ uv_pipe_accept_t* pending_accepts;
+
+#define uv_pipe_connection_fields \
+ uv_timer_t* eof_timer; \
+ uv_write_t dummy; /* TODO: retained for ABI compat; remove this in v2.x. */ \
+ DWORD ipc_remote_pid; \
+ union { \
+ uint32_t payload_remaining; \
+ uint64_t dummy; /* TODO: retained for ABI compat; remove this in v2.x. */ \
+ } ipc_data_frame; \
+ void* ipc_xfer_queue[2]; \
+ int ipc_xfer_queue_length; \
+ uv_write_t* non_overlapped_writes_tail; \
+ CRITICAL_SECTION readfile_thread_lock; \
+ volatile HANDLE readfile_thread_handle;
+
+#define UV_PIPE_PRIVATE_FIELDS \
+ HANDLE handle; \
+ WCHAR* name; \
+ union { \
+ struct { uv_pipe_server_fields } serv; \
+ struct { uv_pipe_connection_fields } conn; \
+ } pipe;
+
+/* TODO: put the parser states in a union - TTY handles are always half-duplex
+ * so read-state can safely overlap write-state. */
+#define UV_TTY_PRIVATE_FIELDS \
+ HANDLE handle; \
+ union { \
+ struct { \
+ /* Used for readable TTY handles */ \
+ /* TODO: remove me in v2.x. */ \
+ HANDLE unused_; \
+ uv_buf_t read_line_buffer; \
+ HANDLE read_raw_wait; \
+ /* Fields used for translating win keystrokes into vt100 characters */ \
+ char last_key[8]; \
+ unsigned char last_key_offset; \
+ unsigned char last_key_len; \
+ WCHAR last_utf16_high_surrogate; \
+ INPUT_RECORD last_input_record; \
+ } rd; \
+ struct { \
+ /* Used for writable TTY handles */ \
+ /* utf8-to-utf16 conversion state */ \
+ unsigned int utf8_codepoint; \
+ unsigned char utf8_bytes_left; \
+ /* eol conversion state */ \
+ unsigned char previous_eol; \
+ /* ansi parser state */ \
+ unsigned short ansi_parser_state; \
+ unsigned char ansi_csi_argc; \
+ unsigned short ansi_csi_argv[4]; \
+ COORD saved_position; \
+ WORD saved_attributes; \
+ } wr; \
+ } tty;
+
+#define UV_POLL_PRIVATE_FIELDS \
+ SOCKET socket; \
+ /* Used in fast mode */ \
+ SOCKET peer_socket; \
+ AFD_POLL_INFO afd_poll_info_1; \
+ AFD_POLL_INFO afd_poll_info_2; \
+ /* Used in fast and slow mode. */ \
+ uv_req_t poll_req_1; \
+ uv_req_t poll_req_2; \
+ unsigned char submitted_events_1; \
+ unsigned char submitted_events_2; \
+ unsigned char mask_events_1; \
+ unsigned char mask_events_2; \
+ unsigned char events;
+
+#define UV_TIMER_PRIVATE_FIELDS \
+ void* heap_node[3]; \
+ int unused; \
+ uint64_t timeout; \
+ uint64_t repeat; \
+ uint64_t start_id; \
+ uv_timer_cb timer_cb;
+
+#define UV_ASYNC_PRIVATE_FIELDS \
+ struct uv_req_s async_req; \
+ uv_async_cb async_cb; \
+ /* char to avoid alignment issues */ \
+ char volatile async_sent;
+
+#define UV_PREPARE_PRIVATE_FIELDS \
+ uv_prepare_t* prepare_prev; \
+ uv_prepare_t* prepare_next; \
+ uv_prepare_cb prepare_cb;
+
+#define UV_CHECK_PRIVATE_FIELDS \
+ uv_check_t* check_prev; \
+ uv_check_t* check_next; \
+ uv_check_cb check_cb;
+
+#define UV_IDLE_PRIVATE_FIELDS \
+ uv_idle_t* idle_prev; \
+ uv_idle_t* idle_next; \
+ uv_idle_cb idle_cb;
+
+#define UV_HANDLE_PRIVATE_FIELDS \
+ uv_handle_t* endgame_next; \
+ unsigned int flags;
+
+#define UV_GETADDRINFO_PRIVATE_FIELDS \
+ struct uv__work work_req; \
+ uv_getaddrinfo_cb getaddrinfo_cb; \
+ void* alloc; \
+ WCHAR* node; \
+ WCHAR* service; \
+ /* The addrinfoW field is used to store a pointer to the hints, and */ \
+ /* later on to store the result of GetAddrInfoW. The final result will */ \
+ /* be converted to struct addrinfo* and stored in the addrinfo field. */ \
+ struct addrinfoW* addrinfow; \
+ struct addrinfo* addrinfo; \
+ int retcode;
+
+#define UV_GETNAMEINFO_PRIVATE_FIELDS \
+ struct uv__work work_req; \
+ uv_getnameinfo_cb getnameinfo_cb; \
+ struct sockaddr_storage storage; \
+ int flags; \
+ char host[NI_MAXHOST]; \
+ char service[NI_MAXSERV]; \
+ int retcode;
+
+#define UV_PROCESS_PRIVATE_FIELDS \
+ struct uv_process_exit_s { \
+ UV_REQ_FIELDS \
+ } exit_req; \
+ BYTE* child_stdio_buffer; \
+ int exit_signal; \
+ HANDLE wait_handle; \
+ HANDLE process_handle; \
+ volatile char exit_cb_pending;
+
+#define UV_FS_PRIVATE_FIELDS \
+ struct uv__work work_req; \
+ int flags; \
+ DWORD sys_errno_; \
+ union { \
+ /* TODO: remove me in 0.9. */ \
+ WCHAR* pathw; \
+ int fd; \
+ } file; \
+ union { \
+ struct { \
+ int mode; \
+ WCHAR* new_pathw; \
+ int file_flags; \
+ int fd_out; \
+ unsigned int nbufs; \
+ uv_buf_t* bufs; \
+ int64_t offset; \
+ uv_buf_t bufsml[4]; \
+ } info; \
+ struct { \
+ double atime; \
+ double mtime; \
+ } time; \
+ } fs;
+
+#define UV_WORK_PRIVATE_FIELDS \
+ struct uv__work work_req;
+
+#define UV_FS_EVENT_PRIVATE_FIELDS \
+ struct uv_fs_event_req_s { \
+ UV_REQ_FIELDS \
+ } req; \
+ HANDLE dir_handle; \
+ int req_pending; \
+ uv_fs_event_cb cb; \
+ WCHAR* filew; \
+ WCHAR* short_filew; \
+ WCHAR* dirw; \
+ char* buffer;
+
+#define UV_SIGNAL_PRIVATE_FIELDS \
+ RB_ENTRY(uv_signal_s) tree_entry; \
+ struct uv_req_s signal_req; \
+ unsigned long pending_signum;
+
+#ifndef F_OK
+#define F_OK 0
+#endif
+#ifndef R_OK
+#define R_OK 4
+#endif
+#ifndef W_OK
+#define W_OK 2
+#endif
+#ifndef X_OK
+#define X_OK 1
+#endif
+
+/* fs open() flags supported on this platform: */
+#define UV_FS_O_APPEND _O_APPEND
+#define UV_FS_O_CREAT _O_CREAT
+#define UV_FS_O_EXCL _O_EXCL
+#define UV_FS_O_FILEMAP 0x20000000
+#define UV_FS_O_RANDOM _O_RANDOM
+#define UV_FS_O_RDONLY _O_RDONLY
+#define UV_FS_O_RDWR _O_RDWR
+#define UV_FS_O_SEQUENTIAL _O_SEQUENTIAL
+#define UV_FS_O_SHORT_LIVED _O_SHORT_LIVED
+#define UV_FS_O_TEMPORARY _O_TEMPORARY
+#define UV_FS_O_TRUNC _O_TRUNC
+#define UV_FS_O_WRONLY _O_WRONLY
+
+/* fs open() flags supported on other platforms (or mapped on this platform): */
+#define UV_FS_O_DIRECT 0x02000000 /* FILE_FLAG_NO_BUFFERING */
+#define UV_FS_O_DIRECTORY 0
+#define UV_FS_O_DSYNC 0x04000000 /* FILE_FLAG_WRITE_THROUGH */
+#define UV_FS_O_EXLOCK 0x10000000 /* EXCLUSIVE SHARING MODE */
+#define UV_FS_O_NOATIME 0
+#define UV_FS_O_NOCTTY 0
+#define UV_FS_O_NOFOLLOW 0
+#define UV_FS_O_NONBLOCK 0
+#define UV_FS_O_SYMLINK 0
+#define UV_FS_O_SYNC 0x08000000 /* FILE_FLAG_WRITE_THROUGH */
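As with the Unix header, the uv_buf_t definition near the top of this file is laid out so that a uv_buf_t[] can stand in for a WSABUF[] (see the comment above the typedef). A minimal sketch under that assumption; send_bufs is a hypothetical helper, not part of libuv:

  #include "uv.h"   /* pulls in winsock2.h and the uv_buf_t above */

  /* Hand a uv_buf_t array straight to WSASend(); on Windows, uv_buf_t
   * deliberately mirrors WSABUF (ULONG len; char* base). */
  static int send_bufs(SOCKET s, uv_buf_t* bufs, DWORD nbufs, DWORD* sent) {
    return WSASend(s, (WSABUF*) bufs, nbufs, sent, 0, NULL, NULL);
  }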
diff --git a/Utilities/cmlibuv/src/fs-poll.c b/Utilities/cmlibuv/src/fs-poll.c
new file mode 100644
index 0000000..89864e2
--- /dev/null
+++ b/Utilities/cmlibuv/src/fs-poll.c
@@ -0,0 +1,287 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "uv-common.h"
+
+#ifdef _WIN32
+#include "win/internal.h"
+#include "win/handle-inl.h"
+#define uv__make_close_pending(h) uv_want_endgame((h)->loop, (h))
+#else
+#include "unix/internal.h"
+#endif
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+struct poll_ctx {
+ uv_fs_poll_t* parent_handle;
+ int busy_polling;
+ unsigned int interval;
+ uint64_t start_time;
+ uv_loop_t* loop;
+ uv_fs_poll_cb poll_cb;
+ uv_timer_t timer_handle;
+ uv_fs_t fs_req; /* TODO(bnoordhuis) mark fs_req internal */
+ uv_stat_t statbuf;
+ struct poll_ctx* previous; /* context from previous start()..stop() period */
+ char path[1]; /* variable length */
+};
+
+static int statbuf_eq(const uv_stat_t* a, const uv_stat_t* b);
+static void poll_cb(uv_fs_t* req);
+static void timer_cb(uv_timer_t* timer);
+static void timer_close_cb(uv_handle_t* handle);
+
+static uv_stat_t zero_statbuf;
+
+
+int uv_fs_poll_init(uv_loop_t* loop, uv_fs_poll_t* handle) {
+ uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_POLL);
+ handle->poll_ctx = NULL;
+ return 0;
+}
+
+
+int uv_fs_poll_start(uv_fs_poll_t* handle,
+ uv_fs_poll_cb cb,
+ const char* path,
+ unsigned int interval) {
+ struct poll_ctx* ctx;
+ uv_loop_t* loop;
+ size_t len;
+ int err;
+
+ if (uv_is_active((uv_handle_t*)handle))
+ return 0;
+
+ loop = handle->loop;
+ len = strlen(path);
+ ctx = uv__calloc(1, sizeof(*ctx) + len);
+
+ if (ctx == NULL)
+ return UV_ENOMEM;
+
+ ctx->loop = loop;
+ ctx->poll_cb = cb;
+ ctx->interval = interval ? interval : 1;
+ ctx->start_time = uv_now(loop);
+ ctx->parent_handle = handle;
+ memcpy(ctx->path, path, len + 1);
+
+ err = uv_timer_init(loop, &ctx->timer_handle);
+ if (err < 0)
+ goto error;
+
+ ctx->timer_handle.flags |= UV_HANDLE_INTERNAL;
+ uv__handle_unref(&ctx->timer_handle);
+
+ err = uv_fs_stat(loop, &ctx->fs_req, ctx->path, poll_cb);
+ if (err < 0)
+ goto error;
+
+ if (handle->poll_ctx != NULL)
+ ctx->previous = handle->poll_ctx;
+ handle->poll_ctx = ctx;
+ uv__handle_start(handle);
+
+ return 0;
+
+error:
+ uv__free(ctx);
+ return err;
+}
+
+
+int uv_fs_poll_stop(uv_fs_poll_t* handle) {
+ struct poll_ctx* ctx;
+
+ if (!uv_is_active((uv_handle_t*)handle))
+ return 0;
+
+ ctx = handle->poll_ctx;
+ assert(ctx != NULL);
+ assert(ctx->parent_handle == handle);
+
+ /* Close the timer if it's active. If it's inactive, there's a stat request
+ * in progress and poll_cb will take care of the cleanup.
+ */
+ if (uv_is_active((uv_handle_t*)&ctx->timer_handle))
+ uv_close((uv_handle_t*)&ctx->timer_handle, timer_close_cb);
+
+ uv__handle_stop(handle);
+
+ return 0;
+}
+
+
+int uv_fs_poll_getpath(uv_fs_poll_t* handle, char* buffer, size_t* size) {
+ struct poll_ctx* ctx;
+ size_t required_len;
+
+ if (!uv_is_active((uv_handle_t*)handle)) {
+ *size = 0;
+ return UV_EINVAL;
+ }
+
+ ctx = handle->poll_ctx;
+ assert(ctx != NULL);
+
+ required_len = strlen(ctx->path);
+ if (required_len >= *size) {
+ *size = required_len + 1;
+ return UV_ENOBUFS;
+ }
+
+ memcpy(buffer, ctx->path, required_len);
+ *size = required_len;
+ buffer[required_len] = '\0';
+
+ return 0;
+}
+
+
+void uv__fs_poll_close(uv_fs_poll_t* handle) {
+ uv_fs_poll_stop(handle);
+
+ if (handle->poll_ctx == NULL)
+ uv__make_close_pending((uv_handle_t*)handle);
+}
+
+
+static void timer_cb(uv_timer_t* timer) {
+ struct poll_ctx* ctx;
+
+ ctx = container_of(timer, struct poll_ctx, timer_handle);
+ assert(ctx->parent_handle != NULL);
+ assert(ctx->parent_handle->poll_ctx == ctx);
+ ctx->start_time = uv_now(ctx->loop);
+
+ if (uv_fs_stat(ctx->loop, &ctx->fs_req, ctx->path, poll_cb))
+ abort();
+}
+
+
+static void poll_cb(uv_fs_t* req) {
+ uv_stat_t* statbuf;
+ struct poll_ctx* ctx;
+ uint64_t interval;
+ uv_fs_poll_t* handle;
+
+ ctx = container_of(req, struct poll_ctx, fs_req);
+ handle = ctx->parent_handle;
+
+ if (!uv_is_active((uv_handle_t*)handle) || uv__is_closing(handle))
+ goto out;
+
+ if (req->result != 0) {
+ if (ctx->busy_polling != req->result) {
+ ctx->poll_cb(ctx->parent_handle,
+ req->result,
+ &ctx->statbuf,
+ &zero_statbuf);
+ ctx->busy_polling = req->result;
+ }
+ goto out;
+ }
+
+ statbuf = &req->statbuf;
+
+ if (ctx->busy_polling != 0)
+ if (ctx->busy_polling < 0 || !statbuf_eq(&ctx->statbuf, statbuf))
+ ctx->poll_cb(ctx->parent_handle, 0, &ctx->statbuf, statbuf);
+
+ ctx->statbuf = *statbuf;
+ ctx->busy_polling = 1;
+
+out:
+ uv_fs_req_cleanup(req);
+
+ if (!uv_is_active((uv_handle_t*)handle) || uv__is_closing(handle)) {
+ uv_close((uv_handle_t*)&ctx->timer_handle, timer_close_cb);
+ return;
+ }
+
+  /* Reschedule the timer, subtracting the time already spent in the stat(). */
+ interval = ctx->interval;
+ interval -= (uv_now(ctx->loop) - ctx->start_time) % interval;
+
+ if (uv_timer_start(&ctx->timer_handle, timer_cb, interval, 0))
+ abort();
+}
+
+
+static void timer_close_cb(uv_handle_t* timer) {
+ struct poll_ctx* ctx;
+ struct poll_ctx* it;
+ struct poll_ctx* last;
+ uv_fs_poll_t* handle;
+
+ ctx = container_of(timer, struct poll_ctx, timer_handle);
+ handle = ctx->parent_handle;
+ if (ctx == handle->poll_ctx) {
+ handle->poll_ctx = ctx->previous;
+ if (handle->poll_ctx == NULL && uv__is_closing(handle))
+ uv__make_close_pending((uv_handle_t*)handle);
+ } else {
+ for (last = handle->poll_ctx, it = last->previous;
+ it != ctx;
+ last = it, it = it->previous) {
+ assert(last->previous != NULL);
+ }
+ last->previous = ctx->previous;
+ }
+ uv__free(ctx);
+}
+
+
+static int statbuf_eq(const uv_stat_t* a, const uv_stat_t* b) {
+ return a->st_ctim.tv_nsec == b->st_ctim.tv_nsec
+ && a->st_mtim.tv_nsec == b->st_mtim.tv_nsec
+ && a->st_birthtim.tv_nsec == b->st_birthtim.tv_nsec
+ && a->st_ctim.tv_sec == b->st_ctim.tv_sec
+ && a->st_mtim.tv_sec == b->st_mtim.tv_sec
+ && a->st_birthtim.tv_sec == b->st_birthtim.tv_sec
+ && a->st_size == b->st_size
+ && a->st_mode == b->st_mode
+ && a->st_uid == b->st_uid
+ && a->st_gid == b->st_gid
+ && a->st_ino == b->st_ino
+ && a->st_dev == b->st_dev
+ && a->st_flags == b->st_flags
+ && a->st_gen == b->st_gen;
+}
+
+
+#if defined(_WIN32)
+
+#include "win/internal.h"
+#include "win/handle-inl.h"
+
+void uv__fs_poll_endgame(uv_loop_t* loop, uv_fs_poll_t* handle) {
+ assert(handle->flags & UV_HANDLE_CLOSING);
+ assert(!(handle->flags & UV_HANDLE_CLOSED));
+ uv__handle_close(handle);
+}
+
+#endif /* _WIN32 */
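For orientation, the public uv_fs_poll API implemented above is typically driven as in the following minimal sketch; the watched path and the 2000 ms interval are arbitrary placeholders.

#include <stdio.h>
#include <uv.h>

/* Invoked whenever the periodic stat() of the watched path observes a
 * change, or with a negative status when the stat() itself fails. */
static void on_change(uv_fs_poll_t* handle,
                      int status,
                      const uv_stat_t* prev,
                      const uv_stat_t* curr) {
  if (status < 0)
    fprintf(stderr, "poll error: %s\n", uv_strerror(status));
  else
    printf("size: %llu -> %llu\n",
           (unsigned long long) prev->st_size,
           (unsigned long long) curr->st_size);
}

int main(void) {
  uv_fs_poll_t poller;
  uv_fs_poll_init(uv_default_loop(), &poller);
  uv_fs_poll_start(&poller, on_change, "/tmp/watched.txt", 2000 /* ms */);
  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}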
diff --git a/Utilities/cmlibuv/src/heap-inl.h b/Utilities/cmlibuv/src/heap-inl.h
new file mode 100644
index 0000000..1e2ed60
--- /dev/null
+++ b/Utilities/cmlibuv/src/heap-inl.h
@@ -0,0 +1,245 @@
+/* Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef UV_SRC_HEAP_H_
+#define UV_SRC_HEAP_H_
+
+#include <stddef.h> /* NULL */
+
+#if defined(__GNUC__)
+# define HEAP_EXPORT(declaration) __attribute__((unused)) static declaration
+#else
+# define HEAP_EXPORT(declaration) static declaration
+#endif
+
+struct heap_node {
+ struct heap_node* left;
+ struct heap_node* right;
+ struct heap_node* parent;
+};
+
+/* A binary min heap. The usual properties hold: the root is the lowest
+ * element in the set, the height of the tree is at most log2(nodes) and
+ * it's always a complete binary tree.
+ *
+ * The heap functions try hard to detect corrupted tree nodes at the cost
+ * of a minor reduction in performance. Compile with -DNDEBUG to disable.
+ */
+struct heap {
+ struct heap_node* min;
+ unsigned int nelts;
+};
+
+/* Return non-zero if a < b. */
+typedef int (*heap_compare_fn)(const struct heap_node* a,
+ const struct heap_node* b);
+
+/* Public functions. */
+HEAP_EXPORT(void heap_init(struct heap* heap));
+HEAP_EXPORT(struct heap_node* heap_min(const struct heap* heap));
+HEAP_EXPORT(void heap_insert(struct heap* heap,
+ struct heap_node* newnode,
+ heap_compare_fn less_than));
+HEAP_EXPORT(void heap_remove(struct heap* heap,
+ struct heap_node* node,
+ heap_compare_fn less_than));
+HEAP_EXPORT(void heap_dequeue(struct heap* heap, heap_compare_fn less_than));
+
+/* Implementation follows. */
+
+HEAP_EXPORT(void heap_init(struct heap* heap)) {
+ heap->min = NULL;
+ heap->nelts = 0;
+}
+
+HEAP_EXPORT(struct heap_node* heap_min(const struct heap* heap)) {
+ return heap->min;
+}
+
+/* Swap parent with child. Child moves closer to the root, parent moves away. */
+static void heap_node_swap(struct heap* heap,
+ struct heap_node* parent,
+ struct heap_node* child) {
+ struct heap_node* sibling;
+ struct heap_node t;
+
+ t = *parent;
+ *parent = *child;
+ *child = t;
+
+ parent->parent = child;
+ if (child->left == child) {
+ child->left = parent;
+ sibling = child->right;
+ } else {
+ child->right = parent;
+ sibling = child->left;
+ }
+ if (sibling != NULL)
+ sibling->parent = child;
+
+ if (parent->left != NULL)
+ parent->left->parent = parent;
+ if (parent->right != NULL)
+ parent->right->parent = parent;
+
+ if (child->parent == NULL)
+ heap->min = child;
+ else if (child->parent->left == parent)
+ child->parent->left = child;
+ else
+ child->parent->right = child;
+}
+
+HEAP_EXPORT(void heap_insert(struct heap* heap,
+ struct heap_node* newnode,
+ heap_compare_fn less_than)) {
+ struct heap_node** parent;
+ struct heap_node** child;
+ unsigned int path;
+ unsigned int n;
+ unsigned int k;
+
+ newnode->left = NULL;
+ newnode->right = NULL;
+ newnode->parent = NULL;
+
+ /* Calculate the path from the root to the insertion point. This is a min
+ * heap so we always insert at the left-most free node of the bottom row.
+ */
+ path = 0;
+ for (k = 0, n = 1 + heap->nelts; n >= 2; k += 1, n /= 2)
+ path = (path << 1) | (n & 1);
+
+ /* Now traverse the heap using the path we calculated in the previous step. */
+ parent = child = &heap->min;
+ while (k > 0) {
+ parent = child;
+ if (path & 1)
+ child = &(*child)->right;
+ else
+ child = &(*child)->left;
+ path >>= 1;
+ k -= 1;
+ }
+
+ /* Insert the new node. */
+ newnode->parent = *parent;
+ *child = newnode;
+ heap->nelts += 1;
+
+ /* Walk up the tree and check at each node if the heap property holds.
+ * It's a min heap so parent < child must be true.
+ */
+ while (newnode->parent != NULL && less_than(newnode, newnode->parent))
+ heap_node_swap(heap, newnode->parent, newnode);
+}
+
+HEAP_EXPORT(void heap_remove(struct heap* heap,
+ struct heap_node* node,
+ heap_compare_fn less_than)) {
+ struct heap_node* smallest;
+ struct heap_node** max;
+ struct heap_node* child;
+ unsigned int path;
+ unsigned int k;
+ unsigned int n;
+
+ if (heap->nelts == 0)
+ return;
+
+  /* Calculate the path from the min (the root) to the max, the last occupied
+   * node of the bottom row.
+ */
+ path = 0;
+ for (k = 0, n = heap->nelts; n >= 2; k += 1, n /= 2)
+ path = (path << 1) | (n & 1);
+
+ /* Now traverse the heap using the path we calculated in the previous step. */
+ max = &heap->min;
+ while (k > 0) {
+ if (path & 1)
+ max = &(*max)->right;
+ else
+ max = &(*max)->left;
+ path >>= 1;
+ k -= 1;
+ }
+
+ heap->nelts -= 1;
+
+ /* Unlink the max node. */
+ child = *max;
+ *max = NULL;
+
+ if (child == node) {
+ /* We're removing either the max or the last node in the tree. */
+ if (child == heap->min) {
+ heap->min = NULL;
+ }
+ return;
+ }
+
+  /* Replace the node being deleted with the max node. */
+ child->left = node->left;
+ child->right = node->right;
+ child->parent = node->parent;
+
+ if (child->left != NULL) {
+ child->left->parent = child;
+ }
+
+ if (child->right != NULL) {
+ child->right->parent = child;
+ }
+
+ if (node->parent == NULL) {
+ heap->min = child;
+ } else if (node->parent->left == node) {
+ node->parent->left = child;
+ } else {
+ node->parent->right = child;
+ }
+
+ /* Walk down the subtree and check at each node if the heap property holds.
+ * It's a min heap so parent < child must be true. If the parent is bigger,
+ * swap it with the smallest child.
+ */
+ for (;;) {
+ smallest = child;
+ if (child->left != NULL && less_than(child->left, smallest))
+ smallest = child->left;
+ if (child->right != NULL && less_than(child->right, smallest))
+ smallest = child->right;
+ if (smallest == child)
+ break;
+ heap_node_swap(heap, child, smallest);
+ }
+
+  /* Walk up the subtree and check that each parent is less than the node.
+   * This is required because the `max` node is not guaranteed to be the
+   * actual maximum in the tree.
+   */
+ while (child->parent != NULL && less_than(child, child->parent))
+ heap_node_swap(heap, child->parent, child);
+}
+
+HEAP_EXPORT(void heap_dequeue(struct heap* heap, heap_compare_fn less_than)) {
+ heap_remove(heap, heap->min, less_than);
+}
+
+#undef HEAP_EXPORT
+
+#endif /* UV_SRC_HEAP_H_ */
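The heap is intrusive: callers embed a struct heap_node inside their own type and pass a comparator that returns non-zero when a < b. A standalone sketch of that pattern follows (struct and field names are illustrative; this mirrors how timer.c further down uses the heap).

#include <stddef.h>
#include <stdio.h>
#include "heap-inl.h"   /* internal header, not installed with libuv */

struct item {
  int value;
  struct heap_node node;   /* intrusive node embedded in the element */
};

/* Comparator: non-zero when a < b, as required by heap_insert(). */
static int item_less(const struct heap_node* a, const struct heap_node* b) {
  const struct item* ia =
      (const struct item*) ((const char*) a - offsetof(struct item, node));
  const struct item* ib =
      (const struct item*) ((const char*) b - offsetof(struct item, node));
  return ia->value < ib->value;
}

int main(void) {
  struct heap h;
  struct item items[3] = { {42}, {7}, {19} };
  int i;

  heap_init(&h);
  for (i = 0; i < 3; i++)
    heap_insert(&h, &items[i].node, item_less);

  /* heap_min() is the smallest element; heap_dequeue() pops it. */
  while (heap_min(&h) != NULL) {
    struct heap_node* n = heap_min(&h);
    struct item* it = (struct item*) ((char*) n - offsetof(struct item, node));
    printf("%d\n", it->value);   /* prints 7, 19, 42 */
    heap_dequeue(&h, item_less);
  }
  return 0;
}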
diff --git a/Utilities/cmlibuv/src/idna.c b/Utilities/cmlibuv/src/idna.c
new file mode 100644
index 0000000..13ffac6
--- /dev/null
+++ b/Utilities/cmlibuv/src/idna.c
@@ -0,0 +1,291 @@
+/* Copyright (c) 2011, 2018 Ben Noordhuis <info@bnoordhuis.nl>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* Derived from https://github.com/bnoordhuis/punycode
+ * but updated to support IDNA 2008.
+ */
+
+#include "uv.h"
+#include "idna.h"
+#include <string.h>
+
+static unsigned uv__utf8_decode1_slow(const char** p,
+ const char* pe,
+ unsigned a) {
+ unsigned b;
+ unsigned c;
+ unsigned d;
+ unsigned min;
+
+ if (a > 0xF7)
+ return -1;
+
+ switch (*p - pe) {
+ default:
+ if (a > 0xEF) {
+ min = 0x10000;
+ a = a & 7;
+ b = (unsigned char) *(*p)++;
+ c = (unsigned char) *(*p)++;
+ d = (unsigned char) *(*p)++;
+ break;
+ }
+ /* Fall through. */
+ case 2:
+ if (a > 0xDF) {
+ min = 0x800;
+ b = 0x80 | (a & 15);
+ c = (unsigned char) *(*p)++;
+ d = (unsigned char) *(*p)++;
+ a = 0;
+ break;
+ }
+ /* Fall through. */
+ case 1:
+ if (a > 0xBF) {
+ min = 0x80;
+ b = 0x80;
+ c = 0x80 | (a & 31);
+ d = (unsigned char) *(*p)++;
+ a = 0;
+ break;
+ }
+ return -1; /* Invalid continuation byte. */
+ }
+
+ if (0x80 != (0xC0 & (b ^ c ^ d)))
+ return -1; /* Invalid sequence. */
+
+ b &= 63;
+ c &= 63;
+ d &= 63;
+ a = (a << 18) | (b << 12) | (c << 6) | d;
+
+ if (a < min)
+ return -1; /* Overlong sequence. */
+
+ if (a > 0x10FFFF)
+ return -1; /* Four-byte sequence > U+10FFFF. */
+
+ if (a >= 0xD800 && a <= 0xDFFF)
+ return -1; /* Surrogate pair. */
+
+ return a;
+}
+
+unsigned uv__utf8_decode1(const char** p, const char* pe) {
+ unsigned a;
+
+ a = (unsigned char) *(*p)++;
+
+ if (a < 128)
+ return a; /* ASCII, common case. */
+
+ return uv__utf8_decode1_slow(p, pe, a);
+}
+
+#define foreach_codepoint(c, p, pe) \
+ for (; (void) (*p <= pe && (c = uv__utf8_decode1(p, pe))), *p <= pe;)
+
+static int uv__idna_toascii_label(const char* s, const char* se,
+ char** d, char* de) {
+ static const char alphabet[] = "abcdefghijklmnopqrstuvwxyz0123456789";
+ const char* ss;
+ unsigned c;
+ unsigned h;
+ unsigned k;
+ unsigned n;
+ unsigned m;
+ unsigned q;
+ unsigned t;
+ unsigned x;
+ unsigned y;
+ unsigned bias;
+ unsigned delta;
+ unsigned todo;
+ int first;
+
+ h = 0;
+ ss = s;
+ todo = 0;
+
+ foreach_codepoint(c, &s, se) {
+ if (c < 128)
+ h++;
+ else if (c == (unsigned) -1)
+ return UV_EINVAL;
+ else
+ todo++;
+ }
+
+ if (todo > 0) {
+ if (*d < de) *(*d)++ = 'x';
+ if (*d < de) *(*d)++ = 'n';
+ if (*d < de) *(*d)++ = '-';
+ if (*d < de) *(*d)++ = '-';
+ }
+
+ x = 0;
+ s = ss;
+ foreach_codepoint(c, &s, se) {
+ if (c > 127)
+ continue;
+
+ if (*d < de)
+ *(*d)++ = c;
+
+ if (++x == h)
+ break; /* Visited all ASCII characters. */
+ }
+
+ if (todo == 0)
+ return h;
+
+ /* Only write separator when we've written ASCII characters first. */
+ if (h > 0)
+ if (*d < de)
+ *(*d)++ = '-';
+
+ n = 128;
+ bias = 72;
+ delta = 0;
+ first = 1;
+
+ while (todo > 0) {
+ m = -1;
+ s = ss;
+ foreach_codepoint(c, &s, se)
+ if (c >= n)
+ if (c < m)
+ m = c;
+
+ x = m - n;
+ y = h + 1;
+
+ if (x > ~delta / y)
+ return UV_E2BIG; /* Overflow. */
+
+ delta += x * y;
+ n = m;
+
+ s = ss;
+ foreach_codepoint(c, &s, se) {
+ if (c < n)
+ if (++delta == 0)
+ return UV_E2BIG; /* Overflow. */
+
+ if (c != n)
+ continue;
+
+ for (k = 36, q = delta; /* empty */; k += 36) {
+ t = 1;
+
+ if (k > bias)
+ t = k - bias;
+
+ if (t > 26)
+ t = 26;
+
+ if (q < t)
+ break;
+
+ /* TODO(bnoordhuis) Since 1 <= t <= 26 and therefore
+ * 10 <= y <= 35, we can optimize the long division
+ * into a table-based reciprocal multiplication.
+ */
+ x = q - t;
+ y = 36 - t; /* 10 <= y <= 35 since 1 <= t <= 26. */
+ q = x / y;
+ t = t + x % y; /* 1 <= t <= 35 because of y. */
+
+ if (*d < de)
+ *(*d)++ = alphabet[t];
+ }
+
+ if (*d < de)
+ *(*d)++ = alphabet[q];
+
+ delta /= 2;
+
+ if (first) {
+ delta /= 350;
+ first = 0;
+ }
+
+ /* No overflow check is needed because |delta| was just
+ * divided by 2 and |delta+delta >= delta + delta/h|.
+ */
+ h++;
+ delta += delta / h;
+
+ for (bias = 0; delta > 35 * 26 / 2; bias += 36)
+ delta /= 35;
+
+ bias += 36 * delta / (delta + 38);
+ delta = 0;
+ todo--;
+ }
+
+ delta++;
+ n++;
+ }
+
+ return 0;
+}
+
+#undef foreach_codepoint
+
+long uv__idna_toascii(const char* s, const char* se, char* d, char* de) {
+ const char* si;
+ const char* st;
+ unsigned c;
+ char* ds;
+ int rc;
+
+ ds = d;
+
+ for (si = s; si < se; /* empty */) {
+ st = si;
+ c = uv__utf8_decode1(&si, se);
+
+ if (c != '.')
+ if (c != 0x3002) /* 。 */
+ if (c != 0xFF0E) /* . */
+ if (c != 0xFF61) /* 。 */
+ continue;
+
+ rc = uv__idna_toascii_label(s, st, &d, de);
+
+ if (rc < 0)
+ return rc;
+
+ if (d < de)
+ *d++ = '.';
+
+ s = si;
+ }
+
+ if (s < se) {
+ rc = uv__idna_toascii_label(s, se, &d, de);
+
+ if (rc < 0)
+ return rc;
+ }
+
+ if (d < de)
+ *d++ = '\0';
+
+ return d - ds; /* Number of bytes written. */
+}
diff --git a/Utilities/cmlibuv/src/idna.h b/Utilities/cmlibuv/src/idna.h
new file mode 100644
index 0000000..8e0c592
--- /dev/null
+++ b/Utilities/cmlibuv/src/idna.h
@@ -0,0 +1,31 @@
+/* Copyright (c) 2011, 2018 Ben Noordhuis <info@bnoordhuis.nl>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef UV_SRC_IDNA_H_
+#define UV_SRC_IDNA_H_
+
+/* Decode a single codepoint. Returns the codepoint or UINT32_MAX on error.
+ * |p| is updated on success _and_ error, i.e., bad multi-byte sequences are
+ * skipped in their entirety, not just the first bad byte.
+ */
+unsigned uv__utf8_decode1(const char** p, const char* pe);
+
+/* Convert a UTF-8 domain name to IDNA 2008 / Punycode. A return value >= 0
+ * is the number of bytes written to |d|, including the trailing nul byte.
+ * A return value < 0 is a libuv error code. |s| and |d| must not overlap.
+ */
+long uv__idna_toascii(const char* s, const char* se, char* d, char* de);
+
+#endif /* UV_SRC_IDNA_H_ */
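uv__idna_toascii() is an internal helper rather than public API, but a small sketch clarifies its ranged |s..se| input and |d..de| output convention; the domain name and buffer size below are arbitrary.

#include <stdio.h>
#include <string.h>
#include "idna.h"   /* internal header */

int main(void) {
  /* "bücher.example" in UTF-8; the literal is split so the hex escape
   * does not swallow the following characters. */
  const char* name = "b\xc3\xbc" "cher.example";
  char out[256];
  long n;

  n = uv__idna_toascii(name, name + strlen(name), out, out + sizeof(out));
  if (n < 0) {
    fprintf(stderr, "idna error %ld\n", n);
    return 1;
  }
  /* Should print the Punycode form, xn--bcher-kva.example; n counts the
   * bytes written including the trailing nul. */
  printf("%s (%ld bytes)\n", out, n);
  return 0;
}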
diff --git a/Utilities/cmlibuv/src/inet.c b/Utilities/cmlibuv/src/inet.c
new file mode 100644
index 0000000..58238dc
--- /dev/null
+++ b/Utilities/cmlibuv/src/inet.c
@@ -0,0 +1,302 @@
+/*
+ * Copyright (c) 2004 by Internet Systems Consortium, Inc. ("ISC")
+ * Copyright (c) 1996-1999 by Internet Software Consortium.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "uv.h"
+#include "uv-common.h"
+
+#include <stdio.h>
+#include <string.h>
+
+#if defined(_MSC_VER) && _MSC_VER < 1600
+# include "uv/stdint-msvc2008.h"
+#else
+# include <stdint.h>
+#endif
+
+#define UV__INET_ADDRSTRLEN 16
+#define UV__INET6_ADDRSTRLEN 46
+
+
+static int inet_ntop4(const unsigned char *src, char *dst, size_t size);
+static int inet_ntop6(const unsigned char *src, char *dst, size_t size);
+static int inet_pton4(const char *src, unsigned char *dst);
+static int inet_pton6(const char *src, unsigned char *dst);
+
+
+int uv_inet_ntop(int af, const void* src, char* dst, size_t size) {
+ switch (af) {
+ case AF_INET:
+ return (inet_ntop4(src, dst, size));
+ case AF_INET6:
+ return (inet_ntop6(src, dst, size));
+ default:
+ return UV_EAFNOSUPPORT;
+ }
+ /* NOTREACHED */
+}
+
+
+static int inet_ntop4(const unsigned char *src, char *dst, size_t size) {
+ static const char fmt[] = "%u.%u.%u.%u";
+ char tmp[UV__INET_ADDRSTRLEN];
+ int l;
+
+ l = snprintf(tmp, sizeof(tmp), fmt, src[0], src[1], src[2], src[3]);
+ if (l <= 0 || (size_t) l >= size) {
+ return UV_ENOSPC;
+ }
+ uv__strscpy(dst, tmp, size);
+ return 0;
+}
+
+
+static int inet_ntop6(const unsigned char *src, char *dst, size_t size) {
+ /*
+ * Note that int32_t and int16_t need only be "at least" large enough
+ * to contain a value of the specified size. On some systems, like
+ * Crays, there is no such thing as an integer variable with 16 bits.
+ * Keep this in mind if you think this function should have been coded
+ * to use pointer overlays. All the world's not a VAX.
+ */
+ char tmp[UV__INET6_ADDRSTRLEN], *tp;
+ struct { int base, len; } best, cur;
+ unsigned int words[sizeof(struct in6_addr) / sizeof(uint16_t)];
+ int i;
+
+ /*
+ * Preprocess:
+ * Copy the input (bytewise) array into a wordwise array.
+ * Find the longest run of 0x00's in src[] for :: shorthanding.
+ */
+ memset(words, '\0', sizeof words);
+ for (i = 0; i < (int) sizeof(struct in6_addr); i++)
+ words[i / 2] |= (src[i] << ((1 - (i % 2)) << 3));
+ best.base = -1;
+ best.len = 0;
+ cur.base = -1;
+ cur.len = 0;
+ for (i = 0; i < (int) ARRAY_SIZE(words); i++) {
+ if (words[i] == 0) {
+ if (cur.base == -1)
+ cur.base = i, cur.len = 1;
+ else
+ cur.len++;
+ } else {
+ if (cur.base != -1) {
+ if (best.base == -1 || cur.len > best.len)
+ best = cur;
+ cur.base = -1;
+ }
+ }
+ }
+ if (cur.base != -1) {
+ if (best.base == -1 || cur.len > best.len)
+ best = cur;
+ }
+ if (best.base != -1 && best.len < 2)
+ best.base = -1;
+
+ /*
+ * Format the result.
+ */
+ tp = tmp;
+ for (i = 0; i < (int) ARRAY_SIZE(words); i++) {
+ /* Are we inside the best run of 0x00's? */
+ if (best.base != -1 && i >= best.base &&
+ i < (best.base + best.len)) {
+ if (i == best.base)
+ *tp++ = ':';
+ continue;
+ }
+ /* Are we following an initial run of 0x00s or any real hex? */
+ if (i != 0)
+ *tp++ = ':';
+ /* Is this address an encapsulated IPv4? */
+ if (i == 6 && best.base == 0 && (best.len == 6 ||
+ (best.len == 7 && words[7] != 0x0001) ||
+ (best.len == 5 && words[5] == 0xffff))) {
+ int err = inet_ntop4(src+12, tp, sizeof tmp - (tp - tmp));
+ if (err)
+ return err;
+ tp += strlen(tp);
+ break;
+ }
+ tp += sprintf(tp, "%x", words[i]);
+ }
+ /* Was it a trailing run of 0x00's? */
+ if (best.base != -1 && (best.base + best.len) == ARRAY_SIZE(words))
+ *tp++ = ':';
+ *tp++ = '\0';
+ if (UV_E2BIG == uv__strscpy(dst, tmp, size))
+ return UV_ENOSPC;
+ return 0;
+}
+
+
+int uv_inet_pton(int af, const char* src, void* dst) {
+ if (src == NULL || dst == NULL)
+ return UV_EINVAL;
+
+ switch (af) {
+ case AF_INET:
+ return (inet_pton4(src, dst));
+ case AF_INET6: {
+ int len;
+ char tmp[UV__INET6_ADDRSTRLEN], *s, *p;
+ s = (char*) src;
+ p = strchr(src, '%');
+ if (p != NULL) {
+ s = tmp;
+ len = p - src;
+ if (len > UV__INET6_ADDRSTRLEN-1)
+ return UV_EINVAL;
+ memcpy(s, src, len);
+ s[len] = '\0';
+ }
+ return inet_pton6(s, dst);
+ }
+ default:
+ return UV_EAFNOSUPPORT;
+ }
+ /* NOTREACHED */
+}
+
+
+static int inet_pton4(const char *src, unsigned char *dst) {
+ static const char digits[] = "0123456789";
+ int saw_digit, octets, ch;
+ unsigned char tmp[sizeof(struct in_addr)], *tp;
+
+ saw_digit = 0;
+ octets = 0;
+ *(tp = tmp) = 0;
+ while ((ch = *src++) != '\0') {
+ const char *pch;
+
+ if ((pch = strchr(digits, ch)) != NULL) {
+ unsigned int nw = *tp * 10 + (pch - digits);
+
+ if (saw_digit && *tp == 0)
+ return UV_EINVAL;
+ if (nw > 255)
+ return UV_EINVAL;
+ *tp = nw;
+ if (!saw_digit) {
+ if (++octets > 4)
+ return UV_EINVAL;
+ saw_digit = 1;
+ }
+ } else if (ch == '.' && saw_digit) {
+ if (octets == 4)
+ return UV_EINVAL;
+ *++tp = 0;
+ saw_digit = 0;
+ } else
+ return UV_EINVAL;
+ }
+ if (octets < 4)
+ return UV_EINVAL;
+ memcpy(dst, tmp, sizeof(struct in_addr));
+ return 0;
+}
+
+
+static int inet_pton6(const char *src, unsigned char *dst) {
+ static const char xdigits_l[] = "0123456789abcdef",
+ xdigits_u[] = "0123456789ABCDEF";
+ unsigned char tmp[sizeof(struct in6_addr)], *tp, *endp, *colonp;
+ const char *xdigits, *curtok;
+ int ch, seen_xdigits;
+ unsigned int val;
+
+ memset((tp = tmp), '\0', sizeof tmp);
+ endp = tp + sizeof tmp;
+ colonp = NULL;
+ /* Leading :: requires some special handling. */
+ if (*src == ':')
+ if (*++src != ':')
+ return UV_EINVAL;
+ curtok = src;
+ seen_xdigits = 0;
+ val = 0;
+ while ((ch = *src++) != '\0') {
+ const char *pch;
+
+ if ((pch = strchr((xdigits = xdigits_l), ch)) == NULL)
+ pch = strchr((xdigits = xdigits_u), ch);
+ if (pch != NULL) {
+ val <<= 4;
+ val |= (pch - xdigits);
+ if (++seen_xdigits > 4)
+ return UV_EINVAL;
+ continue;
+ }
+ if (ch == ':') {
+ curtok = src;
+ if (!seen_xdigits) {
+ if (colonp)
+ return UV_EINVAL;
+ colonp = tp;
+ continue;
+ } else if (*src == '\0') {
+ return UV_EINVAL;
+ }
+ if (tp + sizeof(uint16_t) > endp)
+ return UV_EINVAL;
+ *tp++ = (unsigned char) (val >> 8) & 0xff;
+ *tp++ = (unsigned char) val & 0xff;
+ seen_xdigits = 0;
+ val = 0;
+ continue;
+ }
+ if (ch == '.' && ((tp + sizeof(struct in_addr)) <= endp)) {
+ int err = inet_pton4(curtok, tp);
+ if (err == 0) {
+ tp += sizeof(struct in_addr);
+ seen_xdigits = 0;
+        break; /* '\0' was seen by inet_pton4(). */
+ }
+ }
+ return UV_EINVAL;
+ }
+ if (seen_xdigits) {
+ if (tp + sizeof(uint16_t) > endp)
+ return UV_EINVAL;
+ *tp++ = (unsigned char) (val >> 8) & 0xff;
+ *tp++ = (unsigned char) val & 0xff;
+ }
+ if (colonp != NULL) {
+ /*
+ * Since some memmove()'s erroneously fail to handle
+ * overlapping regions, we'll do the shift by hand.
+ */
+ const int n = tp - colonp;
+ int i;
+
+ if (tp == endp)
+ return UV_EINVAL;
+ for (i = 1; i <= n; i++) {
+ endp[- i] = colonp[n - i];
+ colonp[n - i] = 0;
+ }
+ tp = endp;
+ }
+ if (tp != endp)
+ return UV_EINVAL;
+ memcpy(dst, tmp, sizeof tmp);
+ return 0;
+}
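A round trip through the two public entry points, with the text buffer sized to match UV__INET6_ADDRSTRLEN above; the address is an arbitrary documentation-range value.

#include <stdio.h>
#include <netinet/in.h>   /* struct in6_addr; uv.h also pulls this in on Unix */
#include <uv.h>

int main(void) {
  struct in6_addr addr6;
  char text[46];
  int rc;

  rc = uv_inet_pton(AF_INET6, "2001:db8::ff00:42:8329", &addr6);
  if (rc != 0)
    return 1;

  rc = uv_inet_ntop(AF_INET6, &addr6, text, sizeof(text));
  if (rc != 0)
    return 1;

  printf("%s\n", text);   /* prints the canonical, ::-compressed form */
  return 0;
}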
diff --git a/Utilities/cmlibuv/src/queue.h b/Utilities/cmlibuv/src/queue.h
new file mode 100644
index 0000000..ff3540a
--- /dev/null
+++ b/Utilities/cmlibuv/src/queue.h
@@ -0,0 +1,108 @@
+/* Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef QUEUE_H_
+#define QUEUE_H_
+
+#include <stddef.h>
+
+typedef void *QUEUE[2];
+
+/* Private macros. */
+#define QUEUE_NEXT(q) (*(QUEUE **) &((*(q))[0]))
+#define QUEUE_PREV(q) (*(QUEUE **) &((*(q))[1]))
+#define QUEUE_PREV_NEXT(q) (QUEUE_NEXT(QUEUE_PREV(q)))
+#define QUEUE_NEXT_PREV(q) (QUEUE_PREV(QUEUE_NEXT(q)))
+
+/* Public macros. */
+#define QUEUE_DATA(ptr, type, field) \
+ ((type *) ((char *) (ptr) - offsetof(type, field)))
+
+/* Important note: mutating the list while QUEUE_FOREACH is
+ * iterating over its elements results in undefined behavior.
+ */
+#define QUEUE_FOREACH(q, h) \
+ for ((q) = QUEUE_NEXT(h); (q) != (h); (q) = QUEUE_NEXT(q))
+
+#define QUEUE_EMPTY(q) \
+ ((const QUEUE *) (q) == (const QUEUE *) QUEUE_NEXT(q))
+
+#define QUEUE_HEAD(q) \
+ (QUEUE_NEXT(q))
+
+#define QUEUE_INIT(q) \
+ do { \
+ QUEUE_NEXT(q) = (q); \
+ QUEUE_PREV(q) = (q); \
+ } \
+ while (0)
+
+#define QUEUE_ADD(h, n) \
+ do { \
+ QUEUE_PREV_NEXT(h) = QUEUE_NEXT(n); \
+ QUEUE_NEXT_PREV(n) = QUEUE_PREV(h); \
+ QUEUE_PREV(h) = QUEUE_PREV(n); \
+ QUEUE_PREV_NEXT(h) = (h); \
+ } \
+ while (0)
+
+#define QUEUE_SPLIT(h, q, n) \
+ do { \
+ QUEUE_PREV(n) = QUEUE_PREV(h); \
+ QUEUE_PREV_NEXT(n) = (n); \
+ QUEUE_NEXT(n) = (q); \
+ QUEUE_PREV(h) = QUEUE_PREV(q); \
+ QUEUE_PREV_NEXT(h) = (h); \
+ QUEUE_PREV(q) = (n); \
+ } \
+ while (0)
+
+#define QUEUE_MOVE(h, n) \
+ do { \
+ if (QUEUE_EMPTY(h)) \
+ QUEUE_INIT(n); \
+ else { \
+ QUEUE* q = QUEUE_HEAD(h); \
+ QUEUE_SPLIT(h, q, n); \
+ } \
+ } \
+ while (0)
+
+#define QUEUE_INSERT_HEAD(h, q) \
+ do { \
+ QUEUE_NEXT(q) = QUEUE_NEXT(h); \
+ QUEUE_PREV(q) = (h); \
+ QUEUE_NEXT_PREV(q) = (q); \
+ QUEUE_NEXT(h) = (q); \
+ } \
+ while (0)
+
+#define QUEUE_INSERT_TAIL(h, q) \
+ do { \
+ QUEUE_NEXT(q) = (h); \
+ QUEUE_PREV(q) = QUEUE_PREV(h); \
+ QUEUE_PREV_NEXT(q) = (q); \
+ QUEUE_PREV(h) = (q); \
+ } \
+ while (0)
+
+#define QUEUE_REMOVE(q) \
+ do { \
+ QUEUE_PREV_NEXT(q) = QUEUE_NEXT(q); \
+ QUEUE_NEXT_PREV(q) = QUEUE_PREV(q); \
+ } \
+ while (0)
+
+#endif /* QUEUE_H_ */
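QUEUE is an intrusive circular doubly-linked list: elements embed a QUEUE field and QUEUE_DATA() recovers the containing struct via the usual offsetof trick. A small sketch with illustrative names:

#include <stdio.h>
#include "queue.h"   /* internal header */

struct job {
  int id;
  QUEUE member;   /* intrusive link embedded in the element */
};

int main(void) {
  QUEUE head;
  QUEUE* q;
  struct job jobs[3] = { {1}, {2}, {3} };
  int i;

  QUEUE_INIT(&head);
  for (i = 0; i < 3; i++)
    QUEUE_INSERT_TAIL(&head, &jobs[i].member);

  /* QUEUE_DATA() maps the embedded link back to the containing struct. */
  QUEUE_FOREACH(q, &head) {
    struct job* j = QUEUE_DATA(q, struct job, member);
    printf("job %d\n", j->id);   /* prints 1, 2, 3 */
  }
  return 0;
}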
diff --git a/Utilities/cmlibuv/src/random.c b/Utilities/cmlibuv/src/random.c
new file mode 100644
index 0000000..e75f77d
--- /dev/null
+++ b/Utilities/cmlibuv/src/random.c
@@ -0,0 +1,123 @@
+/* Copyright libuv contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "uv-common.h"
+
+#ifdef _WIN32
+# include "win/internal.h"
+#else
+# include "unix/internal.h"
+#endif
+
+static int uv__random(void* buf, size_t buflen) {
+ int rc;
+
+#if defined(__PASE__)
+ rc = uv__random_readpath("/dev/urandom", buf, buflen);
+#elif defined(_AIX) || defined(__QNX__)
+ rc = uv__random_readpath("/dev/random", buf, buflen);
+#elif defined(__APPLE__) || defined(__OpenBSD__) || \
+ (defined(__ANDROID_API__) && __ANDROID_API__ >= 28)
+ rc = uv__random_getentropy(buf, buflen);
+ if (rc == UV_ENOSYS)
+ rc = uv__random_devurandom(buf, buflen);
+#elif defined(__NetBSD__)
+ rc = uv__random_sysctl(buf, buflen);
+#elif defined(__FreeBSD__) || defined(__linux__)
+ rc = uv__random_getrandom(buf, buflen);
+ if (rc == UV_ENOSYS)
+ rc = uv__random_devurandom(buf, buflen);
+# if defined(__linux__)
+ switch (rc) {
+ case UV_EACCES:
+ case UV_EIO:
+ case UV_ELOOP:
+ case UV_EMFILE:
+ case UV_ENFILE:
+ case UV_ENOENT:
+ case UV_EPERM:
+ rc = uv__random_sysctl(buf, buflen);
+ break;
+ }
+# endif
+#elif defined(_WIN32)
+ uv__once_init();
+ rc = uv__random_rtlgenrandom(buf, buflen);
+#else
+ rc = uv__random_devurandom(buf, buflen);
+#endif
+
+ return rc;
+}
+
+
+static void uv__random_work(struct uv__work* w) {
+ uv_random_t* req;
+
+ req = container_of(w, uv_random_t, work_req);
+ req->status = uv__random(req->buf, req->buflen);
+}
+
+
+static void uv__random_done(struct uv__work* w, int status) {
+ uv_random_t* req;
+
+ req = container_of(w, uv_random_t, work_req);
+ uv__req_unregister(req->loop, req);
+
+ if (status == 0)
+ status = req->status;
+
+ req->cb(req, status, req->buf, req->buflen);
+}
+
+
+int uv_random(uv_loop_t* loop,
+ uv_random_t* req,
+ void *buf,
+ size_t buflen,
+ unsigned flags,
+ uv_random_cb cb) {
+ if (buflen > 0x7FFFFFFFu)
+ return UV_E2BIG;
+
+ if (flags != 0)
+ return UV_EINVAL;
+
+ if (cb == NULL)
+ return uv__random(buf, buflen);
+
+ uv__req_init(loop, req, UV_RANDOM);
+ req->loop = loop;
+ req->status = 0;
+ req->cb = cb;
+ req->buf = buf;
+ req->buflen = buflen;
+
+ uv__work_submit(loop,
+ &req->work_req,
+ UV__WORK_CPU,
+ uv__random_work,
+ uv__random_done);
+
+ return 0;
+}
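When cb is NULL the request is serviced synchronously on the calling thread, and as the uv_random() code above shows, loop and req are not touched in that path. A minimal synchronous sketch:

#include <stdio.h>
#include <uv.h>

int main(void) {
  unsigned char key[32];
  int rc;

  /* Synchronous variant: cb == NULL, so loop and req may be NULL here. */
  rc = uv_random(NULL, NULL, key, sizeof(key), 0, NULL);
  if (rc != 0) {
    fprintf(stderr, "uv_random: %s\n", uv_strerror(rc));
    return 1;
  }
  printf("first byte: %02x\n", key[0]);
  return 0;
}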
diff --git a/Utilities/cmlibuv/src/strscpy.c b/Utilities/cmlibuv/src/strscpy.c
new file mode 100644
index 0000000..20df6fc
--- /dev/null
+++ b/Utilities/cmlibuv/src/strscpy.c
@@ -0,0 +1,38 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "strscpy.h"
+#include <limits.h> /* SSIZE_MAX */
+
+ssize_t uv__strscpy(char* d, const char* s, size_t n) {
+ size_t i;
+
+ for (i = 0; i < n; i++)
+ if ('\0' == (d[i] = s[i]))
+ return i > SSIZE_MAX ? UV_E2BIG : (ssize_t) i;
+
+ if (i == 0)
+ return 0;
+
+ d[--i] = '\0';
+
+ return UV_E2BIG;
+}
diff --git a/Utilities/cmlibuv/src/strscpy.h b/Utilities/cmlibuv/src/strscpy.h
new file mode 100644
index 0000000..cc78149
--- /dev/null
+++ b/Utilities/cmlibuv/src/strscpy.h
@@ -0,0 +1,39 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_STRSCPY_H_
+#define UV_STRSCPY_H_
+
+/* Include uv.h for its definitions of size_t and ssize_t.
+ * size_t can be obtained directly from <stddef.h> but ssize_t requires
+ * some hoop jumping on Windows that I didn't want to duplicate here.
+ */
+#include "uv.h"
+
+/* Copies up to |n-1| bytes from |s| to |d| and always zero-terminates
+ * the result, except when |n==0|. Returns the number of bytes copied
+ * or UV_E2BIG if |d| is too small.
+ *
+ * See https://www.kernel.org/doc/htmldocs/kernel-api/API-strscpy.html
+ */
+ssize_t uv__strscpy(char* d, const char* s, size_t n);
+
+#endif /* UV_STRSCPY_H_ */
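A short sketch of the truncation contract, with the destination deliberately sized too small:

#include <stdio.h>
#include "strscpy.h"   /* internal header */

int main(void) {
  char small[4];
  ssize_t n;

  n = uv__strscpy(small, "hello", sizeof(small));
  /* Destination too small: the result is "hel", always nul-terminated,
   * and the return value is UV_E2BIG. */
  printf("%s%s\n", small, n == UV_E2BIG ? " (truncated)" : "");
  return 0;
}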
diff --git a/Utilities/cmlibuv/src/threadpool.c b/Utilities/cmlibuv/src/threadpool.c
new file mode 100644
index 0000000..0998938
--- /dev/null
+++ b/Utilities/cmlibuv/src/threadpool.c
@@ -0,0 +1,388 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv-common.h"
+
+#if !defined(_WIN32)
+# include "unix/internal.h"
+#endif
+
+#include <stdlib.h>
+
+#define MAX_THREADPOOL_SIZE 1024
+
+static uv_once_t once = UV_ONCE_INIT;
+static uv_cond_t cond;
+static uv_mutex_t mutex;
+static unsigned int idle_threads;
+static unsigned int slow_io_work_running;
+static unsigned int nthreads;
+static uv_thread_t* threads;
+static uv_thread_t default_threads[4];
+static QUEUE exit_message;
+static QUEUE wq;
+static QUEUE run_slow_work_message;
+static QUEUE slow_io_pending_wq;
+
+static unsigned int slow_work_thread_threshold(void) {
+ return (nthreads + 1) / 2;
+}
+
+static void uv__cancelled(struct uv__work* w) {
+ abort();
+}
+
+
+/* To avoid deadlock with uv_cancel() it's crucial that the worker
+ * never holds the global mutex and the loop-local mutex at the same time.
+ */
+static void worker(void* arg) {
+ struct uv__work* w;
+ QUEUE* q;
+ int is_slow_work;
+
+ uv_sem_post((uv_sem_t*) arg);
+ arg = NULL;
+
+ uv_mutex_lock(&mutex);
+ for (;;) {
+ /* `mutex` should always be locked at this point. */
+
+    /* Keep waiting while either no work is present or only slow I/O work is
+       pending and we're already at the concurrency threshold for it. */
+ while (QUEUE_EMPTY(&wq) ||
+ (QUEUE_HEAD(&wq) == &run_slow_work_message &&
+ QUEUE_NEXT(&run_slow_work_message) == &wq &&
+ slow_io_work_running >= slow_work_thread_threshold())) {
+ idle_threads += 1;
+ uv_cond_wait(&cond, &mutex);
+ idle_threads -= 1;
+ }
+
+ q = QUEUE_HEAD(&wq);
+ if (q == &exit_message) {
+ uv_cond_signal(&cond);
+ uv_mutex_unlock(&mutex);
+ break;
+ }
+
+ QUEUE_REMOVE(q);
+ QUEUE_INIT(q); /* Signal uv_cancel() that the work req is executing. */
+
+ is_slow_work = 0;
+ if (q == &run_slow_work_message) {
+ /* If we're at the slow I/O threshold, re-schedule until after all
+ other work in the queue is done. */
+ if (slow_io_work_running >= slow_work_thread_threshold()) {
+ QUEUE_INSERT_TAIL(&wq, q);
+ continue;
+ }
+
+ /* If we encountered a request to run slow I/O work but there is none
+ to run, that means it's cancelled => Start over. */
+ if (QUEUE_EMPTY(&slow_io_pending_wq))
+ continue;
+
+ is_slow_work = 1;
+ slow_io_work_running++;
+
+ q = QUEUE_HEAD(&slow_io_pending_wq);
+ QUEUE_REMOVE(q);
+ QUEUE_INIT(q);
+
+ /* If there is more slow I/O work, schedule it to be run as well. */
+ if (!QUEUE_EMPTY(&slow_io_pending_wq)) {
+ QUEUE_INSERT_TAIL(&wq, &run_slow_work_message);
+ if (idle_threads > 0)
+ uv_cond_signal(&cond);
+ }
+ }
+
+ uv_mutex_unlock(&mutex);
+
+ w = QUEUE_DATA(q, struct uv__work, wq);
+ w->work(w);
+
+ uv_mutex_lock(&w->loop->wq_mutex);
+ w->work = NULL; /* Signal uv_cancel() that the work req is done
+ executing. */
+ QUEUE_INSERT_TAIL(&w->loop->wq, &w->wq);
+ uv_async_send(&w->loop->wq_async);
+ uv_mutex_unlock(&w->loop->wq_mutex);
+
+ /* Lock `mutex` since that is expected at the start of the next
+ * iteration. */
+ uv_mutex_lock(&mutex);
+ if (is_slow_work) {
+ /* `slow_io_work_running` is protected by `mutex`. */
+ slow_io_work_running--;
+ }
+ }
+}
+
+
+static void post(QUEUE* q, enum uv__work_kind kind) {
+ uv_mutex_lock(&mutex);
+ if (kind == UV__WORK_SLOW_IO) {
+ /* Insert into a separate queue. */
+ QUEUE_INSERT_TAIL(&slow_io_pending_wq, q);
+ if (!QUEUE_EMPTY(&run_slow_work_message)) {
+      /* A run of slow I/O tasks is already scheduled => Nothing to do here.
+         The worker that picks up that message will schedule this one as well. */
+ uv_mutex_unlock(&mutex);
+ return;
+ }
+ q = &run_slow_work_message;
+ }
+
+ QUEUE_INSERT_TAIL(&wq, q);
+ if (idle_threads > 0)
+ uv_cond_signal(&cond);
+ uv_mutex_unlock(&mutex);
+}
+
+
+void uv__threadpool_cleanup(void) {
+#ifndef _WIN32
+ unsigned int i;
+
+ if (nthreads == 0)
+ return;
+
+ post(&exit_message, UV__WORK_CPU);
+
+ for (i = 0; i < nthreads; i++)
+ if (uv_thread_join(threads + i))
+ abort();
+
+ if (threads != default_threads)
+ uv__free(threads);
+
+ uv_mutex_destroy(&mutex);
+ uv_cond_destroy(&cond);
+
+ threads = NULL;
+ nthreads = 0;
+#endif
+}
+
+
+static void init_threads(void) {
+ unsigned int i;
+ const char* val;
+ uv_sem_t sem;
+
+ nthreads = ARRAY_SIZE(default_threads);
+ val = getenv("UV_THREADPOOL_SIZE");
+ if (val != NULL)
+ nthreads = atoi(val);
+ if (nthreads == 0)
+ nthreads = 1;
+ if (nthreads > MAX_THREADPOOL_SIZE)
+ nthreads = MAX_THREADPOOL_SIZE;
+
+ threads = default_threads;
+ if (nthreads > ARRAY_SIZE(default_threads)) {
+ threads = uv__malloc(nthreads * sizeof(threads[0]));
+ if (threads == NULL) {
+ nthreads = ARRAY_SIZE(default_threads);
+ threads = default_threads;
+ }
+ }
+
+ if (uv_cond_init(&cond))
+ abort();
+
+ if (uv_mutex_init(&mutex))
+ abort();
+
+ QUEUE_INIT(&wq);
+ QUEUE_INIT(&slow_io_pending_wq);
+ QUEUE_INIT(&run_slow_work_message);
+
+ if (uv_sem_init(&sem, 0))
+ abort();
+
+ for (i = 0; i < nthreads; i++)
+ if (uv_thread_create(threads + i, worker, &sem))
+ abort();
+
+ for (i = 0; i < nthreads; i++)
+ uv_sem_wait(&sem);
+
+ uv_sem_destroy(&sem);
+}
+
+
+#ifndef _WIN32
+static void reset_once(void) {
+ uv_once_t child_once = UV_ONCE_INIT;
+ memcpy(&once, &child_once, sizeof(child_once));
+}
+#endif
+
+
+static void init_once(void) {
+#ifndef _WIN32
+ /* Re-initialize the threadpool after fork.
+ * Note that this discards the global mutex and condition as well
+ * as the work queue.
+ */
+ if (pthread_atfork(NULL, NULL, &reset_once))
+ abort();
+#endif
+ init_threads();
+}
+
+
+void uv__work_submit(uv_loop_t* loop,
+ struct uv__work* w,
+ enum uv__work_kind kind,
+ void (*work)(struct uv__work* w),
+ void (*done)(struct uv__work* w, int status)) {
+ uv_once(&once, init_once);
+ w->loop = loop;
+ w->work = work;
+ w->done = done;
+ post(&w->wq, kind);
+}
+
+
+static int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) {
+ int cancelled;
+
+ uv_mutex_lock(&mutex);
+ uv_mutex_lock(&w->loop->wq_mutex);
+
+ cancelled = !QUEUE_EMPTY(&w->wq) && w->work != NULL;
+ if (cancelled)
+ QUEUE_REMOVE(&w->wq);
+
+ uv_mutex_unlock(&w->loop->wq_mutex);
+ uv_mutex_unlock(&mutex);
+
+ if (!cancelled)
+ return UV_EBUSY;
+
+ w->work = uv__cancelled;
+ uv_mutex_lock(&loop->wq_mutex);
+ QUEUE_INSERT_TAIL(&loop->wq, &w->wq);
+ uv_async_send(&loop->wq_async);
+ uv_mutex_unlock(&loop->wq_mutex);
+
+ return 0;
+}
+
+
+void uv__work_done(uv_async_t* handle) {
+ struct uv__work* w;
+ uv_loop_t* loop;
+ QUEUE* q;
+ QUEUE wq;
+ int err;
+
+ loop = container_of(handle, uv_loop_t, wq_async);
+ uv_mutex_lock(&loop->wq_mutex);
+ QUEUE_MOVE(&loop->wq, &wq);
+ uv_mutex_unlock(&loop->wq_mutex);
+
+ while (!QUEUE_EMPTY(&wq)) {
+ q = QUEUE_HEAD(&wq);
+ QUEUE_REMOVE(q);
+
+ w = container_of(q, struct uv__work, wq);
+ err = (w->work == uv__cancelled) ? UV_ECANCELED : 0;
+ w->done(w, err);
+ }
+}
+
+
+static void uv__queue_work(struct uv__work* w) {
+ uv_work_t* req = container_of(w, uv_work_t, work_req);
+
+ req->work_cb(req);
+}
+
+
+static void uv__queue_done(struct uv__work* w, int err) {
+ uv_work_t* req;
+
+ req = container_of(w, uv_work_t, work_req);
+ uv__req_unregister(req->loop, req);
+
+ if (req->after_work_cb == NULL)
+ return;
+
+ req->after_work_cb(req, err);
+}
+
+
+int uv_queue_work(uv_loop_t* loop,
+ uv_work_t* req,
+ uv_work_cb work_cb,
+ uv_after_work_cb after_work_cb) {
+ if (work_cb == NULL)
+ return UV_EINVAL;
+
+ uv__req_init(loop, req, UV_WORK);
+ req->loop = loop;
+ req->work_cb = work_cb;
+ req->after_work_cb = after_work_cb;
+ uv__work_submit(loop,
+ &req->work_req,
+ UV__WORK_CPU,
+ uv__queue_work,
+ uv__queue_done);
+ return 0;
+}
+
+
+int uv_cancel(uv_req_t* req) {
+ struct uv__work* wreq;
+ uv_loop_t* loop;
+
+ switch (req->type) {
+ case UV_FS:
+ loop = ((uv_fs_t*) req)->loop;
+ wreq = &((uv_fs_t*) req)->work_req;
+ break;
+ case UV_GETADDRINFO:
+ loop = ((uv_getaddrinfo_t*) req)->loop;
+ wreq = &((uv_getaddrinfo_t*) req)->work_req;
+ break;
+ case UV_GETNAMEINFO:
+ loop = ((uv_getnameinfo_t*) req)->loop;
+ wreq = &((uv_getnameinfo_t*) req)->work_req;
+ break;
+ case UV_RANDOM:
+ loop = ((uv_random_t*) req)->loop;
+ wreq = &((uv_random_t*) req)->work_req;
+ break;
+ case UV_WORK:
+ loop = ((uv_work_t*) req)->loop;
+ wreq = &((uv_work_t*) req)->work_req;
+ break;
+ default:
+ return UV_EINVAL;
+ }
+
+ return uv__work_cancel(loop, req, wreq);
+}
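uv_queue_work() is the public face of this pool: work_cb runs on a worker thread, after_work_cb runs back on the loop thread, and uv_cancel() on the request yields UV_ECANCELED in the after callback if the work had not started yet. A minimal sketch (the doubling work is a placeholder):

#include <stdio.h>
#include <uv.h>

/* Runs on a threadpool worker; must not touch the loop. */
static void crunch(uv_work_t* req) {
  int* n = req->data;
  *n *= 2;
}

/* Runs back on the loop thread once crunch() has finished. */
static void after_crunch(uv_work_t* req, int status) {
  if (status == UV_ECANCELED)
    printf("cancelled\n");
  else
    printf("result: %d\n", *(int*) req->data);
}

int main(void) {
  static int value = 21;
  uv_work_t req;
  req.data = &value;
  /* The pool defaults to 4 threads; UV_THREADPOOL_SIZE raises it up to
   * the MAX_THREADPOOL_SIZE cap defined above. */
  uv_queue_work(uv_default_loop(), &req, crunch, after_crunch);
  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}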
diff --git a/Utilities/cmlibuv/src/timer.c b/Utilities/cmlibuv/src/timer.c
new file mode 100644
index 0000000..1bea2a8
--- /dev/null
+++ b/Utilities/cmlibuv/src/timer.c
@@ -0,0 +1,184 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "uv-common.h"
+#include "heap-inl.h"
+
+#include <assert.h>
+#include <limits.h>
+
+
+static struct heap *timer_heap(const uv_loop_t* loop) {
+#ifdef _WIN32
+ return (struct heap*) loop->timer_heap;
+#else
+ return (struct heap*) &loop->timer_heap;
+#endif
+}
+
+
+static int timer_less_than(const struct heap_node* ha,
+ const struct heap_node* hb) {
+ const uv_timer_t* a;
+ const uv_timer_t* b;
+
+ a = container_of(ha, uv_timer_t, heap_node);
+ b = container_of(hb, uv_timer_t, heap_node);
+
+ if (a->timeout < b->timeout)
+ return 1;
+ if (b->timeout < a->timeout)
+ return 0;
+
+ /* Compare start_id when both have the same timeout. start_id is
+ * allocated with loop->timer_counter in uv_timer_start().
+ */
+ return a->start_id < b->start_id;
+}
+
+
+int uv_timer_init(uv_loop_t* loop, uv_timer_t* handle) {
+ uv__handle_init(loop, (uv_handle_t*)handle, UV_TIMER);
+ handle->timer_cb = NULL;
+ handle->repeat = 0;
+ return 0;
+}
+
+
+int uv_timer_start(uv_timer_t* handle,
+ uv_timer_cb cb,
+ uint64_t timeout,
+ uint64_t repeat) {
+ uint64_t clamped_timeout;
+
+ if (uv__is_closing(handle) || cb == NULL)
+ return UV_EINVAL;
+
+ if (uv__is_active(handle))
+ uv_timer_stop(handle);
+
+ clamped_timeout = handle->loop->time + timeout;
+ if (clamped_timeout < timeout)
+ clamped_timeout = (uint64_t) -1;
+
+ handle->timer_cb = cb;
+ handle->timeout = clamped_timeout;
+ handle->repeat = repeat;
+ /* start_id is the second index to be compared in timer_less_than() */
+ handle->start_id = handle->loop->timer_counter++;
+
+ heap_insert(timer_heap(handle->loop),
+ (struct heap_node*) &handle->heap_node,
+ timer_less_than);
+ uv__handle_start(handle);
+
+ return 0;
+}
+
+
+int uv_timer_stop(uv_timer_t* handle) {
+ if (!uv__is_active(handle))
+ return 0;
+
+ heap_remove(timer_heap(handle->loop),
+ (struct heap_node*) &handle->heap_node,
+ timer_less_than);
+ uv__handle_stop(handle);
+
+ return 0;
+}
+
+
+int uv_timer_again(uv_timer_t* handle) {
+ if (handle->timer_cb == NULL)
+ return UV_EINVAL;
+
+ if (handle->repeat) {
+ uv_timer_stop(handle);
+ uv_timer_start(handle, handle->timer_cb, handle->repeat, handle->repeat);
+ }
+
+ return 0;
+}
+
+
+void uv_timer_set_repeat(uv_timer_t* handle, uint64_t repeat) {
+ handle->repeat = repeat;
+}
+
+
+uint64_t uv_timer_get_repeat(const uv_timer_t* handle) {
+ return handle->repeat;
+}
+
+
+uint64_t uv_timer_get_due_in(const uv_timer_t* handle) {
+ if (handle->loop->time >= handle->timeout)
+ return 0;
+
+ return handle->timeout - handle->loop->time;
+}
+
+
+int uv__next_timeout(const uv_loop_t* loop) {
+ const struct heap_node* heap_node;
+ const uv_timer_t* handle;
+ uint64_t diff;
+
+ heap_node = heap_min(timer_heap(loop));
+ if (heap_node == NULL)
+ return -1; /* block indefinitely */
+
+ handle = container_of(heap_node, uv_timer_t, heap_node);
+ if (handle->timeout <= loop->time)
+ return 0;
+
+ diff = handle->timeout - loop->time;
+ if (diff > INT_MAX)
+ diff = INT_MAX;
+
+ return (int) diff;
+}
+
+
+void uv__run_timers(uv_loop_t* loop) {
+ struct heap_node* heap_node;
+ uv_timer_t* handle;
+
+ for (;;) {
+ heap_node = heap_min(timer_heap(loop));
+ if (heap_node == NULL)
+ break;
+
+ handle = container_of(heap_node, uv_timer_t, heap_node);
+ if (handle->timeout > loop->time)
+ break;
+
+ uv_timer_stop(handle);
+ uv_timer_again(handle);
+ handle->timer_cb(handle);
+ }
+}
+
+
+void uv__timer_close(uv_timer_t* handle) {
+ uv_timer_stop(handle);
+}
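Typical use of the timer API backed by the heap above; uv__run_timers() stops each expired timer and re-arms it through uv_timer_again() when repeat is non-zero. A brief sketch with arbitrary timeouts:

#include <stdio.h>
#include <uv.h>

static void on_tick(uv_timer_t* handle) {
  static int ticks;
  printf("tick %d\n", ++ticks);
  if (ticks == 3)
    uv_timer_stop(handle);   /* loop exits once nothing is active */
}

int main(void) {
  uv_timer_t timer;
  uv_timer_init(uv_default_loop(), &timer);
  /* 500 ms initial timeout, then repeat every 1000 ms. */
  uv_timer_start(&timer, on_tick, 500, 1000);
  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}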
diff --git a/Utilities/cmlibuv/src/unix/aix-common.c b/Utilities/cmlibuv/src/unix/aix-common.c
new file mode 100644
index 0000000..5bd2a68
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/aix-common.c
@@ -0,0 +1,90 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <sys/types.h>
+
+#include <sys/time.h>
+#include <unistd.h>
+
+#include <procinfo.h>
+
+#include <ctype.h>
+
+extern char* original_exepath;
+extern uv_mutex_t process_title_mutex;
+extern uv_once_t process_title_mutex_once;
+extern void init_process_title_mutex_once(void);
+
+uint64_t uv__hrtime(uv_clocktype_t type) {
+ uint64_t G = 1000000000;
+ timebasestruct_t t;
+ read_wall_time(&t, TIMEBASE_SZ);
+ time_base_to_time(&t, TIMEBASE_SZ);
+ return (uint64_t) t.tb_high * G + t.tb_low;
+}
+
+
+/*
+ * We could use a static buffer for the path manipulations, but this function
+ * could be called by multiple consumers and we don't want to risk a race
+ * condition in the use of snprintf.
+ * There is no direct way of getting the exe path on AIX, neither through
+ * /procfs nor through any libc API. The approach below is to take argv[0]
+ * and resolve it against the PATH environment variable to craft the path.
+ */
+int uv_exepath(char* buffer, size_t* size) {
+ int res;
+ char args[UV__PATH_MAX];
+ size_t cached_len;
+ struct procsinfo pi;
+
+ if (buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+ if (original_exepath != NULL) {
+ cached_len = strlen(original_exepath);
+ *size -= 1;
+ if (*size > cached_len)
+ *size = cached_len;
+ memcpy(buffer, original_exepath, *size);
+ buffer[*size] = '\0';
+ uv_mutex_unlock(&process_title_mutex);
+ return 0;
+ }
+ uv_mutex_unlock(&process_title_mutex);
+ pi.pi_pid = getpid();
+ res = getargs(&pi, sizeof(pi), args, sizeof(args));
+
+ if (res < 0)
+ return UV_EINVAL;
+
+ return uv__search_path(args, buffer, size);
+}
+
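uv_exepath() has the same shape on every platform: the caller passes a buffer and its size, and *size is updated to the length actually written. A short sketch:

#include <stdio.h>
#include <uv.h>

int main(void) {
  char path[1024];
  size_t size = sizeof(path);

  /* On AIX this uses the cached original_exepath or argv[0] plus PATH,
   * as implemented above; other platforms have their own backends. */
  if (uv_exepath(path, &size) == 0)
    printf("%s (%u bytes)\n", path, (unsigned) size);
  return 0;
}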
diff --git a/Utilities/cmlibuv/src/unix/aix.c b/Utilities/cmlibuv/src/unix/aix.c
new file mode 100644
index 0000000..6a013d4
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/aix.c
@@ -0,0 +1,1304 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/ioctl.h>
+#include <net/if.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+#include <sys/time.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <utmp.h>
+#include <libgen.h>
+
+#include <sys/protosw.h>
+#include <libperfstat.h>
+#include <procinfo.h>
+#include <sys/proc.h>
+#include <sys/procfs.h>
+
+#include <sys/poll.h>
+
+#include <sys/pollset.h>
+#include <ctype.h>
+#ifdef HAVE_SYS_AHAFS_EVPRODS_H
+#include <sys/ahafs_evProds.h>
+#endif
+
+#include <sys/mntctl.h>
+#include <sys/vmount.h>
+#include <limits.h>
+#include <strings.h>
+#include <sys/vnode.h>
+
+#define RDWR_BUF_SIZE 4096
+#define EQ(a,b) (strcmp(a,b) == 0)
+
+char* original_exepath = NULL;
+uv_mutex_t process_title_mutex;
+uv_once_t process_title_mutex_once = UV_ONCE_INIT;
+static void* args_mem = NULL;
+static char** process_argv = NULL;
+static int process_argc = 0;
+static char* process_title_ptr = NULL;
+
+void init_process_title_mutex_once(void) {
+ uv_mutex_init(&process_title_mutex);
+}
+
+
+int uv__platform_loop_init(uv_loop_t* loop) {
+ loop->fs_fd = -1;
+
+ /* Passing maxfd of -1 should mean the limit is determined
+ * by the user's ulimit or the global limit as per the doc */
+ loop->backend_fd = pollset_create(-1);
+
+ if (loop->backend_fd == -1)
+ return -1;
+
+ return 0;
+}
+
+
+void uv__platform_loop_delete(uv_loop_t* loop) {
+ if (loop->fs_fd != -1) {
+ uv__close(loop->fs_fd);
+ loop->fs_fd = -1;
+ }
+
+ if (loop->backend_fd != -1) {
+ pollset_destroy(loop->backend_fd);
+ loop->backend_fd = -1;
+ }
+}
+
+
+int uv__io_fork(uv_loop_t* loop) {
+ uv__platform_loop_delete(loop);
+
+ return uv__platform_loop_init(loop);
+}
+
+
+int uv__io_check_fd(uv_loop_t* loop, int fd) {
+ struct poll_ctl pc;
+
+ pc.events = POLLIN;
+ pc.cmd = PS_MOD; /* Equivalent to PS_ADD if the fd is not in the pollset. */
+ pc.fd = fd;
+
+ if (pollset_ctl(loop->backend_fd, &pc, 1))
+ return UV__ERR(errno);
+
+ pc.cmd = PS_DELETE;
+ if (pollset_ctl(loop->backend_fd, &pc, 1))
+ abort();
+
+ return 0;
+}
+
+
+void uv__io_poll(uv_loop_t* loop, int timeout) {
+ struct pollfd events[1024];
+ struct pollfd pqry;
+ struct pollfd* pe;
+ struct poll_ctl pc;
+ QUEUE* q;
+ uv__io_t* w;
+ uint64_t base;
+ uint64_t diff;
+ int have_signals;
+ int nevents;
+ int count;
+ int nfds;
+ int i;
+ int rc;
+ int add_failed;
+ int user_timeout;
+ int reset_timeout;
+
+ if (loop->nfds == 0) {
+ assert(QUEUE_EMPTY(&loop->watcher_queue));
+ return;
+ }
+
+ while (!QUEUE_EMPTY(&loop->watcher_queue)) {
+ q = QUEUE_HEAD(&loop->watcher_queue);
+ QUEUE_REMOVE(q);
+ QUEUE_INIT(q);
+
+ w = QUEUE_DATA(q, uv__io_t, watcher_queue);
+ assert(w->pevents != 0);
+ assert(w->fd >= 0);
+ assert(w->fd < (int) loop->nwatchers);
+
+ pc.events = w->pevents;
+ pc.fd = w->fd;
+
+ add_failed = 0;
+ if (w->events == 0) {
+ pc.cmd = PS_ADD;
+ if (pollset_ctl(loop->backend_fd, &pc, 1)) {
+ if (errno != EINVAL) {
+ assert(0 && "Failed to add file descriptor (pc.fd) to pollset");
+ abort();
+ }
+ /* Check if the fd is already in the pollset */
+ pqry.fd = pc.fd;
+ rc = pollset_query(loop->backend_fd, &pqry);
+ switch (rc) {
+ case -1:
+ assert(0 && "Failed to query pollset for file descriptor");
+ abort();
+ case 0:
+ assert(0 && "Pollset does not contain file descriptor");
+ abort();
+ }
+ /* If we got here then the pollset already contained the file descriptor even though
+ * we didn't think it should. This probably shouldn't happen, but we can continue. */
+ add_failed = 1;
+ }
+ }
+ if (w->events != 0 || add_failed) {
+      /* Modify, potentially removing events -- we need to delete and then
+       * re-add. We could use PS_MOD if we knew for sure that no events are
+       * being removed, but w->events is not treated as reliable above (it
+       * falls back), so that would need a pollset_query(), which would have
+       * to be pretty cheap compared to a PS_DELETE to be worth optimizing.
+       * Alternatively, we could lazily remove events, squelching them in the
+       * meantime. */
+ pc.cmd = PS_DELETE;
+ if (pollset_ctl(loop->backend_fd, &pc, 1)) {
+ assert(0 && "Failed to delete file descriptor (pc.fd) from pollset");
+ abort();
+ }
+ pc.cmd = PS_ADD;
+ if (pollset_ctl(loop->backend_fd, &pc, 1)) {
+ assert(0 && "Failed to add file descriptor (pc.fd) to pollset");
+ abort();
+ }
+ }
+
+ w->events = w->pevents;
+ }
+
+ assert(timeout >= -1);
+ base = loop->time;
+ count = 48; /* Benchmarks suggest this gives the best throughput. */
+
+ if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
+ reset_timeout = 1;
+ user_timeout = timeout;
+ timeout = 0;
+ } else {
+ reset_timeout = 0;
+ }
+
+ for (;;) {
+ /* Only need to set the provider_entry_time if timeout != 0. The function
+ * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
+ */
+ if (timeout != 0)
+ uv__metrics_set_provider_entry_time(loop);
+
+ nfds = pollset_poll(loop->backend_fd,
+ events,
+ ARRAY_SIZE(events),
+ timeout);
+
+ /* Update loop->time unconditionally. It's tempting to skip the update when
+ * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
+ * operating system didn't reschedule our process while in the syscall.
+ */
+ SAVE_ERRNO(uv__update_time(loop));
+
+ if (nfds == 0) {
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ if (timeout == -1)
+ continue;
+ if (timeout > 0)
+ goto update_timeout;
+ }
+
+ assert(timeout != -1);
+ return;
+ }
+
+ if (nfds == -1) {
+ if (errno != EINTR) {
+ abort();
+ }
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (timeout == -1)
+ continue;
+
+ if (timeout == 0)
+ return;
+
+ /* Interrupted by a signal. Update timeout and poll again. */
+ goto update_timeout;
+ }
+
+ have_signals = 0;
+ nevents = 0;
+
+ assert(loop->watchers != NULL);
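+ /* Stash the event array and its length so uv__platform_invalidate_fd()
+ * can invalidate entries when a watcher is stopped from a callback. */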
+ loop->watchers[loop->nwatchers] = (void*) events;
+ loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
+
+ for (i = 0; i < nfds; i++) {
+ pe = events + i;
+ pc.cmd = PS_DELETE;
+ pc.fd = pe->fd;
+
+ /* Skip invalidated events, see uv__platform_invalidate_fd */
+ if (pc.fd == -1)
+ continue;
+
+ assert(pc.fd >= 0);
+ assert((unsigned) pc.fd < loop->nwatchers);
+
+ w = loop->watchers[pc.fd];
+
+ if (w == NULL) {
+ /* File descriptor that we've stopped watching, disarm it.
+ *
+ * Ignore all errors because we may be racing with another thread
+ * when the file descriptor is closed.
+ */
+ pollset_ctl(loop->backend_fd, &pc, 1);
+ continue;
+ }
+
+ /* Run signal watchers last. This also affects child process watchers
+ * because those are implemented in terms of signal watchers.
+ */
+ if (w == &loop->signal_io_watcher) {
+ have_signals = 1;
+ } else {
+ uv__metrics_update_idle_time(loop);
+ w->cb(loop, w, pe->revents);
+ }
+
+ nevents++;
+ }
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (have_signals != 0) {
+ uv__metrics_update_idle_time(loop);
+ loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
+ }
+
+ loop->watchers[loop->nwatchers] = NULL;
+ loop->watchers[loop->nwatchers + 1] = NULL;
+
+ if (have_signals != 0)
+ return; /* Event loop should cycle now so don't poll again. */
+
+ if (nevents != 0) {
+ if (nfds == ARRAY_SIZE(events) && --count != 0) {
+ /* Poll for more events but don't block this time. */
+ timeout = 0;
+ continue;
+ }
+ return;
+ }
+
+ if (timeout == 0)
+ return;
+
+ if (timeout == -1)
+ continue;
+
+update_timeout:
+ assert(timeout > 0);
+
+ diff = loop->time - base;
+ if (diff >= (uint64_t) timeout)
+ return;
+
+ timeout -= diff;
+ }
+}
+
+
+uint64_t uv_get_free_memory(void) {
+ perfstat_memory_total_t mem_total;
+ int result = perfstat_memory_total(NULL, &mem_total, sizeof(mem_total), 1);
+ if (result == -1) {
+ return 0;
+ }
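+ /* real_free is reported in units of 4 KB pages. */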
+ return mem_total.real_free * 4096;
+}
+
+
+uint64_t uv_get_total_memory(void) {
+ perfstat_memory_total_t mem_total;
+ int result = perfstat_memory_total(NULL, &mem_total, sizeof(mem_total), 1);
+ if (result == -1) {
+ return 0;
+ }
+ return mem_total.real_total * 4096;
+}
+
+
+uint64_t uv_get_constrained_memory(void) {
+ return 0; /* Memory constraints are unknown. */
+}
+
+
+void uv_loadavg(double avg[3]) {
+ perfstat_cpu_total_t ps_total;
+ int result = perfstat_cpu_total(NULL, &ps_total, sizeof(ps_total), 1);
+ if (result == -1) {
+ avg[0] = 0.; avg[1] = 0.; avg[2] = 0.;
+ return;
+ }
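+ /* The load averages are fixed-point values with SBITS fractional bits. */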
+ avg[0] = ps_total.loadavg[0] / (double)(1 << SBITS);
+ avg[1] = ps_total.loadavg[1] / (double)(1 << SBITS);
+ avg[2] = ps_total.loadavg[2] / (double)(1 << SBITS);
+}
+
+
+#ifdef HAVE_SYS_AHAFS_EVPRODS_H
+static char* uv__rawname(const char* cp, char (*dst)[FILENAME_MAX+1]) {
+ char* dp;
+
+ dp = rindex(cp, '/');
+ if (dp == 0)
+ return 0;
+
+ snprintf(*dst, sizeof(*dst), "%.*s/r%s", (int) (dp - cp), cp, dp + 1);
+ return *dst;
+}
+
+
+/*
+ * Determine whether the given pathname is a directory.
+ * Returns 0 if the path is a directory, -1 if not.
+ *
+ * Note: there is an opportunity here for more detailed error information,
+ * but that requires changing the callers of this function as well.
+ */
+static int uv__path_is_a_directory(char* filename) {
+ struct stat statbuf;
+
+ if (stat(filename, &statbuf) < 0)
+ return -1; /* failed: not a directory, assume it is a file */
+
+ if (statbuf.st_type == VDIR)
+ return 0;
+
+ return -1;
+}
+
+
+/*
+ * Check whether AHAFS is mounted.
+ * Returns 0 if AHAFS is mounted, or an error code < 0 on failure
+ */
+static int uv__is_ahafs_mounted(void){
+ char rawbuf[FILENAME_MAX+1];
+ int rv, i = 2;
+ struct vmount *p;
+ int size_multiplier = 10;
+ size_t siz = sizeof(struct vmount)*size_multiplier;
+ struct vmount *vmt;
+ const char *dev = "/aha";
+ char *obj, *stub;
+
+ p = uv__malloc(siz);
+ if (p == NULL)
+ return UV__ERR(errno);
+
+ /* Retrieve all mounted filesystems */
+ rv = mntctl(MCTL_QUERY, siz, (char*)p);
+ if (rv < 0) {
+ uv__free(p);
+ return UV__ERR(errno);
+ }
+ if (rv == 0) {
+ /* buffer was not large enough, reallocate to correct size */
+ siz = *(int*)p;
+ uv__free(p);
+ p = uv__malloc(siz);
+ if (p == NULL)
+ return UV__ERR(errno);
+ rv = mntctl(MCTL_QUERY, siz, (char*)p);
+ if (rv < 0) {
+ uv__free(p);
+ return UV__ERR(errno);
+ }
+ }
+
+ /* Look for dev in filesystems mount info */
+ for(vmt = p, i = 0; i < rv; i++) {
+ obj = vmt2dataptr(vmt, VMT_OBJECT); /* device */
+ stub = vmt2dataptr(vmt, VMT_STUB); /* mount point */
+
+ if (EQ(obj, dev) || EQ(uv__rawname(obj, &rawbuf), dev) || EQ(stub, dev)) {
+ uv__free(p); /* Found a match */
+ return 0;
+ }
+ vmt = (struct vmount *) ((char *) vmt + vmt->vmt_length);
+ }
+
+ /* /aha was not found; it is required for monitoring filesystem changes. */
+ uv__free(p);
+ return -1;
+}
+
+/*
+ * Calls mkdir() for each intermediate path component, creating any missing
+ * directories. Returns the code from the final mkdir() call.
+ */
+static int uv__makedir_p(const char *dir) {
+ char tmp[256];
+ char *p = NULL;
+ size_t len;
+ int err;
+
+ /* TODO(bnoordhuis) Check uv__strscpy() return value. */
+ uv__strscpy(tmp, dir, sizeof(tmp));
+ len = strlen(tmp);
+ if (tmp[len - 1] == '/')
+ tmp[len - 1] = 0;
+ for (p = tmp + 1; *p; p++) {
+ if (*p == '/') {
+ *p = 0;
+ err = mkdir(tmp, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
+ if (err != 0 && errno != EEXIST)
+ return err;
+ *p = '/';
+ }
+ }
+ return mkdir(tmp, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
+}
+
+/*
+ * Creates necessary subdirectories in the AIX Event Infrastructure
+ * file system for monitoring the specified object.
+ * Returns the code from the mkdir() call.
+ */
+static int uv__make_subdirs_p(const char *filename) {
+ char cmd[2048];
+ char *p;
+ int rc = 0;
+
+ /* Strip off the monitor file name */
+ p = strrchr(filename, '/');
+
+ if (p == NULL)
+ return 0;
+
+ if (uv__path_is_a_directory((char*)filename) == 0) {
+ sprintf(cmd, "/aha/fs/modDir.monFactory");
+ } else {
+ sprintf(cmd, "/aha/fs/modFile.monFactory");
+ }
+
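+ /* Append only the directory portion of the monitored path. */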
+ strncat(cmd, filename, (p - filename));
+ rc = uv__makedir_p(cmd);
+
+ if (rc == -1 && errno != EEXIST){
+ return UV__ERR(errno);
+ }
+
+ return rc;
+}
+
+
+/*
+ * Checks if /aha is mounted, then proceeds to set up the monitoring
+ * objects for the specified file.
+ * Returns 0 on success, or an error code < 0 on failure
+ */
+static int uv__setup_ahafs(const char* filename, int *fd) {
+ int rc = 0;
+ char mon_file_write_string[RDWR_BUF_SIZE];
+ char mon_file[PATH_MAX];
+ int file_is_directory = 0; /* -1 == NO, 0 == YES */
+
+ /* Create monitor file name for object */
+ file_is_directory = uv__path_is_a_directory((char*)filename);
+
+ if (file_is_directory == 0)
+ sprintf(mon_file, "/aha/fs/modDir.monFactory");
+ else
+ sprintf(mon_file, "/aha/fs/modFile.monFactory");
+
+ if ((strlen(mon_file) + strlen(filename) + 5) > PATH_MAX)
+ return UV_ENAMETOOLONG;
+
+ /* Make the necessary subdirectories for the monitor file */
+ rc = uv__make_subdirs_p(filename);
+ if (rc == -1 && errno != EEXIST)
+ return rc;
+
+ strcat(mon_file, filename);
+ strcat(mon_file, ".mon");
+
+ *fd = 0; errno = 0;
+
+ /* Open the monitor file, creating it if necessary */
+ *fd = open(mon_file, O_CREAT|O_RDWR, S_IRUSR|S_IWUSR); /* O_CREAT requires a mode. */
+ if (*fd < 0)
+ return UV__ERR(errno);
+
+ /* Write out the monitoring specifications.
+ * In this case, we are monitoring for a state change event type
+ * CHANGED=YES
+ * We will be waiting in select call, rather than a read:
+ * WAIT_TYPE=WAIT_IN_SELECT
+ * We only want minimal information for files:
+ * INFO_LVL=1
+ * For directories, we want more information to track what file
+ * caused the change
+ * INFO_LVL=2
+ */
+
+ if (file_is_directory == 0)
+ sprintf(mon_file_write_string, "CHANGED=YES;WAIT_TYPE=WAIT_IN_SELECT;INFO_LVL=2");
+ else
+ sprintf(mon_file_write_string, "CHANGED=YES;WAIT_TYPE=WAIT_IN_SELECT;INFO_LVL=1");
+
+ rc = write(*fd, mon_file_write_string, strlen(mon_file_write_string)+1);
+ if (rc < 0 && errno != EBUSY)
+ return UV__ERR(errno);
+
+ return 0;
+}
+
+/*
+ * Skips a specified number of lines in the buffer passed in.
+ * Walks the buffer pointed to by p and attempts to skip n lines.
+ * Returns the total number of lines skipped
+ */
+static int uv__skip_lines(char **p, int n) {
+ int lines = 0;
+
+ while(n > 0) {
+ *p = strchr(*p, '\n');
+ if (*p == NULL)
+ return lines;
+
+ (*p)++;
+ n--;
+ lines++;
+ }
+ return lines;
+}
+
+
+/*
+ * Parse the event occurrence data to figure out what event just occurred
+ * and take proper action.
+ *
+ * buf points to the buffer containing the event occurrence data.
+ * Returns 0 on success, -1 on an unrecoverable parsing error.
+ *
+ */
+static int uv__parse_data(char *buf, int *events, uv_fs_event_t* handle) {
+ int evp_rc, i;
+ char *p;
+ char filename[PATH_MAX]; /* To be used when handling directories */
+
+ p = buf;
+ *events = 0;
+
+ /* Clear the filename buffer. */
+ for(i = 0; i < PATH_MAX; i++) {
+ filename[i] = 0;
+ }
+ i = 0;
+
+ /* Check for BUF_WRAP */
+ if (strncmp(buf, "BUF_WRAP", strlen("BUF_WRAP")) == 0) {
+ assert(0 && "Buffer wrap detected, some event occurrences lost!");
+ return 0;
+ }
+
+ /* Since we are using the default buffer size (4K), and have specified
+ * INFO_LVL=1, we won't see any EVENT_OVERFLOW conditions. Applications
+ * should check for this keyword if they are using an INFO_LVL of 2 or
+ * higher, and have a buffer size of <= 4K
+ */
+
+ /* Skip to RC_FROM_EVPROD */
+ if (uv__skip_lines(&p, 9) != 9)
+ return -1;
+
+ if (sscanf(p, "RC_FROM_EVPROD=%d\nEND_EVENT_DATA", &evp_rc) == 1) {
+ if (uv__path_is_a_directory(handle->path) == 0) { /* Directory */
+ if (evp_rc == AHAFS_MODDIR_UNMOUNT || evp_rc == AHAFS_MODDIR_REMOVE_SELF) {
+ /* The directory is no longer available for monitoring */
+ *events = UV_RENAME;
+ handle->dir_filename = NULL;
+ } else {
+ /* A file was added/removed inside the directory */
+ *events = UV_CHANGE;
+
+ /* Get the EVPROD_INFO */
+ if (uv__skip_lines(&p, 1) != 1)
+ return -1;
+
+ /* Scan out the name of the file that triggered the event. */
+ if (sscanf(p, "BEGIN_EVPROD_INFO\n%sEND_EVPROD_INFO", filename) == 1) {
+ handle->dir_filename = uv__strdup((const char*)&filename);
+ } else
+ return -1;
+ }
+ } else { /* Regular File */
+ if (evp_rc == AHAFS_MODFILE_RENAME)
+ *events = UV_RENAME;
+ else
+ *events = UV_CHANGE;
+ }
+ }
+ else
+ return -1;
+
+ return 0;
+}
+
+
+/* This is the internal callback */
+static void uv__ahafs_event(uv_loop_t* loop, uv__io_t* event_watch, unsigned int fflags) {
+ char result_data[RDWR_BUF_SIZE];
+ int bytes, rc = 0;
+ uv_fs_event_t* handle;
+ int events = 0;
+ char fname[PATH_MAX];
+ char *p;
+
+ handle = container_of(event_watch, uv_fs_event_t, event_watcher);
+
+ /* At this point, we assume that polling has been done on the
+ * file descriptor, so we can just read the AHAFS event occurrence
+ * data and parse its results without having to block anything
+ */
+ bytes = pread(event_watch->fd, result_data, RDWR_BUF_SIZE, 0);
+
+ assert((bytes >= 0) && "uv__ahafs_event - Error reading monitor file");
+
+ /* In file / directory move cases, AIX Event infrastructure
+ * produces a second event with no data.
+ * Ignore it and return gracefully.
+ */
+ if(bytes == 0)
+ return;
+
+ /* Parse the data */
+ if(bytes > 0)
+ rc = uv__parse_data(result_data, &events, handle);
+
+ /* Unrecoverable error */
+ if (rc == -1)
+ return;
+
+ /* For directory changes, the names of the files that triggered the change
+ * are never absolute pathnames.
+ */
+ if (uv__path_is_a_directory(handle->path) == 0) {
+ p = handle->dir_filename;
+ } else {
+ p = strrchr(handle->path, '/');
+ if (p == NULL)
+ p = handle->path;
+ else
+ p++;
+ }
+
+ /* TODO(bnoordhuis) Check uv__strscpy() return value. */
+ uv__strscpy(fname, p, sizeof(fname));
+
+ handle->cb(handle, fname, events, 0);
+}
+#endif
+
+
+int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
+#ifdef HAVE_SYS_AHAFS_EVPRODS_H
+ uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
+ return 0;
+#else
+ return UV_ENOSYS;
+#endif
+}
+
+
+int uv_fs_event_start(uv_fs_event_t* handle,
+ uv_fs_event_cb cb,
+ const char* filename,
+ unsigned int flags) {
+#ifdef HAVE_SYS_AHAFS_EVPRODS_H
+ int fd, rc, str_offset = 0;
+ char cwd[PATH_MAX];
+ char absolute_path[PATH_MAX];
+ char readlink_cwd[PATH_MAX];
+ struct timeval zt;
+ fd_set pollfd;
+
+
+ /* Figure out whether filename is absolute or not */
+ if (filename[0] == '\0') {
+ /* Missing a pathname */
+ return UV_ENOENT;
+ }
+ else if (filename[0] == '/') {
+ /* We have an absolute pathname. */
+ /* TODO(bnoordhuis) Check uv__strscpy() return value. */
+ uv__strscpy(absolute_path, filename, sizeof(absolute_path));
+ } else {
+ /* We have a relative pathname, compose the absolute pathname */
+ snprintf(cwd, sizeof(cwd), "/proc/%lu/cwd", (unsigned long) getpid());
+ rc = readlink(cwd, readlink_cwd, sizeof(readlink_cwd) - 1);
+ if (rc < 0)
+ return rc;
+ /* readlink() does not null-terminate the string. */
+ readlink_cwd[rc] = '\0';
+
+ if (filename[0] == '.' && filename[1] == '/')
+ str_offset = 2;
+
+ snprintf(absolute_path, sizeof(absolute_path), "%s%s", readlink_cwd,
+ filename + str_offset);
+ }
+
+ if (uv__is_ahafs_mounted() < 0) /* /aha checks failed */
+ return UV_ENOSYS;
+
+ /* Setup ahafs */
+ rc = uv__setup_ahafs((const char *)absolute_path, &fd);
+ if (rc != 0)
+ return rc;
+
+ /* Setup/Initialize all the libuv routines */
+ uv__handle_start(handle);
+ uv__io_init(&handle->event_watcher, uv__ahafs_event, fd);
+ handle->path = uv__strdup(filename);
+ handle->cb = cb;
+ handle->dir_filename = NULL;
+
+ uv__io_start(handle->loop, &handle->event_watcher, POLLIN);
+
+ /* AHAFS requires an initial poll before it starts monitoring,
+ * so kick-start it here to avoid missing an event that occurs
+ * during the current loop iteration. */
+ do {
+ memset(&zt, 0, sizeof(zt));
+ FD_ZERO(&pollfd);
+ FD_SET(fd, &pollfd);
+ rc = select(fd + 1, &pollfd, NULL, NULL, &zt);
+ } while (rc == -1 && errno == EINTR);
+ return 0;
+#else
+ return UV_ENOSYS;
+#endif
+}
+
+
+int uv_fs_event_stop(uv_fs_event_t* handle) {
+#ifdef HAVE_SYS_AHAFS_EVPRODS_H
+ if (!uv__is_active(handle))
+ return 0;
+
+ uv__io_close(handle->loop, &handle->event_watcher);
+ uv__handle_stop(handle);
+
+ if (uv__path_is_a_directory(handle->path) == 0) {
+ uv__free(handle->dir_filename);
+ handle->dir_filename = NULL;
+ }
+
+ uv__free(handle->path);
+ handle->path = NULL;
+ uv__close(handle->event_watcher.fd);
+ handle->event_watcher.fd = -1;
+
+ return 0;
+#else
+ return UV_ENOSYS;
+#endif
+}
+
+
+void uv__fs_event_close(uv_fs_event_t* handle) {
+#ifdef HAVE_SYS_AHAFS_EVPRODS_H
+ uv_fs_event_stop(handle);
+#else
+ UNREACHABLE();
+#endif
+}
+
+
+char** uv_setup_args(int argc, char** argv) {
+ char exepath[UV__PATH_MAX];
+ char** new_argv;
+ size_t size;
+ char* s;
+ int i;
+
+ if (argc <= 0)
+ return argv;
+
+ /* Save the original pointer to argv.
+ * AIX uses argv to read the process name.
+ * (Not the memory pointed to by argv[0..n] as on Linux.)
+ */
+ process_argv = argv;
+ process_argc = argc;
+
+ /* Use argv[0] to determine value for uv_exepath(). */
+ size = sizeof(exepath);
+ if (uv__search_path(argv[0], exepath, &size) == 0) {
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+ original_exepath = uv__strdup(exepath);
+ uv_mutex_unlock(&process_title_mutex);
+ }
+
+ /* Calculate how much memory we need for the argv strings. */
+ size = 0;
+ for (i = 0; i < argc; i++)
+ size += strlen(argv[i]) + 1;
+
+ /* Add space for the argv pointers. */
+ size += (argc + 1) * sizeof(char*);
+
+ new_argv = uv__malloc(size);
+ if (new_argv == NULL)
+ return argv;
+ args_mem = new_argv;
+
+ /* Copy over the strings and set up the pointer table. */
+ s = (char*) &new_argv[argc + 1];
+ for (i = 0; i < argc; i++) {
+ size = strlen(argv[i]) + 1;
+ memcpy(s, argv[i], size);
+ new_argv[i] = s;
+ s += size;
+ }
+ new_argv[i] = NULL;
+
+ return new_argv;
+}
+
+
+int uv_set_process_title(const char* title) {
+ char* new_title;
+
+ /* If uv_setup_args wasn't called or failed, we can't continue. */
+ if (process_argv == NULL || args_mem == NULL)
+ return UV_ENOBUFS;
+
+ /* We cannot free this pointer when libuv shuts down,
+ * the process may still be using it.
+ */
+ new_title = uv__strdup(title);
+ if (new_title == NULL)
+ return UV_ENOMEM;
+
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+
+ /* If this is the first time the title is set, there is nothing to free;
+ * otherwise release the previously allocated title.
+ */
+ if (process_title_ptr != NULL)
+ uv__free(process_title_ptr);
+
+ process_title_ptr = new_title;
+
+ process_argv[0] = process_title_ptr;
+ if (process_argc > 1)
+ process_argv[1] = NULL;
+
+ uv_mutex_unlock(&process_title_mutex);
+
+ return 0;
+}
+
+
+int uv_get_process_title(char* buffer, size_t size) {
+ size_t len;
+ if (buffer == NULL || size == 0)
+ return UV_EINVAL;
+
+ /* If uv_setup_args wasn't called, we can't continue. */
+ if (process_argv == NULL)
+ return UV_ENOBUFS;
+
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+
+ len = strlen(process_argv[0]);
+ if (size <= len) {
+ uv_mutex_unlock(&process_title_mutex);
+ return UV_ENOBUFS;
+ }
+
+ memcpy(buffer, process_argv[0], len);
+ buffer[len] = '\0';
+
+ uv_mutex_unlock(&process_title_mutex);
+
+ return 0;
+}
+
+
+void uv__process_title_cleanup(void) {
+ uv__free(args_mem); /* Keep valgrind happy. */
+ args_mem = NULL;
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+ char pp[64];
+ psinfo_t psinfo;
+ int err;
+ int fd;
+
+ snprintf(pp, sizeof(pp), "/proc/%lu/psinfo", (unsigned long) getpid());
+
+ fd = open(pp, O_RDONLY);
+ if (fd == -1)
+ return UV__ERR(errno);
+
+ /* FIXME(bnoordhuis) Handle EINTR. */
+ err = UV_EINVAL;
+ if (read(fd, &psinfo, sizeof(psinfo)) == sizeof(psinfo)) {
+ *rss = (size_t)psinfo.pr_rssize * 1024;
+ err = 0;
+ }
+ uv__close(fd);
+
+ return err;
+}
+
+
+int uv_uptime(double* uptime) {
+ struct utmp *utmp_buf;
+ size_t entries = 0;
+ time_t boot_time;
+
+ boot_time = 0;
+ utmpname(UTMP_FILE);
+
+ setutent();
+
+ while ((utmp_buf = getutent()) != NULL) {
+ if (utmp_buf->ut_user[0] && utmp_buf->ut_type == USER_PROCESS)
+ ++entries;
+ if (utmp_buf->ut_type == BOOT_TIME)
+ boot_time = utmp_buf->ut_time;
+ }
+
+ endutent();
+
+ if (boot_time == 0)
+ return UV_ENOSYS;
+
+ *uptime = time(NULL) - boot_time;
+ return 0;
+}
+
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+ uv_cpu_info_t* cpu_info;
+ perfstat_cpu_total_t ps_total;
+ perfstat_cpu_t* ps_cpus;
+ perfstat_id_t cpu_id;
+ int result, ncpus, idx = 0;
+
+ result = perfstat_cpu_total(NULL, &ps_total, sizeof(ps_total), 1);
+ if (result == -1) {
+ return UV_ENOSYS;
+ }
+
+ ncpus = result = perfstat_cpu(NULL, NULL, sizeof(perfstat_cpu_t), 0);
+ if (result == -1) {
+ return UV_ENOSYS;
+ }
+
+ ps_cpus = (perfstat_cpu_t*) uv__malloc(ncpus * sizeof(perfstat_cpu_t));
+ if (!ps_cpus) {
+ return UV_ENOMEM;
+ }
+
+ /* TODO(bnoordhuis) Check uv__strscpy() return value. */
+ uv__strscpy(cpu_id.name, FIRST_CPU, sizeof(cpu_id.name));
+ result = perfstat_cpu(&cpu_id, ps_cpus, sizeof(perfstat_cpu_t), ncpus);
+ if (result == -1) {
+ uv__free(ps_cpus);
+ return UV_ENOSYS;
+ }
+
+ *cpu_infos = (uv_cpu_info_t*) uv__malloc(ncpus * sizeof(uv_cpu_info_t));
+ if (!*cpu_infos) {
+ uv__free(ps_cpus);
+ return UV_ENOMEM;
+ }
+
+ *count = ncpus;
+
+ cpu_info = *cpu_infos;
+ while (idx < ncpus) {
+ cpu_info->speed = (int)(ps_total.processorHZ / 1000000);
+ cpu_info->model = uv__strdup(ps_total.description);
+ cpu_info->cpu_times.user = ps_cpus[idx].user;
+ cpu_info->cpu_times.sys = ps_cpus[idx].sys;
+ cpu_info->cpu_times.idle = ps_cpus[idx].idle;
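+ /* perfstat reports no irq counter; the I/O wait time is stored in the irq field. */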
+ cpu_info->cpu_times.irq = ps_cpus[idx].wait;
+ cpu_info->cpu_times.nice = 0;
+ cpu_info++;
+ idx++;
+ }
+
+ uv__free(ps_cpus);
+ return 0;
+}
+
+
+int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
+ uv_interface_address_t* address;
+ int sockfd, sock6fd, inet6, i, r, size = 1;
+ struct ifconf ifc;
+ struct ifreq *ifr, *p, flg;
+ struct in6_ifreq if6;
+ struct sockaddr_dl* sa_addr;
+
+ ifc.ifc_req = NULL;
+ sock6fd = -1;
+ r = 0;
+ *count = 0;
+ *addresses = NULL;
+
+ if (0 > (sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP))) {
+ r = UV__ERR(errno);
+ goto cleanup;
+ }
+
+ if (0 > (sock6fd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_IP))) {
+ r = UV__ERR(errno);
+ goto cleanup;
+ }
+
+ if (ioctl(sockfd, SIOCGSIZIFCONF, &size) == -1) {
+ r = UV__ERR(errno);
+ goto cleanup;
+ }
+
+ ifc.ifc_req = (struct ifreq*)uv__malloc(size);
+ if (ifc.ifc_req == NULL) {
+ r = UV_ENOMEM;
+ goto cleanup;
+ }
+ ifc.ifc_len = size;
+ if (ioctl(sockfd, SIOCGIFCONF, &ifc) == -1) {
+ r = UV__ERR(errno);
+ goto cleanup;
+ }
+
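+/* Entries returned by SIOCGIFCONF may carry addresses longer than
+ * sizeof(struct sockaddr); use sa_len to step over each entry. */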
+#define ADDR_SIZE(p) MAX((p).sa_len, sizeof(p))
+
+ /* Count all up and running ipv4/ipv6 addresses */
+ ifr = ifc.ifc_req;
+ while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) {
+ p = ifr;
+ ifr = (struct ifreq*)
+ ((char*)ifr + sizeof(ifr->ifr_name) + ADDR_SIZE(ifr->ifr_addr));
+
+ if (!(p->ifr_addr.sa_family == AF_INET6 ||
+ p->ifr_addr.sa_family == AF_INET))
+ continue;
+
+ memcpy(flg.ifr_name, p->ifr_name, sizeof(flg.ifr_name));
+ if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1) {
+ r = UV__ERR(errno);
+ goto cleanup;
+ }
+
+ if (!(flg.ifr_flags & IFF_UP && flg.ifr_flags & IFF_RUNNING))
+ continue;
+
+ (*count)++;
+ }
+
+ if (*count == 0)
+ goto cleanup;
+
+ /* Alloc the return interface structs */
+ *addresses = uv__calloc(*count, sizeof(**addresses));
+ if (!(*addresses)) {
+ r = UV_ENOMEM;
+ goto cleanup;
+ }
+ address = *addresses;
+
+ ifr = ifc.ifc_req;
+ while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) {
+ p = ifr;
+ ifr = (struct ifreq*)
+ ((char*)ifr + sizeof(ifr->ifr_name) + ADDR_SIZE(ifr->ifr_addr));
+
+ if (!(p->ifr_addr.sa_family == AF_INET6 ||
+ p->ifr_addr.sa_family == AF_INET))
+ continue;
+
+ inet6 = (p->ifr_addr.sa_family == AF_INET6);
+
+ memcpy(flg.ifr_name, p->ifr_name, sizeof(flg.ifr_name));
+ if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1)
+ goto syserror;
+
+ if (!(flg.ifr_flags & IFF_UP && flg.ifr_flags & IFF_RUNNING))
+ continue;
+
+ /* All conditions above must match those of the counting loop. */
+
+ address->name = uv__strdup(p->ifr_name);
+
+ if (inet6)
+ address->address.address6 = *((struct sockaddr_in6*) &p->ifr_addr);
+ else
+ address->address.address4 = *((struct sockaddr_in*) &p->ifr_addr);
+
+ if (inet6) {
+ memset(&if6, 0, sizeof(if6));
+ r = uv__strscpy(if6.ifr_name, p->ifr_name, sizeof(if6.ifr_name));
+ if (r == UV_E2BIG)
+ goto cleanup;
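+ /* uv__strscpy() returns the copied length on success; reset r so the
+ * length is not mistaken for an error code in the return value. */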
+ r = 0;
+ memcpy(&if6.ifr_Addr, &p->ifr_addr, sizeof(if6.ifr_Addr));
+ if (ioctl(sock6fd, SIOCGIFNETMASK6, &if6) == -1)
+ goto syserror;
+ address->netmask.netmask6 = *((struct sockaddr_in6*) &if6.ifr_Addr);
+ /* Explicitly set family as the ioctl call appears to return it as 0. */
+ address->netmask.netmask6.sin6_family = AF_INET6;
+ } else {
+ if (ioctl(sockfd, SIOCGIFNETMASK, p) == -1)
+ goto syserror;
+ address->netmask.netmask4 = *((struct sockaddr_in*) &p->ifr_addr);
+ /* Explicitly set family as the ioctl call appears to return it as 0. */
+ address->netmask.netmask4.sin_family = AF_INET;
+ }
+
+ address->is_internal = flg.ifr_flags & IFF_LOOPBACK ? 1 : 0;
+
+ address++;
+ }
+
+ /* Fill in physical addresses. */
+ ifr = ifc.ifc_req;
+ while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) {
+ p = ifr;
+ ifr = (struct ifreq*)
+ ((char*)ifr + sizeof(ifr->ifr_name) + ADDR_SIZE(ifr->ifr_addr));
+
+ if (p->ifr_addr.sa_family != AF_LINK)
+ continue;
+
+ address = *addresses;
+ for (i = 0; i < *count; i++) {
+ if (strcmp(address->name, p->ifr_name) == 0) {
+ sa_addr = (struct sockaddr_dl*) &p->ifr_addr;
+ memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr));
+ }
+ address++;
+ }
+ }
+
+#undef ADDR_SIZE
+ goto cleanup;
+
+syserror:
+ uv_free_interface_addresses(*addresses, *count);
+ *addresses = NULL;
+ *count = 0;
+ r = UV_ENOSYS;
+
+cleanup:
+ if (sockfd != -1)
+ uv__close(sockfd);
+ if (sock6fd != -1)
+ uv__close(sock6fd);
+ uv__free(ifc.ifc_req);
+ return r;
+}
+
+
+void uv_free_interface_addresses(uv_interface_address_t* addresses,
+ int count) {
+ int i;
+
+ for (i = 0; i < count; ++i) {
+ uv__free(addresses[i].name);
+ }
+
+ uv__free(addresses);
+}
+
+
+void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
+ struct pollfd* events;
+ uintptr_t i;
+ uintptr_t nfds;
+ struct poll_ctl pc;
+
+ assert(loop->watchers != NULL);
+ assert(fd >= 0);
+
+ events = (struct pollfd*) loop->watchers[loop->nwatchers];
+ nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
+
+ if (events != NULL)
+ /* Invalidate events with the same file descriptor. */
+ for (i = 0; i < nfds; i++)
+ if ((int) events[i].fd == fd)
+ events[i].fd = -1;
+
+ /* Remove the file descriptor from the poll set */
+ pc.events = 0;
+ pc.cmd = PS_DELETE;
+ pc.fd = fd;
+ if(loop->backend_fd >= 0)
+ pollset_ctl(loop->backend_fd, &pc, 1);
+}
diff --git a/Utilities/cmlibuv/src/unix/android-ifaddrs.c b/Utilities/cmlibuv/src/unix/android-ifaddrs.c
new file mode 100644
index 0000000..4765cc0
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/android-ifaddrs.c
@@ -0,0 +1,713 @@
+/*
+Copyright (c) 2013, Kenneth MacKay
+Copyright (c) 2014, Emergya (Cloud4all, FP7/2007-2013 grant agreement #289016)
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include "uv/android-ifaddrs.h"
+#include "uv-common.h"
+
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <net/if_arp.h>
+#include <netinet/in.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_packet.h>
+
+typedef struct NetlinkList
+{
+ struct NetlinkList *m_next;
+ struct nlmsghdr *m_data;
+ unsigned int m_size;
+} NetlinkList;
+
+static int netlink_socket(pid_t *p_pid)
+{
+ struct sockaddr_nl l_addr;
+ socklen_t l_len;
+
+ int l_socket = socket(PF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
+ if(l_socket < 0)
+ {
+ return -1;
+ }
+
+ memset(&l_addr, 0, sizeof(l_addr));
+ l_addr.nl_family = AF_NETLINK;
+ if(bind(l_socket, (struct sockaddr *)&l_addr, sizeof(l_addr)) < 0)
+ {
+ close(l_socket);
+ return -1;
+ }
+
+ l_len = sizeof(l_addr);
+ if(getsockname(l_socket, (struct sockaddr *)&l_addr, &l_len) < 0)
+ {
+ close(l_socket);
+ return -1;
+ }
+ *p_pid = l_addr.nl_pid;
+
+ return l_socket;
+}
+
+static int netlink_send(int p_socket, int p_request)
+{
+ char l_buffer[NLMSG_ALIGN(sizeof(struct nlmsghdr)) + NLMSG_ALIGN(sizeof(struct rtgenmsg))];
+
+ struct nlmsghdr *l_hdr;
+ struct rtgenmsg *l_msg;
+ struct sockaddr_nl l_addr;
+
+ memset(l_buffer, 0, sizeof(l_buffer));
+
+ l_hdr = (struct nlmsghdr *)l_buffer;
+ l_msg = (struct rtgenmsg *)NLMSG_DATA(l_hdr);
+
+ l_hdr->nlmsg_len = NLMSG_LENGTH(sizeof(*l_msg));
+ l_hdr->nlmsg_type = p_request;
+ l_hdr->nlmsg_flags = NLM_F_ROOT | NLM_F_MATCH | NLM_F_REQUEST;
+ l_hdr->nlmsg_pid = 0;
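+ /* Use the socket descriptor as the sequence number so that replies can
+ * be matched against this request when parsing the responses. */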
+ l_hdr->nlmsg_seq = p_socket;
+ l_msg->rtgen_family = AF_UNSPEC;
+
+ memset(&l_addr, 0, sizeof(l_addr));
+ l_addr.nl_family = AF_NETLINK;
+ return (sendto(p_socket, l_hdr, l_hdr->nlmsg_len, 0, (struct sockaddr *)&l_addr, sizeof(l_addr)));
+}
+
+static int netlink_recv(int p_socket, void *p_buffer, size_t p_len)
+{
+ struct sockaddr_nl l_addr;
+ struct msghdr l_msg;
+
+ struct iovec l_iov;
+ l_iov.iov_base = p_buffer;
+ l_iov.iov_len = p_len;
+
+ for(;;)
+ {
+ int l_result;
+ l_msg.msg_name = (void *)&l_addr;
+ l_msg.msg_namelen = sizeof(l_addr);
+ l_msg.msg_iov = &l_iov;
+ l_msg.msg_iovlen = 1;
+ l_msg.msg_control = NULL;
+ l_msg.msg_controllen = 0;
+ l_msg.msg_flags = 0;
+ l_result = recvmsg(p_socket, &l_msg, 0);
+
+ if(l_result < 0)
+ {
+ if(errno == EINTR)
+ {
+ continue;
+ }
+ return -2;
+ }
+
+ /* Buffer was too small */
+ if(l_msg.msg_flags & MSG_TRUNC)
+ {
+ return -1;
+ }
+ return l_result;
+ }
+}
+
+static struct nlmsghdr *getNetlinkResponse(int p_socket, pid_t p_pid, int *p_size, int *p_done)
+{
+ size_t l_size = 4096;
+ void *l_buffer = NULL;
+
+ for(;;)
+ {
+ int l_read;
+
+ uv__free(l_buffer);
+ l_buffer = uv__malloc(l_size);
+ if (l_buffer == NULL)
+ {
+ return NULL;
+ }
+
+ l_read = netlink_recv(p_socket, l_buffer, l_size);
+ *p_size = l_read;
+ if(l_read == -2)
+ {
+ uv__free(l_buffer);
+ return NULL;
+ }
+ if(l_read >= 0)
+ {
+ struct nlmsghdr *l_hdr;
+ for(l_hdr = (struct nlmsghdr *)l_buffer; NLMSG_OK(l_hdr, (unsigned int)l_read); l_hdr = (struct nlmsghdr *)NLMSG_NEXT(l_hdr, l_read))
+ {
+ if((pid_t)l_hdr->nlmsg_pid != p_pid || (int)l_hdr->nlmsg_seq != p_socket)
+ {
+ continue;
+ }
+
+ if(l_hdr->nlmsg_type == NLMSG_DONE)
+ {
+ *p_done = 1;
+ break;
+ }
+
+ if(l_hdr->nlmsg_type == NLMSG_ERROR)
+ {
+ uv__free(l_buffer);
+ return NULL;
+ }
+ }
+ return l_buffer;
+ }
+
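+ /* netlink_recv() returned -1, meaning the buffer was too small (MSG_TRUNC);
+ * double the buffer size and try again. */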
+ l_size *= 2;
+ }
+}
+
+static NetlinkList *newListItem(struct nlmsghdr *p_data, unsigned int p_size)
+{
+ NetlinkList *l_item = uv__malloc(sizeof(NetlinkList));
+ if (l_item == NULL)
+ {
+ return NULL;
+ }
+
+ l_item->m_next = NULL;
+ l_item->m_data = p_data;
+ l_item->m_size = p_size;
+ return l_item;
+}
+
+static void freeResultList(NetlinkList *p_list)
+{
+ NetlinkList *l_cur;
+ while(p_list)
+ {
+ l_cur = p_list;
+ p_list = p_list->m_next;
+ uv__free(l_cur->m_data);
+ uv__free(l_cur);
+ }
+}
+
+static NetlinkList *getResultList(int p_socket, int p_request, pid_t p_pid)
+{
+ int l_size;
+ int l_done;
+ NetlinkList *l_list;
+ NetlinkList *l_end;
+
+ if(netlink_send(p_socket, p_request) < 0)
+ {
+ return NULL;
+ }
+
+ l_list = NULL;
+ l_end = NULL;
+
+ l_done = 0;
+ while(!l_done)
+ {
+ NetlinkList *l_item;
+
+ struct nlmsghdr *l_hdr = getNetlinkResponse(p_socket, p_pid, &l_size, &l_done);
+ /* Error */
+ if(!l_hdr)
+ {
+ freeResultList(l_list);
+ return NULL;
+ }
+
+ l_item = newListItem(l_hdr, l_size);
+ if (!l_item)
+ {
+ freeResultList(l_list);
+ return NULL;
+ }
+ if(!l_list)
+ {
+ l_list = l_item;
+ }
+ else
+ {
+ l_end->m_next = l_item;
+ }
+ l_end = l_item;
+ }
+ return l_list;
+}
+
+static size_t maxSize(size_t a, size_t b)
+{
+ return (a > b ? a : b);
+}
+
+static size_t calcAddrLen(sa_family_t p_family, int p_dataSize)
+{
+ switch(p_family)
+ {
+ case AF_INET:
+ return sizeof(struct sockaddr_in);
+ case AF_INET6:
+ return sizeof(struct sockaddr_in6);
+ case AF_PACKET:
+ return maxSize(sizeof(struct sockaddr_ll), offsetof(struct sockaddr_ll, sll_addr) + p_dataSize);
+ default:
+ return maxSize(sizeof(struct sockaddr), offsetof(struct sockaddr, sa_data) + p_dataSize);
+ }
+}
+
+static void makeSockaddr(sa_family_t p_family, struct sockaddr *p_dest, void *p_data, size_t p_size)
+{
+ switch(p_family)
+ {
+ case AF_INET:
+ memcpy(&((struct sockaddr_in*)p_dest)->sin_addr, p_data, p_size);
+ break;
+ case AF_INET6:
+ memcpy(&((struct sockaddr_in6*)p_dest)->sin6_addr, p_data, p_size);
+ break;
+ case AF_PACKET:
+ memcpy(((struct sockaddr_ll*)p_dest)->sll_addr, p_data, p_size);
+ ((struct sockaddr_ll*)p_dest)->sll_halen = p_size;
+ break;
+ default:
+ memcpy(p_dest->sa_data, p_data, p_size);
+ break;
+ }
+ p_dest->sa_family = p_family;
+}
+
+static void addToEnd(struct ifaddrs **p_resultList, struct ifaddrs *p_entry)
+{
+ if(!*p_resultList)
+ {
+ *p_resultList = p_entry;
+ }
+ else
+ {
+ struct ifaddrs *l_cur = *p_resultList;
+ while(l_cur->ifa_next)
+ {
+ l_cur = l_cur->ifa_next;
+ }
+ l_cur->ifa_next = p_entry;
+ }
+}
+
+static int interpretLink(struct nlmsghdr *p_hdr, struct ifaddrs **p_resultList)
+{
+ struct ifaddrs *l_entry;
+
+ char *l_index;
+ char *l_name;
+ char *l_addr;
+ char *l_data;
+
+ struct ifinfomsg *l_info = (struct ifinfomsg *)NLMSG_DATA(p_hdr);
+
+ size_t l_nameSize = 0;
+ size_t l_addrSize = 0;
+ size_t l_dataSize = 0;
+
+ size_t l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifinfomsg));
+ struct rtattr *l_rta;
+ for(l_rta = IFLA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize))
+ {
+ size_t l_rtaDataSize = RTA_PAYLOAD(l_rta);
+ switch(l_rta->rta_type)
+ {
+ case IFLA_ADDRESS:
+ case IFLA_BROADCAST:
+ l_addrSize += NLMSG_ALIGN(calcAddrLen(AF_PACKET, l_rtaDataSize));
+ break;
+ case IFLA_IFNAME:
+ l_nameSize += NLMSG_ALIGN(l_rtaSize + 1);
+ break;
+ case IFLA_STATS:
+ l_dataSize += NLMSG_ALIGN(l_rtaSize);
+ break;
+ default:
+ break;
+ }
+ }
+
+ l_entry = uv__malloc(sizeof(struct ifaddrs) + sizeof(int) + l_nameSize + l_addrSize + l_dataSize);
+ if (l_entry == NULL)
+ {
+ return -1;
+ }
+ memset(l_entry, 0, sizeof(struct ifaddrs));
+ l_entry->ifa_name = "";
+
+ l_index = ((char *)l_entry) + sizeof(struct ifaddrs);
+ l_name = l_index + sizeof(int);
+ l_addr = l_name + l_nameSize;
+ l_data = l_addr + l_addrSize;
+
+ /* Save the interface index so we can look it up when handling the
+ * addresses.
+ */
+ memcpy(l_index, &l_info->ifi_index, sizeof(int));
+
+ l_entry->ifa_flags = l_info->ifi_flags;
+
+ l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifinfomsg));
+ for(l_rta = IFLA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize))
+ {
+ void *l_rtaData = RTA_DATA(l_rta);
+ size_t l_rtaDataSize = RTA_PAYLOAD(l_rta);
+ switch(l_rta->rta_type)
+ {
+ case IFLA_ADDRESS:
+ case IFLA_BROADCAST:
+ {
+ size_t l_addrLen = calcAddrLen(AF_PACKET, l_rtaDataSize);
+ makeSockaddr(AF_PACKET, (struct sockaddr *)l_addr, l_rtaData, l_rtaDataSize);
+ ((struct sockaddr_ll *)l_addr)->sll_ifindex = l_info->ifi_index;
+ ((struct sockaddr_ll *)l_addr)->sll_hatype = l_info->ifi_type;
+ if(l_rta->rta_type == IFLA_ADDRESS)
+ {
+ l_entry->ifa_addr = (struct sockaddr *)l_addr;
+ }
+ else
+ {
+ l_entry->ifa_broadaddr = (struct sockaddr *)l_addr;
+ }
+ l_addr += NLMSG_ALIGN(l_addrLen);
+ break;
+ }
+ case IFLA_IFNAME:
+ strncpy(l_name, l_rtaData, l_rtaDataSize);
+ l_name[l_rtaDataSize] = '\0';
+ l_entry->ifa_name = l_name;
+ break;
+ case IFLA_STATS:
+ memcpy(l_data, l_rtaData, l_rtaDataSize);
+ l_entry->ifa_data = l_data;
+ break;
+ default:
+ break;
+ }
+ }
+
+ addToEnd(p_resultList, l_entry);
+ return 0;
+}
+
+static struct ifaddrs *findInterface(int p_index, struct ifaddrs **p_links, int p_numLinks)
+{
+ int l_num = 0;
+ struct ifaddrs *l_cur = *p_links;
+ while(l_cur && l_num < p_numLinks)
+ {
+ char *l_indexPtr = ((char *)l_cur) + sizeof(struct ifaddrs);
+ int l_index;
+ memcpy(&l_index, l_indexPtr, sizeof(int));
+ if(l_index == p_index)
+ {
+ return l_cur;
+ }
+
+ l_cur = l_cur->ifa_next;
+ ++l_num;
+ }
+ return NULL;
+}
+
+static int interpretAddr(struct nlmsghdr *p_hdr, struct ifaddrs **p_resultList, int p_numLinks)
+{
+ struct ifaddrmsg *l_info = (struct ifaddrmsg *)NLMSG_DATA(p_hdr);
+ struct ifaddrs *l_interface = findInterface(l_info->ifa_index, p_resultList, p_numLinks);
+
+ size_t l_nameSize = 0;
+ size_t l_addrSize = 0;
+
+ int l_addedNetmask = 0;
+
+ size_t l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifaddrmsg));
+ struct rtattr *l_rta;
+ struct ifaddrs *l_entry;
+
+ char *l_name;
+ char *l_addr;
+
+ for(l_rta = IFA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize))
+ {
+ size_t l_rtaDataSize = RTA_PAYLOAD(l_rta);
+ if(l_info->ifa_family == AF_PACKET)
+ {
+ continue;
+ }
+
+ switch(l_rta->rta_type)
+ {
+ case IFA_ADDRESS:
+ case IFA_LOCAL:
+ l_addrSize += NLMSG_ALIGN(calcAddrLen(l_info->ifa_family, l_rtaDataSize));
+ if((l_info->ifa_family == AF_INET || l_info->ifa_family == AF_INET6) && !l_addedNetmask)
+ {
+ /* Make room for netmask */
+ l_addrSize += NLMSG_ALIGN(calcAddrLen(l_info->ifa_family, l_rtaDataSize));
+ l_addedNetmask = 1;
+ }
+ break;
+ case IFA_BROADCAST:
+ l_addrSize += NLMSG_ALIGN(calcAddrLen(l_info->ifa_family, l_rtaDataSize));
+ break;
+ case IFA_LABEL:
+ l_nameSize += NLMSG_ALIGN(l_rtaDataSize + 1);
+ break;
+ default:
+ break;
+ }
+ }
+
+ l_entry = uv__malloc(sizeof(struct ifaddrs) + l_nameSize + l_addrSize);
+ if (l_entry == NULL)
+ {
+ return -1;
+ }
+ memset(l_entry, 0, sizeof(struct ifaddrs));
+ l_entry->ifa_name = (l_interface ? l_interface->ifa_name : "");
+
+ l_name = ((char *)l_entry) + sizeof(struct ifaddrs);
+ l_addr = l_name + l_nameSize;
+
+ l_entry->ifa_flags = l_info->ifa_flags;
+ if(l_interface)
+ {
+ l_entry->ifa_flags |= l_interface->ifa_flags;
+ }
+
+ l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifaddrmsg));
+ for(l_rta = IFA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize))
+ {
+ void *l_rtaData = RTA_DATA(l_rta);
+ size_t l_rtaDataSize = RTA_PAYLOAD(l_rta);
+ switch(l_rta->rta_type)
+ {
+ case IFA_ADDRESS:
+ case IFA_BROADCAST:
+ case IFA_LOCAL:
+ {
+ size_t l_addrLen = calcAddrLen(l_info->ifa_family, l_rtaDataSize);
+ makeSockaddr(l_info->ifa_family, (struct sockaddr *)l_addr, l_rtaData, l_rtaDataSize);
+ if(l_info->ifa_family == AF_INET6)
+ {
+ if(IN6_IS_ADDR_LINKLOCAL((struct in6_addr *)l_rtaData) || IN6_IS_ADDR_MC_LINKLOCAL((struct in6_addr *)l_rtaData))
+ {
+ ((struct sockaddr_in6 *)l_addr)->sin6_scope_id = l_info->ifa_index;
+ }
+ }
+
+ /* Apparently in a point-to-point network IFA_ADDRESS contains
+ * the dest address and IFA_LOCAL contains the local address
+ */
+ if(l_rta->rta_type == IFA_ADDRESS)
+ {
+ if(l_entry->ifa_addr)
+ {
+ l_entry->ifa_dstaddr = (struct sockaddr *)l_addr;
+ }
+ else
+ {
+ l_entry->ifa_addr = (struct sockaddr *)l_addr;
+ }
+ }
+ else if(l_rta->rta_type == IFA_LOCAL)
+ {
+ if(l_entry->ifa_addr)
+ {
+ l_entry->ifa_dstaddr = l_entry->ifa_addr;
+ }
+ l_entry->ifa_addr = (struct sockaddr *)l_addr;
+ }
+ else
+ {
+ l_entry->ifa_broadaddr = (struct sockaddr *)l_addr;
+ }
+ l_addr += NLMSG_ALIGN(l_addrLen);
+ break;
+ }
+ case IFA_LABEL:
+ strncpy(l_name, l_rtaData, l_rtaDataSize);
+ l_name[l_rtaDataSize] = '\0';
+ l_entry->ifa_name = l_name;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if(l_entry->ifa_addr && (l_entry->ifa_addr->sa_family == AF_INET || l_entry->ifa_addr->sa_family == AF_INET6))
+ {
+ unsigned l_maxPrefix = (l_entry->ifa_addr->sa_family == AF_INET ? 32 : 128);
+ unsigned l_prefix = (l_info->ifa_prefixlen > l_maxPrefix ? l_maxPrefix : l_info->ifa_prefixlen);
+ unsigned char l_mask[16] = {0};
+ unsigned i;
+ for(i=0; i<(l_prefix/8); ++i)
+ {
+ l_mask[i] = 0xff;
+ }
+ if(l_prefix % 8)
+ {
+ l_mask[i] = 0xff << (8 - (l_prefix % 8));
+ }
+
+ makeSockaddr(l_entry->ifa_addr->sa_family, (struct sockaddr *)l_addr, l_mask, l_maxPrefix / 8);
+ l_entry->ifa_netmask = (struct sockaddr *)l_addr;
+ }
+
+ addToEnd(p_resultList, l_entry);
+ return 0;
+}
+
+static int interpretLinks(int p_socket, pid_t p_pid, NetlinkList *p_netlinkList, struct ifaddrs **p_resultList)
+{
+
+ int l_numLinks = 0;
+ for(; p_netlinkList; p_netlinkList = p_netlinkList->m_next)
+ {
+ unsigned int l_nlsize = p_netlinkList->m_size;
+ struct nlmsghdr *l_hdr;
+ for(l_hdr = p_netlinkList->m_data; NLMSG_OK(l_hdr, l_nlsize); l_hdr = NLMSG_NEXT(l_hdr, l_nlsize))
+ {
+ if((pid_t)l_hdr->nlmsg_pid != p_pid || (int)l_hdr->nlmsg_seq != p_socket)
+ {
+ continue;
+ }
+
+ if(l_hdr->nlmsg_type == NLMSG_DONE)
+ {
+ break;
+ }
+
+ if(l_hdr->nlmsg_type == RTM_NEWLINK)
+ {
+ if(interpretLink(l_hdr, p_resultList) == -1)
+ {
+ return -1;
+ }
+ ++l_numLinks;
+ }
+ }
+ }
+ return l_numLinks;
+}
+
+static int interpretAddrs(int p_socket, pid_t p_pid, NetlinkList *p_netlinkList, struct ifaddrs **p_resultList, int p_numLinks)
+{
+ for(; p_netlinkList; p_netlinkList = p_netlinkList->m_next)
+ {
+ unsigned int l_nlsize = p_netlinkList->m_size;
+ struct nlmsghdr *l_hdr;
+ for(l_hdr = p_netlinkList->m_data; NLMSG_OK(l_hdr, l_nlsize); l_hdr = NLMSG_NEXT(l_hdr, l_nlsize))
+ {
+ if((pid_t)l_hdr->nlmsg_pid != p_pid || (int)l_hdr->nlmsg_seq != p_socket)
+ {
+ continue;
+ }
+
+ if(l_hdr->nlmsg_type == NLMSG_DONE)
+ {
+ break;
+ }
+
+ if(l_hdr->nlmsg_type == RTM_NEWADDR)
+ {
+ if (interpretAddr(l_hdr, p_resultList, p_numLinks) == -1)
+ {
+ return -1;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+int getifaddrs(struct ifaddrs **ifap)
+{
+ int l_socket;
+ int l_result;
+ int l_numLinks;
+ pid_t l_pid;
+ NetlinkList *l_linkResults;
+ NetlinkList *l_addrResults;
+
+ if(!ifap)
+ {
+ return -1;
+ }
+ *ifap = NULL;
+
+ l_socket = netlink_socket(&l_pid);
+ if(l_socket < 0)
+ {
+ return -1;
+ }
+
+ l_linkResults = getResultList(l_socket, RTM_GETLINK, l_pid);
+ if(!l_linkResults)
+ {
+ close(l_socket);
+ return -1;
+ }
+
+ l_addrResults = getResultList(l_socket, RTM_GETADDR, l_pid);
+ if(!l_addrResults)
+ {
+ close(l_socket);
+ freeResultList(l_linkResults);
+ return -1;
+ }
+
+ l_result = 0;
+ l_numLinks = interpretLinks(l_socket, l_pid, l_linkResults, ifap);
+ if(l_numLinks == -1 || interpretAddrs(l_socket, l_pid, l_addrResults, ifap, l_numLinks) == -1)
+ {
+ l_result = -1;
+ }
+
+ freeResultList(l_linkResults);
+ freeResultList(l_addrResults);
+ close(l_socket);
+ return l_result;
+}
+
+void freeifaddrs(struct ifaddrs *ifa)
+{
+ struct ifaddrs *l_cur;
+ while(ifa)
+ {
+ l_cur = ifa;
+ ifa = ifa->ifa_next;
+ uv__free(l_cur);
+ }
+}
diff --git a/Utilities/cmlibuv/src/unix/async.c b/Utilities/cmlibuv/src/unix/async.c
new file mode 100644
index 0000000..5f58fb8
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/async.c
@@ -0,0 +1,253 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/* This file contains both the uv__async internal infrastructure and the
+ * user-facing uv_async_t functions.
+ */
+
+#include "uv.h"
+#include "internal.h"
+#include "atomic-ops.h"
+
+#include <errno.h>
+#include <stdio.h> /* snprintf() */
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sched.h> /* sched_yield() */
+
+#ifdef __linux__
+#include <sys/eventfd.h>
+#endif
+
+static void uv__async_send(uv_loop_t* loop);
+static int uv__async_start(uv_loop_t* loop);
+
+
+int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
+ int err;
+
+ err = uv__async_start(loop);
+ if (err)
+ return err;
+
+ uv__handle_init(loop, (uv_handle_t*)handle, UV_ASYNC);
+ handle->async_cb = async_cb;
+ handle->pending = 0;
+
+ QUEUE_INSERT_TAIL(&loop->async_handles, &handle->queue);
+ uv__handle_start(handle);
+
+ return 0;
+}
+
+
+int uv_async_send(uv_async_t* handle) {
+ /* Do a cheap read first. */
+ if (ACCESS_ONCE(int, handle->pending) != 0)
+ return 0;
+
+ /* Tell the other thread we're busy with the handle. */
+ if (cmpxchgi(&handle->pending, 0, 1) != 0)
+ return 0;
+
+ /* Wake up the other thread's event loop. */
+ uv__async_send(handle->loop);
+
+ /* Tell the other thread we're done. */
+ if (cmpxchgi(&handle->pending, 1, 2) != 1)
+ abort();
+
+ return 0;
+}
+
+
+/* Only call this from the event loop thread. */
+static int uv__async_spin(uv_async_t* handle) {
+ int i;
+ int rc;
+
+ for (;;) {
+ /* 997 is not completely chosen at random. It's a prime number, acyclical
+ * by nature, and should therefore hopefully dampen sympathetic resonance.
+ */
+ for (i = 0; i < 997; i++) {
+ /* rc=0 -- handle is not pending.
+ * rc=1 -- handle is pending, other thread is still working with it.
+ * rc=2 -- handle is pending, other thread is done.
+ */
+ rc = cmpxchgi(&handle->pending, 2, 0);
+
+ if (rc != 1)
+ return rc;
+
+ /* Other thread is busy with this handle, spin until it's done. */
+ cpu_relax();
+ }
+
+ /* Yield the CPU. We may have preempted the other thread while it's
+ * inside the critical section and if it's running on the same CPU
+ * as us, we'll just burn CPU cycles until the end of our time slice.
+ */
+ sched_yield();
+ }
+}
+
+
+void uv__async_close(uv_async_t* handle) {
+ uv__async_spin(handle);
+ QUEUE_REMOVE(&handle->queue);
+ uv__handle_stop(handle);
+}
+
+
+static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
+ char buf[1024];
+ ssize_t r;
+ QUEUE queue;
+ QUEUE* q;
+ uv_async_t* h;
+
+ assert(w == &loop->async_io_watcher);
+
+ for (;;) {
+ r = read(w->fd, buf, sizeof(buf));
+
+ if (r == sizeof(buf))
+ continue;
+
+ if (r != -1)
+ break;
+
+ if (errno == EAGAIN || errno == EWOULDBLOCK)
+ break;
+
+ if (errno == EINTR)
+ continue;
+
+ abort();
+ }
+
+ QUEUE_MOVE(&loop->async_handles, &queue);
+ while (!QUEUE_EMPTY(&queue)) {
+ q = QUEUE_HEAD(&queue);
+ h = QUEUE_DATA(q, uv_async_t, queue);
+
+ QUEUE_REMOVE(q);
+ QUEUE_INSERT_TAIL(&loop->async_handles, q);
+
+ if (0 == uv__async_spin(h))
+ continue; /* Not pending. */
+
+ if (h->async_cb == NULL)
+ continue;
+
+ h->async_cb(h);
+ }
+}
+
+
+static void uv__async_send(uv_loop_t* loop) {
+ const void* buf;
+ ssize_t len;
+ int fd;
+ int r;
+
+ buf = "";
+ len = 1;
+ fd = loop->async_wfd;
+
+#if defined(__linux__)
+ if (fd == -1) {
+ static const uint64_t val = 1;
+ buf = &val;
+ len = sizeof(val);
+ fd = loop->async_io_watcher.fd; /* eventfd */
+ }
+#endif
+
+ do
+ r = write(fd, buf, len);
+ while (r == -1 && errno == EINTR);
+
+ if (r == len)
+ return;
+
+ if (r == -1)
+ if (errno == EAGAIN || errno == EWOULDBLOCK)
+ return;
+
+ abort();
+}
+
+
+static int uv__async_start(uv_loop_t* loop) {
+ int pipefd[2];
+ int err;
+
+ if (loop->async_io_watcher.fd != -1)
+ return 0;
+
+#ifdef __linux__
+ err = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
+ if (err < 0)
+ return UV__ERR(errno);
+
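+ /* The eventfd serves as both the read and the write end; uv__async_send()
+ * detects the -1 write fd and writes to the eventfd instead. */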
+ pipefd[0] = err;
+ pipefd[1] = -1;
+#else
+ err = uv__make_pipe(pipefd, UV__F_NONBLOCK);
+ if (err < 0)
+ return err;
+#endif
+
+ uv__io_init(&loop->async_io_watcher, uv__async_io, pipefd[0]);
+ uv__io_start(loop, &loop->async_io_watcher, POLLIN);
+ loop->async_wfd = pipefd[1];
+
+ return 0;
+}
+
+
+int uv__async_fork(uv_loop_t* loop) {
+ if (loop->async_io_watcher.fd == -1) /* never started */
+ return 0;
+
+ uv__async_stop(loop);
+
+ return uv__async_start(loop);
+}
+
+
+void uv__async_stop(uv_loop_t* loop) {
+ if (loop->async_io_watcher.fd == -1)
+ return;
+
+ if (loop->async_wfd != -1) {
+ if (loop->async_wfd != loop->async_io_watcher.fd)
+ uv__close(loop->async_wfd);
+ loop->async_wfd = -1;
+ }
+
+ uv__io_stop(loop, &loop->async_io_watcher, POLLIN);
+ uv__close(loop->async_io_watcher.fd);
+ loop->async_io_watcher.fd = -1;
+}
diff --git a/Utilities/cmlibuv/src/unix/atomic-ops.h b/Utilities/cmlibuv/src/unix/atomic-ops.h
new file mode 100644
index 0000000..2518a06
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/atomic-ops.h
@@ -0,0 +1,67 @@
+/* Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef UV_ATOMIC_OPS_H_
+#define UV_ATOMIC_OPS_H_
+
+#include "internal.h" /* UV_UNUSED */
+
+#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
+#include <atomic.h>
+#endif
+
+UV_UNUSED(static int cmpxchgi(int* ptr, int oldval, int newval));
+UV_UNUSED(static void cpu_relax(void));
+
+/* Prefer hand-rolled assembly over the gcc builtins because the latter also
+ * issue full memory barriers.
+ */
+UV_UNUSED(static int cmpxchgi(int* ptr, int oldval, int newval)) {
+#if defined(__i386__) || defined(__x86_64__)
+ int out;
+ __asm__ __volatile__ ("lock; cmpxchg %2, %1;"
+ : "=a" (out), "+m" (*(volatile int*) ptr)
+ : "r" (newval), "0" (oldval)
+ : "memory");
+ return out;
+#elif defined(_AIX) && defined(__ibmxl__)
+ /* FIXME: This is not actually atomic but XLClang 16.1 for AIX
+ does not provide __sync_val_compare_and_swap or an equivalent.
+ Its documentation suggests using C++11 atomics but this is C. */
+ __compare_and_swap((volatile int*)ptr, &oldval, newval);
+ return oldval;
+#elif defined(__MVS__)
+ unsigned int op4;
+ if (__plo_CSST(ptr, (unsigned int*) &oldval, newval,
+ (unsigned int*) ptr, *ptr, &op4))
+ return oldval;
+ else
+ return op4;
+#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
+ return atomic_cas_uint((uint_t *)ptr, (uint_t)oldval, (uint_t)newval);
+#else
+ return __sync_val_compare_and_swap(ptr, oldval, newval);
+#endif
+}
+
+UV_UNUSED(static void cpu_relax(void)) {
+#if defined(__i386__) || defined(__x86_64__)
+ __asm__ __volatile__ ("rep; nop"); /* a.k.a. PAUSE */
+#elif (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__)
+ __asm__ volatile("yield");
+#endif
+}
+
+#endif /* UV_ATOMIC_OPS_H_ */
diff --git a/Utilities/cmlibuv/src/unix/bsd-ifaddrs.c b/Utilities/cmlibuv/src/unix/bsd-ifaddrs.c
new file mode 100644
index 0000000..5223ab4
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/bsd-ifaddrs.c
@@ -0,0 +1,163 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <errno.h>
+#include <stddef.h>
+
+#include <ifaddrs.h>
+#include <net/if.h>
+#if !defined(__CYGWIN__) && !defined(__MSYS__)
+#include <net/if_dl.h>
+#endif
+
+#if defined(__HAIKU__)
+#define IFF_RUNNING IFF_LINK
+#endif
+
+static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) {
+ if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
+ return 1;
+ if (ent->ifa_addr == NULL)
+ return 1;
+#if !defined(__CYGWIN__) && !defined(__MSYS__)
+ /*
+ * If `exclude_type` is `UV__EXCLUDE_IFPHYS`, just check whether `sa_family`
+ * equals `AF_LINK`. Otherwise, the result depends on the operating system
+ * and uses either `AF_LINK` or `PF_INET`.
+ */
+ if (exclude_type == UV__EXCLUDE_IFPHYS)
+ return (ent->ifa_addr->sa_family != AF_LINK);
+#endif
+#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__DragonFly__) || \
+ defined(__HAIKU__)
+ /*
+ * On BSD getifaddrs returns information related to the raw underlying
+ * devices. We're not interested in this information.
+ */
+ if (ent->ifa_addr->sa_family == AF_LINK)
+ return 1;
+#elif defined(__NetBSD__) || defined(__OpenBSD__)
+ if (ent->ifa_addr->sa_family != PF_INET &&
+ ent->ifa_addr->sa_family != PF_INET6)
+ return 1;
+#endif
+ return 0;
+}
+
+int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
+ struct ifaddrs* addrs;
+ struct ifaddrs* ent;
+ uv_interface_address_t* address;
+#if !(defined(__CYGWIN__) || defined(__MSYS__))
+ int i;
+#endif
+
+ *count = 0;
+ *addresses = NULL;
+
+ if (getifaddrs(&addrs) != 0)
+ return UV__ERR(errno);
+
+ /* Count the number of interfaces */
+ for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+ if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
+ continue;
+ (*count)++;
+ }
+
+ if (*count == 0) {
+ freeifaddrs(addrs);
+ return 0;
+ }
+
+ /* Make sure the memory is initialized to zero using calloc(). */
+ *addresses = uv__calloc(*count, sizeof(**addresses));
+
+ if (*addresses == NULL) {
+ freeifaddrs(addrs);
+ return UV_ENOMEM;
+ }
+
+ address = *addresses;
+
+ for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+ if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
+ continue;
+
+ address->name = uv__strdup(ent->ifa_name);
+
+ if (ent->ifa_addr->sa_family == AF_INET6) {
+ address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
+ } else {
+ address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
+ }
+
+ if (ent->ifa_netmask == NULL) {
+ memset(&address->netmask, 0, sizeof(address->netmask));
+ } else if (ent->ifa_netmask->sa_family == AF_INET6) {
+ address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
+ } else {
+ address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
+ }
+
+ address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK);
+
+ address++;
+ }
+
+#if !(defined(__CYGWIN__) || defined(__MSYS__))
+ /* Fill in physical addresses for each interface */
+ for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+ if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFPHYS))
+ continue;
+
+ address = *addresses;
+
+ for (i = 0; i < *count; i++) {
+ if (strcmp(address->name, ent->ifa_name) == 0) {
+ struct sockaddr_dl* sa_addr;
+ sa_addr = (struct sockaddr_dl*)(ent->ifa_addr);
+ memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr));
+ }
+ address++;
+ }
+ }
+#endif
+
+ freeifaddrs(addrs);
+
+ return 0;
+}
+
+
+void uv_free_interface_addresses(uv_interface_address_t* addresses,
+ int count) {
+ int i;
+
+ for (i = 0; i < count; i++) {
+ uv__free(addresses[i].name);
+ }
+
+ uv__free(addresses);
+}
diff --git a/Utilities/cmlibuv/src/unix/bsd-proctitle.c b/Utilities/cmlibuv/src/unix/bsd-proctitle.c
new file mode 100644
index 0000000..723b81c
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/bsd-proctitle.c
@@ -0,0 +1,100 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <sys/types.h>
+#include <unistd.h>
+
+
+static uv_mutex_t process_title_mutex;
+static uv_once_t process_title_mutex_once = UV_ONCE_INIT;
+static char* process_title;
+
+
+static void init_process_title_mutex_once(void) {
+ if (uv_mutex_init(&process_title_mutex))
+ abort();
+}
+
+
+void uv__process_title_cleanup(void) {
+ /* TODO(bnoordhuis) uv_mutex_destroy(&process_title_mutex)
+ * and reset process_title_mutex_once?
+ */
+}
+
+
+char** uv_setup_args(int argc, char** argv) {
+ process_title = argc > 0 ? uv__strdup(argv[0]) : NULL;
+ return argv;
+}
+
+
+int uv_set_process_title(const char* title) {
+ char* new_title;
+
+ new_title = uv__strdup(title);
+ if (new_title == NULL)
+ return UV_ENOMEM;
+
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+
+ uv__free(process_title);
+ process_title = new_title;
+ setproctitle("%s", title);
+
+ uv_mutex_unlock(&process_title_mutex);
+
+ return 0;
+}
+
+
+int uv_get_process_title(char* buffer, size_t size) {
+ size_t len;
+
+ if (buffer == NULL || size == 0)
+ return UV_EINVAL;
+
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+
+ if (process_title != NULL) {
+ len = strlen(process_title) + 1;
+
+ if (size < len) {
+ uv_mutex_unlock(&process_title_mutex);
+ return UV_ENOBUFS;
+ }
+
+ memcpy(buffer, process_title, len);
+ } else {
+ /* Nothing has been recorded yet; hand back an empty string. The
+ * non-empty case is already NUL-terminated because len includes the
+ * terminating byte copied by memcpy() above, so writing one more byte
+ * past it could step outside an exactly sized buffer. */
+ buffer[0] = '\0';
+ }
+
+ uv_mutex_unlock(&process_title_mutex);
+
+ return 0;
+}
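
A minimal usage sketch for the process-title API implemented above; the title string is an arbitrary example, and uv_setup_args() has to run first so that argv[0] is captured:

  #include <uv.h>
  #include <stdio.h>

  int main(int argc, char** argv) {
    char title[64];

    argv = uv_setup_args(argc, argv);  /* capture argv[0] before changing it */

    if (uv_set_process_title("example-worker") == 0 &&
        uv_get_process_title(title, sizeof(title)) == 0)
      printf("process title is now: %s\n", title);

    return 0;
  }
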
diff --git a/Utilities/cmlibuv/src/unix/cmake-bootstrap.c b/Utilities/cmlibuv/src/unix/cmake-bootstrap.c
new file mode 100644
index 0000000..6cf42fa
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/cmake-bootstrap.c
@@ -0,0 +1,155 @@
+#include "uv.h"
+#include "internal.h"
+
+void uv__process_title_cleanup(void) {
+}
+
+void uv__threadpool_cleanup(void) {
+}
+
+int uv__tcp_nodelay(int fd, int on) {
+ errno = EINVAL;
+ return -1;
+}
+
+int uv__tcp_keepalive(int fd, int on, unsigned int delay) {
+ errno = EINVAL;
+ return -1;
+}
+
+int uv_tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb) {
+ return -EINVAL;
+}
+
+int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock) {
+ return -EINVAL;
+}
+
+void uv__tcp_close(uv_tcp_t* handle) {
+}
+
+void uv__udp_close(uv_udp_t* handle) {
+}
+
+void uv__udp_finish_close(uv_udp_t* handle) {
+}
+
+void uv__fs_poll_close(uv_fs_poll_t* handle) {
+}
+
+int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
+ return 0;
+}
+
+void uv__async_close(uv_async_t* handle) {
+}
+
+int uv__async_fork(uv_loop_t* loop) {
+ return 0;
+}
+
+void uv__async_stop(uv_loop_t* loop) {
+}
+
+void uv__work_submit(uv_loop_t* loop, struct uv__work* w,
+ enum uv__work_kind kind,
+ void (*work)(struct uv__work* w),
+ void (*done)(struct uv__work* w, int status)) {
+ abort();
+}
+
+void uv__work_done(uv_async_t* handle) {
+}
+
+int uv__pthread_atfork(void (*prepare)(void), void (*parent)(void),
+ void (*child)(void)) {
+ return 0;
+}
+
+int uv__pthread_sigmask(int how, const sigset_t* set, sigset_t* oset) {
+ return 0;
+}
+
+int uv_mutex_init(uv_mutex_t* mutex) {
+ return 0;
+}
+
+void uv_mutex_destroy(uv_mutex_t* mutex) {
+}
+
+void uv_mutex_lock(uv_mutex_t* mutex) {
+}
+
+void uv_mutex_unlock(uv_mutex_t* mutex) {
+}
+
+int uv_rwlock_init(uv_rwlock_t* rwlock) {
+ return 0;
+}
+
+void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
+}
+
+void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
+}
+
+void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
+}
+
+void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
+}
+
+void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
+}
+
+void uv_once(uv_once_t* guard, void (*callback)(void)) {
+ if (*guard) {
+ return;
+ }
+ *guard = 1;
+ callback();
+}
+
+#if defined(__linux__)
+int uv__accept4(int fd, struct sockaddr* addr, socklen_t* addrlen, int flags) {
+ errno = ENOSYS;
+ return -1;
+}
+
+int uv__dup3(int oldfd, int newfd, int flags) {
+ errno = ENOSYS;
+ return -1;
+}
+
+int uv__pipe2(int pipefd[2], int flags) {
+ errno = ENOSYS;
+ return -1;
+}
+
+ssize_t uv__preadv(int fd, const struct iovec *iov, int iovcnt,
+ int64_t offset) {
+ errno = ENOSYS;
+ return -1;
+}
+
+ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt,
+ int64_t offset) {
+ errno = ENOSYS;
+ return -1;
+}
+
+int uv__utimesat(int dirfd, const char* path, const struct timespec times[2],
+ int flags) {
+ errno = ENOSYS;
+ return -1;
+}
+
+int uv__statx(int dirfd,
+ const char* path,
+ int flags,
+ unsigned int mask,
+ struct uv__statx* statxbuf) {
+ errno = ENOSYS;
+ return -1;
+}
+#endif
diff --git a/Utilities/cmlibuv/src/unix/core.c b/Utilities/cmlibuv/src/unix/core.c
new file mode 100644
index 0000000..e6d61ee
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/core.c
@@ -0,0 +1,1625 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stddef.h> /* NULL */
+#include <stdio.h> /* printf */
+#include <stdlib.h>
+#include <string.h> /* strerror */
+#include <errno.h>
+#include <assert.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h> /* O_CLOEXEC */
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <limits.h> /* INT_MAX, PATH_MAX, IOV_MAX */
+#include <sys/uio.h> /* writev */
+#include <sys/resource.h> /* getrusage */
+#include <pwd.h>
+#include <sched.h>
+#include <sys/utsname.h>
+#include <sys/time.h>
+
+#ifdef __sun
+# include <sys/filio.h>
+# include <sys/types.h>
+# include <sys/wait.h>
+#endif
+
+#if defined(__APPLE__)
+# include <sys/filio.h>
+# endif /* defined(__APPLE__) */
+
+
+#if defined(__APPLE__) && !TARGET_OS_IPHONE
+# include <crt_externs.h>
+# include <mach-o/dyld.h> /* _NSGetExecutablePath */
+# define environ (*_NSGetEnviron())
+#else /* defined(__APPLE__) && !TARGET_OS_IPHONE */
+extern char** environ;
+#endif /* !(defined(__APPLE__) && !TARGET_OS_IPHONE) */
+
+
+#if defined(__DragonFly__) || \
+ defined(__FreeBSD__) || \
+ defined(__FreeBSD_kernel__) || \
+ defined(__NetBSD__) || \
+ defined(__OpenBSD__)
+# include <sys/sysctl.h>
+# include <sys/filio.h>
+# include <sys/wait.h>
+# include <sys/param.h>
+# include <sys/cpuset.h>
+# if defined(__FreeBSD__)
+# define uv__accept4 accept4
+# endif
+# if defined(__NetBSD__)
+# define uv__accept4(a, b, c, d) paccept((a), (b), (c), NULL, (d))
+# endif
+#endif
+
+#if defined(__MVS__)
+#include <sys/ioctl.h>
+#endif
+
+#if defined(__linux__)
+# include <sys/syscall.h>
+# define uv__accept4 accept4
+#endif
+
+static int uv__run_pending(uv_loop_t* loop);
+
+/* Verify that uv_buf_t is ABI-compatible with struct iovec. */
+STATIC_ASSERT(sizeof(uv_buf_t) == sizeof(struct iovec));
+STATIC_ASSERT(sizeof(&((uv_buf_t*) 0)->base) ==
+ sizeof(((struct iovec*) 0)->iov_base));
+STATIC_ASSERT(sizeof(&((uv_buf_t*) 0)->len) ==
+ sizeof(((struct iovec*) 0)->iov_len));
+STATIC_ASSERT(offsetof(uv_buf_t, base) == offsetof(struct iovec, iov_base));
+STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len));
+
+
+uint64_t uv_hrtime(void) {
+ return uv__hrtime(UV_CLOCK_PRECISE);
+}
+
+
+void uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
+ assert(!uv__is_closing(handle));
+
+ handle->flags |= UV_HANDLE_CLOSING;
+ handle->close_cb = close_cb;
+
+ switch (handle->type) {
+ case UV_NAMED_PIPE:
+ uv__pipe_close((uv_pipe_t*)handle);
+ break;
+
+ case UV_TTY:
+ uv__stream_close((uv_stream_t*)handle);
+ break;
+
+ case UV_TCP:
+ uv__tcp_close((uv_tcp_t*)handle);
+ break;
+
+ case UV_UDP:
+ uv__udp_close((uv_udp_t*)handle);
+ break;
+
+ case UV_PREPARE:
+ uv__prepare_close((uv_prepare_t*)handle);
+ break;
+
+ case UV_CHECK:
+ uv__check_close((uv_check_t*)handle);
+ break;
+
+ case UV_IDLE:
+ uv__idle_close((uv_idle_t*)handle);
+ break;
+
+ case UV_ASYNC:
+ uv__async_close((uv_async_t*)handle);
+ break;
+
+ case UV_TIMER:
+ uv__timer_close((uv_timer_t*)handle);
+ break;
+
+ case UV_PROCESS:
+ uv__process_close((uv_process_t*)handle);
+ break;
+
+ case UV_FS_EVENT:
+ uv__fs_event_close((uv_fs_event_t*)handle);
+ break;
+
+ case UV_POLL:
+ uv__poll_close((uv_poll_t*)handle);
+ break;
+
+ case UV_FS_POLL:
+ uv__fs_poll_close((uv_fs_poll_t*)handle);
+ /* Poll handles use file system requests, and one of them may still be
+ * running. The poll code will call uv__make_close_pending() for us. */
+ return;
+
+ case UV_SIGNAL:
+ uv__signal_close((uv_signal_t*) handle);
+ break;
+
+ default:
+ assert(0);
+ }
+
+ uv__make_close_pending(handle);
+}
+
+int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value) {
+ int r;
+ int fd;
+ socklen_t len;
+
+ if (handle == NULL || value == NULL)
+ return UV_EINVAL;
+
+ if (handle->type == UV_TCP || handle->type == UV_NAMED_PIPE)
+ fd = uv__stream_fd((uv_stream_t*) handle);
+ else if (handle->type == UV_UDP)
+ fd = ((uv_udp_t *) handle)->io_watcher.fd;
+ else
+ return UV_ENOTSUP;
+
+ len = sizeof(*value);
+
+ if (*value == 0)
+ r = getsockopt(fd, SOL_SOCKET, optname, value, &len);
+ else
+ r = setsockopt(fd, SOL_SOCKET, optname, (const void*) value, len);
+
+ if (r < 0)
+ return UV__ERR(errno);
+
+ return 0;
+}
+
+void uv__make_close_pending(uv_handle_t* handle) {
+ assert(handle->flags & UV_HANDLE_CLOSING);
+ assert(!(handle->flags & UV_HANDLE_CLOSED));
+ handle->next_closing = handle->loop->closing_handles;
+ handle->loop->closing_handles = handle;
+}
+
+int uv__getiovmax(void) {
+#if defined(IOV_MAX)
+ return IOV_MAX;
+#elif defined(_SC_IOV_MAX)
+ static int iovmax_cached = -1;
+ int iovmax;
+
+ iovmax = uv__load_relaxed(&iovmax_cached);
+ if (iovmax != -1)
+ return iovmax;
+
+ /* On some embedded devices (e.g. arm-linux-uclibc based IP cameras),
+ * sysconf(_SC_IOV_MAX) cannot report the correct value: it returns -1
+ * with errno set to EINPROGRESS. Degrade gracefully to a value of 1.
+ */
+ iovmax = sysconf(_SC_IOV_MAX);
+ if (iovmax == -1)
+ iovmax = 1;
+
+ uv__store_relaxed(&iovmax_cached, iovmax);
+
+ return iovmax;
+#else
+ return 1024;
+#endif
+}
+
+
+static void uv__finish_close(uv_handle_t* handle) {
+ uv_signal_t* sh;
+
+ /* Note: while the handle is in the UV_HANDLE_CLOSING state now, it's still
+ * possible for it to be active in the sense that uv__is_active() returns
+ * true.
+ *
+ * A good example is when the user calls uv_shutdown(), immediately followed
+ * by uv_close(). The handle is considered active at this point because the
+ * completion of the shutdown req is still pending.
+ */
+ assert(handle->flags & UV_HANDLE_CLOSING);
+ assert(!(handle->flags & UV_HANDLE_CLOSED));
+ handle->flags |= UV_HANDLE_CLOSED;
+
+ switch (handle->type) {
+ case UV_PREPARE:
+ case UV_CHECK:
+ case UV_IDLE:
+ case UV_ASYNC:
+ case UV_TIMER:
+ case UV_PROCESS:
+ case UV_FS_EVENT:
+ case UV_FS_POLL:
+ case UV_POLL:
+ break;
+
+ case UV_SIGNAL:
+ /* If there are any caught signals "trapped" in the signal pipe,
+ * we can't call the close callback yet. Reinserting the handle
+ * into the closing queue makes the event loop spin but that's
+ * okay because we only need to deliver the pending events.
+ */
+ sh = (uv_signal_t*) handle;
+ if (sh->caught_signals > sh->dispatched_signals) {
+ handle->flags ^= UV_HANDLE_CLOSED;
+ uv__make_close_pending(handle); /* Back into the queue. */
+ return;
+ }
+ break;
+
+ case UV_NAMED_PIPE:
+ case UV_TCP:
+ case UV_TTY:
+ uv__stream_destroy((uv_stream_t*)handle);
+ break;
+
+ case UV_UDP:
+ uv__udp_finish_close((uv_udp_t*)handle);
+ break;
+
+ default:
+ assert(0);
+ break;
+ }
+
+ uv__handle_unref(handle);
+ QUEUE_REMOVE(&handle->handle_queue);
+
+ if (handle->close_cb) {
+ handle->close_cb(handle);
+ }
+}
+
+
+static void uv__run_closing_handles(uv_loop_t* loop) {
+ uv_handle_t* p;
+ uv_handle_t* q;
+
+ p = loop->closing_handles;
+ loop->closing_handles = NULL;
+
+ while (p) {
+ q = p->next_closing;
+ uv__finish_close(p);
+ p = q;
+ }
+}
+
+
+int uv_is_closing(const uv_handle_t* handle) {
+ return uv__is_closing(handle);
+}
+
+
+int uv_backend_fd(const uv_loop_t* loop) {
+ return loop->backend_fd;
+}
+
+
+int uv_backend_timeout(const uv_loop_t* loop) {
+ if (loop->stop_flag != 0)
+ return 0;
+
+ if (!uv__has_active_handles(loop) && !uv__has_active_reqs(loop))
+ return 0;
+
+ if (!QUEUE_EMPTY(&loop->idle_handles))
+ return 0;
+
+ if (!QUEUE_EMPTY(&loop->pending_queue))
+ return 0;
+
+ if (loop->closing_handles)
+ return 0;
+
+ return uv__next_timeout(loop);
+}
+
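uv_backend_fd() and uv_backend_timeout() exist so the loop can be embedded in an external poller. A hedged sketch of that pattern, with poll(2) standing in for whatever host event loop does the waiting:

  #include <uv.h>
  #include <poll.h>

  /* Drain a libuv loop from an external poller; sketch only. */
  static void run_embedded(uv_loop_t* loop) {
    struct pollfd pfd;

    while (uv_loop_alive(loop)) {
      pfd.fd = uv_backend_fd(loop);
      pfd.events = POLLIN;
      pfd.revents = 0;

      /* uv_backend_timeout() returns -1 when it is safe to block forever,
       * which is also what poll(2) expects for an infinite timeout. */
      poll(&pfd, 1, uv_backend_timeout(loop));

      /* Process whatever became ready without blocking again. */
      uv_run(loop, UV_RUN_NOWAIT);
    }
  }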
+
+static int uv__loop_alive(const uv_loop_t* loop) {
+ return uv__has_active_handles(loop) ||
+ uv__has_active_reqs(loop) ||
+ loop->closing_handles != NULL;
+}
+
+
+int uv_loop_alive(const uv_loop_t* loop) {
+ return uv__loop_alive(loop);
+}
+
+
+int uv_run(uv_loop_t* loop, uv_run_mode mode) {
+ int timeout;
+ int r;
+ int ran_pending;
+
+ r = uv__loop_alive(loop);
+ if (!r)
+ uv__update_time(loop);
+
+ while (r != 0 && loop->stop_flag == 0) {
+ uv__update_time(loop);
+ uv__run_timers(loop);
+ ran_pending = uv__run_pending(loop);
+ uv__run_idle(loop);
+ uv__run_prepare(loop);
+
+ timeout = 0;
+ if ((mode == UV_RUN_ONCE && !ran_pending) || mode == UV_RUN_DEFAULT)
+ timeout = uv_backend_timeout(loop);
+
+ uv__io_poll(loop, timeout);
+
+ /* Run one final update on the provider_idle_time in case uv__io_poll
+ * returned because the timeout expired, but no events were received. This
+ * call will be ignored if the provider_entry_time was either never set (if
+ * the timeout == 0) or was already updated because an event was received.
+ */
+ uv__metrics_update_idle_time(loop);
+
+ uv__run_check(loop);
+ uv__run_closing_handles(loop);
+
+ if (mode == UV_RUN_ONCE) {
+ /* UV_RUN_ONCE implies forward progress: at least one callback must have
+ * been invoked when it returns. uv__io_poll() can return without doing
+ * I/O (meaning: no callbacks) when its timeout expires - which means we
+ * have pending timers that satisfy the forward progress constraint.
+ *
+ * UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from
+ * the check.
+ */
+ uv__update_time(loop);
+ uv__run_timers(loop);
+ }
+
+ r = uv__loop_alive(loop);
+ if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT)
+ break;
+ }
+
+ /* The if statement lets gcc compile it to a conditional store. Avoids
+ * dirtying a cache line.
+ */
+ if (loop->stop_flag != 0)
+ loop->stop_flag = 0;
+
+ return r;
+}
+
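For context, the run modes handled above map onto ordinary caller code along these lines (a minimal sketch; the one-shot 100 ms timer is an arbitrary example):

  #include <uv.h>
  #include <stdio.h>

  static void on_timer(uv_timer_t* handle) {
    printf("tick\n");
    uv_close((uv_handle_t*) handle, NULL);  /* nothing left alive afterwards */
  }

  int main(void) {
    uv_loop_t loop;
    uv_timer_t timer;

    uv_loop_init(&loop);
    uv_timer_init(&loop, &timer);
    uv_timer_start(&timer, on_timer, 100, 0);  /* fire once after 100 ms */

    uv_run(&loop, UV_RUN_DEFAULT);  /* returns once no handles remain active */

    return uv_loop_close(&loop);
  }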
+
+void uv_update_time(uv_loop_t* loop) {
+ uv__update_time(loop);
+}
+
+
+int uv_is_active(const uv_handle_t* handle) {
+ return uv__is_active(handle);
+}
+
+
+/* Open a socket in non-blocking close-on-exec mode, atomically if possible. */
+int uv__socket(int domain, int type, int protocol) {
+ int sockfd;
+ int err;
+
+#if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC)
+ sockfd = socket(domain, type | SOCK_NONBLOCK | SOCK_CLOEXEC, protocol);
+ if (sockfd != -1)
+ return sockfd;
+
+ if (errno != EINVAL)
+ return UV__ERR(errno);
+#endif
+
+ sockfd = socket(domain, type, protocol);
+ if (sockfd == -1)
+ return UV__ERR(errno);
+
+ err = uv__nonblock(sockfd, 1);
+ if (err == 0)
+ err = uv__cloexec(sockfd, 1);
+
+ if (err) {
+ uv__close(sockfd);
+ return err;
+ }
+
+#if defined(SO_NOSIGPIPE)
+ {
+ int on = 1;
+ setsockopt(sockfd, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on));
+ }
+#endif
+
+ return sockfd;
+}
+
+/* get a file pointer to a file in read-only and close-on-exec mode */
+FILE* uv__open_file(const char* path) {
+ int fd;
+ FILE* fp;
+
+ fd = uv__open_cloexec(path, O_RDONLY);
+ if (fd < 0)
+ return NULL;
+
+ fp = fdopen(fd, "r");
+ if (fp == NULL)
+ uv__close(fd);
+
+ return fp;
+}
+
+
+int uv__accept(int sockfd) {
+ int peerfd;
+ int err;
+
+ (void) &err;
+ assert(sockfd >= 0);
+
+ do
+#ifdef uv__accept4
+ peerfd = uv__accept4(sockfd, NULL, NULL, SOCK_NONBLOCK|SOCK_CLOEXEC);
+#else
+ peerfd = accept(sockfd, NULL, NULL);
+#endif
+ while (peerfd == -1 && errno == EINTR);
+
+ if (peerfd == -1)
+ return UV__ERR(errno);
+
+#ifndef uv__accept4
+ err = uv__cloexec(peerfd, 1);
+ if (err == 0)
+ err = uv__nonblock(peerfd, 1);
+
+ if (err != 0) {
+ uv__close(peerfd);
+ return err;
+ }
+#endif
+
+ return peerfd;
+}
+
+
+/* close() on macOS has the "interesting" quirk that it fails with EINTR
+ * without closing the file descriptor when a thread is in the cancel state.
+ * That's why libuv calls close$NOCANCEL() instead.
+ *
+ * glibc on linux has a similar issue: close() is a cancellation point and
+ * will unwind the thread when it's in the cancel state. Work around that
+ * by making the system call directly. Musl libc is unaffected.
+ */
+int uv__close_nocancel(int fd) {
+#if defined(__APPLE__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wdollar-in-identifier-extension"
+#if defined(__LP64__) || TARGET_OS_IPHONE
+ extern int close$NOCANCEL(int);
+ return close$NOCANCEL(fd);
+#else
+ extern int close$NOCANCEL$UNIX2003(int);
+ return close$NOCANCEL$UNIX2003(fd);
+#endif
+#pragma GCC diagnostic pop
+#elif defined(__linux__)
+ return syscall(SYS_close, fd);
+#else
+ return close(fd);
+#endif
+}
+
+
+int uv__close_nocheckstdio(int fd) {
+ int saved_errno;
+ int rc;
+
+ assert(fd > -1); /* Catch uninitialized io_watcher.fd bugs. */
+
+ saved_errno = errno;
+ rc = uv__close_nocancel(fd);
+ if (rc == -1) {
+ rc = UV__ERR(errno);
+ if (rc == UV_EINTR || rc == UV__ERR(EINPROGRESS))
+ rc = 0; /* The close is in progress, not an error. */
+ errno = saved_errno;
+ }
+
+ return rc;
+}
+
+
+int uv__close(int fd) {
+ assert(fd > STDERR_FILENO); /* Catch stdio close bugs. */
+#if defined(__MVS__)
+ SAVE_ERRNO(epoll_file_close(fd));
+#endif
+ return uv__close_nocheckstdio(fd);
+}
+
+
+int uv__nonblock_ioctl(int fd, int set) {
+ int r;
+
+ do
+ r = ioctl(fd, FIONBIO, &set);
+ while (r == -1 && errno == EINTR);
+
+ if (r)
+ return UV__ERR(errno);
+
+ return 0;
+}
+
+
+#if !defined(__hpux) && !defined(__CYGWIN__) && !defined(__MSYS__) && !defined(__HAIKU__)
+int uv__cloexec_ioctl(int fd, int set) {
+ int r;
+
+ do
+ r = ioctl(fd, set ? FIOCLEX : FIONCLEX);
+ while (r == -1 && errno == EINTR);
+
+ if (r)
+ return UV__ERR(errno);
+
+ return 0;
+}
+#endif
+
+
+int uv__nonblock_fcntl(int fd, int set) {
+ int flags;
+ int r;
+
+ do
+ r = fcntl(fd, F_GETFL);
+ while (r == -1 && errno == EINTR);
+
+ if (r == -1)
+ return UV__ERR(errno);
+
+ /* Bail out now if already set/clear. */
+ if (!!(r & O_NONBLOCK) == !!set)
+ return 0;
+
+ if (set)
+ flags = r | O_NONBLOCK;
+ else
+ flags = r & ~O_NONBLOCK;
+
+ do
+ r = fcntl(fd, F_SETFL, flags);
+ while (r == -1 && errno == EINTR);
+
+ if (r)
+ return UV__ERR(errno);
+
+ return 0;
+}
+
+
+int uv__cloexec_fcntl(int fd, int set) {
+ int flags;
+ int r;
+
+ do
+ r = fcntl(fd, F_GETFD);
+ while (r == -1 && errno == EINTR);
+
+ if (r == -1)
+ return UV__ERR(errno);
+
+ /* Bail out now if already set/clear. */
+ if (!!(r & FD_CLOEXEC) == !!set)
+ return 0;
+
+ if (set)
+ flags = r | FD_CLOEXEC;
+ else
+ flags = r & ~FD_CLOEXEC;
+
+ do
+ r = fcntl(fd, F_SETFD, flags);
+ while (r == -1 && errno == EINTR);
+
+ if (r)
+ return UV__ERR(errno);
+
+ return 0;
+}
+
+
+ssize_t uv__recvmsg(int fd, struct msghdr* msg, int flags) {
+ struct cmsghdr* cmsg;
+ ssize_t rc;
+ int* pfd;
+ int* end;
+#if defined(__linux__)
+ static int no_msg_cmsg_cloexec;
+ if (0 == uv__load_relaxed(&no_msg_cmsg_cloexec)) {
+ rc = recvmsg(fd, msg, flags | 0x40000000); /* MSG_CMSG_CLOEXEC */
+ if (rc != -1)
+ return rc;
+ if (errno != EINVAL)
+ return UV__ERR(errno);
+ rc = recvmsg(fd, msg, flags);
+ if (rc == -1)
+ return UV__ERR(errno);
+ uv__store_relaxed(&no_msg_cmsg_cloexec, 1);
+ } else {
+ rc = recvmsg(fd, msg, flags);
+ }
+#else
+ rc = recvmsg(fd, msg, flags);
+#endif
+ if (rc == -1)
+ return UV__ERR(errno);
+ if (msg->msg_controllen == 0)
+ return rc;
+ for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg))
+ if (cmsg->cmsg_type == SCM_RIGHTS)
+ for (pfd = (int*) CMSG_DATA(cmsg),
+ end = (int*) ((char*) cmsg + cmsg->cmsg_len);
+ pfd < end;
+ pfd += 1)
+ uv__cloexec(*pfd, 1);
+ return rc;
+}
+
+
+int uv_cwd(char* buffer, size_t* size) {
+ char scratch[1 + UV__PATH_MAX];
+
+ if (buffer == NULL || size == NULL)
+ return UV_EINVAL;
+
+ /* Try to read directly into the user's buffer first... */
+ if (getcwd(buffer, *size) != NULL)
+ goto fixup;
+
+ if (errno != ERANGE)
+ return UV__ERR(errno);
+
+ /* ...or into scratch space if the user's buffer is too small
+ * so we can report how much space to provide on the next try.
+ */
+ if (getcwd(scratch, sizeof(scratch)) == NULL)
+ return UV__ERR(errno);
+
+ buffer = scratch;
+
+fixup:
+
+ *size = strlen(buffer);
+
+ if (*size > 1 && buffer[*size - 1] == '/') {
+ *size -= 1;
+ buffer[*size] = '\0';
+ }
+
+ if (buffer == scratch) {
+ *size += 1;
+ return UV_ENOBUFS;
+ }
+
+ return 0;
+}
+
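The UV_ENOBUFS convention above, reporting the required size back through *size, is shared by the other getters in this file. A hypothetical caller written against that contract:

  #include <uv.h>
  #include <stdio.h>
  #include <stdlib.h>

  int main(void) {
    char* buf;
    size_t len;
    int r;

    len = 1;                      /* deliberately too small for the sketch */
    buf = malloc(len);
    r = (buf == NULL) ? UV_ENOMEM : uv_cwd(buf, &len);

    if (r == UV_ENOBUFS) {        /* len now holds the required size */
      free(buf);
      buf = malloc(len);
      r = (buf == NULL) ? UV_ENOMEM : uv_cwd(buf, &len);
    }

    if (r == 0)
      printf("cwd: %s\n", buf);

    free(buf);
    return r == 0 ? 0 : 1;
  }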
+
+int uv_chdir(const char* dir) {
+ if (chdir(dir))
+ return UV__ERR(errno);
+
+ return 0;
+}
+
+
+void uv_disable_stdio_inheritance(void) {
+ int fd;
+
+ /* Set the CLOEXEC flag on all open descriptors. Unconditionally try the
+ * first 16 file descriptors. After that, bail out after the first error.
+ */
+ for (fd = 0; ; fd++)
+ if (uv__cloexec(fd, 1) && fd > 15)
+ break;
+}
+
+
+int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) {
+ int fd_out;
+
+ switch (handle->type) {
+ case UV_TCP:
+ case UV_NAMED_PIPE:
+ case UV_TTY:
+ fd_out = uv__stream_fd((uv_stream_t*) handle);
+ break;
+
+ case UV_UDP:
+ fd_out = ((uv_udp_t *) handle)->io_watcher.fd;
+ break;
+
+ case UV_POLL:
+ fd_out = ((uv_poll_t *) handle)->io_watcher.fd;
+ break;
+
+ default:
+ return UV_EINVAL;
+ }
+
+ if (uv__is_closing(handle) || fd_out == -1)
+ return UV_EBADF;
+
+ *fd = fd_out;
+ return 0;
+}
+
+
+static int uv__run_pending(uv_loop_t* loop) {
+ QUEUE* q;
+ QUEUE pq;
+ uv__io_t* w;
+
+ if (QUEUE_EMPTY(&loop->pending_queue))
+ return 0;
+
+ QUEUE_MOVE(&loop->pending_queue, &pq);
+
+ while (!QUEUE_EMPTY(&pq)) {
+ q = QUEUE_HEAD(&pq);
+ QUEUE_REMOVE(q);
+ QUEUE_INIT(q);
+ w = QUEUE_DATA(q, uv__io_t, pending_queue);
+ w->cb(loop, w, POLLOUT);
+ }
+
+ return 1;
+}
+
+
+static unsigned int next_power_of_two(unsigned int val) {
+ val -= 1;
+ val |= val >> 1;
+ val |= val >> 2;
+ val |= val >> 4;
+ val |= val >> 8;
+ val |= val >> 16;
+ val += 1;
+ return val;
+}
+
+static void maybe_resize(uv_loop_t* loop, unsigned int len) {
+ uv__io_t** watchers;
+ void* fake_watcher_list;
+ void* fake_watcher_count;
+ unsigned int nwatchers;
+ unsigned int i;
+
+ if (len <= loop->nwatchers)
+ return;
+
+ /* Preserve fake watcher list and count at the end of the watchers */
+ if (loop->watchers != NULL) {
+ fake_watcher_list = loop->watchers[loop->nwatchers];
+ fake_watcher_count = loop->watchers[loop->nwatchers + 1];
+ } else {
+ fake_watcher_list = NULL;
+ fake_watcher_count = NULL;
+ }
+
+ nwatchers = next_power_of_two(len + 2) - 2;
+ watchers = uv__reallocf(loop->watchers,
+ (nwatchers + 2) * sizeof(loop->watchers[0]));
+
+ if (watchers == NULL)
+ abort();
+ for (i = loop->nwatchers; i < nwatchers; i++)
+ watchers[i] = NULL;
+ watchers[nwatchers] = fake_watcher_list;
+ watchers[nwatchers + 1] = fake_watcher_count;
+
+ loop->watchers = watchers;
+ loop->nwatchers = nwatchers;
+}
+
+
+void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) {
+ assert(cb != NULL);
+ assert(fd >= -1);
+ QUEUE_INIT(&w->pending_queue);
+ QUEUE_INIT(&w->watcher_queue);
+ w->cb = cb;
+ w->fd = fd;
+ w->events = 0;
+ w->pevents = 0;
+
+#if defined(UV_HAVE_KQUEUE)
+ w->rcount = 0;
+ w->wcount = 0;
+#endif /* defined(UV_HAVE_KQUEUE) */
+}
+
+
+void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
+ assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
+ assert(0 != events);
+ assert(w->fd >= 0);
+ assert(w->fd < INT_MAX);
+
+ w->pevents |= events;
+ maybe_resize(loop, w->fd + 1);
+
+#if !defined(__sun)
+ /* The event ports backend needs to rearm all file descriptors on each and
+ * every tick of the event loop but the other backends allow us to
+ * short-circuit here if the event mask is unchanged.
+ */
+ if (w->events == w->pevents)
+ return;
+#endif
+
+ if (QUEUE_EMPTY(&w->watcher_queue))
+ QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
+
+ if (loop->watchers[w->fd] == NULL) {
+ loop->watchers[w->fd] = w;
+ loop->nfds++;
+ }
+}
+
+
+void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
+ assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
+ assert(0 != events);
+
+ if (w->fd == -1)
+ return;
+
+ assert(w->fd >= 0);
+
+ /* Happens when uv__io_stop() is called on a handle that was never started. */
+ if ((unsigned) w->fd >= loop->nwatchers)
+ return;
+
+ w->pevents &= ~events;
+
+ if (w->pevents == 0) {
+ QUEUE_REMOVE(&w->watcher_queue);
+ QUEUE_INIT(&w->watcher_queue);
+
+ if (loop->watchers[w->fd] != NULL) {
+ assert(loop->watchers[w->fd] == w);
+ assert(loop->nfds > 0);
+ loop->watchers[w->fd] = NULL;
+ loop->nfds--;
+ w->events = 0;
+ }
+ }
+ else if (QUEUE_EMPTY(&w->watcher_queue))
+ QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
+}
+
+
+void uv__io_close(uv_loop_t* loop, uv__io_t* w) {
+ uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
+ QUEUE_REMOVE(&w->pending_queue);
+
+ /* Remove stale events for this file descriptor */
+ if (w->fd != -1)
+ uv__platform_invalidate_fd(loop, w->fd);
+}
+
+
+void uv__io_feed(uv_loop_t* loop, uv__io_t* w) {
+ if (QUEUE_EMPTY(&w->pending_queue))
+ QUEUE_INSERT_TAIL(&loop->pending_queue, &w->pending_queue);
+}
+
+
+int uv__io_active(const uv__io_t* w, unsigned int events) {
+ assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
+ assert(0 != events);
+ return 0 != (w->pevents & events);
+}
+
+
+int uv__fd_exists(uv_loop_t* loop, int fd) {
+ return (unsigned) fd < loop->nwatchers && loop->watchers[fd] != NULL;
+}
+
+
+int uv_getrusage(uv_rusage_t* rusage) {
+ struct rusage usage;
+
+ if (getrusage(RUSAGE_SELF, &usage))
+ return UV__ERR(errno);
+
+ rusage->ru_utime.tv_sec = usage.ru_utime.tv_sec;
+ rusage->ru_utime.tv_usec = usage.ru_utime.tv_usec;
+
+ rusage->ru_stime.tv_sec = usage.ru_stime.tv_sec;
+ rusage->ru_stime.tv_usec = usage.ru_stime.tv_usec;
+
+#if !defined(__MVS__) && !defined(__HAIKU__)
+ rusage->ru_maxrss = usage.ru_maxrss;
+ rusage->ru_ixrss = usage.ru_ixrss;
+ rusage->ru_idrss = usage.ru_idrss;
+ rusage->ru_isrss = usage.ru_isrss;
+ rusage->ru_minflt = usage.ru_minflt;
+ rusage->ru_majflt = usage.ru_majflt;
+ rusage->ru_nswap = usage.ru_nswap;
+ rusage->ru_inblock = usage.ru_inblock;
+ rusage->ru_oublock = usage.ru_oublock;
+ rusage->ru_msgsnd = usage.ru_msgsnd;
+ rusage->ru_msgrcv = usage.ru_msgrcv;
+ rusage->ru_nsignals = usage.ru_nsignals;
+ rusage->ru_nvcsw = usage.ru_nvcsw;
+ rusage->ru_nivcsw = usage.ru_nivcsw;
+#endif
+
+ return 0;
+}
+
+
+int uv__open_cloexec(const char* path, int flags) {
+#if defined(O_CLOEXEC)
+ int fd;
+
+ fd = open(path, flags | O_CLOEXEC);
+ if (fd == -1)
+ return UV__ERR(errno);
+
+ return fd;
+#else /* O_CLOEXEC */
+ int err;
+ int fd;
+
+ fd = open(path, flags);
+ if (fd == -1)
+ return UV__ERR(errno);
+
+ err = uv__cloexec(fd, 1);
+ if (err) {
+ uv__close(fd);
+ return err;
+ }
+
+ return fd;
+#endif /* O_CLOEXEC */
+}
+
+
+int uv__dup2_cloexec(int oldfd, int newfd) {
+#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__linux__)
+ int r;
+
+ r = dup3(oldfd, newfd, O_CLOEXEC);
+ if (r == -1)
+ return UV__ERR(errno);
+
+ return r;
+#else
+ int err;
+ int r;
+
+ r = dup2(oldfd, newfd); /* Never retry. */
+ if (r == -1)
+ return UV__ERR(errno);
+
+ err = uv__cloexec(newfd, 1);
+ if (err != 0) {
+ uv__close(newfd);
+ return err;
+ }
+
+ return r;
+#endif
+}
+
+
+int uv_os_homedir(char* buffer, size_t* size) {
+ uv_passwd_t pwd;
+ size_t len;
+ int r;
+
+ /* Check if the HOME environment variable is set first. The task of
+ performing input validation on buffer and size is taken care of by
+ uv_os_getenv(). */
+ r = uv_os_getenv("HOME", buffer, size);
+
+ if (r != UV_ENOENT)
+ return r;
+
+ /* HOME is not set, so call uv__getpwuid_r() */
+ r = uv__getpwuid_r(&pwd);
+
+ if (r != 0) {
+ return r;
+ }
+
+ len = strlen(pwd.homedir);
+
+ if (len >= *size) {
+ *size = len + 1;
+ uv_os_free_passwd(&pwd);
+ return UV_ENOBUFS;
+ }
+
+ memcpy(buffer, pwd.homedir, len + 1);
+ *size = len;
+ uv_os_free_passwd(&pwd);
+
+ return 0;
+}
+
+
+int uv_os_tmpdir(char* buffer, size_t* size) {
+ const char* buf;
+ size_t len;
+
+ if (buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+#define CHECK_ENV_VAR(name) \
+ do { \
+ buf = getenv(name); \
+ if (buf != NULL) \
+ goto return_buffer; \
+ } \
+ while (0)
+
+ /* Check the TMPDIR, TMP, TEMP, and TEMPDIR environment variables in order */
+ CHECK_ENV_VAR("TMPDIR");
+ CHECK_ENV_VAR("TMP");
+ CHECK_ENV_VAR("TEMP");
+ CHECK_ENV_VAR("TEMPDIR");
+
+#undef CHECK_ENV_VAR
+
+ /* No temp environment variables defined */
+ #if defined(__ANDROID__)
+ buf = "/data/local/tmp";
+ #else
+ buf = "/tmp";
+ #endif
+
+return_buffer:
+ len = strlen(buf);
+
+ if (len >= *size) {
+ *size = len + 1;
+ return UV_ENOBUFS;
+ }
+
+ /* The returned directory should not have a trailing slash. */
+ if (len > 1 && buf[len - 1] == '/') {
+ len--;
+ }
+
+ memcpy(buffer, buf, len + 1);
+ buffer[len] = '\0';
+ *size = len;
+
+ return 0;
+}
+
+
+int uv__getpwuid_r(uv_passwd_t* pwd) {
+ struct passwd pw;
+ struct passwd* result;
+ char* buf;
+ uid_t uid;
+ size_t bufsize;
+ size_t name_size;
+ size_t homedir_size;
+ size_t shell_size;
+ long initsize;
+ int r;
+
+ if (pwd == NULL)
+ return UV_EINVAL;
+
+ initsize = sysconf(_SC_GETPW_R_SIZE_MAX);
+
+ if (initsize <= 0)
+ bufsize = 4096;
+ else
+ bufsize = (size_t) initsize;
+
+ uid = geteuid();
+ buf = NULL;
+
+ for (;;) {
+ uv__free(buf);
+ buf = uv__malloc(bufsize);
+
+ if (buf == NULL)
+ return UV_ENOMEM;
+
+ r = getpwuid_r(uid, &pw, buf, bufsize, &result);
+
+ if (r != ERANGE)
+ break;
+
+ bufsize *= 2;
+ }
+
+ if (r != 0) {
+ uv__free(buf);
+ return -r;
+ }
+
+ if (result == NULL) {
+ uv__free(buf);
+ return UV_ENOENT;
+ }
+
+ /* Allocate memory for the username, shell, and home directory */
+ name_size = strlen(pw.pw_name) + 1;
+ homedir_size = strlen(pw.pw_dir) + 1;
+ shell_size = strlen(pw.pw_shell) + 1;
+ pwd->username = uv__malloc(name_size + homedir_size + shell_size);
+
+ if (pwd->username == NULL) {
+ uv__free(buf);
+ return UV_ENOMEM;
+ }
+
+ /* Copy the username */
+ memcpy(pwd->username, pw.pw_name, name_size);
+
+ /* Copy the home directory */
+ pwd->homedir = pwd->username + name_size;
+ memcpy(pwd->homedir, pw.pw_dir, homedir_size);
+
+ /* Copy the shell */
+ pwd->shell = pwd->homedir + homedir_size;
+ memcpy(pwd->shell, pw.pw_shell, shell_size);
+
+ /* Copy the uid and gid */
+ pwd->uid = pw.pw_uid;
+ pwd->gid = pw.pw_gid;
+
+ uv__free(buf);
+
+ return 0;
+}
+
+
+void uv_os_free_passwd(uv_passwd_t* pwd) {
+ if (pwd == NULL)
+ return;
+
+ /*
+ The memory for username, shell, and homedir is allocated in a single
+ uv__malloc() call. The base of the allocation is stored in pwd->username,
+ so that is the only field that needs to be freed.
+ */
+ uv__free(pwd->username);
+ pwd->username = NULL;
+ pwd->shell = NULL;
+ pwd->homedir = NULL;
+}
+
+
+int uv_os_get_passwd(uv_passwd_t* pwd) {
+ return uv__getpwuid_r(pwd);
+}
+
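Callers are expected to pair uv_os_get_passwd() with uv_os_free_passwd(), which releases the single allocation described above. A small illustrative sketch:

  #include <uv.h>
  #include <stdio.h>

  int main(void) {
    uv_passwd_t pwd;

    if (uv_os_get_passwd(&pwd) != 0)
      return 1;

    printf("user=%s home=%s shell=%s\n",
           pwd.username,
           pwd.homedir,
           pwd.shell != NULL ? pwd.shell : "(none)");

    uv_os_free_passwd(&pwd);  /* releases the single allocation */
    return 0;
  }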
+
+int uv_translate_sys_error(int sys_errno) {
+ /* If < 0 then it's already a libuv error. */
+ return sys_errno <= 0 ? sys_errno : -sys_errno;
+}
+
+
+int uv_os_environ(uv_env_item_t** envitems, int* count) {
+ int i, j, cnt;
+ uv_env_item_t* envitem;
+
+ *envitems = NULL;
+ *count = 0;
+
+ for (i = 0; environ[i] != NULL; i++);
+
+ *envitems = uv__calloc(i, sizeof(**envitems));
+
+ if (*envitems == NULL)
+ return UV_ENOMEM;
+
+ for (j = 0, cnt = 0; j < i; j++) {
+ char* buf;
+ char* ptr;
+
+ if (environ[j] == NULL)
+ break;
+
+ buf = uv__strdup(environ[j]);
+ if (buf == NULL)
+ goto fail;
+
+ ptr = strchr(buf, '=');
+ if (ptr == NULL) {
+ uv__free(buf);
+ continue;
+ }
+
+ *ptr = '\0';
+
+ envitem = &(*envitems)[cnt];
+ envitem->name = buf;
+ envitem->value = ptr + 1;
+
+ cnt++;
+ }
+
+ *count = cnt;
+ return 0;
+
+fail:
+ for (i = 0; i < cnt; i++) {
+ envitem = &(*envitems)[i];
+ uv__free(envitem->name);
+ }
+ uv__free(*envitems);
+
+ *envitems = NULL;
+ *count = 0;
+ return UV_ENOMEM;
+}
+
+
+int uv_os_getenv(const char* name, char* buffer, size_t* size) {
+ char* var;
+ size_t len;
+
+ if (name == NULL || buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+ var = getenv(name);
+
+ if (var == NULL)
+ return UV_ENOENT;
+
+ len = strlen(var);
+
+ if (len >= *size) {
+ *size = len + 1;
+ return UV_ENOBUFS;
+ }
+
+ memcpy(buffer, var, len + 1);
+ *size = len;
+
+ return 0;
+}
+
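uv_os_getenv() follows the same grow-and-retry contract. A hypothetical caller that falls back to a heap buffer when its stack buffer turns out to be too small (the 16-byte buffer is deliberately undersized for the sketch):

  #include <uv.h>
  #include <stdio.h>
  #include <stdlib.h>

  int main(void) {
    char small[16];               /* deliberately undersized for the sketch */
    size_t len = sizeof(small);
    char* big;
    int r;

    r = uv_os_getenv("PATH", small, &len);
    if (r == 0) {
      printf("PATH=%s\n", small);
    } else if (r == UV_ENOBUFS) { /* len now holds strlen(value) + 1 */
      big = malloc(len);
      if (big != NULL && uv_os_getenv("PATH", big, &len) == 0)
        printf("PATH=%s\n", big);
      free(big);
    }
    return 0;
  }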
+
+int uv_os_setenv(const char* name, const char* value) {
+ if (name == NULL || value == NULL)
+ return UV_EINVAL;
+
+ if (setenv(name, value, 1) != 0)
+ return UV__ERR(errno);
+
+ return 0;
+}
+
+
+int uv_os_unsetenv(const char* name) {
+ if (name == NULL)
+ return UV_EINVAL;
+
+ if (unsetenv(name) != 0)
+ return UV__ERR(errno);
+
+ return 0;
+}
+
+
+int uv_os_gethostname(char* buffer, size_t* size) {
+ /*
+ On some platforms, if the input buffer is not large enough, gethostname()
+ succeeds but silently truncates the result. libuv can detect this and
+ return UV_ENOBUFS instead by reading into a buffer that is guaranteed to
+ be large enough and comparing the hostname length to the caller-supplied
+ size.
+ */
+ char buf[UV_MAXHOSTNAMESIZE];
+ size_t len;
+
+ if (buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+ if (gethostname(buf, sizeof(buf)) != 0)
+ return UV__ERR(errno);
+
+ buf[sizeof(buf) - 1] = '\0'; /* Null terminate, just to be safe. */
+ len = strlen(buf);
+
+ if (len >= *size) {
+ *size = len + 1;
+ return UV_ENOBUFS;
+ }
+
+ memcpy(buffer, buf, len + 1);
+ *size = len;
+ return 0;
+}
+
+
+int uv_cpumask_size(void) {
+#if defined(__linux__) || defined(__FreeBSD__)
+ return CPU_SETSIZE;
+#else
+ return UV_ENOTSUP;
+#endif
+}
+
+
+uv_os_fd_t uv_get_osfhandle(int fd) {
+ return fd;
+}
+
+int uv_open_osfhandle(uv_os_fd_t os_fd) {
+ return os_fd;
+}
+
+uv_pid_t uv_os_getpid(void) {
+ return getpid();
+}
+
+
+uv_pid_t uv_os_getppid(void) {
+ return getppid();
+}
+
+
+int uv_os_getpriority(uv_pid_t pid, int* priority) {
+ int r;
+
+ if (priority == NULL)
+ return UV_EINVAL;
+
+ errno = 0;
+ r = getpriority(PRIO_PROCESS, (int) pid);
+
+ if (r == -1 && errno != 0)
+ return UV__ERR(errno);
+
+ *priority = r;
+ return 0;
+}
+
+
+int uv_os_setpriority(uv_pid_t pid, int priority) {
+ if (priority < UV_PRIORITY_HIGHEST || priority > UV_PRIORITY_LOW)
+ return UV_EINVAL;
+
+ if (setpriority(PRIO_PROCESS, (int) pid, priority) != 0)
+ return UV__ERR(errno);
+
+ return 0;
+}
+
+
+int uv_os_uname(uv_utsname_t* buffer) {
+ struct utsname buf;
+ int r;
+
+ if (buffer == NULL)
+ return UV_EINVAL;
+
+ if (uname(&buf) == -1) {
+ r = UV__ERR(errno);
+ goto error;
+ }
+
+ r = uv__strscpy(buffer->sysname, buf.sysname, sizeof(buffer->sysname));
+ if (r == UV_E2BIG)
+ goto error;
+
+#ifdef _AIX
+ r = snprintf(buffer->release,
+ sizeof(buffer->release),
+ "%s.%s",
+ buf.version,
+ buf.release);
+ if (r >= sizeof(buffer->release)) {
+ r = UV_E2BIG;
+ goto error;
+ }
+#else
+ r = uv__strscpy(buffer->release, buf.release, sizeof(buffer->release));
+ if (r == UV_E2BIG)
+ goto error;
+#endif
+
+ r = uv__strscpy(buffer->version, buf.version, sizeof(buffer->version));
+ if (r == UV_E2BIG)
+ goto error;
+
+#if defined(_AIX) || defined(__PASE__)
+ r = uv__strscpy(buffer->machine, "ppc64", sizeof(buffer->machine));
+#else
+ r = uv__strscpy(buffer->machine, buf.machine, sizeof(buffer->machine));
+#endif
+
+ if (r == UV_E2BIG)
+ goto error;
+
+ return 0;
+
+error:
+ buffer->sysname[0] = '\0';
+ buffer->release[0] = '\0';
+ buffer->version[0] = '\0';
+ buffer->machine[0] = '\0';
+ return r;
+}
+
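A short illustrative consumer of uv_os_uname(); the uv_utsname_t fields are fixed-size arrays, so no cleanup call is needed:

  #include <uv.h>
  #include <stdio.h>

  int main(void) {
    uv_utsname_t u;

    if (uv_os_uname(&u) != 0)
      return 1;

    printf("%s %s %s (%s)\n", u.sysname, u.release, u.version, u.machine);
    return 0;
  }
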
+int uv__getsockpeername(const uv_handle_t* handle,
+ uv__peersockfunc func,
+ struct sockaddr* name,
+ int* namelen) {
+ socklen_t socklen;
+ uv_os_fd_t fd;
+ int r;
+
+ r = uv_fileno(handle, &fd);
+ if (r < 0)
+ return r;
+
+ /* sizeof(socklen_t) != sizeof(int) on some systems. */
+ socklen = (socklen_t) *namelen;
+
+ if (func(fd, name, &socklen))
+ return UV__ERR(errno);
+
+ *namelen = (int) socklen;
+ return 0;
+}
+
+int uv_gettimeofday(uv_timeval64_t* tv) {
+ struct timeval time;
+
+ if (tv == NULL)
+ return UV_EINVAL;
+
+ if (gettimeofday(&time, NULL) != 0)
+ return UV__ERR(errno);
+
+ tv->tv_sec = (int64_t) time.tv_sec;
+ tv->tv_usec = (int32_t) time.tv_usec;
+ return 0;
+}
+
+void uv_sleep(unsigned int msec) {
+ struct timespec timeout;
+ int rc;
+
+ timeout.tv_sec = msec / 1000;
+ timeout.tv_nsec = (msec % 1000) * 1000 * 1000;
+
+ do
+ rc = nanosleep(&timeout, &timeout);
+ while (rc == -1 && errno == EINTR);
+
+ assert(rc == 0);
+}
+
+int uv__search_path(const char* prog, char* buf, size_t* buflen) {
+ char abspath[UV__PATH_MAX];
+ size_t abspath_size;
+ char trypath[UV__PATH_MAX];
+ char* cloned_path;
+ char* path_env;
+ char* token;
+
+ if (buf == NULL || buflen == NULL || *buflen == 0)
+ return UV_EINVAL;
+
+ /*
+ * Possibilities for prog:
+ * i) an absolute path such as: /home/user/myprojects/nodejs/node
+ * ii) a relative path such as: ./node or ../myprojects/nodejs/node
+ * iii) a bare filename such as "node", after exporting PATH variable
+ * to its location.
+ */
+
+ /* Case i) and ii) absolute or relative paths */
+ if (strchr(prog, '/') != NULL) {
+ if (realpath(prog, abspath) != abspath)
+ return UV__ERR(errno);
+
+ abspath_size = strlen(abspath);
+
+ *buflen -= 1;
+ if (*buflen > abspath_size)
+ *buflen = abspath_size;
+
+ memcpy(buf, abspath, *buflen);
+ buf[*buflen] = '\0';
+
+ return 0;
+ }
+
+ /* Case iii). Search PATH environment variable */
+ cloned_path = NULL;
+ token = NULL;
+ path_env = getenv("PATH");
+
+ if (path_env == NULL)
+ return UV_EINVAL;
+
+ cloned_path = uv__strdup(path_env);
+ if (cloned_path == NULL)
+ return UV_ENOMEM;
+
+ token = strtok(cloned_path, ":");
+ while (token != NULL) {
+ snprintf(trypath, sizeof(trypath) - 1, "%s/%s", token, prog);
+ if (realpath(trypath, abspath) == abspath) {
+ /* Check the match is executable */
+ if (access(abspath, X_OK) == 0) {
+ abspath_size = strlen(abspath);
+
+ *buflen -= 1;
+ if (*buflen > abspath_size)
+ *buflen = abspath_size;
+
+ memcpy(buf, abspath, *buflen);
+ buf[*buflen] = '\0';
+
+ uv__free(cloned_path);
+ return 0;
+ }
+ }
+ token = strtok(NULL, ":");
+ }
+ uv__free(cloned_path);
+
+ /* Out of tokens (path entries), and no match found */
+ return UV_EINVAL;
+}
diff --git a/Utilities/cmlibuv/src/unix/cygwin.c b/Utilities/cmlibuv/src/unix/cygwin.c
new file mode 100644
index 0000000..169958d
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/cygwin.c
@@ -0,0 +1,53 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <sys/sysinfo.h>
+#include <unistd.h>
+
+int uv_uptime(double* uptime) {
+ struct sysinfo info;
+
+ if (sysinfo(&info) < 0)
+ return UV__ERR(errno);
+
+ *uptime = info.uptime;
+ return 0;
+}
+
+int uv_resident_set_memory(size_t* rss) {
+ /* FIXME: read /proc/meminfo? */
+ *rss = 0;
+ return 0;
+}
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+ /* FIXME: read /proc/stat? */
+ *cpu_infos = NULL;
+ *count = 0;
+ return UV_ENOSYS;
+}
+
+uint64_t uv_get_constrained_memory(void) {
+ return 0; /* Memory constraints are unknown. */
+}
diff --git a/Utilities/cmlibuv/src/unix/darwin-proctitle.c b/Utilities/cmlibuv/src/unix/darwin-proctitle.c
new file mode 100644
index 0000000..5288083
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/darwin-proctitle.c
@@ -0,0 +1,192 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <dlfcn.h>
+#include <errno.h>
+#include <pthread.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <TargetConditionals.h>
+
+#if !TARGET_OS_IPHONE
+#include "darwin-stub.h"
+#endif
+
+
+static int uv__pthread_setname_np(const char* name) {
+ char namebuf[64]; /* MAXTHREADNAMESIZE */
+ int err;
+
+ strncpy(namebuf, name, sizeof(namebuf) - 1);
+ namebuf[sizeof(namebuf) - 1] = '\0';
+
+ err = pthread_setname_np(namebuf);
+ if (err)
+ return UV__ERR(err);
+
+ return 0;
+}
+
+
+int uv__set_process_title(const char* title) {
+#if TARGET_OS_IPHONE
+ return uv__pthread_setname_np(title);
+#else
+ CFStringRef (*pCFStringCreateWithCString)(CFAllocatorRef,
+ const char*,
+ CFStringEncoding);
+ CFBundleRef (*pCFBundleGetBundleWithIdentifier)(CFStringRef);
+ void *(*pCFBundleGetDataPointerForName)(CFBundleRef, CFStringRef);
+ void *(*pCFBundleGetFunctionPointerForName)(CFBundleRef, CFStringRef);
+ CFTypeRef (*pLSGetCurrentApplicationASN)(void);
+ OSStatus (*pLSSetApplicationInformationItem)(int,
+ CFTypeRef,
+ CFStringRef,
+ CFStringRef,
+ CFDictionaryRef*);
+ void* application_services_handle;
+ void* core_foundation_handle;
+ CFBundleRef launch_services_bundle;
+ CFStringRef* display_name_key;
+ CFDictionaryRef (*pCFBundleGetInfoDictionary)(CFBundleRef);
+ CFBundleRef (*pCFBundleGetMainBundle)(void);
+ CFDictionaryRef (*pLSApplicationCheckIn)(int, CFDictionaryRef);
+ void (*pLSSetApplicationLaunchServicesServerConnectionStatus)(uint64_t,
+ void*);
+ CFTypeRef asn;
+ int err;
+
+ err = UV_ENOENT;
+ application_services_handle = dlopen("/System/Library/Frameworks/"
+ "ApplicationServices.framework/"
+ "Versions/A/ApplicationServices",
+ RTLD_LAZY | RTLD_LOCAL);
+ core_foundation_handle = dlopen("/System/Library/Frameworks/"
+ "CoreFoundation.framework/"
+ "Versions/A/CoreFoundation",
+ RTLD_LAZY | RTLD_LOCAL);
+
+ if (application_services_handle == NULL || core_foundation_handle == NULL)
+ goto out;
+
+ *(void **)(&pCFStringCreateWithCString) =
+ dlsym(core_foundation_handle, "CFStringCreateWithCString");
+ *(void **)(&pCFBundleGetBundleWithIdentifier) =
+ dlsym(core_foundation_handle, "CFBundleGetBundleWithIdentifier");
+ *(void **)(&pCFBundleGetDataPointerForName) =
+ dlsym(core_foundation_handle, "CFBundleGetDataPointerForName");
+ *(void **)(&pCFBundleGetFunctionPointerForName) =
+ dlsym(core_foundation_handle, "CFBundleGetFunctionPointerForName");
+
+ if (pCFStringCreateWithCString == NULL ||
+ pCFBundleGetBundleWithIdentifier == NULL ||
+ pCFBundleGetDataPointerForName == NULL ||
+ pCFBundleGetFunctionPointerForName == NULL) {
+ goto out;
+ }
+
+#define S(s) pCFStringCreateWithCString(NULL, (s), kCFStringEncodingUTF8)
+
+ launch_services_bundle =
+ pCFBundleGetBundleWithIdentifier(S("com.apple.LaunchServices"));
+
+ if (launch_services_bundle == NULL)
+ goto out;
+
+ *(void **)(&pLSGetCurrentApplicationASN) =
+ pCFBundleGetFunctionPointerForName(launch_services_bundle,
+ S("_LSGetCurrentApplicationASN"));
+
+ if (pLSGetCurrentApplicationASN == NULL)
+ goto out;
+
+ *(void **)(&pLSSetApplicationInformationItem) =
+ pCFBundleGetFunctionPointerForName(launch_services_bundle,
+ S("_LSSetApplicationInformationItem"));
+
+ if (pLSSetApplicationInformationItem == NULL)
+ goto out;
+
+ display_name_key = pCFBundleGetDataPointerForName(launch_services_bundle,
+ S("_kLSDisplayNameKey"));
+
+ if (display_name_key == NULL || *display_name_key == NULL)
+ goto out;
+
+ *(void **)(&pCFBundleGetInfoDictionary) = dlsym(core_foundation_handle,
+ "CFBundleGetInfoDictionary");
+ *(void **)(&pCFBundleGetMainBundle) = dlsym(core_foundation_handle,
+ "CFBundleGetMainBundle");
+ if (pCFBundleGetInfoDictionary == NULL || pCFBundleGetMainBundle == NULL)
+ goto out;
+
+ *(void **)(&pLSApplicationCheckIn) = pCFBundleGetFunctionPointerForName(
+ launch_services_bundle,
+ S("_LSApplicationCheckIn"));
+
+ if (pLSApplicationCheckIn == NULL)
+ goto out;
+
+ *(void **)(&pLSSetApplicationLaunchServicesServerConnectionStatus) =
+ pCFBundleGetFunctionPointerForName(
+ launch_services_bundle,
+ S("_LSSetApplicationLaunchServicesServerConnectionStatus"));
+
+ if (pLSSetApplicationLaunchServicesServerConnectionStatus == NULL)
+ goto out;
+
+ pLSSetApplicationLaunchServicesServerConnectionStatus(0, NULL);
+
+ /* Check into process manager?! */
+ pLSApplicationCheckIn(-2,
+ pCFBundleGetInfoDictionary(pCFBundleGetMainBundle()));
+
+ asn = pLSGetCurrentApplicationASN();
+
+ err = UV_EBUSY;
+ if (asn == NULL)
+ goto out;
+
+ err = UV_EINVAL;
+ if (pLSSetApplicationInformationItem(-2, /* Magic value. */
+ asn,
+ *display_name_key,
+ S(title),
+ NULL) != noErr) {
+ goto out;
+ }
+
+ uv__pthread_setname_np(title); /* Don't care if it fails. */
+ err = 0;
+
+out:
+ if (core_foundation_handle != NULL)
+ dlclose(core_foundation_handle);
+
+ if (application_services_handle != NULL)
+ dlclose(application_services_handle);
+
+ return err;
+#endif /* !TARGET_OS_IPHONE */
+}
diff --git a/Utilities/cmlibuv/src/unix/darwin-stub.h b/Utilities/cmlibuv/src/unix/darwin-stub.h
new file mode 100644
index 0000000..433e3ef
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/darwin-stub.h
@@ -0,0 +1,113 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_DARWIN_STUB_H_
+#define UV_DARWIN_STUB_H_
+
+#include <stdint.h>
+
+struct CFArrayCallBacks;
+struct CFRunLoopSourceContext;
+struct FSEventStreamContext;
+struct CFRange;
+
+typedef double CFAbsoluteTime;
+typedef double CFTimeInterval;
+typedef int FSEventStreamEventFlags;
+typedef int OSStatus;
+typedef long CFIndex;
+typedef struct CFArrayCallBacks CFArrayCallBacks;
+typedef struct CFRunLoopSourceContext CFRunLoopSourceContext;
+typedef struct FSEventStreamContext FSEventStreamContext;
+typedef uint32_t FSEventStreamCreateFlags;
+typedef uint64_t FSEventStreamEventId;
+typedef unsigned CFStringEncoding;
+typedef void* CFAllocatorRef;
+typedef void* CFArrayRef;
+typedef void* CFBundleRef;
+typedef void* CFDataRef;
+typedef void* CFDictionaryRef;
+typedef void* CFMutableDictionaryRef;
+typedef struct CFRange CFRange;
+typedef void* CFRunLoopRef;
+typedef void* CFRunLoopSourceRef;
+typedef void* CFStringRef;
+typedef void* CFTypeRef;
+typedef void* FSEventStreamRef;
+
+typedef uint32_t IOOptionBits;
+typedef unsigned int io_iterator_t;
+typedef unsigned int io_object_t;
+typedef unsigned int io_service_t;
+typedef unsigned int io_registry_entry_t;
+
+
+typedef void (*FSEventStreamCallback)(const FSEventStreamRef,
+ void*,
+ size_t,
+ void*,
+ const FSEventStreamEventFlags*,
+ const FSEventStreamEventId*);
+
+struct CFRunLoopSourceContext {
+ CFIndex version;
+ void* info;
+ void* pad[7];
+ void (*perform)(void*);
+};
+
+struct FSEventStreamContext {
+ CFIndex version;
+ void* info;
+ void* pad[3];
+};
+
+struct CFRange {
+ CFIndex location;
+ CFIndex length;
+};
+
+static const CFStringEncoding kCFStringEncodingUTF8 = 0x8000100;
+static const OSStatus noErr = 0;
+
+static const FSEventStreamEventId kFSEventStreamEventIdSinceNow = -1;
+
+static const int kFSEventStreamCreateFlagNoDefer = 2;
+static const int kFSEventStreamCreateFlagFileEvents = 16;
+
+static const int kFSEventStreamEventFlagEventIdsWrapped = 8;
+static const int kFSEventStreamEventFlagHistoryDone = 16;
+static const int kFSEventStreamEventFlagItemChangeOwner = 0x4000;
+static const int kFSEventStreamEventFlagItemCreated = 0x100;
+static const int kFSEventStreamEventFlagItemFinderInfoMod = 0x2000;
+static const int kFSEventStreamEventFlagItemInodeMetaMod = 0x400;
+static const int kFSEventStreamEventFlagItemIsDir = 0x20000;
+static const int kFSEventStreamEventFlagItemModified = 0x1000;
+static const int kFSEventStreamEventFlagItemRemoved = 0x200;
+static const int kFSEventStreamEventFlagItemRenamed = 0x800;
+static const int kFSEventStreamEventFlagItemXattrMod = 0x8000;
+static const int kFSEventStreamEventFlagKernelDropped = 4;
+static const int kFSEventStreamEventFlagMount = 64;
+static const int kFSEventStreamEventFlagRootChanged = 32;
+static const int kFSEventStreamEventFlagUnmount = 128;
+static const int kFSEventStreamEventFlagUserDropped = 2;
+
+#endif /* UV_DARWIN_STUB_H_ */
diff --git a/Utilities/cmlibuv/src/unix/darwin.c b/Utilities/cmlibuv/src/unix/darwin.c
new file mode 100644
index 0000000..d0ecd45
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/darwin.c
@@ -0,0 +1,371 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <assert.h>
+#include <stdint.h>
+#include <errno.h>
+
+#include <dlfcn.h>
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+#include <mach-o/dyld.h> /* _NSGetExecutablePath */
+#include <sys/resource.h>
+#include <sys/sysctl.h>
+#include <unistd.h> /* sysconf */
+
+#if !TARGET_OS_IPHONE
+#include "darwin-stub.h"
+#endif
+
+static uv_once_t once = UV_ONCE_INIT;
+static uint64_t (*time_func)(void);
+static mach_timebase_info_data_t timebase;
+
+typedef unsigned char UInt8;
+
+int uv__platform_loop_init(uv_loop_t* loop) {
+ loop->cf_state = NULL;
+
+ if (uv__kqueue_init(loop))
+ return UV__ERR(errno);
+
+ return 0;
+}
+
+
+void uv__platform_loop_delete(uv_loop_t* loop) {
+ uv__fsevents_loop_delete(loop);
+}
+
+
+static void uv__hrtime_init_once(void) {
+ if (KERN_SUCCESS != mach_timebase_info(&timebase))
+ abort();
+
+ time_func = (uint64_t (*)(void)) dlsym(RTLD_DEFAULT, "mach_continuous_time");
+ if (time_func == NULL)
+ time_func = mach_absolute_time;
+}
+
+
+uint64_t uv__hrtime(uv_clocktype_t type) {
+ uv_once(&once, uv__hrtime_init_once);
+ return time_func() * timebase.numer / timebase.denom;
+}
+
+
+int uv_exepath(char* buffer, size_t* size) {
+ /* realpath(exepath) may be > PATH_MAX so double it to be on the safe side. */
+ char abspath[PATH_MAX * 2 + 1];
+ char exepath[PATH_MAX + 1];
+ uint32_t exepath_size;
+ size_t abspath_size;
+
+ if (buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+ exepath_size = sizeof(exepath);
+ if (_NSGetExecutablePath(exepath, &exepath_size))
+ return UV_EIO;
+
+ if (realpath(exepath, abspath) != abspath)
+ return UV__ERR(errno);
+
+ abspath_size = strlen(abspath);
+ if (abspath_size == 0)
+ return UV_EIO;
+
+ *size -= 1;
+ if (*size > abspath_size)
+ *size = abspath_size;
+
+ memcpy(buffer, abspath, *size);
+ buffer[*size] = '\0';
+
+ return 0;
+}
+
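uv_exepath() uses the same in/out size convention as the other path getters. A hypothetical caller (the 4096-byte buffer is an arbitrary choice for the sketch):

  #include <uv.h>
  #include <stdio.h>

  int main(void) {
    char path[4096];              /* arbitrary size for the sketch */
    size_t len = sizeof(path);

    if (uv_exepath(path, &len) == 0)
      printf("executable: %s\n", path);
    return 0;
  }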
+
+uint64_t uv_get_free_memory(void) {
+ vm_statistics_data_t info;
+ mach_msg_type_number_t count = sizeof(info) / sizeof(integer_t);
+
+ if (host_statistics(mach_host_self(), HOST_VM_INFO,
+ (host_info_t)&info, &count) != KERN_SUCCESS) {
+ return UV_EINVAL; /* FIXME(bnoordhuis) Translate error. */
+ }
+
+ return (uint64_t) info.free_count * sysconf(_SC_PAGESIZE);
+}
+
+
+uint64_t uv_get_total_memory(void) {
+ uint64_t info;
+ int which[] = {CTL_HW, HW_MEMSIZE};
+ size_t size = sizeof(info);
+
+ if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
+ return UV__ERR(errno);
+
+ return (uint64_t) info;
+}
+
+
+uint64_t uv_get_constrained_memory(void) {
+ return 0; /* Memory constraints are unknown. */
+}
+
+
+void uv_loadavg(double avg[3]) {
+ struct loadavg info;
+ size_t size = sizeof(info);
+ int which[] = {CTL_VM, VM_LOADAVG};
+
+ if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0) < 0) return;
+
+ avg[0] = (double) info.ldavg[0] / info.fscale;
+ avg[1] = (double) info.ldavg[1] / info.fscale;
+ avg[2] = (double) info.ldavg[2] / info.fscale;
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+ mach_msg_type_number_t count;
+ task_basic_info_data_t info;
+ kern_return_t err;
+
+ count = TASK_BASIC_INFO_COUNT;
+ err = task_info(mach_task_self(),
+ TASK_BASIC_INFO,
+ (task_info_t) &info,
+ &count);
+ (void) &err;
+ /* task_info(TASK_BASIC_INFO) cannot really fail. Anything other than
+ * KERN_SUCCESS implies a libuv bug.
+ */
+ assert(err == KERN_SUCCESS);
+ *rss = info.resident_size;
+
+ return 0;
+}
+
+
+int uv_uptime(double* uptime) {
+ time_t now;
+ struct timeval info;
+ size_t size = sizeof(info);
+ static int which[] = {CTL_KERN, KERN_BOOTTIME};
+
+ if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
+ return UV__ERR(errno);
+
+ now = time(NULL);
+ *uptime = now - info.tv_sec;
+
+ return 0;
+}
+
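+/* uv__get_cpu_speed() resolves the CoreFoundation and IOKit symbols it needs
+ * at runtime with dlopen()/dlsym() instead of linking the frameworks
+ * directly; if either framework or any required symbol cannot be loaded it
+ * reports UV_ENOENT, which uv_cpu_info() below propagates to its caller.
+ */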
+static int uv__get_cpu_speed(uint64_t* speed) {
+ /* IOKit */
+ void (*pIOObjectRelease)(io_object_t);
+ kern_return_t (*pIOMasterPort)(mach_port_t, mach_port_t*);
+ CFMutableDictionaryRef (*pIOServiceMatching)(const char*);
+ kern_return_t (*pIOServiceGetMatchingServices)(mach_port_t,
+ CFMutableDictionaryRef,
+ io_iterator_t*);
+ io_service_t (*pIOIteratorNext)(io_iterator_t);
+ CFTypeRef (*pIORegistryEntryCreateCFProperty)(io_registry_entry_t,
+ CFStringRef,
+ CFAllocatorRef,
+ IOOptionBits);
+
+ /* CoreFoundation */
+ CFStringRef (*pCFStringCreateWithCString)(CFAllocatorRef,
+ const char*,
+ CFStringEncoding);
+ CFStringEncoding (*pCFStringGetSystemEncoding)(void);
+ UInt8 *(*pCFDataGetBytePtr)(CFDataRef);
+ CFIndex (*pCFDataGetLength)(CFDataRef);
+ void (*pCFDataGetBytes)(CFDataRef, CFRange, UInt8*);
+ void (*pCFRelease)(CFTypeRef);
+
+ void* core_foundation_handle;
+ void* iokit_handle;
+ int err;
+
+ kern_return_t kr;
+ mach_port_t mach_port;
+ io_iterator_t it;
+ io_object_t service;
+
+ mach_port = 0;
+
+ err = UV_ENOENT;
+ core_foundation_handle = dlopen("/System/Library/Frameworks/"
+ "CoreFoundation.framework/"
+ "Versions/A/CoreFoundation",
+ RTLD_LAZY | RTLD_LOCAL);
+ iokit_handle = dlopen("/System/Library/Frameworks/IOKit.framework/"
+ "Versions/A/IOKit",
+ RTLD_LAZY | RTLD_LOCAL);
+
+ if (core_foundation_handle == NULL || iokit_handle == NULL)
+ goto out;
+
+#define V(handle, symbol) \
+ do { \
+ *(void **)(&p ## symbol) = dlsym((handle), #symbol); \
+ if (p ## symbol == NULL) \
+ goto out; \
+ } \
+ while (0)
+ V(iokit_handle, IOMasterPort);
+ V(iokit_handle, IOServiceMatching);
+ V(iokit_handle, IOServiceGetMatchingServices);
+ V(iokit_handle, IOIteratorNext);
+ V(iokit_handle, IOObjectRelease);
+ V(iokit_handle, IORegistryEntryCreateCFProperty);
+ V(core_foundation_handle, CFStringCreateWithCString);
+ V(core_foundation_handle, CFStringGetSystemEncoding);
+ V(core_foundation_handle, CFDataGetBytePtr);
+ V(core_foundation_handle, CFDataGetLength);
+ V(core_foundation_handle, CFDataGetBytes);
+ V(core_foundation_handle, CFRelease);
+#undef V
+
+#define S(s) pCFStringCreateWithCString(NULL, (s), kCFStringEncodingUTF8)
+
+ kr = pIOMasterPort(MACH_PORT_NULL, &mach_port);
+ assert(kr == KERN_SUCCESS);
+ CFMutableDictionaryRef classes_to_match
+ = pIOServiceMatching("IOPlatformDevice");
+ kr = pIOServiceGetMatchingServices(mach_port, classes_to_match, &it);
+ assert(kr == KERN_SUCCESS);
+ service = pIOIteratorNext(it);
+
+ CFStringRef device_type_str = S("device_type");
+ CFStringRef clock_frequency_str = S("clock-frequency");
+
+ while (service != 0) {
+ CFDataRef data;
+ data = pIORegistryEntryCreateCFProperty(service,
+ device_type_str,
+ NULL,
+ 0);
+ if (data) {
+ const UInt8* raw = pCFDataGetBytePtr(data);
+ if (strncmp((char*)raw, "cpu", 3) == 0 ||
+ strncmp((char*)raw, "processor", 9) == 0) {
+ CFDataRef freq_ref;
+ freq_ref = pIORegistryEntryCreateCFProperty(service,
+ clock_frequency_str,
+ NULL,
+ 0);
+ if (freq_ref) {
+ uint32_t freq;
+ CFIndex len = pCFDataGetLength(freq_ref);
+ CFRange range;
+ range.location = 0;
+ range.length = len;
+
+ pCFDataGetBytes(freq_ref, range, (UInt8*)&freq);
+ *speed = freq;
+ pCFRelease(freq_ref);
+ pCFRelease(data);
+ break;
+ }
+ }
+ pCFRelease(data);
+ }
+
+ service = pIOIteratorNext(it);
+ }
+
+ pIOObjectRelease(it);
+
+ err = 0;
+out:
+ if (core_foundation_handle != NULL)
+ dlclose(core_foundation_handle);
+
+ if (iokit_handle != NULL)
+ dlclose(iokit_handle);
+
+ mach_port_deallocate(mach_task_self(), mach_port);
+
+ return err;
+}
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+ unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK),
+ multiplier = ((uint64_t)1000L / ticks);
+ char model[512];
+ size_t size;
+ unsigned int i;
+ natural_t numcpus;
+ mach_msg_type_number_t msg_type;
+ processor_cpu_load_info_data_t *info;
+ uv_cpu_info_t* cpu_info;
+ uint64_t cpuspeed;
+ int err;
+
+ size = sizeof(model);
+ if (sysctlbyname("machdep.cpu.brand_string", &model, &size, NULL, 0) &&
+ sysctlbyname("hw.model", &model, &size, NULL, 0)) {
+ return UV__ERR(errno);
+ }
+
+ err = uv__get_cpu_speed(&cpuspeed);
+ if (err < 0)
+ return err;
+
+ if (host_processor_info(mach_host_self(), PROCESSOR_CPU_LOAD_INFO, &numcpus,
+ (processor_info_array_t*)&info,
+ &msg_type) != KERN_SUCCESS) {
+ return UV_EINVAL; /* FIXME(bnoordhuis) Translate error. */
+ }
+
+ *cpu_infos = uv__malloc(numcpus * sizeof(**cpu_infos));
+ if (!(*cpu_infos)) {
+ vm_deallocate(mach_task_self(), (vm_address_t)info, msg_type);
+ return UV_ENOMEM;
+ }
+
+ *count = numcpus;
+
+ for (i = 0; i < numcpus; i++) {
+ cpu_info = &(*cpu_infos)[i];
+
+ cpu_info->cpu_times.user = (uint64_t)(info[i].cpu_ticks[0]) * multiplier;
+ cpu_info->cpu_times.nice = (uint64_t)(info[i].cpu_ticks[3]) * multiplier;
+ cpu_info->cpu_times.sys = (uint64_t)(info[i].cpu_ticks[1]) * multiplier;
+ cpu_info->cpu_times.idle = (uint64_t)(info[i].cpu_ticks[2]) * multiplier;
+ cpu_info->cpu_times.irq = 0;
+
+ cpu_info->model = uv__strdup(model);
+ cpu_info->speed = cpuspeed/1000000;
+ }
+ vm_deallocate(mach_task_self(), (vm_address_t)info, msg_type);
+
+ return 0;
+}
diff --git a/Utilities/cmlibuv/src/unix/dl.c b/Utilities/cmlibuv/src/unix/dl.c
new file mode 100644
index 0000000..fc1c052
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/dl.c
@@ -0,0 +1,80 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <dlfcn.h>
+#include <errno.h>
+#include <string.h>
+#include <locale.h>
+
+static int uv__dlerror(uv_lib_t* lib);
+
+
+int uv_dlopen(const char* filename, uv_lib_t* lib) {
+ dlerror(); /* Reset error status. */
+ lib->errmsg = NULL;
+ lib->handle = dlopen(filename, RTLD_LAZY);
+ return lib->handle ? 0 : uv__dlerror(lib);
+}
+
+
+void uv_dlclose(uv_lib_t* lib) {
+ uv__free(lib->errmsg);
+ lib->errmsg = NULL;
+
+ if (lib->handle) {
+ /* Ignore errors. No good way to signal them without leaking memory. */
+ dlclose(lib->handle);
+ lib->handle = NULL;
+ }
+}
+
+
+int uv_dlsym(uv_lib_t* lib, const char* name, void** ptr) {
+ dlerror(); /* Reset error status. */
+ *ptr = dlsym(lib->handle, name);
+ return uv__dlerror(lib);
+}
+
+
+const char* uv_dlerror(const uv_lib_t* lib) {
+ return lib->errmsg ? lib->errmsg : "no error";
+}
+
+
+static int uv__dlerror(uv_lib_t* lib) {
+ const char* errmsg;
+
+ uv__free(lib->errmsg);
+
+ errmsg = dlerror();
+
+ if (errmsg) {
+ lib->errmsg = uv__strdup(errmsg);
+ return -1;
+ }
+ else {
+ lib->errmsg = NULL;
+ return 0;
+ }
+}
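+
+/* Illustrative usage sketch (caller code; "libm.so.6" and "cos" are example
+ * names, not anything libuv requires): uv_dlsym() stores the raw symbol
+ * address, so the caller casts it to the expected function type, and the
+ * message from uv_dlerror() stays valid only until the next uv_dl*() call on
+ * the same uv_lib_t.
+ *
+ *   uv_lib_t lib;
+ *   double (*pcos)(double);
+ *   if (uv_dlopen("libm.so.6", &lib) != 0)
+ *     fprintf(stderr, "uv_dlopen: %s\n", uv_dlerror(&lib));
+ *   else if (uv_dlsym(&lib, "cos", (void**) &pcos) == 0)
+ *     printf("cos(0) = %f\n", pcos(0.0));
+ *   uv_dlclose(&lib);
+ */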
diff --git a/Utilities/cmlibuv/src/unix/freebsd.c b/Utilities/cmlibuv/src/unix/freebsd.c
new file mode 100644
index 0000000..fe795a0
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/freebsd.c
@@ -0,0 +1,282 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <assert.h>
+#include <string.h>
+#include <errno.h>
+
+#include <paths.h>
+#include <sys/user.h>
+#include <sys/types.h>
+#include <sys/resource.h>
+#include <sys/sysctl.h>
+#include <vm/vm_param.h> /* VM_LOADAVG */
+#include <time.h>
+#include <stdlib.h>
+#include <unistd.h> /* sysconf */
+#include <fcntl.h>
+
+#ifndef CPUSTATES
+# define CPUSTATES 5U
+#endif
+#ifndef CP_USER
+# define CP_USER 0
+# define CP_NICE 1
+# define CP_SYS 2
+# define CP_IDLE 3
+# define CP_INTR 4
+#endif
+
+
+int uv__platform_loop_init(uv_loop_t* loop) {
+ return uv__kqueue_init(loop);
+}
+
+
+void uv__platform_loop_delete(uv_loop_t* loop) {
+}
+
+int uv_exepath(char* buffer, size_t* size) {
+ char abspath[PATH_MAX * 2 + 1];
+ int mib[4];
+ size_t abspath_size;
+
+ if (buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_PROC;
+ mib[2] = KERN_PROC_PATHNAME;
+ mib[3] = -1;
+
+ abspath_size = sizeof abspath;
+ if (sysctl(mib, ARRAY_SIZE(mib), abspath, &abspath_size, NULL, 0))
+ return UV__ERR(errno);
+
+ assert(abspath_size > 0);
+ abspath_size -= 1;
+ *size -= 1;
+
+ if (*size > abspath_size)
+ *size = abspath_size;
+
+ memcpy(buffer, abspath, *size);
+ buffer[*size] = '\0';
+
+ return 0;
+}
+
+uint64_t uv_get_free_memory(void) {
+ int freecount;
+ size_t size = sizeof(freecount);
+
+ if (sysctlbyname("vm.stats.vm.v_free_count", &freecount, &size, NULL, 0))
+ return UV__ERR(errno);
+
+ return (uint64_t) freecount * sysconf(_SC_PAGESIZE);
+
+}
+
+
+uint64_t uv_get_total_memory(void) {
+ unsigned long info;
+ int which[] = {CTL_HW, HW_PHYSMEM};
+
+ size_t size = sizeof(info);
+
+ if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
+ return UV__ERR(errno);
+
+ return (uint64_t) info;
+}
+
+
+uint64_t uv_get_constrained_memory(void) {
+ return 0; /* Memory constraints are unknown. */
+}
+
+
+void uv_loadavg(double avg[3]) {
+ struct loadavg info;
+ size_t size = sizeof(info);
+ int which[] = {CTL_VM, VM_LOADAVG};
+
+ if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0) < 0) return;
+
+ avg[0] = (double) info.ldavg[0] / info.fscale;
+ avg[1] = (double) info.ldavg[1] / info.fscale;
+ avg[2] = (double) info.ldavg[2] / info.fscale;
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+ struct kinfo_proc kinfo;
+ size_t page_size;
+ size_t kinfo_size;
+ int mib[4];
+
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_PROC;
+ mib[2] = KERN_PROC_PID;
+ mib[3] = getpid();
+
+ kinfo_size = sizeof(kinfo);
+
+ if (sysctl(mib, ARRAY_SIZE(mib), &kinfo, &kinfo_size, NULL, 0))
+ return UV__ERR(errno);
+
+ page_size = getpagesize();
+
+#ifdef __DragonFly__
+ *rss = kinfo.kp_vm_rssize * page_size;
+#else
+ *rss = kinfo.ki_rssize * page_size;
+#endif
+
+ return 0;
+}
+
+
+int uv_uptime(double* uptime) {
+ int r;
+ struct timespec sp;
+ r = clock_gettime(CLOCK_MONOTONIC, &sp);
+ if (r)
+ return UV__ERR(errno);
+
+ *uptime = sp.tv_sec;
+ return 0;
+}
+
+
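+/* kern.cp_times exposes CPUSTATES tick counters per CPU, indexed by the CP_*
+ * constants above, so uv_cpu_info() sizes its buffer for `maxcpus` CPUs (the
+ * sysctl may report more slots than hw.ncpu on i386) and walks the array in
+ * strides of CPUSTATES via the `cur` offset.
+ */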
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+ unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK),
+ multiplier = ((uint64_t)1000L / ticks), cpuspeed, maxcpus,
+ cur = 0;
+ uv_cpu_info_t* cpu_info;
+ const char* maxcpus_key;
+ const char* cptimes_key;
+ const char* model_key;
+ char model[512];
+ long* cp_times;
+ int numcpus;
+ size_t size;
+ int i;
+
+#if defined(__DragonFly__)
+ /* This is not quite correct but DragonFlyBSD doesn't seem to have anything
+ * comparable to kern.smp.maxcpus or kern.cp_times (kern.cp_time is a total,
+ * not per CPU). At least this stops uv_cpu_info() from failing completely.
+ */
+ maxcpus_key = "hw.ncpu";
+ cptimes_key = "kern.cp_time";
+#else
+ maxcpus_key = "kern.smp.maxcpus";
+ cptimes_key = "kern.cp_times";
+#endif
+
+#if defined(__arm__) || defined(__aarch64__)
+ /* The key hw.model and hw.clockrate are not available on FreeBSD ARM. */
+ model_key = "hw.machine";
+ cpuspeed = 0;
+#else
+ model_key = "hw.model";
+
+ size = sizeof(cpuspeed);
+ if (sysctlbyname("hw.clockrate", &cpuspeed, &size, NULL, 0))
+ return -errno;
+#endif
+
+ size = sizeof(model);
+ if (sysctlbyname(model_key, &model, &size, NULL, 0))
+ return UV__ERR(errno);
+
+ size = sizeof(numcpus);
+ if (sysctlbyname("hw.ncpu", &numcpus, &size, NULL, 0))
+ return UV__ERR(errno);
+
+ *cpu_infos = uv__malloc(numcpus * sizeof(**cpu_infos));
+ if (!(*cpu_infos))
+ return UV_ENOMEM;
+
+ *count = numcpus;
+
+ /* kern.cp_times on FreeBSD i386 gives an array up to maxcpus instead of
+ * ncpu.
+ */
+ size = sizeof(maxcpus);
+ if (sysctlbyname(maxcpus_key, &maxcpus, &size, NULL, 0)) {
+ uv__free(*cpu_infos);
+ return UV__ERR(errno);
+ }
+
+ size = maxcpus * CPUSTATES * sizeof(long);
+
+ cp_times = uv__malloc(size);
+ if (cp_times == NULL) {
+ uv__free(*cpu_infos);
+ return UV_ENOMEM;
+ }
+
+ if (sysctlbyname(cptimes_key, cp_times, &size, NULL, 0)) {
+ uv__free(cp_times);
+ uv__free(*cpu_infos);
+ return UV__ERR(errno);
+ }
+
+ for (i = 0; i < numcpus; i++) {
+ cpu_info = &(*cpu_infos)[i];
+
+ cpu_info->cpu_times.user = (uint64_t)(cp_times[CP_USER+cur]) * multiplier;
+ cpu_info->cpu_times.nice = (uint64_t)(cp_times[CP_NICE+cur]) * multiplier;
+ cpu_info->cpu_times.sys = (uint64_t)(cp_times[CP_SYS+cur]) * multiplier;
+ cpu_info->cpu_times.idle = (uint64_t)(cp_times[CP_IDLE+cur]) * multiplier;
+ cpu_info->cpu_times.irq = (uint64_t)(cp_times[CP_INTR+cur]) * multiplier;
+
+ cpu_info->model = uv__strdup(model);
+ cpu_info->speed = cpuspeed;
+
+ cur+=CPUSTATES;
+ }
+
+ uv__free(cp_times);
+ return 0;
+}
+
+
+int uv__sendmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
+#if __FreeBSD__ >= 11
+ return sendmmsg(fd, mmsg, vlen, /* flags */ 0);
+#else
+ return errno = ENOSYS, -1;
+#endif
+}
+
+
+int uv__recvmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
+#if __FreeBSD__ >= 11
+ return recvmmsg(fd, mmsg, vlen, 0 /* flags */, NULL /* timeout */);
+#else
+ return errno = ENOSYS, -1;
+#endif
+}
diff --git a/Utilities/cmlibuv/src/unix/fs.c b/Utilities/cmlibuv/src/unix/fs.c
new file mode 100644
index 0000000..6d57cee
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/fs.c
@@ -0,0 +1,2144 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/* Caveat emptor: this file deviates from the libuv convention of returning
+ * negated errno codes. Most uv_fs_*() functions map directly to the system
+ * call of the same name. For more complex wrappers, it's easier to just
+ * return -1 with errno set. The dispatcher in uv__fs_work() takes care of
+ * getting the errno to the right place (req->result or as the return value.)
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <errno.h>
+#include <dlfcn.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h> /* PATH_MAX */
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/uio.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <poll.h>
+
+#if defined(__DragonFly__) || \
+ defined(__FreeBSD__) || \
+ defined(__FreeBSD_kernel__) || \
+ defined(__OpenBSD__) || \
+ defined(__NetBSD__)
+# define HAVE_PREADV 1
+#else
+# define HAVE_PREADV 0
+#endif
+
+#if defined(__linux__) || defined(__sun)
+# include <sys/sendfile.h>
+#endif
+
+#if defined(__APPLE__)
+# include <sys/sysctl.h>
+#elif defined(__linux__) && !defined(FICLONE)
+# include <sys/ioctl.h>
+# define FICLONE _IOW(0x94, 9, int)
+#endif
+
+#if defined(_AIX) && !defined(_AIX71)
+# include <utime.h>
+#endif
+
+#if defined(__APPLE__) || \
+ defined(__DragonFly__) || \
+ defined(__FreeBSD__) || \
+ defined(__FreeBSD_kernel__) || \
+ defined(__OpenBSD__) || \
+ defined(__NetBSD__)
+# include <sys/param.h>
+# include <sys/mount.h>
+#elif defined(__sun) || \
+ defined(__MVS__) || \
+ defined(__NetBSD__) || \
+ defined(__HAIKU__) || \
+ defined(__QNX__)
+# include <sys/statvfs.h>
+#else
+# include <sys/statfs.h>
+#endif
+
+#if defined(_AIX) && _XOPEN_SOURCE <= 600
+extern char *mkdtemp(char *template); /* See issue #740 on AIX < 7 */
+#endif
+
+#define INIT(subtype) \
+ do { \
+ if (req == NULL) \
+ return UV_EINVAL; \
+ UV_REQ_INIT(req, UV_FS); \
+ req->fs_type = UV_FS_ ## subtype; \
+ req->result = 0; \
+ req->ptr = NULL; \
+ req->loop = loop; \
+ req->path = NULL; \
+ req->new_path = NULL; \
+ req->bufs = NULL; \
+ req->cb = cb; \
+ } \
+ while (0)
+
+#define PATH \
+ do { \
+ assert(path != NULL); \
+ if (cb == NULL) { \
+ req->path = path; \
+ } else { \
+ req->path = uv__strdup(path); \
+ if (req->path == NULL) \
+ return UV_ENOMEM; \
+ } \
+ } \
+ while (0)
+
+#define PATH2 \
+ do { \
+ if (cb == NULL) { \
+ req->path = path; \
+ req->new_path = new_path; \
+ } else { \
+ size_t path_len; \
+ size_t new_path_len; \
+ path_len = strlen(path) + 1; \
+ new_path_len = strlen(new_path) + 1; \
+ req->path = uv__malloc(path_len + new_path_len); \
+ if (req->path == NULL) \
+ return UV_ENOMEM; \
+ req->new_path = req->path + path_len; \
+ memcpy((void*) req->path, path, path_len); \
+ memcpy((void*) req->new_path, new_path, new_path_len); \
+ } \
+ } \
+ while (0)
+
+#define POST \
+ do { \
+ if (cb != NULL) { \
+ uv__req_register(loop, req); \
+ uv__work_submit(loop, \
+ &req->work_req, \
+ UV__WORK_FAST_IO, \
+ uv__fs_work, \
+ uv__fs_done); \
+ return 0; \
+ } \
+ else { \
+ uv__fs_work(&req->work_req); \
+ return req->result; \
+ } \
+ } \
+ while (0)
+
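+/* Sketch of how a caller exercises the two POST paths above (illustrative
+ * only; "/tmp/example" is a made-up path). With a NULL callback the request
+ * runs synchronously on the calling thread and the return value is
+ * req->result, i.e. the fd on success or the negated errno on failure; with
+ * a callback the request is registered and handed to the loop's threadpool
+ * and the result is delivered through uv__fs_done(). The synchronous form is
+ * the one uv__fs_copyfile() uses further down in this file:
+ *
+ *   uv_fs_t req;
+ *   int fd = uv_fs_open(NULL, &req, "/tmp/example", O_RDONLY, 0, NULL);
+ *   uv_fs_req_cleanup(&req);
+ *   if (fd < 0)
+ *     return fd;
+ */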
+
+static int uv__fs_close(int fd) {
+ int rc;
+
+ rc = uv__close_nocancel(fd);
+ if (rc == -1)
+ if (errno == EINTR || errno == EINPROGRESS)
+ rc = 0; /* The close is in progress, not an error. */
+
+ return rc;
+}
+
+
+static ssize_t uv__fs_fsync(uv_fs_t* req) {
+#if defined(__APPLE__)
+ /* Apple's fdatasync and fsync explicitly do NOT flush the drive write cache
+ * to the drive platters. This is in contrast to Linux's fdatasync and fsync
+ * which do, according to recent man pages. F_FULLFSYNC is Apple's equivalent
+ * for flushing buffered data to permanent storage. If F_FULLFSYNC is not
+ * supported by the file system we fall back to F_BARRIERFSYNC or fsync().
+ * This is the same approach taken by sqlite, except sqlite does not issue
+ * an F_BARRIERFSYNC call.
+ */
+ int r;
+
+ r = fcntl(req->file, F_FULLFSYNC);
+ if (r != 0)
+ r = fcntl(req->file, 85 /* F_BARRIERFSYNC */); /* fsync + barrier */
+ if (r != 0)
+ r = fsync(req->file);
+ return r;
+#else
+ return fsync(req->file);
+#endif
+}
+
+
+static ssize_t uv__fs_fdatasync(uv_fs_t* req) {
+#if defined(__linux__) || defined(__sun) || defined(__NetBSD__)
+ return fdatasync(req->file);
+#elif defined(__APPLE__)
+ /* See the comment in uv__fs_fsync. */
+ return uv__fs_fsync(req);
+#else
+ return fsync(req->file);
+#endif
+}
+
+
+UV_UNUSED(static struct timespec uv__fs_to_timespec(double time)) {
+ struct timespec ts;
+ ts.tv_sec = time;
+ ts.tv_nsec = (uint64_t)(time * 1000000) % 1000000 * 1000;
+ return ts;
+}
+
+UV_UNUSED(static struct timeval uv__fs_to_timeval(double time)) {
+ struct timeval tv;
+ tv.tv_sec = time;
+ tv.tv_usec = (uint64_t)(time * 1000000) % 1000000;
+ return tv;
+}
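+
+/* Worked example for the conversions above: a timestamp of 1581973200.25
+ * seconds yields tv_sec == 1581973200 with tv_nsec == 250000000 (or
+ * tv_usec == 250000, respectively); anything finer than a microsecond in the
+ * double is intentionally discarded by the modulo on microseconds.
+ */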
+
+static ssize_t uv__fs_futime(uv_fs_t* req) {
+#if defined(__linux__) \
+ || defined(_AIX71) \
+ || defined(__HAIKU__)
+  /* futimens() has nanosecond resolution but we stick to microseconds
+ * for the sake of consistency with other platforms.
+ */
+ struct timespec ts[2];
+ ts[0] = uv__fs_to_timespec(req->atime);
+ ts[1] = uv__fs_to_timespec(req->mtime);
+ return futimens(req->file, ts);
+#elif defined(__APPLE__) \
+ || defined(__DragonFly__) \
+ || defined(__FreeBSD__) \
+ || defined(__FreeBSD_kernel__) \
+ || defined(__NetBSD__) \
+ || defined(__OpenBSD__) \
+ || defined(__sun)
+ struct timeval tv[2];
+ tv[0] = uv__fs_to_timeval(req->atime);
+ tv[1] = uv__fs_to_timeval(req->mtime);
+# if defined(__sun)
+ return futimesat(req->file, NULL, tv);
+# else
+ return futimes(req->file, tv);
+# endif
+#elif defined(__MVS__)
+ attrib_t atr;
+ memset(&atr, 0, sizeof(atr));
+ atr.att_mtimechg = 1;
+ atr.att_atimechg = 1;
+ atr.att_mtime = req->mtime;
+ atr.att_atime = req->atime;
+ return __fchattr(req->file, &atr, sizeof(atr));
+#else
+ errno = ENOSYS;
+ return -1;
+#endif
+}
+
+#if (defined(__sun) || defined(__hpux)) && (_XOPEN_SOURCE < 600 || defined(CMAKE_BOOTSTRAP))
+static char* uv__mkdtemp(char *template)
+{
+ if (!mktemp(template) || mkdir(template, 0700))
+ return NULL;
+ return template;
+}
+#else
+#define uv__mkdtemp mkdtemp
+#endif
+
+static ssize_t uv__fs_mkdtemp(uv_fs_t* req) {
+ return uv__mkdtemp((char*) req->path) ? 0 : -1;
+}
+
+
+static int (*uv__mkostemp)(char*, int);
+
+
+static void uv__mkostemp_initonce(void) {
+  /* z/OS doesn't have RTLD_DEFAULT but that's okay
+ * because it doesn't have mkostemp(O_CLOEXEC) either.
+ */
+#ifdef RTLD_DEFAULT
+ uv__mkostemp = (int (*)(char*, int)) dlsym(RTLD_DEFAULT, "mkostemp");
+
+ /* We don't care about errors, but we do want to clean them up.
+ * If there has been no error, then dlerror() will just return
+ * NULL.
+ */
+ dlerror();
+#endif /* RTLD_DEFAULT */
+}
+
+
+static int uv__fs_mkstemp(uv_fs_t* req) {
+ static uv_once_t once = UV_ONCE_INIT;
+ int r;
+#ifdef O_CLOEXEC
+ static int no_cloexec_support;
+#endif
+ static const char pattern[] = "XXXXXX";
+ static const size_t pattern_size = sizeof(pattern) - 1;
+ char* path;
+ size_t path_length;
+
+ path = (char*) req->path;
+ path_length = strlen(path);
+
+ /* EINVAL can be returned for 2 reasons:
+ 1. The template's last 6 characters were not XXXXXX
+ 2. open() didn't support O_CLOEXEC
+ We want to avoid going to the fallback path in case
+ of 1, so it's manually checked before. */
+ if (path_length < pattern_size ||
+ strcmp(path + path_length - pattern_size, pattern)) {
+ errno = EINVAL;
+ r = -1;
+ goto clobber;
+ }
+
+ uv_once(&once, uv__mkostemp_initonce);
+
+#ifdef O_CLOEXEC
+ if (uv__load_relaxed(&no_cloexec_support) == 0 && uv__mkostemp != NULL) {
+ r = uv__mkostemp(path, O_CLOEXEC);
+
+ if (r >= 0)
+ return r;
+
+    /* If mkostemp() returns EINVAL, it means the kernel doesn't
+       support O_CLOEXEC, so we just fall back to mkstemp() below. */
+ if (errno != EINVAL)
+ goto clobber;
+
+    /* We set the static variable so that subsequent calls don't even
+       try to use mkostemp. */
+ uv__store_relaxed(&no_cloexec_support, 1);
+ }
+#endif /* O_CLOEXEC */
+
+ if (req->cb != NULL)
+ uv_rwlock_rdlock(&req->loop->cloexec_lock);
+
+ r = mkstemp(path);
+
+ /* In case of failure `uv__cloexec` will leave error in `errno`,
+ * so it is enough to just set `r` to `-1`.
+ */
+ if (r >= 0 && uv__cloexec(r, 1) != 0) {
+ r = uv__close(r);
+ if (r != 0)
+ abort();
+ r = -1;
+ }
+
+ if (req->cb != NULL)
+ uv_rwlock_rdunlock(&req->loop->cloexec_lock);
+
+clobber:
+ if (r < 0)
+ path[0] = '\0';
+ return r;
+}
+
+
+static ssize_t uv__fs_open(uv_fs_t* req) {
+#ifdef O_CLOEXEC
+ return open(req->path, req->flags | O_CLOEXEC, req->mode);
+#else /* O_CLOEXEC */
+ int r;
+
+ if (req->cb != NULL)
+ uv_rwlock_rdlock(&req->loop->cloexec_lock);
+
+ r = open(req->path, req->flags, req->mode);
+
+ /* In case of failure `uv__cloexec` will leave error in `errno`,
+ * so it is enough to just set `r` to `-1`.
+ */
+ if (r >= 0 && uv__cloexec(r, 1) != 0) {
+ r = uv__close(r);
+ if (r != 0)
+ abort();
+ r = -1;
+ }
+
+ if (req->cb != NULL)
+ uv_rwlock_rdunlock(&req->loop->cloexec_lock);
+
+ return r;
+#endif /* O_CLOEXEC */
+}
+
+
+#if !HAVE_PREADV
+static ssize_t uv__fs_preadv(uv_file fd,
+ uv_buf_t* bufs,
+ unsigned int nbufs,
+ off_t off) {
+ uv_buf_t* buf;
+ uv_buf_t* end;
+ ssize_t result;
+ ssize_t rc;
+ size_t pos;
+
+ assert(nbufs > 0);
+
+ result = 0;
+ pos = 0;
+ buf = bufs + 0;
+ end = bufs + nbufs;
+
+ for (;;) {
+ do
+ rc = pread(fd, buf->base + pos, buf->len - pos, off + result);
+ while (rc == -1 && errno == EINTR);
+
+ if (rc == 0)
+ break;
+
+ if (rc == -1 && result == 0)
+ return UV__ERR(errno);
+
+ if (rc == -1)
+ break; /* We read some data so return that, ignore the error. */
+
+ pos += rc;
+ result += rc;
+
+ if (pos < buf->len)
+ continue;
+
+ pos = 0;
+ buf += 1;
+
+ if (buf == end)
+ break;
+ }
+
+ return result;
+}
+#endif
+
+
+static ssize_t uv__fs_read(uv_fs_t* req) {
+#if defined(__linux__)
+ static int no_preadv;
+#endif
+ unsigned int iovmax;
+ ssize_t result;
+
+ iovmax = uv__getiovmax();
+ if (req->nbufs > iovmax)
+ req->nbufs = iovmax;
+
+ if (req->off < 0) {
+ if (req->nbufs == 1)
+ result = read(req->file, req->bufs[0].base, req->bufs[0].len);
+ else
+ result = readv(req->file, (struct iovec*) req->bufs, req->nbufs);
+ } else {
+ if (req->nbufs == 1) {
+ result = pread(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
+ goto done;
+ }
+
+#if HAVE_PREADV
+ result = preadv(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
+#else
+# if defined(__linux__)
+ if (uv__load_relaxed(&no_preadv)) retry:
+# endif
+ {
+ result = uv__fs_preadv(req->file, req->bufs, req->nbufs, req->off);
+ }
+# if defined(__linux__)
+ else {
+ result = uv__preadv(req->file,
+ (struct iovec*)req->bufs,
+ req->nbufs,
+ req->off);
+ if (result == -1 && errno == ENOSYS) {
+ uv__store_relaxed(&no_preadv, 1);
+ goto retry;
+ }
+ }
+# endif
+#endif
+ }
+
+done:
+ /* Early cleanup of bufs allocation, since we're done with it. */
+ if (req->bufs != req->bufsml)
+ uv__free(req->bufs);
+
+ req->bufs = NULL;
+ req->nbufs = 0;
+
+#ifdef __PASE__
+ /* PASE returns EOPNOTSUPP when reading a directory, convert to EISDIR */
+ if (result == -1 && errno == EOPNOTSUPP) {
+ struct stat buf;
+ ssize_t rc;
+ rc = fstat(req->file, &buf);
+ if (rc == 0 && S_ISDIR(buf.st_mode)) {
+ errno = EISDIR;
+ }
+ }
+#endif
+
+ return result;
+}
+
+
+#if defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_8)
+#define UV_CONST_DIRENT uv__dirent_t
+#else
+#define UV_CONST_DIRENT const uv__dirent_t
+#endif
+
+
+static int uv__fs_scandir_filter(UV_CONST_DIRENT* dent) {
+ return strcmp(dent->d_name, ".") != 0 && strcmp(dent->d_name, "..") != 0;
+}
+
+
+static int uv__fs_scandir_sort(UV_CONST_DIRENT** a, UV_CONST_DIRENT** b) {
+ return strcmp((*a)->d_name, (*b)->d_name);
+}
+
+
+static ssize_t uv__fs_scandir(uv_fs_t* req) {
+ uv__dirent_t** dents;
+ int n;
+
+ dents = NULL;
+ n = scandir(req->path, &dents, uv__fs_scandir_filter, uv__fs_scandir_sort);
+
+ /* NOTE: We will use nbufs as an index field */
+ req->nbufs = 0;
+
+ if (n == 0) {
+ /* OS X still needs to deallocate some memory.
+ * Memory was allocated using the system allocator, so use free() here.
+ */
+ free(dents);
+ dents = NULL;
+ } else if (n == -1) {
+ return n;
+ }
+
+ req->ptr = dents;
+
+ return n;
+}
+
+static int uv__fs_opendir(uv_fs_t* req) {
+ uv_dir_t* dir;
+
+ dir = uv__malloc(sizeof(*dir));
+ if (dir == NULL)
+ goto error;
+
+ dir->dir = opendir(req->path);
+ if (dir->dir == NULL)
+ goto error;
+
+ req->ptr = dir;
+ return 0;
+
+error:
+ uv__free(dir);
+ req->ptr = NULL;
+ return -1;
+}
+
+static int uv__fs_readdir(uv_fs_t* req) {
+ uv_dir_t* dir;
+ uv_dirent_t* dirent;
+ struct dirent* res;
+ unsigned int dirent_idx;
+ unsigned int i;
+
+ dir = req->ptr;
+ dirent_idx = 0;
+
+ while (dirent_idx < dir->nentries) {
+ /* readdir() returns NULL on end of directory, as well as on error. errno
+ is used to differentiate between the two conditions. */
+ errno = 0;
+ res = readdir(dir->dir);
+
+ if (res == NULL) {
+ if (errno != 0)
+ goto error;
+ break;
+ }
+
+ if (strcmp(res->d_name, ".") == 0 || strcmp(res->d_name, "..") == 0)
+ continue;
+
+ dirent = &dir->dirents[dirent_idx];
+ dirent->name = uv__strdup(res->d_name);
+
+ if (dirent->name == NULL)
+ goto error;
+
+ dirent->type = uv__fs_get_dirent_type(res);
+ ++dirent_idx;
+ }
+
+ return dirent_idx;
+
+error:
+ for (i = 0; i < dirent_idx; ++i) {
+ uv__free((char*) dir->dirents[i].name);
+ dir->dirents[i].name = NULL;
+ }
+
+ return -1;
+}
+
+static int uv__fs_closedir(uv_fs_t* req) {
+ uv_dir_t* dir;
+
+ dir = req->ptr;
+
+ if (dir->dir != NULL) {
+ closedir(dir->dir);
+ dir->dir = NULL;
+ }
+
+ uv__free(req->ptr);
+ req->ptr = NULL;
+ return 0;
+}
+
+static int uv__fs_statfs(uv_fs_t* req) {
+ uv_statfs_t* stat_fs;
+#if defined(__sun) || \
+ defined(__MVS__) || \
+ defined(__NetBSD__) || \
+ defined(__HAIKU__) || \
+ defined(__QNX__)
+ struct statvfs buf;
+
+ if (0 != statvfs(req->path, &buf))
+#else
+ struct statfs buf;
+
+ if (0 != statfs(req->path, &buf))
+#endif /* defined(__sun) */
+ return -1;
+
+ stat_fs = uv__malloc(sizeof(*stat_fs));
+ if (stat_fs == NULL) {
+ errno = ENOMEM;
+ return -1;
+ }
+
+#if defined(__sun) || \
+ defined(__MVS__) || \
+ defined(__OpenBSD__) || \
+ defined(__NetBSD__) || \
+ defined(__HAIKU__) || \
+ defined(__QNX__)
+ stat_fs->f_type = 0; /* f_type is not supported. */
+#else
+ stat_fs->f_type = buf.f_type;
+#endif
+ stat_fs->f_bsize = buf.f_bsize;
+ stat_fs->f_blocks = buf.f_blocks;
+ stat_fs->f_bfree = buf.f_bfree;
+ stat_fs->f_bavail = buf.f_bavail;
+ stat_fs->f_files = buf.f_files;
+ stat_fs->f_ffree = buf.f_ffree;
+ req->ptr = stat_fs;
+ return 0;
+}
+
+static ssize_t uv__fs_pathmax_size(const char* path) {
+ ssize_t pathmax;
+
+ pathmax = pathconf(path, _PC_PATH_MAX);
+
+ if (pathmax == -1)
+ pathmax = UV__PATH_MAX;
+
+ return pathmax;
+}
+
+static ssize_t uv__fs_readlink(uv_fs_t* req) {
+ ssize_t maxlen;
+ ssize_t len;
+ char* buf;
+
+#if defined(_POSIX_PATH_MAX) || defined(PATH_MAX)
+ maxlen = uv__fs_pathmax_size(req->path);
+#else
+ /* We may not have a real PATH_MAX. Read size of link. */
+ struct stat st;
+ int ret;
+ ret = lstat(req->path, &st);
+ if (ret != 0)
+ return -1;
+ if (!S_ISLNK(st.st_mode)) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ maxlen = st.st_size;
+
+ /* According to readlink(2) lstat can report st_size == 0
+ for some symlinks, such as those in /proc or /sys. */
+ if (maxlen == 0)
+ maxlen = uv__fs_pathmax_size(req->path);
+#endif
+
+ buf = uv__malloc(maxlen);
+
+ if (buf == NULL) {
+ errno = ENOMEM;
+ return -1;
+ }
+
+#if defined(__MVS__)
+ len = os390_readlink(req->path, buf, maxlen);
+#else
+ len = readlink(req->path, buf, maxlen);
+#endif
+
+ if (len == -1) {
+ uv__free(buf);
+ return -1;
+ }
+
+ /* Uncommon case: resize to make room for the trailing nul byte. */
+ if (len == maxlen) {
+ buf = uv__reallocf(buf, len + 1);
+
+ if (buf == NULL)
+ return -1;
+ }
+
+ buf[len] = '\0';
+ req->ptr = buf;
+
+ return 0;
+}
+
+static ssize_t uv__fs_realpath(uv_fs_t* req) {
+ char* buf;
+
+#if defined(_POSIX_VERSION) && _POSIX_VERSION >= 200809L
+ buf = realpath(req->path, NULL);
+ if (buf == NULL)
+ return -1;
+#else
+ ssize_t len;
+
+ len = uv__fs_pathmax_size(req->path);
+ buf = uv__malloc(len + 1);
+
+ if (buf == NULL) {
+ errno = ENOMEM;
+ return -1;
+ }
+
+ if (realpath(req->path, buf) == NULL) {
+ uv__free(buf);
+ return -1;
+ }
+#endif
+
+ req->ptr = buf;
+
+ return 0;
+}
+
+static ssize_t uv__fs_sendfile_emul(uv_fs_t* req) {
+ struct pollfd pfd;
+ int use_pread;
+ off_t offset;
+ ssize_t nsent;
+ ssize_t nread;
+ ssize_t nwritten;
+ size_t buflen;
+ size_t len;
+ ssize_t n;
+ int in_fd;
+ int out_fd;
+ char buf[8192];
+
+ len = req->bufsml[0].len;
+ in_fd = req->flags;
+ out_fd = req->file;
+ offset = req->off;
+ use_pread = 1;
+
+ /* Here are the rules regarding errors:
+ *
+ * 1. Read errors are reported only if nsent==0, otherwise we return nsent.
+ * The user needs to know that some data has already been sent, to stop
+ * them from sending it twice.
+ *
+ * 2. Write errors are always reported. Write errors are bad because they
+ * mean data loss: we've read data but now we can't write it out.
+ *
+ * We try to use pread() and fall back to regular read() if the source fd
+ * doesn't support positional reads, for example when it's a pipe fd.
+ *
+ * If we get EAGAIN when writing to the target fd, we poll() on it until
+ * it becomes writable again.
+ *
+ * FIXME: If we get a write error when use_pread==1, it should be safe to
+ * return the number of sent bytes instead of an error because pread()
+ * is, in theory, idempotent. However, special files in /dev or /proc
+ * may support pread() but not necessarily return the same data on
+ * successive reads.
+ *
+ * FIXME: There is no way now to signal that we managed to send *some* data
+ * before a write error.
+ */
+ for (nsent = 0; (size_t) nsent < len; ) {
+ buflen = len - nsent;
+
+ if (buflen > sizeof(buf))
+ buflen = sizeof(buf);
+
+ do
+ if (use_pread)
+ nread = pread(in_fd, buf, buflen, offset);
+ else
+ nread = read(in_fd, buf, buflen);
+ while (nread == -1 && errno == EINTR);
+
+ if (nread == 0)
+ goto out;
+
+ if (nread == -1) {
+ if (use_pread && nsent == 0 && (errno == EIO || errno == ESPIPE)) {
+ use_pread = 0;
+ continue;
+ }
+
+ if (nsent == 0)
+ nsent = -1;
+
+ goto out;
+ }
+
+ for (nwritten = 0; nwritten < nread; ) {
+ do
+ n = write(out_fd, buf + nwritten, nread - nwritten);
+ while (n == -1 && errno == EINTR);
+
+ if (n != -1) {
+ nwritten += n;
+ continue;
+ }
+
+ if (errno != EAGAIN && errno != EWOULDBLOCK) {
+ nsent = -1;
+ goto out;
+ }
+
+ pfd.fd = out_fd;
+ pfd.events = POLLOUT;
+ pfd.revents = 0;
+
+ do
+ n = poll(&pfd, 1, -1);
+ while (n == -1 && errno == EINTR);
+
+ if (n == -1 || (pfd.revents & ~POLLOUT) != 0) {
+ errno = EIO;
+ nsent = -1;
+ goto out;
+ }
+ }
+
+ offset += nread;
+ nsent += nread;
+ }
+
+out:
+ if (nsent != -1)
+ req->off = offset;
+
+ return nsent;
+}
+
+
+static ssize_t uv__fs_sendfile(uv_fs_t* req) {
+ int in_fd;
+ int out_fd;
+
+ in_fd = req->flags;
+ out_fd = req->file;
+
+#if defined(__linux__) || defined(__sun)
+ {
+ off_t off;
+ ssize_t r;
+
+ off = req->off;
+
+#ifdef __linux__
+ {
+ static int copy_file_range_support = 1;
+
+ if (copy_file_range_support) {
+ r = uv__fs_copy_file_range(in_fd, NULL, out_fd, &off, req->bufsml[0].len, 0);
+
+ if (r == -1 && errno == ENOSYS) {
+ errno = 0;
+ copy_file_range_support = 0;
+ } else {
+ goto ok;
+ }
+ }
+ }
+#endif
+
+ r = sendfile(out_fd, in_fd, &off, req->bufsml[0].len);
+
+ok:
+ /* sendfile() on SunOS returns EINVAL if the target fd is not a socket but
+ * it still writes out data. Fortunately, we can detect it by checking if
+ * the offset has been updated.
+ */
+ if (r != -1 || off > req->off) {
+ r = off - req->off;
+ req->off = off;
+ return r;
+ }
+
+ if (errno == EINVAL ||
+ errno == EIO ||
+ errno == ENOTSOCK ||
+ errno == EXDEV) {
+ errno = 0;
+ return uv__fs_sendfile_emul(req);
+ }
+
+ return -1;
+ }
+#elif defined(__APPLE__) || \
+ defined(__DragonFly__) || \
+ defined(__FreeBSD__) || \
+ defined(__FreeBSD_kernel__)
+ {
+ off_t len;
+ ssize_t r;
+
+ /* sendfile() on FreeBSD and Darwin returns EAGAIN if the target fd is in
+ * non-blocking mode and not all data could be written. If a non-zero
+ * number of bytes have been sent, we don't consider it an error.
+ */
+
+#if defined(__FreeBSD__) || defined(__DragonFly__)
+ len = 0;
+ r = sendfile(in_fd, out_fd, req->off, req->bufsml[0].len, NULL, &len, 0);
+#elif defined(__FreeBSD_kernel__)
+ len = 0;
+ r = bsd_sendfile(in_fd,
+ out_fd,
+ req->off,
+ req->bufsml[0].len,
+ NULL,
+ &len,
+ 0);
+#else
+ /* The darwin sendfile takes len as an input for the length to send,
+ * so make sure to initialize it with the caller's value. */
+ len = req->bufsml[0].len;
+ r = sendfile(in_fd, out_fd, req->off, &len, NULL, 0);
+#endif
+
+ /*
+ * The man page for sendfile(2) on DragonFly states that `len` contains
+ * a meaningful value ONLY in case of EAGAIN and EINTR.
+     * Nothing is said about its value in case of other errors, so it is
+     * better not to rely on the potentially wrong assumption that it was not
+     * modified by the syscall.
+ */
+ if (r == 0 || ((errno == EAGAIN || errno == EINTR) && len != 0)) {
+ req->off += len;
+ return (ssize_t) len;
+ }
+
+ if (errno == EINVAL ||
+ errno == EIO ||
+ errno == ENOTSOCK ||
+ errno == EXDEV) {
+ errno = 0;
+ return uv__fs_sendfile_emul(req);
+ }
+
+ return -1;
+ }
+#else
+ /* Squelch compiler warnings. */
+ (void) &in_fd;
+ (void) &out_fd;
+
+ return uv__fs_sendfile_emul(req);
+#endif
+}
+
+
+static ssize_t uv__fs_utime(uv_fs_t* req) {
+#if defined(__linux__) \
+ || defined(_AIX71) \
+ || defined(__sun) \
+ || defined(__HAIKU__)
+  /* utimensat() has nanosecond resolution but we stick to microseconds
+ * for the sake of consistency with other platforms.
+ */
+ struct timespec ts[2];
+ ts[0] = uv__fs_to_timespec(req->atime);
+ ts[1] = uv__fs_to_timespec(req->mtime);
+ return utimensat(AT_FDCWD, req->path, ts, 0);
+#elif defined(__APPLE__) \
+ || defined(__DragonFly__) \
+ || defined(__FreeBSD__) \
+ || defined(__FreeBSD_kernel__) \
+ || defined(__NetBSD__) \
+ || defined(__OpenBSD__)
+ struct timeval tv[2];
+ tv[0] = uv__fs_to_timeval(req->atime);
+ tv[1] = uv__fs_to_timeval(req->mtime);
+ return utimes(req->path, tv);
+#elif defined(_AIX) \
+ && !defined(_AIX71)
+ struct utimbuf buf;
+ buf.actime = req->atime;
+ buf.modtime = req->mtime;
+ return utime(req->path, &buf);
+#elif defined(__MVS__)
+ attrib_t atr;
+ memset(&atr, 0, sizeof(atr));
+ atr.att_mtimechg = 1;
+ atr.att_atimechg = 1;
+ atr.att_mtime = req->mtime;
+ atr.att_atime = req->atime;
+ return __lchattr((char*) req->path, &atr, sizeof(atr));
+#else
+ errno = ENOSYS;
+ return -1;
+#endif
+}
+
+
+static ssize_t uv__fs_lutime(uv_fs_t* req) {
+#if defined(__linux__) || \
+ defined(_AIX71) || \
+ defined(__sun) || \
+ defined(__HAIKU__)
+ struct timespec ts[2];
+ ts[0] = uv__fs_to_timespec(req->atime);
+ ts[1] = uv__fs_to_timespec(req->mtime);
+ return utimensat(AT_FDCWD, req->path, ts, AT_SYMLINK_NOFOLLOW);
+#elif defined(__APPLE__) || \
+ defined(__DragonFly__) || \
+ defined(__FreeBSD__) || \
+ defined(__FreeBSD_kernel__) || \
+ defined(__NetBSD__)
+ struct timeval tv[2];
+ tv[0] = uv__fs_to_timeval(req->atime);
+ tv[1] = uv__fs_to_timeval(req->mtime);
+ return lutimes(req->path, tv);
+#else
+ errno = ENOSYS;
+ return -1;
+#endif
+}
+
+
+static ssize_t uv__fs_write(uv_fs_t* req) {
+#if defined(__linux__)
+ static int no_pwritev;
+#endif
+ ssize_t r;
+
+ /* Serialize writes on OS X, concurrent write() and pwrite() calls result in
+ * data loss. We can't use a per-file descriptor lock, the descriptor may be
+ * a dup().
+ */
+#if defined(__APPLE__)
+ static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
+
+ if (pthread_mutex_lock(&lock))
+ abort();
+#endif
+
+ if (req->off < 0) {
+ if (req->nbufs == 1)
+ r = write(req->file, req->bufs[0].base, req->bufs[0].len);
+ else
+ r = writev(req->file, (struct iovec*) req->bufs, req->nbufs);
+ } else {
+ if (req->nbufs == 1) {
+ r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
+ goto done;
+ }
+#if HAVE_PREADV
+ r = pwritev(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
+#else
+# if defined(__linux__)
+ if (no_pwritev) retry:
+# endif
+ {
+ r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
+ }
+# if defined(__linux__)
+ else {
+ r = uv__pwritev(req->file,
+ (struct iovec*) req->bufs,
+ req->nbufs,
+ req->off);
+ if (r == -1 && errno == ENOSYS) {
+ no_pwritev = 1;
+ goto retry;
+ }
+ }
+# endif
+#endif
+ }
+
+done:
+#if defined(__APPLE__)
+ if (pthread_mutex_unlock(&lock))
+ abort();
+#endif
+
+ return r;
+}
+
+static ssize_t uv__fs_copyfile(uv_fs_t* req) {
+ uv_fs_t fs_req;
+ uv_file srcfd;
+ uv_file dstfd;
+ struct stat src_statsbuf;
+ struct stat dst_statsbuf;
+ int dst_flags;
+ int result;
+ int err;
+ off_t bytes_to_send;
+ off_t in_offset;
+ off_t bytes_written;
+ size_t bytes_chunk;
+
+ dstfd = -1;
+ err = 0;
+
+ /* Open the source file. */
+ srcfd = uv_fs_open(NULL, &fs_req, req->path, O_RDONLY, 0, NULL);
+ uv_fs_req_cleanup(&fs_req);
+
+ if (srcfd < 0)
+ return srcfd;
+
+ /* Get the source file's mode. */
+ if (fstat(srcfd, &src_statsbuf)) {
+ err = UV__ERR(errno);
+ goto out;
+ }
+
+ dst_flags = O_WRONLY | O_CREAT;
+
+ if (req->flags & UV_FS_COPYFILE_EXCL)
+ dst_flags |= O_EXCL;
+
+ /* Open the destination file. */
+ dstfd = uv_fs_open(NULL,
+ &fs_req,
+ req->new_path,
+ dst_flags,
+ src_statsbuf.st_mode,
+ NULL);
+ uv_fs_req_cleanup(&fs_req);
+
+ if (dstfd < 0) {
+ err = dstfd;
+ goto out;
+ }
+
+ /* If the file is not being opened exclusively, verify that the source and
+ destination are not the same file. If they are the same, bail out early. */
+ if ((req->flags & UV_FS_COPYFILE_EXCL) == 0) {
+ /* Get the destination file's mode. */
+ if (fstat(dstfd, &dst_statsbuf)) {
+ err = UV__ERR(errno);
+ goto out;
+ }
+
+ /* Check if srcfd and dstfd refer to the same file */
+ if (src_statsbuf.st_dev == dst_statsbuf.st_dev &&
+ src_statsbuf.st_ino == dst_statsbuf.st_ino) {
+ goto out;
+ }
+
+ /* Truncate the file in case the destination already existed. */
+ if (ftruncate(dstfd, 0) != 0) {
+ err = UV__ERR(errno);
+ goto out;
+ }
+ }
+
+ if (fchmod(dstfd, src_statsbuf.st_mode) == -1) {
+ err = UV__ERR(errno);
+#ifdef __linux__
+ if (err != UV_EPERM)
+ goto out;
+
+ {
+ struct statfs s;
+
+ /* fchmod() on CIFS shares always fails with EPERM unless the share is
+ * mounted with "noperm". As fchmod() is a meaningless operation on such
+ * shares anyway, detect that condition and squelch the error.
+ */
+ if (fstatfs(dstfd, &s) == -1)
+ goto out;
+
+ if (s.f_type != /* CIFS */ 0xFF534D42u)
+ goto out;
+ }
+
+ err = 0;
+#else /* !__linux__ */
+ goto out;
+#endif /* !__linux__ */
+ }
+
+#ifdef FICLONE
+ if (req->flags & UV_FS_COPYFILE_FICLONE ||
+ req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
+ if (ioctl(dstfd, FICLONE, srcfd) == 0) {
+ /* ioctl() with FICLONE succeeded. */
+ goto out;
+ }
+ /* If an error occurred and force was set, return the error to the caller;
+ * fall back to sendfile() when force was not set. */
+ if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
+ err = UV__ERR(errno);
+ goto out;
+ }
+ }
+#else
+ if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
+ err = UV_ENOSYS;
+ goto out;
+ }
+#endif
+
+ bytes_to_send = src_statsbuf.st_size;
+ in_offset = 0;
+ while (bytes_to_send != 0) {
+ bytes_chunk = SSIZE_MAX;
+ if (bytes_to_send < (off_t) bytes_chunk)
+ bytes_chunk = bytes_to_send;
+ uv_fs_sendfile(NULL, &fs_req, dstfd, srcfd, in_offset, bytes_chunk, NULL);
+ bytes_written = fs_req.result;
+ uv_fs_req_cleanup(&fs_req);
+
+ if (bytes_written < 0) {
+ err = bytes_written;
+ break;
+ }
+
+ bytes_to_send -= bytes_written;
+ in_offset += bytes_written;
+ }
+
+out:
+ if (err < 0)
+ result = err;
+ else
+ result = 0;
+
+ /* Close the source file. */
+ err = uv__close_nocheckstdio(srcfd);
+
+ /* Don't overwrite any existing errors. */
+ if (err != 0 && result == 0)
+ result = err;
+
+ /* Close the destination file if it is open. */
+ if (dstfd >= 0) {
+ err = uv__close_nocheckstdio(dstfd);
+
+ /* Don't overwrite any existing errors. */
+ if (err != 0 && result == 0)
+ result = err;
+
+ /* Remove the destination file if something went wrong. */
+ if (result != 0) {
+ uv_fs_unlink(NULL, &fs_req, req->new_path, NULL);
+ /* Ignore the unlink return value, as an error already happened. */
+ uv_fs_req_cleanup(&fs_req);
+ }
+ }
+
+ if (result == 0)
+ return 0;
+
+ errno = UV__ERR(result);
+ return -1;
+}
+
+static void uv__to_stat(struct stat* src, uv_stat_t* dst) {
+ dst->st_dev = src->st_dev;
+ dst->st_mode = src->st_mode;
+ dst->st_nlink = src->st_nlink;
+ dst->st_uid = src->st_uid;
+ dst->st_gid = src->st_gid;
+ dst->st_rdev = src->st_rdev;
+ dst->st_ino = src->st_ino;
+ dst->st_size = src->st_size;
+ dst->st_blksize = src->st_blksize;
+ dst->st_blocks = src->st_blocks;
+
+#if defined(__APPLE__)
+ dst->st_atim.tv_sec = src->st_atimespec.tv_sec;
+ dst->st_atim.tv_nsec = src->st_atimespec.tv_nsec;
+ dst->st_mtim.tv_sec = src->st_mtimespec.tv_sec;
+ dst->st_mtim.tv_nsec = src->st_mtimespec.tv_nsec;
+ dst->st_ctim.tv_sec = src->st_ctimespec.tv_sec;
+ dst->st_ctim.tv_nsec = src->st_ctimespec.tv_nsec;
+ dst->st_birthtim.tv_sec = src->st_birthtimespec.tv_sec;
+ dst->st_birthtim.tv_nsec = src->st_birthtimespec.tv_nsec;
+ dst->st_flags = src->st_flags;
+ dst->st_gen = src->st_gen;
+#elif defined(__ANDROID__)
+ dst->st_atim.tv_sec = src->st_atime;
+ dst->st_atim.tv_nsec = src->st_atimensec;
+ dst->st_mtim.tv_sec = src->st_mtime;
+ dst->st_mtim.tv_nsec = src->st_mtimensec;
+ dst->st_ctim.tv_sec = src->st_ctime;
+ dst->st_ctim.tv_nsec = src->st_ctimensec;
+ dst->st_birthtim.tv_sec = src->st_ctime;
+ dst->st_birthtim.tv_nsec = src->st_ctimensec;
+ dst->st_flags = 0;
+ dst->st_gen = 0;
+#elif !defined(_AIX) && ( \
+ defined(__DragonFly__) || \
+ defined(__FreeBSD__) || \
+ defined(__OpenBSD__) || \
+ defined(__NetBSD__) || \
+ defined(_GNU_SOURCE) || \
+ defined(_BSD_SOURCE) || \
+ defined(_SVID_SOURCE) || \
+ defined(_XOPEN_SOURCE) || \
+ defined(_DEFAULT_SOURCE))
+ dst->st_atim.tv_sec = src->st_atim.tv_sec;
+ dst->st_atim.tv_nsec = src->st_atim.tv_nsec;
+ dst->st_mtim.tv_sec = src->st_mtim.tv_sec;
+ dst->st_mtim.tv_nsec = src->st_mtim.tv_nsec;
+ dst->st_ctim.tv_sec = src->st_ctim.tv_sec;
+ dst->st_ctim.tv_nsec = src->st_ctim.tv_nsec;
+# if defined(__FreeBSD__) || \
+ defined(__NetBSD__)
+ dst->st_birthtim.tv_sec = src->st_birthtim.tv_sec;
+ dst->st_birthtim.tv_nsec = src->st_birthtim.tv_nsec;
+ dst->st_flags = src->st_flags;
+ dst->st_gen = src->st_gen;
+# else
+ dst->st_birthtim.tv_sec = src->st_ctim.tv_sec;
+ dst->st_birthtim.tv_nsec = src->st_ctim.tv_nsec;
+ dst->st_flags = 0;
+ dst->st_gen = 0;
+# endif
+#else
+ dst->st_atim.tv_sec = src->st_atime;
+ dst->st_atim.tv_nsec = 0;
+ dst->st_mtim.tv_sec = src->st_mtime;
+ dst->st_mtim.tv_nsec = 0;
+ dst->st_ctim.tv_sec = src->st_ctime;
+ dst->st_ctim.tv_nsec = 0;
+ dst->st_birthtim.tv_sec = src->st_ctime;
+ dst->st_birthtim.tv_nsec = 0;
+ dst->st_flags = 0;
+ dst->st_gen = 0;
+#endif
+}
+
+
+static int uv__fs_statx(int fd,
+ const char* path,
+ int is_fstat,
+ int is_lstat,
+ uv_stat_t* buf) {
+ STATIC_ASSERT(UV_ENOSYS != -1);
+#ifdef __linux__
+ static int no_statx;
+ struct uv__statx statxbuf;
+ int dirfd;
+ int flags;
+ int mode;
+ int rc;
+
+ if (uv__load_relaxed(&no_statx))
+ return UV_ENOSYS;
+
+ dirfd = AT_FDCWD;
+ flags = 0; /* AT_STATX_SYNC_AS_STAT */
+ mode = 0xFFF; /* STATX_BASIC_STATS + STATX_BTIME */
+
+ if (is_fstat) {
+ dirfd = fd;
+ flags |= 0x1000; /* AT_EMPTY_PATH */
+ }
+
+ if (is_lstat)
+ flags |= AT_SYMLINK_NOFOLLOW;
+
+ rc = uv__statx(dirfd, path, flags, mode, &statxbuf);
+
+ switch (rc) {
+ case 0:
+ break;
+ case -1:
+ /* EPERM happens when a seccomp filter rejects the system call.
+ * Has been observed with libseccomp < 2.3.3 and docker < 18.04.
+ */
+ if (errno != EINVAL && errno != EPERM && errno != ENOSYS)
+ return -1;
+ /* Fall through. */
+ default:
+      /* Normally, zero is returned on success and -1 on error. However, on
+       * S390 RHEL running in a docker container with statx not implemented,
+       * rc has been observed to be 1 with the error code set to 0, in which
+       * case we return ENOSYS.
+ */
+ uv__store_relaxed(&no_statx, 1);
+ return UV_ENOSYS;
+ }
+
+ buf->st_dev = 256 * statxbuf.stx_dev_major + statxbuf.stx_dev_minor;
+ buf->st_mode = statxbuf.stx_mode;
+ buf->st_nlink = statxbuf.stx_nlink;
+ buf->st_uid = statxbuf.stx_uid;
+ buf->st_gid = statxbuf.stx_gid;
+ buf->st_rdev = statxbuf.stx_rdev_major;
+ buf->st_ino = statxbuf.stx_ino;
+ buf->st_size = statxbuf.stx_size;
+ buf->st_blksize = statxbuf.stx_blksize;
+ buf->st_blocks = statxbuf.stx_blocks;
+ buf->st_atim.tv_sec = statxbuf.stx_atime.tv_sec;
+ buf->st_atim.tv_nsec = statxbuf.stx_atime.tv_nsec;
+ buf->st_mtim.tv_sec = statxbuf.stx_mtime.tv_sec;
+ buf->st_mtim.tv_nsec = statxbuf.stx_mtime.tv_nsec;
+ buf->st_ctim.tv_sec = statxbuf.stx_ctime.tv_sec;
+ buf->st_ctim.tv_nsec = statxbuf.stx_ctime.tv_nsec;
+ buf->st_birthtim.tv_sec = statxbuf.stx_btime.tv_sec;
+ buf->st_birthtim.tv_nsec = statxbuf.stx_btime.tv_nsec;
+ buf->st_flags = 0;
+ buf->st_gen = 0;
+
+ return 0;
+#else
+ return UV_ENOSYS;
+#endif /* __linux__ */
+}
+
+
+static int uv__fs_stat(const char *path, uv_stat_t *buf) {
+ struct stat pbuf;
+ int ret;
+
+ ret = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 0, buf);
+ if (ret != UV_ENOSYS)
+ return ret;
+
+ ret = stat(path, &pbuf);
+ if (ret == 0)
+ uv__to_stat(&pbuf, buf);
+
+ return ret;
+}
+
+
+static int uv__fs_lstat(const char *path, uv_stat_t *buf) {
+ struct stat pbuf;
+ int ret;
+
+ ret = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 1, buf);
+ if (ret != UV_ENOSYS)
+ return ret;
+
+ ret = lstat(path, &pbuf);
+ if (ret == 0)
+ uv__to_stat(&pbuf, buf);
+
+ return ret;
+}
+
+
+static int uv__fs_fstat(int fd, uv_stat_t *buf) {
+ struct stat pbuf;
+ int ret;
+
+ ret = uv__fs_statx(fd, "", /* is_fstat */ 1, /* is_lstat */ 0, buf);
+ if (ret != UV_ENOSYS)
+ return ret;
+
+ ret = fstat(fd, &pbuf);
+ if (ret == 0)
+ uv__to_stat(&pbuf, buf);
+
+ return ret;
+}
+
+static size_t uv__fs_buf_offset(uv_buf_t* bufs, size_t size) {
+ size_t offset;
+ /* Figure out which bufs are done */
+ for (offset = 0; size > 0 && bufs[offset].len <= size; ++offset)
+ size -= bufs[offset].len;
+
+ /* Fix a partial read/write */
+ if (size > 0) {
+ bufs[offset].base += size;
+ bufs[offset].len -= size;
+ }
+ return offset;
+}
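+
+/* Worked example: for bufs with lengths 5, 7 and 3 and a short write of
+ * size == 9, buf 0 is fully consumed (offset becomes 1), the remaining
+ * 4 bytes advance bufs[1].base by 4 and shrink bufs[1].len to 3, and the
+ * function returns 1 so uv__fs_write_all() below resumes at bufs + 1.
+ */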
+
+static ssize_t uv__fs_write_all(uv_fs_t* req) {
+ unsigned int iovmax;
+ unsigned int nbufs;
+ uv_buf_t* bufs;
+ ssize_t total;
+ ssize_t result;
+
+ iovmax = uv__getiovmax();
+ nbufs = req->nbufs;
+ bufs = req->bufs;
+ total = 0;
+
+ while (nbufs > 0) {
+ req->nbufs = nbufs;
+ if (req->nbufs > iovmax)
+ req->nbufs = iovmax;
+
+ do
+ result = uv__fs_write(req);
+ while (result < 0 && errno == EINTR);
+
+ if (result <= 0) {
+ if (total == 0)
+ total = result;
+ break;
+ }
+
+ if (req->off >= 0)
+ req->off += result;
+
+ req->nbufs = uv__fs_buf_offset(req->bufs, result);
+ req->bufs += req->nbufs;
+ nbufs -= req->nbufs;
+ total += result;
+ }
+
+ if (bufs != req->bufsml)
+ uv__free(bufs);
+
+ req->bufs = NULL;
+ req->nbufs = 0;
+
+ return total;
+}
+
+
+static void uv__fs_work(struct uv__work* w) {
+ int retry_on_eintr;
+ uv_fs_t* req;
+ ssize_t r;
+
+ req = container_of(w, uv_fs_t, work_req);
+ retry_on_eintr = !(req->fs_type == UV_FS_CLOSE ||
+ req->fs_type == UV_FS_READ);
+
+ do {
+ errno = 0;
+
+#define X(type, action) \
+ case UV_FS_ ## type: \
+ r = action; \
+ break;
+
+ switch (req->fs_type) {
+ X(ACCESS, access(req->path, req->flags));
+ X(CHMOD, chmod(req->path, req->mode));
+ X(CHOWN, chown(req->path, req->uid, req->gid));
+ X(CLOSE, uv__fs_close(req->file));
+ X(COPYFILE, uv__fs_copyfile(req));
+ X(FCHMOD, fchmod(req->file, req->mode));
+ X(FCHOWN, fchown(req->file, req->uid, req->gid));
+ X(LCHOWN, lchown(req->path, req->uid, req->gid));
+ X(FDATASYNC, uv__fs_fdatasync(req));
+ X(FSTAT, uv__fs_fstat(req->file, &req->statbuf));
+ X(FSYNC, uv__fs_fsync(req));
+ X(FTRUNCATE, ftruncate(req->file, req->off));
+ X(FUTIME, uv__fs_futime(req));
+ X(LUTIME, uv__fs_lutime(req));
+ X(LSTAT, uv__fs_lstat(req->path, &req->statbuf));
+ X(LINK, link(req->path, req->new_path));
+ X(MKDIR, mkdir(req->path, req->mode));
+ X(MKDTEMP, uv__fs_mkdtemp(req));
+ X(MKSTEMP, uv__fs_mkstemp(req));
+ X(OPEN, uv__fs_open(req));
+ X(READ, uv__fs_read(req));
+ X(SCANDIR, uv__fs_scandir(req));
+ X(OPENDIR, uv__fs_opendir(req));
+ X(READDIR, uv__fs_readdir(req));
+ X(CLOSEDIR, uv__fs_closedir(req));
+ X(READLINK, uv__fs_readlink(req));
+ X(REALPATH, uv__fs_realpath(req));
+ X(RENAME, rename(req->path, req->new_path));
+ X(RMDIR, rmdir(req->path));
+ X(SENDFILE, uv__fs_sendfile(req));
+ X(STAT, uv__fs_stat(req->path, &req->statbuf));
+ X(STATFS, uv__fs_statfs(req));
+ X(SYMLINK, symlink(req->path, req->new_path));
+ X(UNLINK, unlink(req->path));
+ X(UTIME, uv__fs_utime(req));
+ X(WRITE, uv__fs_write_all(req));
+ default: abort();
+ }
+#undef X
+ } while (r == -1 && errno == EINTR && retry_on_eintr);
+
+ if (r == -1)
+ req->result = UV__ERR(errno);
+ else
+ req->result = r;
+
+ if (r == 0 && (req->fs_type == UV_FS_STAT ||
+ req->fs_type == UV_FS_FSTAT ||
+ req->fs_type == UV_FS_LSTAT)) {
+ req->ptr = &req->statbuf;
+ }
+}
+
+
+static void uv__fs_done(struct uv__work* w, int status) {
+ uv_fs_t* req;
+
+ req = container_of(w, uv_fs_t, work_req);
+ uv__req_unregister(req->loop, req);
+
+ if (status == UV_ECANCELED) {
+ assert(req->result == 0);
+ req->result = UV_ECANCELED;
+ }
+
+ req->cb(req);
+}
+
+
+int uv_fs_access(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ int flags,
+ uv_fs_cb cb) {
+ INIT(ACCESS);
+ PATH;
+ req->flags = flags;
+ POST;
+}
+
+
+int uv_fs_chmod(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ int mode,
+ uv_fs_cb cb) {
+ INIT(CHMOD);
+ PATH;
+ req->mode = mode;
+ POST;
+}
+
+
+int uv_fs_chown(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ uv_uid_t uid,
+ uv_gid_t gid,
+ uv_fs_cb cb) {
+ INIT(CHOWN);
+ PATH;
+ req->uid = uid;
+ req->gid = gid;
+ POST;
+}
+
+
+int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
+ INIT(CLOSE);
+ req->file = file;
+ POST;
+}
+
+
+int uv_fs_fchmod(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_file file,
+ int mode,
+ uv_fs_cb cb) {
+ INIT(FCHMOD);
+ req->file = file;
+ req->mode = mode;
+ POST;
+}
+
+
+int uv_fs_fchown(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_file file,
+ uv_uid_t uid,
+ uv_gid_t gid,
+ uv_fs_cb cb) {
+ INIT(FCHOWN);
+ req->file = file;
+ req->uid = uid;
+ req->gid = gid;
+ POST;
+}
+
+
+int uv_fs_lchown(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ uv_uid_t uid,
+ uv_gid_t gid,
+ uv_fs_cb cb) {
+ INIT(LCHOWN);
+ PATH;
+ req->uid = uid;
+ req->gid = gid;
+ POST;
+}
+
+
+int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
+ INIT(FDATASYNC);
+ req->file = file;
+ POST;
+}
+
+
+int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
+ INIT(FSTAT);
+ req->file = file;
+ POST;
+}
+
+
+int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
+ INIT(FSYNC);
+ req->file = file;
+ POST;
+}
+
+
+int uv_fs_ftruncate(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_file file,
+ int64_t off,
+ uv_fs_cb cb) {
+ INIT(FTRUNCATE);
+ req->file = file;
+ req->off = off;
+ POST;
+}
+
+
+int uv_fs_futime(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_file file,
+ double atime,
+ double mtime,
+ uv_fs_cb cb) {
+ INIT(FUTIME);
+ req->file = file;
+ req->atime = atime;
+ req->mtime = mtime;
+ POST;
+}
+
+int uv_fs_lutime(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ double atime,
+ double mtime,
+ uv_fs_cb cb) {
+ INIT(LUTIME);
+ PATH;
+ req->atime = atime;
+ req->mtime = mtime;
+ POST;
+}
+
+
+int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
+ INIT(LSTAT);
+ PATH;
+ POST;
+}
+
+
+int uv_fs_link(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ const char* new_path,
+ uv_fs_cb cb) {
+ INIT(LINK);
+ PATH2;
+ POST;
+}
+
+
+int uv_fs_mkdir(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ int mode,
+ uv_fs_cb cb) {
+ INIT(MKDIR);
+ PATH;
+ req->mode = mode;
+ POST;
+}
+
+
+int uv_fs_mkdtemp(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* tpl,
+ uv_fs_cb cb) {
+ INIT(MKDTEMP);
+ req->path = uv__strdup(tpl);
+ if (req->path == NULL)
+ return UV_ENOMEM;
+ POST;
+}
+
+
+int uv_fs_mkstemp(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* tpl,
+ uv_fs_cb cb) {
+ INIT(MKSTEMP);
+ req->path = uv__strdup(tpl);
+ if (req->path == NULL)
+ return UV_ENOMEM;
+ POST;
+}
+
+
+int uv_fs_open(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ int flags,
+ int mode,
+ uv_fs_cb cb) {
+ INIT(OPEN);
+ PATH;
+ req->flags = flags;
+ req->mode = mode;
+ POST;
+}
+
+
+int uv_fs_read(uv_loop_t* loop, uv_fs_t* req,
+ uv_file file,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ int64_t off,
+ uv_fs_cb cb) {
+ INIT(READ);
+
+ if (bufs == NULL || nbufs == 0)
+ return UV_EINVAL;
+
+ req->file = file;
+
+ req->nbufs = nbufs;
+ req->bufs = req->bufsml;
+ if (nbufs > ARRAY_SIZE(req->bufsml))
+ req->bufs = uv__malloc(nbufs * sizeof(*bufs));
+
+ if (req->bufs == NULL)
+ return UV_ENOMEM;
+
+ memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));
+
+ req->off = off;
+ POST;
+}
+
+
+int uv_fs_scandir(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ int flags,
+ uv_fs_cb cb) {
+ INIT(SCANDIR);
+ PATH;
+ req->flags = flags;
+ POST;
+}
+
+int uv_fs_opendir(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ uv_fs_cb cb) {
+ INIT(OPENDIR);
+ PATH;
+ POST;
+}
+
+int uv_fs_readdir(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_dir_t* dir,
+ uv_fs_cb cb) {
+ INIT(READDIR);
+
+ if (dir == NULL || dir->dir == NULL || dir->dirents == NULL)
+ return UV_EINVAL;
+
+ req->ptr = dir;
+ POST;
+}
+
+int uv_fs_closedir(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_dir_t* dir,
+ uv_fs_cb cb) {
+ INIT(CLOSEDIR);
+
+ if (dir == NULL)
+ return UV_EINVAL;
+
+ req->ptr = dir;
+ POST;
+}
+
+int uv_fs_readlink(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ uv_fs_cb cb) {
+ INIT(READLINK);
+ PATH;
+ POST;
+}
+
+
+int uv_fs_realpath(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char * path,
+ uv_fs_cb cb) {
+ INIT(REALPATH);
+ PATH;
+ POST;
+}
+
+
+int uv_fs_rename(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ const char* new_path,
+ uv_fs_cb cb) {
+ INIT(RENAME);
+ PATH2;
+ POST;
+}
+
+
+int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
+ INIT(RMDIR);
+ PATH;
+ POST;
+}
+
+
+int uv_fs_sendfile(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_file out_fd,
+ uv_file in_fd,
+ int64_t off,
+ size_t len,
+ uv_fs_cb cb) {
+ INIT(SENDFILE);
+ req->flags = in_fd; /* hack */
+ req->file = out_fd;
+ req->off = off;
+ req->bufsml[0].len = len;
+ POST;
+}
+
+
+int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
+ INIT(STAT);
+ PATH;
+ POST;
+}
+
+
+int uv_fs_symlink(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ const char* new_path,
+ int flags,
+ uv_fs_cb cb) {
+ INIT(SYMLINK);
+ PATH2;
+ req->flags = flags;
+ POST;
+}
+
+
+int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
+ INIT(UNLINK);
+ PATH;
+ POST;
+}
+
+
+int uv_fs_utime(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ double atime,
+ double mtime,
+ uv_fs_cb cb) {
+ INIT(UTIME);
+ PATH;
+ req->atime = atime;
+ req->mtime = mtime;
+ POST;
+}
+
+
+int uv_fs_write(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_file file,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ int64_t off,
+ uv_fs_cb cb) {
+ INIT(WRITE);
+
+ if (bufs == NULL || nbufs == 0)
+ return UV_EINVAL;
+
+ req->file = file;
+
+ req->nbufs = nbufs;
+ req->bufs = req->bufsml;
+ if (nbufs > ARRAY_SIZE(req->bufsml))
+ req->bufs = uv__malloc(nbufs * sizeof(*bufs));
+
+ if (req->bufs == NULL)
+ return UV_ENOMEM;
+
+ memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));
+
+ req->off = off;
+ POST;
+}
+
+
+void uv_fs_req_cleanup(uv_fs_t* req) {
+ if (req == NULL)
+ return;
+
+  /* Only necessary for asynchronous requests, i.e., requests with a callback.
+   * Synchronous ones don't copy their arguments and have req->path and
+   * req->new_path pointing to user-owned memory. UV_FS_MKDTEMP and
+   * UV_FS_MKSTEMP are the exception to the rule; they always allocate memory.
+   * (A short usage sketch follows this function.)
+   */
+ if (req->path != NULL &&
+ (req->cb != NULL ||
+ req->fs_type == UV_FS_MKDTEMP || req->fs_type == UV_FS_MKSTEMP))
+ uv__free((void*) req->path); /* Memory is shared with req->new_path. */
+
+ req->path = NULL;
+ req->new_path = NULL;
+
+ if (req->fs_type == UV_FS_READDIR && req->ptr != NULL)
+ uv__fs_readdir_cleanup(req);
+
+ if (req->fs_type == UV_FS_SCANDIR && req->ptr != NULL)
+ uv__fs_scandir_cleanup(req);
+
+ if (req->bufs != req->bufsml)
+ uv__free(req->bufs);
+ req->bufs = NULL;
+
+ if (req->fs_type != UV_FS_OPENDIR && req->ptr != &req->statbuf)
+ uv__free(req->ptr);
+ req->ptr = NULL;
+}
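+
+/* Editor's note: a minimal, hypothetical usage sketch (not part of this
+ * patch) illustrating the ownership rules described in the comment above:
+ * a synchronous request keeps pointing at caller-owned memory, while an
+ * asynchronous request copies its path and must still be cleaned up from
+ * the callback.
+ */
+#if 0  /* illustration only */
+static void stat_cb(uv_fs_t* req) {
+  if (req->result == 0) {
+    /* On success req->ptr points at req->statbuf. */
+  }
+  uv_fs_req_cleanup(req);  /* frees the copied path */
+}
+
+static void fs_example(uv_loop_t* loop) {
+  static uv_fs_t async_req;  /* must outlive the callback */
+  uv_fs_t sync_req;
+
+  uv_fs_stat(loop, &sync_req, "/tmp", NULL);  /* synchronous: path not copied */
+  uv_fs_req_cleanup(&sync_req);
+
+  uv_fs_stat(loop, &async_req, "/tmp", stat_cb);  /* asynchronous: path copied */
+}
+#endif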
+
+
+int uv_fs_copyfile(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ const char* new_path,
+ int flags,
+ uv_fs_cb cb) {
+ INIT(COPYFILE);
+
+ if (flags & ~(UV_FS_COPYFILE_EXCL |
+ UV_FS_COPYFILE_FICLONE |
+ UV_FS_COPYFILE_FICLONE_FORCE)) {
+ return UV_EINVAL;
+ }
+
+ PATH2;
+ req->flags = flags;
+ POST;
+}
+
+
+int uv_fs_statfs(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ uv_fs_cb cb) {
+ INIT(STATFS);
+ PATH;
+ POST;
+}
+
+int uv_fs_get_system_error(const uv_fs_t* req) {
+ return -req->result;
+}
diff --git a/Utilities/cmlibuv/src/unix/fsevents.c b/Utilities/cmlibuv/src/unix/fsevents.c
new file mode 100644
index 0000000..a51f29b
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/fsevents.c
@@ -0,0 +1,923 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#if TARGET_OS_IPHONE || MAC_OS_X_VERSION_MAX_ALLOWED < 1070
+
+/* iOS (currently) doesn't provide the FSEvents API (or CoreServices). */
+/* macOS prior to 10.7 doesn't provide the full FSEvents API, so use kqueue. */
+
+int uv__fsevents_init(uv_fs_event_t* handle) {
+ return 0;
+}
+
+
+int uv__fsevents_close(uv_fs_event_t* handle) {
+ return 0;
+}
+
+
+void uv__fsevents_loop_delete(uv_loop_t* loop) {
+}
+
+#else /* TARGET_OS_IPHONE */
+
+#include "darwin-stub.h"
+
+#include <dlfcn.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <pthread.h>
+
+static const int kFSEventsModified =
+ kFSEventStreamEventFlagItemChangeOwner |
+ kFSEventStreamEventFlagItemFinderInfoMod |
+ kFSEventStreamEventFlagItemInodeMetaMod |
+ kFSEventStreamEventFlagItemModified |
+ kFSEventStreamEventFlagItemXattrMod;
+
+static const int kFSEventsRenamed =
+ kFSEventStreamEventFlagItemCreated |
+ kFSEventStreamEventFlagItemRemoved |
+ kFSEventStreamEventFlagItemRenamed;
+
+static const int kFSEventsSystem =
+ kFSEventStreamEventFlagUserDropped |
+ kFSEventStreamEventFlagKernelDropped |
+ kFSEventStreamEventFlagEventIdsWrapped |
+ kFSEventStreamEventFlagHistoryDone |
+ kFSEventStreamEventFlagMount |
+ kFSEventStreamEventFlagUnmount |
+ kFSEventStreamEventFlagRootChanged;
+
+typedef struct uv__fsevents_event_s uv__fsevents_event_t;
+typedef struct uv__cf_loop_signal_s uv__cf_loop_signal_t;
+typedef struct uv__cf_loop_state_s uv__cf_loop_state_t;
+
+enum uv__cf_loop_signal_type_e {
+ kUVCFLoopSignalRegular,
+ kUVCFLoopSignalClosing
+};
+typedef enum uv__cf_loop_signal_type_e uv__cf_loop_signal_type_t;
+
+struct uv__cf_loop_signal_s {
+ QUEUE member;
+ uv_fs_event_t* handle;
+ uv__cf_loop_signal_type_t type;
+};
+
+struct uv__fsevents_event_s {
+ QUEUE member;
+ int events;
+ char path[1];
+};
+
+struct uv__cf_loop_state_s {
+ CFRunLoopRef loop;
+ CFRunLoopSourceRef signal_source;
+ int fsevent_need_reschedule;
+ FSEventStreamRef fsevent_stream;
+ uv_sem_t fsevent_sem;
+ uv_mutex_t fsevent_mutex;
+ void* fsevent_handles[2];
+ unsigned int fsevent_handle_count;
+};
+
+/* Forward declarations */
+static void uv__cf_loop_cb(void* arg);
+static void* uv__cf_loop_runner(void* arg);
+static int uv__cf_loop_signal(uv_loop_t* loop,
+ uv_fs_event_t* handle,
+ uv__cf_loop_signal_type_t type);
+
+/* Lazy-loaded by uv__fsevents_global_init(). */
+static CFArrayRef (*pCFArrayCreate)(CFAllocatorRef,
+ const void**,
+ CFIndex,
+ const CFArrayCallBacks*);
+static void (*pCFRelease)(CFTypeRef);
+static void (*pCFRunLoopAddSource)(CFRunLoopRef,
+ CFRunLoopSourceRef,
+ CFStringRef);
+static CFRunLoopRef (*pCFRunLoopGetCurrent)(void);
+static void (*pCFRunLoopRemoveSource)(CFRunLoopRef,
+ CFRunLoopSourceRef,
+ CFStringRef);
+static void (*pCFRunLoopRun)(void);
+static CFRunLoopSourceRef (*pCFRunLoopSourceCreate)(CFAllocatorRef,
+ CFIndex,
+ CFRunLoopSourceContext*);
+static void (*pCFRunLoopSourceSignal)(CFRunLoopSourceRef);
+static void (*pCFRunLoopStop)(CFRunLoopRef);
+static void (*pCFRunLoopWakeUp)(CFRunLoopRef);
+static CFStringRef (*pCFStringCreateWithFileSystemRepresentation)(
+ CFAllocatorRef,
+ const char*);
+static CFStringEncoding (*pCFStringGetSystemEncoding)(void);
+static CFStringRef (*pkCFRunLoopDefaultMode);
+static FSEventStreamRef (*pFSEventStreamCreate)(CFAllocatorRef,
+ FSEventStreamCallback,
+ FSEventStreamContext*,
+ CFArrayRef,
+ FSEventStreamEventId,
+ CFTimeInterval,
+ FSEventStreamCreateFlags);
+static void (*pFSEventStreamFlushSync)(FSEventStreamRef);
+static void (*pFSEventStreamInvalidate)(FSEventStreamRef);
+static void (*pFSEventStreamRelease)(FSEventStreamRef);
+static void (*pFSEventStreamScheduleWithRunLoop)(FSEventStreamRef,
+ CFRunLoopRef,
+ CFStringRef);
+static int (*pFSEventStreamStart)(FSEventStreamRef);
+static void (*pFSEventStreamStop)(FSEventStreamRef);
+
+#define UV__FSEVENTS_PROCESS(handle, block) \
+ do { \
+ QUEUE events; \
+ QUEUE* q; \
+ uv__fsevents_event_t* event; \
+ int err; \
+ uv_mutex_lock(&(handle)->cf_mutex); \
+    /* Split off all events and empty the original queue */                  \
+ QUEUE_MOVE(&(handle)->cf_events, &events); \
+ /* Get error (if any) and zero original one */ \
+ err = (handle)->cf_error; \
+ (handle)->cf_error = 0; \
+ uv_mutex_unlock(&(handle)->cf_mutex); \
+ /* Loop through events, deallocating each after processing */ \
+ while (!QUEUE_EMPTY(&events)) { \
+ q = QUEUE_HEAD(&events); \
+ event = QUEUE_DATA(q, uv__fsevents_event_t, member); \
+ QUEUE_REMOVE(q); \
+      /* NOTE: Checking uv__is_active() is required here, because the handle \
+       * callback may close the handle, and invoking it after that would     \
+       * lead to incorrect behaviour. */                                     \
+ if (!uv__is_closing((handle)) && uv__is_active((handle))) \
+ block \
+ /* Free allocated data */ \
+ uv__free(event); \
+ } \
+ if (err != 0 && !uv__is_closing((handle)) && uv__is_active((handle))) \
+ (handle)->cb((handle), NULL, 0, err); \
+ } while (0)
+
+
+/* Runs in the UV loop's thread when there are events to report to a handle */
+static void uv__fsevents_cb(uv_async_t* cb) {
+ uv_fs_event_t* handle;
+
+ handle = cb->data;
+
+ UV__FSEVENTS_PROCESS(handle, {
+ handle->cb(handle, event->path[0] ? event->path : NULL, event->events, 0);
+ });
+}
+
+
+/* Runs in the CF thread; pushes events onto the handle's event list */
+static void uv__fsevents_push_event(uv_fs_event_t* handle,
+ QUEUE* events,
+ int err) {
+ assert(events != NULL || err != 0);
+ uv_mutex_lock(&handle->cf_mutex);
+
+ /* Concatenate two queues */
+ if (events != NULL)
+ QUEUE_ADD(&handle->cf_events, events);
+
+ /* Propagate error */
+ if (err != 0)
+ handle->cf_error = err;
+ uv_mutex_unlock(&handle->cf_mutex);
+
+ uv_async_send(handle->cf_cb);
+}
+
+
+/* Runs in the CF thread when there are events in the FSEventStream */
+static void uv__fsevents_event_cb(const FSEventStreamRef streamRef,
+ void* info,
+ size_t numEvents,
+ void* eventPaths,
+ const FSEventStreamEventFlags eventFlags[],
+ const FSEventStreamEventId eventIds[]) {
+ size_t i;
+ int len;
+ char** paths;
+ char* path;
+ char* pos;
+ uv_fs_event_t* handle;
+ QUEUE* q;
+ uv_loop_t* loop;
+ uv__cf_loop_state_t* state;
+ uv__fsevents_event_t* event;
+ FSEventStreamEventFlags flags;
+ QUEUE head;
+
+ loop = info;
+ state = loop->cf_state;
+ assert(state != NULL);
+ paths = eventPaths;
+
+ /* For each handle */
+ uv_mutex_lock(&state->fsevent_mutex);
+ QUEUE_FOREACH(q, &state->fsevent_handles) {
+ handle = QUEUE_DATA(q, uv_fs_event_t, cf_member);
+ QUEUE_INIT(&head);
+
+ /* Process and filter out events */
+ for (i = 0; i < numEvents; i++) {
+ flags = eventFlags[i];
+
+ /* Ignore system events */
+ if (flags & kFSEventsSystem)
+ continue;
+
+ path = paths[i];
+ len = strlen(path);
+
+ if (handle->realpath_len == 0)
+ continue; /* This should be unreachable */
+
+ /* Filter out paths that are outside handle's request */
+ if (len < handle->realpath_len)
+ continue;
+
+      /* Make sure that realpath actually named a directory (unless we are
+       * watching the root, which alone keeps a trailing slash on the
+       * realpath) or that we matched the whole string */
+ if (handle->realpath_len != len &&
+ handle->realpath_len > 1 &&
+ path[handle->realpath_len] != '/')
+ continue;
+
+ if (memcmp(path, handle->realpath, handle->realpath_len) != 0)
+ continue;
+
+ if (!(handle->realpath_len == 1 && handle->realpath[0] == '/')) {
+ /* Remove common prefix, unless the watched folder is "/" */
+ path += handle->realpath_len;
+ len -= handle->realpath_len;
+
+ /* Ignore events with path equal to directory itself */
+ if (len <= 1 && (flags & kFSEventStreamEventFlagItemIsDir))
+ continue;
+
+ if (len == 0) {
+ /* Since we're using fsevents to watch the file itself,
+ * realpath == path, and we now need to get the basename of the file back
+ * (for commonality with other codepaths and platforms). */
+ while (len < handle->realpath_len && path[-1] != '/') {
+ path--;
+ len++;
+ }
+          /* Created and Removed seem to always be set but make no sense here */
+ flags &= ~kFSEventsRenamed;
+ } else {
+ /* Skip forward slash */
+ path++;
+ len--;
+ }
+ }
+
+      /* Skip events from subdirectories unless the recursive option is set */
+ if ((handle->cf_flags & UV_FS_EVENT_RECURSIVE) == 0 && *path != '\0') {
+ pos = strchr(path + 1, '/');
+ if (pos != NULL)
+ continue;
+ }
+
+ event = uv__malloc(sizeof(*event) + len);
+ if (event == NULL)
+ break;
+
+ memset(event, 0, sizeof(*event));
+ memcpy(event->path, path, len + 1);
+ event->events = UV_RENAME;
+
+ if (0 == (flags & kFSEventsRenamed)) {
+ if (0 != (flags & kFSEventsModified) ||
+ 0 == (flags & kFSEventStreamEventFlagItemIsDir))
+ event->events = UV_CHANGE;
+ }
+
+ QUEUE_INSERT_TAIL(&head, &event->member);
+ }
+
+ if (!QUEUE_EMPTY(&head))
+ uv__fsevents_push_event(handle, &head, 0);
+ }
+ uv_mutex_unlock(&state->fsevent_mutex);
+}
+
+
+/* Runs in CF thread */
+static int uv__fsevents_create_stream(uv_loop_t* loop, CFArrayRef paths) {
+ uv__cf_loop_state_t* state;
+ FSEventStreamContext ctx;
+ FSEventStreamRef ref;
+ CFAbsoluteTime latency;
+ FSEventStreamCreateFlags flags;
+
+ /* Initialize context */
+ memset(&ctx, 0, sizeof(ctx));
+ ctx.info = loop;
+
+ latency = 0.05;
+
+  /* Explanation of selected flags:
+   * 1. NoDefer - without this flag, events that occur continuously (i.e.
+   *    each event follows the previous one within less than `latency`
+   *    seconds) are deferred and passed to the callback only once they fill
+   *    the OS buffer, or once the continuous stream stops (i.e. the gap
+   *    between events grows beyond `latency`).
+   *    With this flag, the callback is invoked `latency` seconds after each
+   *    event.
+   * 2. FileEvents - fire the callback for file changes too (by default it
+   *    fires only for directory changes).
+   */
+ flags = kFSEventStreamCreateFlagNoDefer | kFSEventStreamCreateFlagFileEvents;
+
+  /*
+   * NOTE: It might sound like a good idea to remember the last seen
+   * StreamEventId, but in reality one directory might have a last
+   * StreamEventId lower than that of another directory that is being
+   * watched now, which would cause the FSEventStream API to report changes
+   * to files from the past.
+   */
+ ref = pFSEventStreamCreate(NULL,
+ &uv__fsevents_event_cb,
+ &ctx,
+ paths,
+ kFSEventStreamEventIdSinceNow,
+ latency,
+ flags);
+ assert(ref != NULL);
+
+ state = loop->cf_state;
+ pFSEventStreamScheduleWithRunLoop(ref,
+ state->loop,
+ *pkCFRunLoopDefaultMode);
+ if (!pFSEventStreamStart(ref)) {
+ pFSEventStreamInvalidate(ref);
+ pFSEventStreamRelease(ref);
+ return UV_EMFILE;
+ }
+
+ state->fsevent_stream = ref;
+ return 0;
+}
+
+
+/* Runs in CF thread */
+static void uv__fsevents_destroy_stream(uv_loop_t* loop) {
+ uv__cf_loop_state_t* state;
+
+ state = loop->cf_state;
+
+ if (state->fsevent_stream == NULL)
+ return;
+
+ /* Stop emitting events */
+ pFSEventStreamStop(state->fsevent_stream);
+
+ /* Release stream */
+ pFSEventStreamInvalidate(state->fsevent_stream);
+ pFSEventStreamRelease(state->fsevent_stream);
+ state->fsevent_stream = NULL;
+}
+
+
+/* Runs in the CF thread when there are new fsevent handles to add to the stream */
+static void uv__fsevents_reschedule(uv_fs_event_t* handle,
+ uv__cf_loop_signal_type_t type) {
+ uv__cf_loop_state_t* state;
+ QUEUE* q;
+ uv_fs_event_t* curr;
+ CFArrayRef cf_paths;
+ CFStringRef* paths;
+ unsigned int i;
+ int err;
+ unsigned int path_count;
+
+ state = handle->loop->cf_state;
+ paths = NULL;
+ cf_paths = NULL;
+ err = 0;
+ /* NOTE: `i` is used in deallocation loop below */
+ i = 0;
+
+ /* Optimization to prevent O(n^2) time spent when starting to watch
+ * many files simultaneously
+ */
+ uv_mutex_lock(&state->fsevent_mutex);
+ if (state->fsevent_need_reschedule == 0) {
+ uv_mutex_unlock(&state->fsevent_mutex);
+ goto final;
+ }
+ state->fsevent_need_reschedule = 0;
+ uv_mutex_unlock(&state->fsevent_mutex);
+
+ /* Destroy previous FSEventStream */
+ uv__fsevents_destroy_stream(handle->loop);
+
+ /* Any failure below will be a memory failure */
+ err = UV_ENOMEM;
+
+ /* Create list of all watched paths */
+ uv_mutex_lock(&state->fsevent_mutex);
+ path_count = state->fsevent_handle_count;
+ if (path_count != 0) {
+ paths = uv__malloc(sizeof(*paths) * path_count);
+ if (paths == NULL) {
+ uv_mutex_unlock(&state->fsevent_mutex);
+ goto final;
+ }
+
+ q = &state->fsevent_handles;
+ for (; i < path_count; i++) {
+ q = QUEUE_NEXT(q);
+ assert(q != &state->fsevent_handles);
+ curr = QUEUE_DATA(q, uv_fs_event_t, cf_member);
+
+ assert(curr->realpath != NULL);
+ paths[i] =
+ pCFStringCreateWithFileSystemRepresentation(NULL, curr->realpath);
+ if (paths[i] == NULL) {
+ uv_mutex_unlock(&state->fsevent_mutex);
+ goto final;
+ }
+ }
+ }
+ uv_mutex_unlock(&state->fsevent_mutex);
+ err = 0;
+
+ if (path_count != 0) {
+ /* Create new FSEventStream */
+ cf_paths = pCFArrayCreate(NULL, (const void**) paths, path_count, NULL);
+ if (cf_paths == NULL) {
+ err = UV_ENOMEM;
+ goto final;
+ }
+ err = uv__fsevents_create_stream(handle->loop, cf_paths);
+ }
+
+final:
+ /* Deallocate all paths in case of failure */
+ if (err != 0) {
+ if (cf_paths == NULL) {
+ while (i != 0)
+ pCFRelease(paths[--i]);
+ uv__free(paths);
+ } else {
+ /* CFArray takes ownership of both strings and original C-array */
+ pCFRelease(cf_paths);
+ }
+
+ /* Broadcast error to all handles */
+ uv_mutex_lock(&state->fsevent_mutex);
+ QUEUE_FOREACH(q, &state->fsevent_handles) {
+ curr = QUEUE_DATA(q, uv_fs_event_t, cf_member);
+ uv__fsevents_push_event(curr, NULL, err);
+ }
+ uv_mutex_unlock(&state->fsevent_mutex);
+ }
+
+  /*
+   * The main thread will block until the handle has been removed from the
+   * list, so we must tell it when we're ready.
+   *
+   * NOTE: This is coupled with `uv_sem_wait()` in `uv__fsevents_close`.
+   */
+ if (type == kUVCFLoopSignalClosing)
+ uv_sem_post(&state->fsevent_sem);
+}
+
+
+static int uv__fsevents_global_init(void) {
+ static pthread_mutex_t global_init_mutex = PTHREAD_MUTEX_INITIALIZER;
+ static void* core_foundation_handle;
+ static void* core_services_handle;
+ int err;
+
+ err = 0;
+ pthread_mutex_lock(&global_init_mutex);
+ if (core_foundation_handle != NULL)
+ goto out;
+
+ /* The libraries are never unloaded because we currently don't have a good
+ * mechanism for keeping a reference count. It's unlikely to be an issue
+ * but if it ever becomes one, we can turn the dynamic library handles into
+ * per-event loop properties and have the dynamic linker keep track for us.
+ */
+ err = UV_ENOSYS;
+ core_foundation_handle = dlopen("/System/Library/Frameworks/"
+ "CoreFoundation.framework/"
+ "Versions/A/CoreFoundation",
+ RTLD_LAZY | RTLD_LOCAL);
+ if (core_foundation_handle == NULL)
+ goto out;
+
+ core_services_handle = dlopen("/System/Library/Frameworks/"
+ "CoreServices.framework/"
+ "Versions/A/CoreServices",
+ RTLD_LAZY | RTLD_LOCAL);
+ if (core_services_handle == NULL)
+ goto out;
+
+ err = UV_ENOENT;
+#define V(handle, symbol) \
+ do { \
+ *(void **)(&p ## symbol) = dlsym((handle), #symbol); \
+ if (p ## symbol == NULL) \
+ goto out; \
+ } \
+ while (0)
+ V(core_foundation_handle, CFArrayCreate);
+ V(core_foundation_handle, CFRelease);
+ V(core_foundation_handle, CFRunLoopAddSource);
+ V(core_foundation_handle, CFRunLoopGetCurrent);
+ V(core_foundation_handle, CFRunLoopRemoveSource);
+ V(core_foundation_handle, CFRunLoopRun);
+ V(core_foundation_handle, CFRunLoopSourceCreate);
+ V(core_foundation_handle, CFRunLoopSourceSignal);
+ V(core_foundation_handle, CFRunLoopStop);
+ V(core_foundation_handle, CFRunLoopWakeUp);
+ V(core_foundation_handle, CFStringCreateWithFileSystemRepresentation);
+ V(core_foundation_handle, CFStringGetSystemEncoding);
+ V(core_foundation_handle, kCFRunLoopDefaultMode);
+ V(core_services_handle, FSEventStreamCreate);
+ V(core_services_handle, FSEventStreamFlushSync);
+ V(core_services_handle, FSEventStreamInvalidate);
+ V(core_services_handle, FSEventStreamRelease);
+ V(core_services_handle, FSEventStreamScheduleWithRunLoop);
+ V(core_services_handle, FSEventStreamStart);
+ V(core_services_handle, FSEventStreamStop);
+#undef V
+ err = 0;
+
+out:
+ if (err && core_services_handle != NULL) {
+ dlclose(core_services_handle);
+ core_services_handle = NULL;
+ }
+
+ if (err && core_foundation_handle != NULL) {
+ dlclose(core_foundation_handle);
+ core_foundation_handle = NULL;
+ }
+
+ pthread_mutex_unlock(&global_init_mutex);
+ return err;
+}
+
+
+/* Runs in UV loop */
+static int uv__fsevents_loop_init(uv_loop_t* loop) {
+ CFRunLoopSourceContext ctx;
+ uv__cf_loop_state_t* state;
+ pthread_attr_t attr_storage;
+ pthread_attr_t* attr;
+ int err;
+
+ if (loop->cf_state != NULL)
+ return 0;
+
+ err = uv__fsevents_global_init();
+ if (err)
+ return err;
+
+ state = uv__calloc(1, sizeof(*state));
+ if (state == NULL)
+ return UV_ENOMEM;
+
+ err = uv_mutex_init(&loop->cf_mutex);
+ if (err)
+ goto fail_mutex_init;
+
+ err = uv_sem_init(&loop->cf_sem, 0);
+ if (err)
+ goto fail_sem_init;
+
+ QUEUE_INIT(&loop->cf_signals);
+
+ err = uv_sem_init(&state->fsevent_sem, 0);
+ if (err)
+ goto fail_fsevent_sem_init;
+
+ err = uv_mutex_init(&state->fsevent_mutex);
+ if (err)
+ goto fail_fsevent_mutex_init;
+
+ QUEUE_INIT(&state->fsevent_handles);
+ state->fsevent_need_reschedule = 0;
+ state->fsevent_handle_count = 0;
+
+ memset(&ctx, 0, sizeof(ctx));
+ ctx.info = loop;
+ ctx.perform = uv__cf_loop_cb;
+ state->signal_source = pCFRunLoopSourceCreate(NULL, 0, &ctx);
+ if (state->signal_source == NULL) {
+ err = UV_ENOMEM;
+ goto fail_signal_source_create;
+ }
+
+ /* In the unlikely event that pthread_attr_init() fails, create the thread
+ * with the default stack size. We'll use a little more address space but
+ * that in itself is not a fatal error.
+ */
+ attr = &attr_storage;
+ if (pthread_attr_init(attr))
+ attr = NULL;
+
+ if (attr != NULL)
+ if (pthread_attr_setstacksize(attr, 4 * PTHREAD_STACK_MIN))
+ abort();
+
+ loop->cf_state = state;
+
+ /* uv_thread_t is an alias for pthread_t. */
+ err = UV__ERR(pthread_create(&loop->cf_thread, attr, uv__cf_loop_runner, loop));
+
+ if (attr != NULL)
+ pthread_attr_destroy(attr);
+
+ if (err)
+ goto fail_thread_create;
+
+ /* Synchronize threads */
+ uv_sem_wait(&loop->cf_sem);
+ return 0;
+
+fail_thread_create:
+ loop->cf_state = NULL;
+
+fail_signal_source_create:
+ uv_mutex_destroy(&state->fsevent_mutex);
+
+fail_fsevent_mutex_init:
+ uv_sem_destroy(&state->fsevent_sem);
+
+fail_fsevent_sem_init:
+ uv_sem_destroy(&loop->cf_sem);
+
+fail_sem_init:
+ uv_mutex_destroy(&loop->cf_mutex);
+
+fail_mutex_init:
+ uv__free(state);
+ return err;
+}
+
+
+/* Runs in UV loop */
+void uv__fsevents_loop_delete(uv_loop_t* loop) {
+ uv__cf_loop_signal_t* s;
+ uv__cf_loop_state_t* state;
+ QUEUE* q;
+
+ if (loop->cf_state == NULL)
+ return;
+
+ if (uv__cf_loop_signal(loop, NULL, kUVCFLoopSignalRegular) != 0)
+ abort();
+
+ uv_thread_join(&loop->cf_thread);
+ uv_sem_destroy(&loop->cf_sem);
+ uv_mutex_destroy(&loop->cf_mutex);
+
+ /* Free any remaining data */
+ while (!QUEUE_EMPTY(&loop->cf_signals)) {
+ q = QUEUE_HEAD(&loop->cf_signals);
+ s = QUEUE_DATA(q, uv__cf_loop_signal_t, member);
+ QUEUE_REMOVE(q);
+ uv__free(s);
+ }
+
+ /* Destroy state */
+ state = loop->cf_state;
+ uv_sem_destroy(&state->fsevent_sem);
+ uv_mutex_destroy(&state->fsevent_mutex);
+ pCFRelease(state->signal_source);
+ uv__free(state);
+ loop->cf_state = NULL;
+}
+
+
+/* Runs in CF thread. This is the CF loop's body */
+static void* uv__cf_loop_runner(void* arg) {
+ uv_loop_t* loop;
+ uv__cf_loop_state_t* state;
+
+ loop = arg;
+ state = loop->cf_state;
+ state->loop = pCFRunLoopGetCurrent();
+
+ pCFRunLoopAddSource(state->loop,
+ state->signal_source,
+ *pkCFRunLoopDefaultMode);
+
+ uv_sem_post(&loop->cf_sem);
+
+ pCFRunLoopRun();
+ pCFRunLoopRemoveSource(state->loop,
+ state->signal_source,
+ *pkCFRunLoopDefaultMode);
+
+ state->loop = NULL;
+
+ return NULL;
+}
+
+
+/* Runs in CF thread, executed after `uv__cf_loop_signal()` */
+static void uv__cf_loop_cb(void* arg) {
+ uv_loop_t* loop;
+ uv__cf_loop_state_t* state;
+ QUEUE* item;
+ QUEUE split_head;
+ uv__cf_loop_signal_t* s;
+
+ loop = arg;
+ state = loop->cf_state;
+
+ uv_mutex_lock(&loop->cf_mutex);
+ QUEUE_MOVE(&loop->cf_signals, &split_head);
+ uv_mutex_unlock(&loop->cf_mutex);
+
+ while (!QUEUE_EMPTY(&split_head)) {
+ item = QUEUE_HEAD(&split_head);
+ QUEUE_REMOVE(item);
+
+ s = QUEUE_DATA(item, uv__cf_loop_signal_t, member);
+
+ /* This was a termination signal */
+ if (s->handle == NULL)
+ pCFRunLoopStop(state->loop);
+ else
+ uv__fsevents_reschedule(s->handle, s->type);
+
+ uv__free(s);
+ }
+}
+
+
+/* Runs in UV loop to notify CF thread */
+int uv__cf_loop_signal(uv_loop_t* loop,
+ uv_fs_event_t* handle,
+ uv__cf_loop_signal_type_t type) {
+ uv__cf_loop_signal_t* item;
+ uv__cf_loop_state_t* state;
+
+ item = uv__malloc(sizeof(*item));
+ if (item == NULL)
+ return UV_ENOMEM;
+
+ item->handle = handle;
+ item->type = type;
+
+ uv_mutex_lock(&loop->cf_mutex);
+ QUEUE_INSERT_TAIL(&loop->cf_signals, &item->member);
+
+ state = loop->cf_state;
+ assert(state != NULL);
+ pCFRunLoopSourceSignal(state->signal_source);
+ pCFRunLoopWakeUp(state->loop);
+
+ uv_mutex_unlock(&loop->cf_mutex);
+
+ return 0;
+}
+
+
+/* Runs in UV loop to initialize handle */
+int uv__fsevents_init(uv_fs_event_t* handle) {
+ int err;
+ uv__cf_loop_state_t* state;
+
+ err = uv__fsevents_loop_init(handle->loop);
+ if (err)
+ return err;
+
+ /* Get absolute path to file */
+ handle->realpath = realpath(handle->path, NULL);
+ if (handle->realpath == NULL)
+ return UV__ERR(errno);
+ handle->realpath_len = strlen(handle->realpath);
+
+ /* Initialize event queue */
+ QUEUE_INIT(&handle->cf_events);
+ handle->cf_error = 0;
+
+  /*
+   * Events will occur in another thread.
+   * Initialize a callback for getting them back into the event loop's thread.
+   */
+ handle->cf_cb = uv__malloc(sizeof(*handle->cf_cb));
+ if (handle->cf_cb == NULL) {
+ err = UV_ENOMEM;
+ goto fail_cf_cb_malloc;
+ }
+
+ handle->cf_cb->data = handle;
+ uv_async_init(handle->loop, handle->cf_cb, uv__fsevents_cb);
+ handle->cf_cb->flags |= UV_HANDLE_INTERNAL;
+ uv_unref((uv_handle_t*) handle->cf_cb);
+
+ err = uv_mutex_init(&handle->cf_mutex);
+ if (err)
+ goto fail_cf_mutex_init;
+
+ /* Insert handle into the list */
+ state = handle->loop->cf_state;
+ uv_mutex_lock(&state->fsevent_mutex);
+ QUEUE_INSERT_TAIL(&state->fsevent_handles, &handle->cf_member);
+ state->fsevent_handle_count++;
+ state->fsevent_need_reschedule = 1;
+ uv_mutex_unlock(&state->fsevent_mutex);
+
+ /* Reschedule FSEventStream */
+ assert(handle != NULL);
+ err = uv__cf_loop_signal(handle->loop, handle, kUVCFLoopSignalRegular);
+ if (err)
+ goto fail_loop_signal;
+
+ return 0;
+
+fail_loop_signal:
+ uv_mutex_destroy(&handle->cf_mutex);
+
+fail_cf_mutex_init:
+ uv__free(handle->cf_cb);
+ handle->cf_cb = NULL;
+
+fail_cf_cb_malloc:
+ uv__free(handle->realpath);
+ handle->realpath = NULL;
+ handle->realpath_len = 0;
+
+ return err;
+}
+
+
+/* Runs in UV loop to de-initialize handle */
+int uv__fsevents_close(uv_fs_event_t* handle) {
+ int err;
+ uv__cf_loop_state_t* state;
+
+ if (handle->cf_cb == NULL)
+ return UV_EINVAL;
+
+ /* Remove handle from the list */
+ state = handle->loop->cf_state;
+ uv_mutex_lock(&state->fsevent_mutex);
+ QUEUE_REMOVE(&handle->cf_member);
+ state->fsevent_handle_count--;
+ state->fsevent_need_reschedule = 1;
+ uv_mutex_unlock(&state->fsevent_mutex);
+
+ /* Reschedule FSEventStream */
+ assert(handle != NULL);
+ err = uv__cf_loop_signal(handle->loop, handle, kUVCFLoopSignalClosing);
+ if (err)
+ return UV__ERR(err);
+
+ /* Wait for deinitialization */
+ uv_sem_wait(&state->fsevent_sem);
+
+ uv_close((uv_handle_t*) handle->cf_cb, (uv_close_cb) uv__free);
+ handle->cf_cb = NULL;
+
+ /* Free data in queue */
+ UV__FSEVENTS_PROCESS(handle, {
+ /* NOP */
+ });
+
+ uv_mutex_destroy(&handle->cf_mutex);
+ uv__free(handle->realpath);
+ handle->realpath = NULL;
+ handle->realpath_len = 0;
+
+ return 0;
+}
+
+#endif /* TARGET_OS_IPHONE */
diff --git a/Utilities/cmlibuv/src/unix/getaddrinfo.c b/Utilities/cmlibuv/src/unix/getaddrinfo.c
new file mode 100644
index 0000000..d7ca7d1
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/getaddrinfo.c
@@ -0,0 +1,255 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/* Expose glibc-specific EAI_* error codes. Needs to be defined before we
+ * include any headers.
+ */
+#ifndef _GNU_SOURCE
+# define _GNU_SOURCE
+#endif
+
+#include "uv.h"
+#include "internal.h"
+#include "idna.h"
+
+#include <errno.h>
+#include <stddef.h> /* NULL */
+#include <stdlib.h>
+#include <string.h>
+#include <net/if.h> /* if_indextoname() */
+
+/* EAI_* constants. */
+#include <netdb.h>
+
+
+int uv__getaddrinfo_translate_error(int sys_err) {
+ switch (sys_err) {
+ case 0: return 0;
+#if defined(EAI_ADDRFAMILY)
+ case EAI_ADDRFAMILY: return UV_EAI_ADDRFAMILY;
+#endif
+#if defined(EAI_AGAIN)
+ case EAI_AGAIN: return UV_EAI_AGAIN;
+#endif
+#if defined(EAI_BADFLAGS)
+ case EAI_BADFLAGS: return UV_EAI_BADFLAGS;
+#endif
+#if defined(EAI_BADHINTS)
+ case EAI_BADHINTS: return UV_EAI_BADHINTS;
+#endif
+#if defined(EAI_CANCELED)
+ case EAI_CANCELED: return UV_EAI_CANCELED;
+#endif
+#if defined(EAI_FAIL)
+ case EAI_FAIL: return UV_EAI_FAIL;
+#endif
+#if defined(EAI_FAMILY)
+ case EAI_FAMILY: return UV_EAI_FAMILY;
+#endif
+#if defined(EAI_MEMORY)
+ case EAI_MEMORY: return UV_EAI_MEMORY;
+#endif
+#if defined(EAI_NODATA)
+ case EAI_NODATA: return UV_EAI_NODATA;
+#endif
+#if defined(EAI_NONAME)
+# if !defined(EAI_NODATA) || EAI_NODATA != EAI_NONAME
+ case EAI_NONAME: return UV_EAI_NONAME;
+# endif
+#endif
+#if defined(EAI_OVERFLOW)
+ case EAI_OVERFLOW: return UV_EAI_OVERFLOW;
+#endif
+#if defined(EAI_PROTOCOL)
+ case EAI_PROTOCOL: return UV_EAI_PROTOCOL;
+#endif
+#if defined(EAI_SERVICE)
+ case EAI_SERVICE: return UV_EAI_SERVICE;
+#endif
+#if defined(EAI_SOCKTYPE)
+ case EAI_SOCKTYPE: return UV_EAI_SOCKTYPE;
+#endif
+#if defined(EAI_SYSTEM)
+ case EAI_SYSTEM: return UV__ERR(errno);
+#endif
+ }
+ assert(!"unknown EAI_* error code");
+ abort();
+#ifndef __SUNPRO_C
+ return 0; /* Pacify compiler. */
+#endif
+}
+
+
+static void uv__getaddrinfo_work(struct uv__work* w) {
+ uv_getaddrinfo_t* req;
+ int err;
+
+ req = container_of(w, uv_getaddrinfo_t, work_req);
+ err = getaddrinfo(req->hostname, req->service, req->hints, &req->addrinfo);
+ req->retcode = uv__getaddrinfo_translate_error(err);
+}
+
+
+static void uv__getaddrinfo_done(struct uv__work* w, int status) {
+ uv_getaddrinfo_t* req;
+
+ req = container_of(w, uv_getaddrinfo_t, work_req);
+ uv__req_unregister(req->loop, req);
+
+ /* See initialization in uv_getaddrinfo(). */
+ if (req->hints)
+ uv__free(req->hints);
+ else if (req->service)
+ uv__free(req->service);
+ else if (req->hostname)
+ uv__free(req->hostname);
+ else
+ assert(0);
+
+ req->hints = NULL;
+ req->service = NULL;
+ req->hostname = NULL;
+
+ if (status == UV_ECANCELED) {
+ assert(req->retcode == 0);
+ req->retcode = UV_EAI_CANCELED;
+ }
+
+ if (req->cb)
+ req->cb(req, req->retcode, req->addrinfo);
+}
+
+
+int uv_getaddrinfo(uv_loop_t* loop,
+ uv_getaddrinfo_t* req,
+ uv_getaddrinfo_cb cb,
+ const char* hostname,
+ const char* service,
+ const struct addrinfo* hints) {
+ char hostname_ascii[256];
+ size_t hostname_len;
+ size_t service_len;
+ size_t hints_len;
+ size_t len;
+ char* buf;
+ long rc;
+
+ if (req == NULL || (hostname == NULL && service == NULL))
+ return UV_EINVAL;
+
+  /* FIXME(bnoordhuis) IDNA does not seem to work on z/OS,
+   * probably because it uses EBCDIC rather than ASCII.
+   */
+#ifdef __MVS__
+ (void) &hostname_ascii;
+#else
+ if (hostname != NULL) {
+ rc = uv__idna_toascii(hostname,
+ hostname + strlen(hostname),
+ hostname_ascii,
+ hostname_ascii + sizeof(hostname_ascii));
+ if (rc < 0)
+ return rc;
+ hostname = hostname_ascii;
+ }
+#endif
+
+ hostname_len = hostname ? strlen(hostname) + 1 : 0;
+ service_len = service ? strlen(service) + 1 : 0;
+ hints_len = hints ? sizeof(*hints) : 0;
+ buf = uv__malloc(hostname_len + service_len + hints_len);
+
+ if (buf == NULL)
+ return UV_ENOMEM;
+
+ uv__req_init(loop, req, UV_GETADDRINFO);
+ req->loop = loop;
+ req->cb = cb;
+ req->addrinfo = NULL;
+ req->hints = NULL;
+ req->service = NULL;
+ req->hostname = NULL;
+ req->retcode = 0;
+
+  /* Order matters; see uv__getaddrinfo_done(). */
+ len = 0;
+
+ if (hints) {
+ req->hints = memcpy(buf + len, hints, sizeof(*hints));
+ len += sizeof(*hints);
+ }
+
+ if (service) {
+ req->service = memcpy(buf + len, service, service_len);
+ len += service_len;
+ }
+
+ if (hostname)
+ req->hostname = memcpy(buf + len, hostname, hostname_len);
+
+ if (cb) {
+ uv__work_submit(loop,
+ &req->work_req,
+ UV__WORK_SLOW_IO,
+ uv__getaddrinfo_work,
+ uv__getaddrinfo_done);
+ return 0;
+ } else {
+ uv__getaddrinfo_work(&req->work_req);
+ uv__getaddrinfo_done(&req->work_req, 0);
+ return req->retcode;
+ }
+}
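+
+/* Editor's note: a brief, hypothetical usage sketch (not part of this
+ * patch).  With cb == NULL the lookup runs synchronously and the return
+ * value is req->retcode; the result list must be released with
+ * uv_freeaddrinfo().
+ */
+#if 0  /* illustration only */
+static int resolve_example(uv_loop_t* loop) {
+  uv_getaddrinfo_t req;
+  struct addrinfo hints;
+  int rc;
+
+  memset(&hints, 0, sizeof(hints));
+  hints.ai_family = AF_UNSPEC;
+  hints.ai_socktype = SOCK_STREAM;
+
+  rc = uv_getaddrinfo(loop, &req, NULL, "localhost", "80", &hints);
+  if (rc == 0)
+    uv_freeaddrinfo(req.addrinfo);
+
+  return rc;
+}
+#endif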
+
+
+void uv_freeaddrinfo(struct addrinfo* ai) {
+ if (ai)
+ freeaddrinfo(ai);
+}
+
+
+int uv_if_indextoname(unsigned int ifindex, char* buffer, size_t* size) {
+ char ifname_buf[UV_IF_NAMESIZE];
+ size_t len;
+
+ if (buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+ if (if_indextoname(ifindex, ifname_buf) == NULL)
+ return UV__ERR(errno);
+
+ len = strnlen(ifname_buf, sizeof(ifname_buf));
+
+ if (*size <= len) {
+ *size = len + 1;
+ return UV_ENOBUFS;
+ }
+
+ memcpy(buffer, ifname_buf, len);
+ buffer[len] = '\0';
+ *size = len;
+
+ return 0;
+}
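+
+/* Editor's note: a small, hypothetical sketch (not part of this patch)
+ * showing the size negotiation above: on UV_ENOBUFS the required buffer
+ * size, including the terminating NUL, is stored in *size.
+ */
+#if 0  /* illustration only */
+static int ifname_example(unsigned int ifindex, char* buf, size_t buflen) {
+  size_t size = buflen;
+  int rc = uv_if_indextoname(ifindex, buf, &size);
+
+  if (rc == UV_ENOBUFS) {
+    /* `size` now holds the length the caller needs, NUL included. */
+  }
+  return rc;
+}
+#endif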
+
+int uv_if_indextoiid(unsigned int ifindex, char* buffer, size_t* size) {
+ return uv_if_indextoname(ifindex, buffer, size);
+}
diff --git a/Utilities/cmlibuv/src/unix/getnameinfo.c b/Utilities/cmlibuv/src/unix/getnameinfo.c
new file mode 100644
index 0000000..b695081
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/getnameinfo.c
@@ -0,0 +1,121 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+*
+* Permission is hereby granted, free of charge, to any person obtaining a copy
+* of this software and associated documentation files (the "Software"), to
+* deal in the Software without restriction, including without limitation the
+* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+* sell copies of the Software, and to permit persons to whom the Software is
+* furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+* IN THE SOFTWARE.
+*/
+
+#include "uv.h"
+#include "internal.h"
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+
+static void uv__getnameinfo_work(struct uv__work* w) {
+ uv_getnameinfo_t* req;
+ int err;
+ socklen_t salen;
+
+ req = container_of(w, uv_getnameinfo_t, work_req);
+
+ if (req->storage.ss_family == AF_INET)
+ salen = sizeof(struct sockaddr_in);
+ else if (req->storage.ss_family == AF_INET6)
+ salen = sizeof(struct sockaddr_in6);
+ else
+ abort();
+
+ err = getnameinfo((struct sockaddr*) &req->storage,
+ salen,
+ req->host,
+ sizeof(req->host),
+ req->service,
+ sizeof(req->service),
+ req->flags);
+ req->retcode = uv__getaddrinfo_translate_error(err);
+}
+
+static void uv__getnameinfo_done(struct uv__work* w, int status) {
+ uv_getnameinfo_t* req;
+ char* host;
+ char* service;
+
+ req = container_of(w, uv_getnameinfo_t, work_req);
+ uv__req_unregister(req->loop, req);
+ host = service = NULL;
+
+ if (status == UV_ECANCELED) {
+ assert(req->retcode == 0);
+ req->retcode = UV_EAI_CANCELED;
+ } else if (req->retcode == 0) {
+ host = req->host;
+ service = req->service;
+ }
+
+ if (req->getnameinfo_cb)
+ req->getnameinfo_cb(req, req->retcode, host, service);
+}
+
+/*
+* Entry point for getnameinfo.
+* Returns 0 if a callback will be made;
+* returns an error code if validation fails.
+*/
+int uv_getnameinfo(uv_loop_t* loop,
+ uv_getnameinfo_t* req,
+ uv_getnameinfo_cb getnameinfo_cb,
+ const struct sockaddr* addr,
+ int flags) {
+ if (req == NULL || addr == NULL)
+ return UV_EINVAL;
+
+ if (addr->sa_family == AF_INET) {
+ memcpy(&req->storage,
+ addr,
+ sizeof(struct sockaddr_in));
+ } else if (addr->sa_family == AF_INET6) {
+ memcpy(&req->storage,
+ addr,
+ sizeof(struct sockaddr_in6));
+ } else {
+ return UV_EINVAL;
+ }
+
+ uv__req_init(loop, (uv_req_t*)req, UV_GETNAMEINFO);
+
+ req->getnameinfo_cb = getnameinfo_cb;
+ req->flags = flags;
+ req->type = UV_GETNAMEINFO;
+ req->loop = loop;
+ req->retcode = 0;
+
+ if (getnameinfo_cb) {
+ uv__work_submit(loop,
+ &req->work_req,
+ UV__WORK_SLOW_IO,
+ uv__getnameinfo_work,
+ uv__getnameinfo_done);
+ return 0;
+ } else {
+ uv__getnameinfo_work(&req->work_req);
+ uv__getnameinfo_done(&req->work_req, 0);
+ return req->retcode;
+ }
+}
diff --git a/Utilities/cmlibuv/src/unix/haiku.c b/Utilities/cmlibuv/src/unix/haiku.c
new file mode 100644
index 0000000..cf17d83
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/haiku.c
@@ -0,0 +1,167 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <FindDirectory.h> /* find_path() */
+#include <OS.h>
+
+
+void uv_loadavg(double avg[3]) {
+ avg[0] = 0;
+ avg[1] = 0;
+ avg[2] = 0;
+}
+
+
+int uv_exepath(char* buffer, size_t* size) {
+ char abspath[B_PATH_NAME_LENGTH];
+ status_t status;
+ ssize_t abspath_len;
+
+ if (buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+ status = find_path(B_APP_IMAGE_SYMBOL, B_FIND_PATH_IMAGE_PATH, NULL, abspath,
+ sizeof(abspath));
+ if (status != B_OK)
+ return UV__ERR(status);
+
+ abspath_len = uv__strscpy(buffer, abspath, *size);
+ *size -= 1;
+ if (abspath_len >= 0 && *size > (size_t)abspath_len)
+ *size = (size_t)abspath_len;
+
+ return 0;
+}
+
+
+uint64_t uv_get_free_memory(void) {
+ status_t status;
+ system_info sinfo;
+
+ status = get_system_info(&sinfo);
+ if (status != B_OK)
+ return 0;
+
+ return (sinfo.max_pages - sinfo.used_pages) * B_PAGE_SIZE;
+}
+
+
+uint64_t uv_get_total_memory(void) {
+ status_t status;
+ system_info sinfo;
+
+ status = get_system_info(&sinfo);
+ if (status != B_OK)
+ return 0;
+
+ return sinfo.max_pages * B_PAGE_SIZE;
+}
+
+
+uint64_t uv_get_constrained_memory(void) {
+ return 0; /* Memory constraints are unknown. */
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+ area_info area;
+ ssize_t cookie;
+ status_t status;
+ thread_info thread;
+
+ status = get_thread_info(find_thread(NULL), &thread);
+ if (status != B_OK)
+ return UV__ERR(status);
+
+ cookie = 0;
+ *rss = 0;
+ while (get_next_area_info(thread.team, &cookie, &area) == B_OK)
+ *rss += area.ram_size;
+
+ return 0;
+}
+
+
+int uv_uptime(double* uptime) {
+ /* system_time() returns time since booting in microseconds */
+ *uptime = (double)system_time() / 1000000;
+ return 0;
+}
+
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+ cpu_topology_node_info* topology_infos;
+ int i;
+ status_t status;
+ system_info system;
+ uint32_t topology_count;
+ uint64_t cpuspeed;
+ uv_cpu_info_t* cpu_info;
+
+ if (cpu_infos == NULL || count == NULL)
+ return UV_EINVAL;
+
+ status = get_cpu_topology_info(NULL, &topology_count);
+ if (status != B_OK)
+ return UV__ERR(status);
+
+ topology_infos = uv__malloc(topology_count * sizeof(*topology_infos));
+ if (topology_infos == NULL)
+ return UV_ENOMEM;
+
+ status = get_cpu_topology_info(topology_infos, &topology_count);
+ if (status != B_OK) {
+ uv__free(topology_infos);
+ return UV__ERR(status);
+ }
+
+ cpuspeed = 0;
+ for (i = 0; i < (int)topology_count; i++) {
+ if (topology_infos[i].type == B_TOPOLOGY_CORE) {
+ cpuspeed = topology_infos[i].data.core.default_frequency;
+ break;
+ }
+ }
+
+ uv__free(topology_infos);
+
+ status = get_system_info(&system);
+ if (status != B_OK)
+ return UV__ERR(status);
+
+ *cpu_infos = uv__calloc(system.cpu_count, sizeof(**cpu_infos));
+ if (*cpu_infos == NULL)
+ return UV_ENOMEM;
+
+ /* CPU time and model are not exposed by Haiku. */
+ cpu_info = *cpu_infos;
+ for (i = 0; i < (int)system.cpu_count; i++) {
+ cpu_info->model = uv__strdup("unknown");
+ cpu_info->speed = (int)(cpuspeed / 1000000);
+ cpu_info++;
+ }
+ *count = system.cpu_count;
+
+ return 0;
+}
diff --git a/Utilities/cmlibuv/src/unix/hpux.c b/Utilities/cmlibuv/src/unix/hpux.c
new file mode 100644
index 0000000..4d3f628
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/hpux.c
@@ -0,0 +1,30 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdint.h>
+#include <time.h>
+
+uint64_t uv__hrtime(uv_clocktype_t type) {
+ return (uint64_t) gethrtime();
+}
diff --git a/Utilities/cmlibuv/src/unix/ibmi.c b/Utilities/cmlibuv/src/unix/ibmi.c
new file mode 100644
index 0000000..73ab02a
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/ibmi.c
@@ -0,0 +1,501 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/ioctl.h>
+#include <net/if.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+#include <sys/time.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <utmp.h>
+#include <libgen.h>
+
+#include <sys/protosw.h>
+#include <procinfo.h>
+#include <sys/proc.h>
+#include <sys/procfs.h>
+
+#include <ctype.h>
+
+#include <sys/mntctl.h>
+#include <sys/vmount.h>
+#include <limits.h>
+#include <strings.h>
+#include <sys/vnode.h>
+
+#include <as400_protos.h>
+#include <as400_types.h>
+
+char* original_exepath = NULL;
+uv_mutex_t process_title_mutex;
+uv_once_t process_title_mutex_once = UV_ONCE_INIT;
+
+typedef struct {
+ int bytes_available;
+ int bytes_returned;
+ char current_date_and_time[8];
+ char system_name[8];
+ char elapsed_time[6];
+ char restricted_state_flag;
+ char reserved;
+ int percent_processing_unit_used;
+ int jobs_in_system;
+ int percent_permanent_addresses;
+ int percent_temporary_addresses;
+ int system_asp;
+ int percent_system_asp_used;
+ int total_auxiliary_storage;
+ int current_unprotected_storage_used;
+ int maximum_unprotected_storage_used;
+ int percent_db_capability;
+ int main_storage_size;
+ int number_of_partitions;
+ int partition_identifier;
+ int reserved1;
+ int current_processing_capacity;
+ char processor_sharing_attribute;
+ char reserved2[3];
+ int number_of_processors;
+ int active_jobs_in_system;
+ int active_threads_in_system;
+ int maximum_jobs_in_system;
+ int percent_temporary_256mb_segments_used;
+ int percent_temporary_4gb_segments_used;
+ int percent_permanent_256mb_segments_used;
+ int percent_permanent_4gb_segments_used;
+ int percent_current_interactive_performance;
+ int percent_uncapped_cpu_capacity_used;
+ int percent_shared_processor_pool_used;
+ long main_storage_size_long;
+} SSTS0200;
+
+
+typedef struct {
+ char header[208];
+ unsigned char loca_adapter_address[12];
+} LIND0500;
+
+
+typedef struct {
+ int bytes_provided;
+ int bytes_available;
+ char msgid[7];
+} errcode_s;
+
+
+static const unsigned char e2a[256] = {
+ 0, 1, 2, 3, 156, 9, 134, 127, 151, 141, 142, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 157, 133, 8, 135, 24, 25, 146, 143, 28, 29, 30, 31,
+ 128, 129, 130, 131, 132, 10, 23, 27, 136, 137, 138, 139, 140, 5, 6, 7,
+ 144, 145, 22, 147, 148, 149, 150, 4, 152, 153, 154, 155, 20, 21, 158, 26,
+ 32, 160, 161, 162, 163, 164, 165, 166, 167, 168, 91, 46, 60, 40, 43, 33,
+ 38, 169, 170, 171, 172, 173, 174, 175, 176, 177, 93, 36, 42, 41, 59, 94,
+ 45, 47, 178, 179, 180, 181, 182, 183, 184, 185, 124, 44, 37, 95, 62, 63,
+ 186, 187, 188, 189, 190, 191, 192, 193, 194, 96, 58, 35, 64, 39, 61, 34,
+ 195, 97, 98, 99, 100, 101, 102, 103, 104, 105, 196, 197, 198, 199, 200, 201,
+ 202, 106, 107, 108, 109, 110, 111, 112, 113, 114, 203, 204, 205, 206, 207, 208,
+ 209, 126, 115, 116, 117, 118, 119, 120, 121, 122, 210, 211, 212, 213, 214, 215,
+ 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231,
+ 123, 65, 66, 67, 68, 69, 70, 71, 72, 73, 232, 233, 234, 235, 236, 237,
+ 125, 74, 75, 76, 77, 78, 79, 80, 81, 82, 238, 239, 240, 241, 242, 243,
+ 92, 159, 83, 84, 85, 86, 87, 88, 89, 90, 244, 245, 246, 247, 248, 249,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 250, 251, 252, 253, 254, 255};
+
+
+static const unsigned char a2e[256] = {
+ 0, 1, 2, 3, 55, 45, 46, 47, 22, 5, 37, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 60, 61, 50, 38, 24, 25, 63, 39, 28, 29, 30, 31,
+ 64, 79, 127, 123, 91, 108, 80, 125, 77, 93, 92, 78, 107, 96, 75, 97,
+ 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 122, 94, 76, 126, 110, 111,
+ 124, 193, 194, 195, 196, 197, 198, 199, 200, 201, 209, 210, 211, 212, 213, 214,
+ 215, 216, 217, 226, 227, 228, 229, 230, 231, 232, 233, 74, 224, 90, 95, 109,
+ 121, 129, 130, 131, 132, 133, 134, 135, 136, 137, 145, 146, 147, 148, 149, 150,
+ 151, 152, 153, 162, 163, 164, 165, 166, 167, 168, 169, 192, 106, 208, 161, 7,
+ 32, 33, 34, 35, 36, 21, 6, 23, 40, 41, 42, 43, 44, 9, 10, 27,
+ 48, 49, 26, 51, 52, 53, 54, 8, 56, 57, 58, 59, 4, 20, 62, 225,
+ 65, 66, 67, 68, 69, 70, 71, 72, 73, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 98, 99, 100, 101, 102, 103, 104, 105, 112, 113, 114, 115, 116, 117,
+ 118, 119, 120, 128, 138, 139, 140, 141, 142, 143, 144, 154, 155, 156, 157, 158,
+ 159, 160, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183,
+ 184, 185, 186, 187, 188, 189, 190, 191, 202, 203, 204, 205, 206, 207, 218, 219,
+ 220, 221, 222, 223, 234, 235, 236, 237, 238, 239, 250, 251, 252, 253, 254, 255};
+
+
+static void iconv_e2a(unsigned char src[], unsigned char dst[], size_t length) {
+ size_t i;
+ for (i = 0; i < length; i++)
+ dst[i] = e2a[src[i]];
+}
+
+
+static void iconv_a2e(const char* src, unsigned char dst[], size_t length) {
+ size_t srclen;
+ size_t i;
+
+ srclen = strlen(src);
+ if (srclen > length)
+ abort();
+ for (i = 0; i < srclen; i++)
+ dst[i] = a2e[src[i]];
+ /* padding the remaining part with spaces */
+ for (; i < length; i++)
+ dst[i] = a2e[' '];
+}
+
+void init_process_title_mutex_once(void) {
+ uv_mutex_init(&process_title_mutex);
+}
+
+static int get_ibmi_system_status(SSTS0200* rcvr) {
+ /* rcvrlen is input parameter 2 to QWCRSSTS */
+ unsigned int rcvrlen = sizeof(*rcvr);
+ unsigned char format[8], reset_status[10];
+
+ /* format is input parameter 3 to QWCRSSTS */
+ iconv_a2e("SSTS0200", format, sizeof(format));
+ /* reset_status is input parameter 4 */
+ iconv_a2e("*NO", reset_status, sizeof(reset_status));
+
+ /* errcode is input parameter 5 to QWCRSSTS */
+ errcode_s errcode;
+
+ /* qwcrssts_pointer is the 16-byte tagged system pointer to QWCRSSTS */
+ ILEpointer __attribute__((aligned(16))) qwcrssts_pointer;
+
+ /* qwcrssts_argv is the array of argument pointers to QWCRSSTS */
+ void* qwcrssts_argv[6];
+
+ /* Set the IBM i pointer to the QSYS/QWCRSSTS *PGM object */
+ int rc = _RSLOBJ2(&qwcrssts_pointer, RSLOBJ_TS_PGM, "QWCRSSTS", "QSYS");
+
+ if (rc != 0)
+ return rc;
+
+ /* initialize the QWCRSSTS returned info structure */
+ memset(rcvr, 0, sizeof(*rcvr));
+
+ /* initialize the QWCRSSTS error code structure */
+ memset(&errcode, 0, sizeof(errcode));
+ errcode.bytes_provided = sizeof(errcode);
+
+ /* initialize the array of argument pointers for the QWCRSSTS API */
+ qwcrssts_argv[0] = rcvr;
+ qwcrssts_argv[1] = &rcvrlen;
+ qwcrssts_argv[2] = &format;
+ qwcrssts_argv[3] = &reset_status;
+ qwcrssts_argv[4] = &errcode;
+ qwcrssts_argv[5] = NULL;
+
+ /* Call the IBM i QWCRSSTS API from PASE */
+ rc = _PGMCALL(&qwcrssts_pointer, qwcrssts_argv, 0);
+
+ return rc;
+}
+
+
+uint64_t uv_get_free_memory(void) {
+ SSTS0200 rcvr;
+
+ if (get_ibmi_system_status(&rcvr))
+ return 0;
+
+ return (uint64_t)rcvr.main_storage_size * 1024ULL;
+}
+
+
+uint64_t uv_get_total_memory(void) {
+ SSTS0200 rcvr;
+
+ if (get_ibmi_system_status(&rcvr))
+ return 0;
+
+ return (uint64_t)rcvr.main_storage_size * 1024ULL;
+}
+
+
+uint64_t uv_get_constrained_memory(void) {
+ return 0; /* Memory constraints are unknown. */
+}
+
+
+void uv_loadavg(double avg[3]) {
+ SSTS0200 rcvr;
+
+ if (get_ibmi_system_status(&rcvr)) {
+ avg[0] = avg[1] = avg[2] = 0;
+ return;
+ }
+
+  /* The average (in tenths of a percent) of the elapsed time during which
+   * the processing units were in use. For example, a returned value of 411
+   * means 41.1%. This percentage can be greater than 100% for an uncapped
+   * partition.
+   */
+ double processing_unit_used_percent =
+ rcvr.percent_processing_unit_used / 1000.0;
+
+ avg[0] = avg[1] = avg[2] = processing_unit_used_percent;
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+ *rss = 0;
+ return 0;
+}
+
+
+int uv_uptime(double* uptime) {
+ return UV_ENOSYS;
+}
+
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+ unsigned int numcpus, idx = 0;
+ uv_cpu_info_t* cpu_info;
+
+ *cpu_infos = NULL;
+ *count = 0;
+
+ numcpus = sysconf(_SC_NPROCESSORS_ONLN);
+
+ *cpu_infos = uv__malloc(numcpus * sizeof(uv_cpu_info_t));
+ if (!*cpu_infos) {
+ return UV_ENOMEM;
+ }
+
+ cpu_info = *cpu_infos;
+ for (idx = 0; idx < numcpus; idx++) {
+ cpu_info->speed = 0;
+ cpu_info->model = uv__strdup("unknown");
+ cpu_info->cpu_times.user = 0;
+ cpu_info->cpu_times.sys = 0;
+ cpu_info->cpu_times.idle = 0;
+ cpu_info->cpu_times.irq = 0;
+ cpu_info->cpu_times.nice = 0;
+ cpu_info++;
+ }
+ *count = numcpus;
+
+ return 0;
+}
+
+
+static int get_ibmi_physical_address(const char* line, char (*phys_addr)[6]) {
+ LIND0500 rcvr;
+ /* rcvrlen is input parameter 2 to QDCRLIND */
+ unsigned int rcvrlen = sizeof(rcvr);
+ unsigned char format[8], line_name[10];
+ unsigned char mac_addr[sizeof(rcvr.loca_adapter_address)];
+ int c[6];
+
+ /* format is input parameter 3 to QDCRLIND */
+ iconv_a2e("LIND0500", format, sizeof(format));
+
+ /* line_name is input parameter 4 to QDCRLIND */
+ iconv_a2e(line, line_name, sizeof(line_name));
+
+ /* err is input parameter 5 to QDCRLIND */
+ errcode_s err;
+
+ /* qdcrlind_pointer is the 16-byte tagged system pointer to QDCRLIND */
+ ILEpointer __attribute__((aligned(16))) qdcrlind_pointer;
+
+ /* qdcrlind_argv is the array of argument pointers to QDCRLIND */
+ void* qdcrlind_argv[6];
+
+ /* Set the IBM i pointer to the QSYS/QDCRLIND *PGM object */
+ int rc = _RSLOBJ2(&qdcrlind_pointer, RSLOBJ_TS_PGM, "QDCRLIND", "QSYS");
+
+ if (rc != 0)
+ return rc;
+
+ /* initialize the QDCRLIND returned info structure */
+ memset(&rcvr, 0, sizeof(rcvr));
+
+ /* initialize the QDCRLIND error code structure */
+ memset(&err, 0, sizeof(err));
+ err.bytes_provided = sizeof(err);
+
+ /* initialize the array of argument pointers for the QDCRLIND API */
+ qdcrlind_argv[0] = &rcvr;
+ qdcrlind_argv[1] = &rcvrlen;
+ qdcrlind_argv[2] = &format;
+ qdcrlind_argv[3] = &line_name;
+ qdcrlind_argv[4] = &err;
+ qdcrlind_argv[5] = NULL;
+
+ /* Call the IBM i QDCRLIND API from PASE */
+ rc = _PGMCALL(&qdcrlind_pointer, qdcrlind_argv, 0);
+ if (rc != 0)
+ return rc;
+
+ /* convert ebcdic loca_adapter_address to ascii first */
+ iconv_e2a(rcvr.loca_adapter_address, mac_addr,
+ sizeof(rcvr.loca_adapter_address));
+
+ /* convert loca_adapter_address(char[12]) to phys_addr(char[6]) */
+ int r = sscanf(mac_addr, "%02x%02x%02x%02x%02x%02x",
+ &c[0], &c[1], &c[2], &c[3], &c[4], &c[5]);
+
+ if (r == ARRAY_SIZE(c)) {
+ (*phys_addr)[0] = c[0];
+ (*phys_addr)[1] = c[1];
+ (*phys_addr)[2] = c[2];
+ (*phys_addr)[3] = c[3];
+ (*phys_addr)[4] = c[4];
+ (*phys_addr)[5] = c[5];
+ } else {
+ memset(*phys_addr, 0, sizeof(*phys_addr));
+ rc = -1;
+ }
+ return rc;
+}
+
+
+int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
+ uv_interface_address_t* address;
+ struct ifaddrs_pase *ifap = NULL, *cur;
+ int inet6, r = 0;
+
+ *count = 0;
+ *addresses = NULL;
+
+ if (Qp2getifaddrs(&ifap))
+ return UV_ENOSYS;
+
+ /* The first loop to get the size of the array to be allocated */
+ for (cur = ifap; cur; cur = cur->ifa_next) {
+ if (!(cur->ifa_addr->sa_family == AF_INET6 ||
+ cur->ifa_addr->sa_family == AF_INET))
+ continue;
+
+ if (!(cur->ifa_flags & IFF_UP && cur->ifa_flags & IFF_RUNNING))
+ continue;
+
+ (*count)++;
+ }
+
+ if (*count == 0) {
+ Qp2freeifaddrs(ifap);
+ return 0;
+ }
+
+ /* Alloc the return interface structs */
+ *addresses = uv__calloc(*count, sizeof(**addresses));
+ if (*addresses == NULL) {
+ Qp2freeifaddrs(ifap);
+ return UV_ENOMEM;
+ }
+ address = *addresses;
+
+ /* The second loop to fill in the array */
+ for (cur = ifap; cur; cur = cur->ifa_next) {
+ if (!(cur->ifa_addr->sa_family == AF_INET6 ||
+ cur->ifa_addr->sa_family == AF_INET))
+ continue;
+
+ if (!(cur->ifa_flags & IFF_UP && cur->ifa_flags & IFF_RUNNING))
+ continue;
+
+ address->name = uv__strdup(cur->ifa_name);
+
+ inet6 = (cur->ifa_addr->sa_family == AF_INET6);
+
+ if (inet6) {
+ address->address.address6 = *((struct sockaddr_in6*)cur->ifa_addr);
+ address->netmask.netmask6 = *((struct sockaddr_in6*)cur->ifa_netmask);
+ address->netmask.netmask6.sin6_family = AF_INET6;
+ } else {
+ address->address.address4 = *((struct sockaddr_in*)cur->ifa_addr);
+ address->netmask.netmask4 = *((struct sockaddr_in*)cur->ifa_netmask);
+ address->netmask.netmask4.sin_family = AF_INET;
+ }
+ address->is_internal = cur->ifa_flags & IFF_LOOPBACK ? 1 : 0;
+ if (!address->is_internal) {
+ int rc = get_ibmi_physical_address(address->name, &address->phys_addr);
+ if (rc != 0)
+ r = rc;
+ }
+
+ address++;
+ }
+
+ Qp2freeifaddrs(ifap);
+ return r;
+}
+
+
+void uv_free_interface_addresses(uv_interface_address_t* addresses, int count) {
+ int i;
+
+ for (i = 0; i < count; ++i) {
+ uv__free(addresses[i].name);
+ }
+
+ uv__free(addresses);
+}
+
+char** uv_setup_args(int argc, char** argv) {
+ char exepath[UV__PATH_MAX];
+ char* s;
+ size_t size;
+
+ if (argc > 0) {
+ /* Use argv[0] to determine value for uv_exepath(). */
+ size = sizeof(exepath);
+ if (uv__search_path(argv[0], exepath, &size) == 0) {
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+ original_exepath = uv__strdup(exepath);
+ uv_mutex_unlock(&process_title_mutex);
+ }
+ }
+
+ return argv;
+}
+
+int uv_set_process_title(const char* title) {
+ return 0;
+}
+
+int uv_get_process_title(char* buffer, size_t size) {
+ if (buffer == NULL || size == 0)
+ return UV_EINVAL;
+
+ buffer[0] = '\0';
+ return 0;
+}
+
+void uv__process_title_cleanup(void) {
+}
diff --git a/Utilities/cmlibuv/src/unix/internal.h b/Utilities/cmlibuv/src/unix/internal.h
new file mode 100644
index 0000000..444dc85
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/internal.h
@@ -0,0 +1,354 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_UNIX_INTERNAL_H_
+#define UV_UNIX_INTERNAL_H_
+
+#include "uv-common.h"
+
+#include <assert.h>
+#include <limits.h> /* _POSIX_PATH_MAX, PATH_MAX */
+#include <stdlib.h> /* abort */
+#include <string.h> /* strrchr */
+#include <fcntl.h> /* O_CLOEXEC and O_NONBLOCK, if supported. */
+#include <stdio.h>
+#include <errno.h>
+#include <sys/socket.h>
+
+#if defined(__STRICT_ANSI__)
+# define inline __inline
+#endif
+
+#if defined(__linux__)
+# include "linux-syscalls.h"
+#endif /* __linux__ */
+
+#if defined(__MVS__)
+# include "os390-syscalls.h"
+#endif /* __MVS__ */
+
+#if defined(__sun)
+# include <sys/port.h>
+# include <port.h>
+#endif /* __sun */
+
+#if defined(_AIX)
+# define reqevents events
+# define rtnevents revents
+# include <sys/poll.h>
+#else
+# include <poll.h>
+#endif /* _AIX */
+
+#if defined(__APPLE__) && !TARGET_OS_IPHONE
+# include <AvailabilityMacros.h>
+#endif
+
+#if defined(PATH_MAX)
+# define UV__PATH_MAX PATH_MAX
+#else
+# define UV__PATH_MAX 8192
+#endif
+
+#if defined(CMAKE_BOOTSTRAP)
+# undef pthread_atfork
+# define pthread_atfork(prepare, parent, child) \
+ uv__pthread_atfork(prepare, parent, child)
+int uv__pthread_atfork(void (*prepare)(void), void (*parent)(void),
+ void (*child)(void));
+# undef pthread_sigmask
+# define pthread_sigmask(how, set, oldset) \
+ uv__pthread_sigmask(how, set, oldset)
+int uv__pthread_sigmask(int how, const sigset_t* set, sigset_t* oset);
+#elif defined(__ANDROID__)
+int uv__pthread_sigmask(int how, const sigset_t* set, sigset_t* oset);
+# ifdef pthread_sigmask
+# undef pthread_sigmask
+# endif
+# define pthread_sigmask(how, set, oldset) uv__pthread_sigmask(how, set, oldset)
+#endif
+
+#define ACCESS_ONCE(type, var) \
+ (*(volatile type*) &(var))
+
+#define ROUND_UP(a, b) \
+ ((a) % (b) ? ((a) + (b)) - ((a) % (b)) : (a))
+
+#define UNREACHABLE() \
+ do { \
+ assert(0 && "unreachable code"); \
+ abort(); \
+ } \
+ while (0)
+
+#define SAVE_ERRNO(block) \
+ do { \
+ int _saved_errno = errno; \
+ do { block; } while (0); \
+ errno = _saved_errno; \
+ } \
+ while (0)
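As a quick illustration of the two helper macros above (a hedged sketch; demo is a made-up function, not part of libuv): ROUND_UP rounds a value up to the next multiple of b, and SAVE_ERRNO preserves errno across a block that may clobber it.

#include <errno.h>
#include <unistd.h>

/* ROUND_UP(10, 8) == 16, ROUND_UP(16, 8) == 16, ROUND_UP(0, 8) == 0. */

int demo(int fd) {
  errno = EBADF;           /* pretend an earlier call already failed */
  SAVE_ERRNO(close(fd));   /* close() may set errno itself... */
  return errno;            /* ...but EBADF is still visible here */
}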
+
+/* The __clang__ and __INTEL_COMPILER checks are superfluous because they
+ * define __GNUC__. They are here to convey to you, dear reader, that these
+ * macros are enabled when compiling with clang or icc.
+ */
+#if defined(__clang__) || \
+ defined(__GNUC__) || \
+ defined(__INTEL_COMPILER)
+# define UV_UNUSED(declaration) __attribute__((unused)) declaration
+#else
+# define UV_UNUSED(declaration) declaration
+#endif
+
+/* Leans on the fact that, on Linux, POLLRDHUP == EPOLLRDHUP. */
+#ifdef POLLRDHUP
+# define UV__POLLRDHUP POLLRDHUP
+#else
+# define UV__POLLRDHUP 0x2000
+#endif
+
+#ifdef POLLPRI
+# define UV__POLLPRI POLLPRI
+#else
+# define UV__POLLPRI 0
+#endif
+
+#if !defined(O_CLOEXEC) && defined(__FreeBSD__)
+/*
+ * It may be that we are just missing `__POSIX_VISIBLE >= 200809`.
+ * Try a fixed constant value and give up if it doesn't work.
+ */
+# define O_CLOEXEC 0x00100000
+#endif
+
+typedef struct uv__stream_queued_fds_s uv__stream_queued_fds_t;
+
+/* loop flags */
+enum {
+ UV_LOOP_BLOCK_SIGPROF = 1
+};
+
+/* flags of excluding ifaddr */
+enum {
+ UV__EXCLUDE_IFPHYS,
+ UV__EXCLUDE_IFADDR
+};
+
+typedef enum {
+ UV_CLOCK_PRECISE = 0, /* Use the highest resolution clock available. */
+ UV_CLOCK_FAST = 1 /* Use the fastest clock with <= 1ms granularity. */
+} uv_clocktype_t;
+
+struct uv__stream_queued_fds_s {
+ unsigned int size;
+ unsigned int offset;
+ int fds[1];
+};
+
+
+#if defined(_AIX) || \
+ defined(__APPLE__) || \
+ defined(__DragonFly__) || \
+ defined(__FreeBSD__) || \
+ defined(__FreeBSD_kernel__) || \
+ defined(__linux__) || \
+ defined(__OpenBSD__) || \
+ defined(__NetBSD__)
+#define uv__cloexec uv__cloexec_ioctl
+#define uv__nonblock uv__nonblock_ioctl
+#else
+#define uv__cloexec uv__cloexec_fcntl
+#define uv__nonblock uv__nonblock_fcntl
+#endif
+
+/* On Linux, uv__nonblock_fcntl() and uv__nonblock_ioctl() do not commute
+ * when O_NDELAY is not equal to O_NONBLOCK. Case in point: linux/sparc32
+ * and linux/sparc64, where O_NDELAY is O_NONBLOCK + another bit.
+ *
+ * Libuv uses uv__nonblock_fcntl() directly sometimes so ensure that it
+ * commutes with uv__nonblock().
+ */
+#if defined(__linux__) && O_NDELAY != O_NONBLOCK
+#undef uv__nonblock
+#define uv__nonblock uv__nonblock_fcntl
+#endif
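For comparison with the ioctl(FIONBIO)-style helpers selected above, a fcntl()-based nonblocking toggle looks roughly like this (a generic POSIX sketch; the real uv__nonblock_fcntl() lives in core.c and may differ in detail):

#include <fcntl.h>

static int set_nonblock_fcntl(int fd, int set) {
  int flags = fcntl(fd, F_GETFL);  /* read-modify-write: two syscalls */
  if (flags == -1)
    return -1;
  if (set)
    flags |= O_NONBLOCK;
  else
    flags &= ~O_NONBLOCK;
  return fcntl(fd, F_SETFL, flags) == -1 ? -1 : 0;
}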
+
+/* core */
+int uv__cloexec_ioctl(int fd, int set);
+int uv__cloexec_fcntl(int fd, int set);
+int uv__nonblock_ioctl(int fd, int set);
+int uv__nonblock_fcntl(int fd, int set);
+int uv__close(int fd); /* preserves errno */
+int uv__close_nocheckstdio(int fd);
+int uv__close_nocancel(int fd);
+int uv__socket(int domain, int type, int protocol);
+ssize_t uv__recvmsg(int fd, struct msghdr *msg, int flags);
+void uv__make_close_pending(uv_handle_t* handle);
+int uv__getiovmax(void);
+
+void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd);
+void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events);
+void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events);
+void uv__io_close(uv_loop_t* loop, uv__io_t* w);
+void uv__io_feed(uv_loop_t* loop, uv__io_t* w);
+int uv__io_active(const uv__io_t* w, unsigned int events);
+int uv__io_check_fd(uv_loop_t* loop, int fd);
+void uv__io_poll(uv_loop_t* loop, int timeout); /* in milliseconds or -1 */
+int uv__io_fork(uv_loop_t* loop);
+int uv__fd_exists(uv_loop_t* loop, int fd);
+
+/* async */
+void uv__async_stop(uv_loop_t* loop);
+int uv__async_fork(uv_loop_t* loop);
+
+
+/* loop */
+void uv__run_idle(uv_loop_t* loop);
+void uv__run_check(uv_loop_t* loop);
+void uv__run_prepare(uv_loop_t* loop);
+
+/* stream */
+void uv__stream_init(uv_loop_t* loop, uv_stream_t* stream,
+ uv_handle_type type);
+int uv__stream_open(uv_stream_t*, int fd, int flags);
+void uv__stream_destroy(uv_stream_t* stream);
+#if defined(__APPLE__)
+int uv__stream_try_select(uv_stream_t* stream, int* fd);
+#endif /* defined(__APPLE__) */
+void uv__server_io(uv_loop_t* loop, uv__io_t* w, unsigned int events);
+int uv__accept(int sockfd);
+int uv__dup2_cloexec(int oldfd, int newfd);
+int uv__open_cloexec(const char* path, int flags);
+
+/* tcp */
+int uv_tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb);
+int uv__tcp_nodelay(int fd, int on);
+int uv__tcp_keepalive(int fd, int on, unsigned int delay);
+
+/* pipe */
+int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb);
+
+/* signal */
+void uv__signal_close(uv_signal_t* handle);
+void uv__signal_global_once_init(void);
+void uv__signal_loop_cleanup(uv_loop_t* loop);
+int uv__signal_loop_fork(uv_loop_t* loop);
+
+/* platform specific */
+uint64_t uv__hrtime(uv_clocktype_t type);
+int uv__kqueue_init(uv_loop_t* loop);
+int uv__platform_loop_init(uv_loop_t* loop);
+void uv__platform_loop_delete(uv_loop_t* loop);
+void uv__platform_invalidate_fd(uv_loop_t* loop, int fd);
+
+/* various */
+void uv__async_close(uv_async_t* handle);
+void uv__check_close(uv_check_t* handle);
+void uv__fs_event_close(uv_fs_event_t* handle);
+void uv__idle_close(uv_idle_t* handle);
+void uv__pipe_close(uv_pipe_t* handle);
+void uv__poll_close(uv_poll_t* handle);
+void uv__prepare_close(uv_prepare_t* handle);
+void uv__process_close(uv_process_t* handle);
+void uv__stream_close(uv_stream_t* handle);
+void uv__tcp_close(uv_tcp_t* handle);
+void uv__udp_close(uv_udp_t* handle);
+void uv__udp_finish_close(uv_udp_t* handle);
+uv_handle_type uv__handle_type(int fd);
+FILE* uv__open_file(const char* path);
+int uv__getpwuid_r(uv_passwd_t* pwd);
+int uv__search_path(const char* prog, char* buf, size_t* buflen);
+
+/* random */
+int uv__random_devurandom(void* buf, size_t buflen);
+int uv__random_getrandom(void* buf, size_t buflen);
+int uv__random_getentropy(void* buf, size_t buflen);
+int uv__random_readpath(const char* path, void* buf, size_t buflen);
+int uv__random_sysctl(void* buf, size_t buflen);
+
+#if defined(__APPLE__) && !defined(CMAKE_BOOTSTRAP)
+int uv___stream_fd(const uv_stream_t* handle);
+#define uv__stream_fd(handle) (uv___stream_fd((const uv_stream_t*) (handle)))
+#else
+#define uv__stream_fd(handle) ((handle)->io_watcher.fd)
+#endif /* defined(__APPLE__) */
+
+#ifdef O_NONBLOCK
+# define UV__F_NONBLOCK O_NONBLOCK
+#else
+# define UV__F_NONBLOCK 1
+#endif
+
+int uv__make_pipe(int fds[2], int flags);
+
+#if defined(__APPLE__)
+
+int uv__fsevents_init(uv_fs_event_t* handle);
+int uv__fsevents_close(uv_fs_event_t* handle);
+void uv__fsevents_loop_delete(uv_loop_t* loop);
+
+#endif /* defined(__APPLE__) */
+
+UV_UNUSED(static void uv__update_time(uv_loop_t* loop)) {
+ /* Use a fast time source if available. We only need millisecond precision.
+ */
+ loop->time = uv__hrtime(UV_CLOCK_FAST) / 1000000;
+}
+
+UV_UNUSED(static char* uv__basename_r(const char* path)) {
+ char* s;
+
+ s = strrchr(path, '/');
+ if (s == NULL)
+ return (char*) path;
+
+ return s + 1;
+}
+
+#if defined(__linux__)
+int uv__inotify_fork(uv_loop_t* loop, void* old_watchers);
+#endif
+
+typedef int (*uv__peersockfunc)(int, struct sockaddr*, socklen_t*);
+
+int uv__getsockpeername(const uv_handle_t* handle,
+ uv__peersockfunc func,
+ struct sockaddr* name,
+ int* namelen);
+
+#if defined(__linux__) || \
+ defined(__FreeBSD__) || \
+ defined(__FreeBSD_kernel__)
+#define HAVE_MMSG 1
+struct uv__mmsghdr {
+ struct msghdr msg_hdr;
+ unsigned int msg_len;
+};
+
+int uv__recvmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen);
+int uv__sendmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen);
+#else
+#define HAVE_MMSG 0
+#endif
+
+
+#endif /* UV_UNIX_INTERNAL_H_ */
diff --git a/Utilities/cmlibuv/src/unix/kqueue.c b/Utilities/cmlibuv/src/unix/kqueue.c
new file mode 100644
index 0000000..bf183d5
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/kqueue.c
@@ -0,0 +1,585 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include <sys/sysctl.h>
+#include <sys/types.h>
+#include <sys/event.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <time.h>
+
+/*
+ * Required on
+ * - Until at least FreeBSD 11.0
+ * - Older versions of Mac OS X
+ *
+ * http://www.boost.org/doc/libs/1_61_0/boost/asio/detail/kqueue_reactor.hpp
+ */
+#ifndef EV_OOBAND
+#define EV_OOBAND EV_FLAG1
+#endif
+
+static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags);
+
+
+int uv__kqueue_init(uv_loop_t* loop) {
+ loop->backend_fd = kqueue();
+ if (loop->backend_fd == -1)
+ return UV__ERR(errno);
+
+ uv__cloexec(loop->backend_fd, 1);
+
+ return 0;
+}
+
+
+#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
+static int uv__has_forked_with_cfrunloop;
+#endif
+
+int uv__io_fork(uv_loop_t* loop) {
+ int err;
+ loop->backend_fd = -1;
+ err = uv__kqueue_init(loop);
+ if (err)
+ return err;
+
+#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
+ if (loop->cf_state != NULL) {
+ /* We cannot start another CFRunloop and/or thread in the child
+ process; CF aborts if you try or if you try to touch the thread
+ at all to kill it. So the best we can do is ignore it from now
+ on. This means we can't watch directories in the same way
+ anymore (like other BSDs). It also means we cannot properly
+ clean up the allocated resources; calling
+ uv__fsevents_loop_delete from uv_loop_close will crash the
+ process. So we sidestep the issue by pretending like we never
+ started it in the first place.
+ */
+ uv__store_relaxed(&uv__has_forked_with_cfrunloop, 1);
+ uv__free(loop->cf_state);
+ loop->cf_state = NULL;
+ }
+#endif /* #if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070 */
+ return err;
+}
+
+
+int uv__io_check_fd(uv_loop_t* loop, int fd) {
+ struct kevent ev;
+ int rc;
+
+ rc = 0;
+ EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
+ if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
+ rc = UV__ERR(errno);
+
+ EV_SET(&ev, fd, EVFILT_READ, EV_DELETE, 0, 0, 0);
+ if (rc == 0)
+ if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
+ abort();
+
+ return rc;
+}
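The add-then-delete probe above is a standard way to ask kqueue whether an fd is pollable at all. A self-contained sketch of the same technique against the read end of a pipe (illustrative only; assumes a kqueue platform such as macOS or FreeBSD):

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <stdio.h>
#include <unistd.h>

int main(void) {
  int fds[2];
  int kq;
  struct kevent ev;

  if (pipe(fds) != 0 || (kq = kqueue()) == -1)
    return 1;

  EV_SET(&ev, fds[0], EVFILT_READ, EV_ADD, 0, 0, 0);
  if (kevent(kq, &ev, 1, NULL, 0, NULL) == 0)
    printf("fd %d is pollable with kqueue\n", fds[0]);

  EV_SET(&ev, fds[0], EVFILT_READ, EV_DELETE, 0, 0, 0);
  kevent(kq, &ev, 1, NULL, 0, NULL);  /* undo the probe */

  close(fds[0]);
  close(fds[1]);
  close(kq);
  return 0;
}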
+
+
+void uv__io_poll(uv_loop_t* loop, int timeout) {
+ struct kevent events[1024];
+ struct kevent* ev;
+ struct timespec spec;
+ unsigned int nevents;
+ unsigned int revents;
+ QUEUE* q;
+ uv__io_t* w;
+ sigset_t* pset;
+ sigset_t set;
+ uint64_t base;
+ uint64_t diff;
+ int have_signals;
+ int filter;
+ int fflags;
+ int count;
+ int nfds;
+ int fd;
+ int op;
+ int i;
+ int user_timeout;
+ int reset_timeout;
+
+ if (loop->nfds == 0) {
+ assert(QUEUE_EMPTY(&loop->watcher_queue));
+ return;
+ }
+
+ nevents = 0;
+
+ while (!QUEUE_EMPTY(&loop->watcher_queue)) {
+ q = QUEUE_HEAD(&loop->watcher_queue);
+ QUEUE_REMOVE(q);
+ QUEUE_INIT(q);
+
+ w = QUEUE_DATA(q, uv__io_t, watcher_queue);
+ assert(w->pevents != 0);
+ assert(w->fd >= 0);
+ assert(w->fd < (int) loop->nwatchers);
+
+ if ((w->events & POLLIN) == 0 && (w->pevents & POLLIN) != 0) {
+ filter = EVFILT_READ;
+ fflags = 0;
+ op = EV_ADD;
+
+ if (w->cb == uv__fs_event) {
+ filter = EVFILT_VNODE;
+ fflags = NOTE_ATTRIB | NOTE_WRITE | NOTE_RENAME
+ | NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;
+ op = EV_ADD | EV_ONESHOT; /* Stop the event from firing repeatedly. */
+ }
+
+ EV_SET(events + nevents, w->fd, filter, op, fflags, 0, 0);
+
+ if (++nevents == ARRAY_SIZE(events)) {
+ if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
+ abort();
+ nevents = 0;
+ }
+ }
+
+ if ((w->events & POLLOUT) == 0 && (w->pevents & POLLOUT) != 0) {
+ EV_SET(events + nevents, w->fd, EVFILT_WRITE, EV_ADD, 0, 0, 0);
+
+ if (++nevents == ARRAY_SIZE(events)) {
+ if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
+ abort();
+ nevents = 0;
+ }
+ }
+
+ if ((w->events & UV__POLLPRI) == 0 && (w->pevents & UV__POLLPRI) != 0) {
+ EV_SET(events + nevents, w->fd, EV_OOBAND, EV_ADD, 0, 0, 0);
+
+ if (++nevents == ARRAY_SIZE(events)) {
+ if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
+ abort();
+ nevents = 0;
+ }
+ }
+
+ w->events = w->pevents;
+ }
+
+ pset = NULL;
+ if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
+ pset = &set;
+ sigemptyset(pset);
+ sigaddset(pset, SIGPROF);
+ }
+
+ assert(timeout >= -1);
+ base = loop->time;
+ count = 48; /* Benchmarks suggest this gives the best throughput. */
+
+ if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
+ reset_timeout = 1;
+ user_timeout = timeout;
+ timeout = 0;
+ } else {
+ reset_timeout = 0;
+ }
+
+ for (;; nevents = 0) {
+ /* Only need to set the provider_entry_time if timeout != 0. The function
+ * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
+ */
+ if (timeout != 0)
+ uv__metrics_set_provider_entry_time(loop);
+
+ if (timeout != -1) {
+ spec.tv_sec = timeout / 1000;
+ spec.tv_nsec = (timeout % 1000) * 1000000;
+ }
+
+ if (pset != NULL)
+ pthread_sigmask(SIG_BLOCK, pset, NULL);
+
+ nfds = kevent(loop->backend_fd,
+ events,
+ nevents,
+ events,
+ ARRAY_SIZE(events),
+ timeout == -1 ? NULL : &spec);
+
+ if (pset != NULL)
+ pthread_sigmask(SIG_UNBLOCK, pset, NULL);
+
+ /* Update loop->time unconditionally. It's tempting to skip the update when
+ * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
+ * operating system didn't reschedule our process while in the syscall.
+ */
+ SAVE_ERRNO(uv__update_time(loop));
+
+ if (nfds == 0) {
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ if (timeout == -1)
+ continue;
+ if (timeout > 0)
+ goto update_timeout;
+ }
+
+ assert(timeout != -1);
+ return;
+ }
+
+ if (nfds == -1) {
+ if (errno != EINTR)
+ abort();
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (timeout == 0)
+ return;
+
+ if (timeout == -1)
+ continue;
+
+ /* Interrupted by a signal. Update timeout and poll again. */
+ goto update_timeout;
+ }
+
+ have_signals = 0;
+ nevents = 0;
+
+ assert(loop->watchers != NULL);
+ loop->watchers[loop->nwatchers] = (void*) events;
+ loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
+ for (i = 0; i < nfds; i++) {
+ ev = events + i;
+ fd = ev->ident;
+ /* Skip invalidated events, see uv__platform_invalidate_fd */
+ if (fd == -1)
+ continue;
+ w = loop->watchers[fd];
+
+ if (w == NULL) {
+ /* File descriptor that we've stopped watching, disarm it.
+ * TODO: batch up. */
+ struct kevent events[1];
+
+ EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
+ if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
+ if (errno != EBADF && errno != ENOENT)
+ abort();
+
+ continue;
+ }
+
+ if (ev->filter == EVFILT_VNODE) {
+ assert(w->events == POLLIN);
+ assert(w->pevents == POLLIN);
+ uv__metrics_update_idle_time(loop);
+ w->cb(loop, w, ev->fflags); /* XXX always uv__fs_event() */
+ nevents++;
+ continue;
+ }
+
+ revents = 0;
+
+ if (ev->filter == EVFILT_READ) {
+ if (w->pevents & POLLIN) {
+ revents |= POLLIN;
+ w->rcount = ev->data;
+ } else {
+ /* TODO batch up */
+ struct kevent events[1];
+ EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
+ if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
+ if (errno != ENOENT)
+ abort();
+ }
+ }
+
+ if (ev->filter == EV_OOBAND) {
+ if (w->pevents & UV__POLLPRI) {
+ revents |= UV__POLLPRI;
+ w->rcount = ev->data;
+ } else {
+ /* TODO batch up */
+ struct kevent events[1];
+ EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
+ if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
+ if (errno != ENOENT)
+ abort();
+ }
+ }
+
+ if (ev->filter == EVFILT_WRITE) {
+ if (w->pevents & POLLOUT) {
+ revents |= POLLOUT;
+ w->wcount = ev->data;
+ } else {
+ /* TODO batch up */
+ struct kevent events[1];
+ EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
+ if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
+ if (errno != ENOENT)
+ abort();
+ }
+ }
+
+ if (ev->flags & EV_ERROR)
+ revents |= POLLERR;
+
+ if ((ev->flags & EV_EOF) && (w->pevents & UV__POLLRDHUP))
+ revents |= UV__POLLRDHUP;
+
+ if (revents == 0)
+ continue;
+
+ /* Run signal watchers last. This also affects child process watchers
+ * because those are implemented in terms of signal watchers.
+ */
+ if (w == &loop->signal_io_watcher) {
+ have_signals = 1;
+ } else {
+ uv__metrics_update_idle_time(loop);
+ w->cb(loop, w, revents);
+ }
+
+ nevents++;
+ }
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (have_signals != 0) {
+ uv__metrics_update_idle_time(loop);
+ loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
+ }
+
+ loop->watchers[loop->nwatchers] = NULL;
+ loop->watchers[loop->nwatchers + 1] = NULL;
+
+ if (have_signals != 0)
+ return; /* Event loop should cycle now so don't poll again. */
+
+ if (nevents != 0) {
+ if (nfds == ARRAY_SIZE(events) && --count != 0) {
+ /* Poll for more events but don't block this time. */
+ timeout = 0;
+ continue;
+ }
+ return;
+ }
+
+ if (timeout == 0)
+ return;
+
+ if (timeout == -1)
+ continue;
+
+update_timeout:
+ assert(timeout > 0);
+
+ diff = loop->time - base;
+ if (diff >= (uint64_t) timeout)
+ return;
+
+ timeout -= diff;
+ }
+}
+
+
+void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
+ struct kevent* events;
+ uintptr_t i;
+ uintptr_t nfds;
+
+ assert(loop->watchers != NULL);
+ assert(fd >= 0);
+
+ events = (struct kevent*) loop->watchers[loop->nwatchers];
+ nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
+ if (events == NULL)
+ return;
+
+ /* Invalidate events with same file descriptor */
+ for (i = 0; i < nfds; i++)
+ if ((int) events[i].ident == fd)
+ events[i].ident = -1;
+}
+
+
+static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags) {
+ uv_fs_event_t* handle;
+ struct kevent ev;
+ int events;
+ const char* path;
+#if defined(F_GETPATH)
+ /* MAXPATHLEN == PATH_MAX but the former is what XNU calls it internally. */
+ char pathbuf[MAXPATHLEN];
+#endif
+
+ handle = container_of(w, uv_fs_event_t, event_watcher);
+
+ if (fflags & (NOTE_ATTRIB | NOTE_EXTEND))
+ events = UV_CHANGE;
+ else
+ events = UV_RENAME;
+
+ path = NULL;
+#if defined(F_GETPATH)
+ /* Also works when the file has been unlinked from the file system. Passing
+ * in the path when the file has been deleted is arguably a little strange
+ * but it's consistent with what the inotify backend does.
+ */
+ if (fcntl(handle->event_watcher.fd, F_GETPATH, pathbuf) == 0)
+ path = uv__basename_r(pathbuf);
+#endif
+ handle->cb(handle, path, events, 0);
+
+ if (handle->event_watcher.fd == -1)
+ return;
+
+ /* Watcher operates in one-shot mode, re-arm it. */
+ fflags = NOTE_ATTRIB | NOTE_WRITE | NOTE_RENAME
+ | NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;
+
+ EV_SET(&ev, w->fd, EVFILT_VNODE, EV_ADD | EV_ONESHOT, fflags, 0, 0);
+
+ if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
+ abort();
+}
+
+
+int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
+ uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
+ return 0;
+}
+
+
+int uv_fs_event_start(uv_fs_event_t* handle,
+ uv_fs_event_cb cb,
+ const char* path,
+ unsigned int flags) {
+ int fd;
+#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
+ struct stat statbuf;
+#endif
+
+ if (uv__is_active(handle))
+ return UV_EINVAL;
+
+ handle->cb = cb;
+ handle->path = uv__strdup(path);
+ if (handle->path == NULL)
+ return UV_ENOMEM;
+
+ /* TODO open asynchronously - but how do we report back errors? */
+ fd = open(handle->path, O_RDONLY);
+ if (fd == -1) {
+ uv__free(handle->path);
+ handle->path = NULL;
+ return UV__ERR(errno);
+ }
+
+#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
+ /* Nullify fields so we can check them later */
+ handle->cf_cb = NULL;
+ handle->realpath = NULL;
+ handle->realpath_len = 0;
+ handle->cf_flags = flags;
+
+ if (fstat(fd, &statbuf))
+ goto fallback;
+ /* FSEvents works only with directories */
+ if (!(statbuf.st_mode & S_IFDIR))
+ goto fallback;
+
+ if (0 == uv__load_relaxed(&uv__has_forked_with_cfrunloop)) {
+ int r;
+ /* The fallback fd is no longer needed */
+ uv__close_nocheckstdio(fd);
+ handle->event_watcher.fd = -1;
+ r = uv__fsevents_init(handle);
+ if (r == 0) {
+ uv__handle_start(handle);
+ } else {
+ uv__free(handle->path);
+ handle->path = NULL;
+ }
+ return r;
+ }
+fallback:
+#endif /* #if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070 */
+
+ uv__handle_start(handle);
+ uv__io_init(&handle->event_watcher, uv__fs_event, fd);
+ uv__io_start(handle->loop, &handle->event_watcher, POLLIN);
+
+ return 0;
+}
+
+
+int uv_fs_event_stop(uv_fs_event_t* handle) {
+ int r;
+ r = 0;
+
+ if (!uv__is_active(handle))
+ return 0;
+
+ uv__handle_stop(handle);
+
+#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
+ if (0 == uv__load_relaxed(&uv__has_forked_with_cfrunloop))
+ if (handle->cf_cb != NULL)
+ r = uv__fsevents_close(handle);
+#endif
+
+ if (handle->event_watcher.fd != -1) {
+ uv__io_close(handle->loop, &handle->event_watcher);
+ uv__close(handle->event_watcher.fd);
+ handle->event_watcher.fd = -1;
+ }
+
+ uv__free(handle->path);
+ handle->path = NULL;
+
+ return r;
+}
+
+
+void uv__fs_event_close(uv_fs_event_t* handle) {
+ uv_fs_event_stop(handle);
+}
diff --git a/Utilities/cmlibuv/src/unix/linux-core.c b/Utilities/cmlibuv/src/unix/linux-core.c
new file mode 100644
index 0000000..4db2f05
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/linux-core.c
@@ -0,0 +1,1146 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/* We lean on the fact that POLL{IN,OUT,ERR,HUP} correspond with their
+ * EPOLL* counterparts. We use the POLL* variants in this file because that
+ * is what libuv uses elsewhere.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+
+#include <net/if.h>
+#include <sys/epoll.h>
+#include <sys/param.h>
+#include <sys/prctl.h>
+#include <sys/sysinfo.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <time.h>
+
+#define HAVE_IFADDRS_H 1
+
+#ifdef __UCLIBC__
+# if __UCLIBC_MAJOR__ < 0 && __UCLIBC_MINOR__ < 9 && __UCLIBC_SUBLEVEL__ < 32
+# undef HAVE_IFADDRS_H
+# endif
+#endif
+
+#ifdef HAVE_IFADDRS_H
+# if defined(__ANDROID__)
+# include "uv/android-ifaddrs.h"
+# else
+# include <ifaddrs.h>
+# endif
+# include <sys/socket.h>
+# include <net/ethernet.h>
+# include <netpacket/packet.h>
+#endif /* HAVE_IFADDRS_H */
+
+/* Available from 2.6.32 onwards. */
+#ifndef CLOCK_MONOTONIC_COARSE
+# define CLOCK_MONOTONIC_COARSE 6
+#endif
+
+/* This is rather annoying: CLOCK_BOOTTIME lives in <linux/time.h> but we can't
+ * include that file because it conflicts with <time.h>. We'll just have to
+ * define it ourselves.
+ */
+#ifndef CLOCK_BOOTTIME
+# define CLOCK_BOOTTIME 7
+#endif
+
+static int read_models(unsigned int numcpus, uv_cpu_info_t* ci);
+static int read_times(FILE* statfile_fp,
+ unsigned int numcpus,
+ uv_cpu_info_t* ci);
+static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci);
+static uint64_t read_cpufreq(unsigned int cpunum);
+
+
+int uv__platform_loop_init(uv_loop_t* loop) {
+ int fd;
+ fd = epoll_create1(O_CLOEXEC);
+
+ /* epoll_create1() can fail either because it's not implemented (old kernel)
+ * or because it doesn't understand the O_CLOEXEC flag.
+ */
+ if (fd == -1 && (errno == ENOSYS || errno == EINVAL)) {
+ fd = epoll_create(256);
+
+ if (fd != -1)
+ uv__cloexec(fd, 1);
+ }
+
+ loop->backend_fd = fd;
+ loop->inotify_fd = -1;
+ loop->inotify_watchers = NULL;
+
+ if (fd == -1)
+ return UV__ERR(errno);
+
+ return 0;
+}
+
+
+int uv__io_fork(uv_loop_t* loop) {
+ int err;
+ void* old_watchers;
+
+ old_watchers = loop->inotify_watchers;
+
+ uv__close(loop->backend_fd);
+ loop->backend_fd = -1;
+ uv__platform_loop_delete(loop);
+
+ err = uv__platform_loop_init(loop);
+ if (err)
+ return err;
+
+ return uv__inotify_fork(loop, old_watchers);
+}
+
+
+void uv__platform_loop_delete(uv_loop_t* loop) {
+ if (loop->inotify_fd == -1) return;
+ uv__io_stop(loop, &loop->inotify_read_watcher, POLLIN);
+ uv__close(loop->inotify_fd);
+ loop->inotify_fd = -1;
+}
+
+
+void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
+ struct epoll_event* events;
+ struct epoll_event dummy;
+ uintptr_t i;
+ uintptr_t nfds;
+
+ assert(loop->watchers != NULL);
+ assert(fd >= 0);
+
+ events = (struct epoll_event*) loop->watchers[loop->nwatchers];
+ nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
+ if (events != NULL)
+ /* Invalidate events with same file descriptor */
+ for (i = 0; i < nfds; i++)
+ if (events[i].data.fd == fd)
+ events[i].data.fd = -1;
+
+ /* Remove the file descriptor from the epoll.
+ * This avoids a problem where the same file description remains open
+ * in another process, causing repeated junk epoll events.
+ *
+ * We pass in a dummy epoll_event, to work around a bug in old kernels.
+ */
+ if (loop->backend_fd >= 0) {
+ /* Work around a bug in kernels 3.10 to 3.19 where passing a struct that
+ * has the EPOLLWAKEUP flag set generates spurious audit syslog warnings.
+ */
+ memset(&dummy, 0, sizeof(dummy));
+ epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &dummy);
+ }
+}
+
+
+int uv__io_check_fd(uv_loop_t* loop, int fd) {
+ struct epoll_event e;
+ int rc;
+
+ memset(&e, 0, sizeof(e));
+ e.events = POLLIN;
+ e.data.fd = -1;
+
+ rc = 0;
+ if (epoll_ctl(loop->backend_fd, EPOLL_CTL_ADD, fd, &e))
+ if (errno != EEXIST)
+ rc = UV__ERR(errno);
+
+ if (rc == 0)
+ if (epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &e))
+ abort();
+
+ return rc;
+}
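The same probe idea appears here with epoll: try to add the fd, then immediately delete it again. A minimal, self-contained sketch of that pattern on a pipe (Linux only, illustrative):

#include <sys/epoll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void) {
  int fds[2];
  int epfd;
  struct epoll_event e;

  if (pipe(fds) != 0 || (epfd = epoll_create1(0)) == -1)
    return 1;

  memset(&e, 0, sizeof(e));
  e.events = EPOLLIN;
  e.data.fd = fds[0];
  if (epoll_ctl(epfd, EPOLL_CTL_ADD, fds[0], &e) == 0)
    printf("fd %d is pollable with epoll\n", fds[0]);

  epoll_ctl(epfd, EPOLL_CTL_DEL, fds[0], &e);  /* undo the probe */

  close(fds[0]);
  close(fds[1]);
  close(epfd);
  return 0;
}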
+
+
+void uv__io_poll(uv_loop_t* loop, int timeout) {
+ /* A bug in kernels < 2.6.37 makes timeouts larger than ~30 minutes
+ * effectively infinite on 32 bits architectures. To avoid blocking
+ * indefinitely, we cap the timeout and poll again if necessary.
+ *
+ * Note that "30 minutes" is a simplification because it depends on
+ * the value of CONFIG_HZ. The magic constant assumes CONFIG_HZ=1200,
+ * that being the largest value I have seen in the wild (and only once.)
+ */
+ static const int max_safe_timeout = 1789569;
+ static int no_epoll_pwait_cached;
+ static int no_epoll_wait_cached;
+ int no_epoll_pwait;
+ int no_epoll_wait;
+ struct epoll_event events[1024];
+ struct epoll_event* pe;
+ struct epoll_event e;
+ int real_timeout;
+ QUEUE* q;
+ uv__io_t* w;
+ sigset_t sigset;
+ uint64_t sigmask;
+ uint64_t base;
+ int have_signals;
+ int nevents;
+ int count;
+ int nfds;
+ int fd;
+ int op;
+ int i;
+ int user_timeout;
+ int reset_timeout;
+
+ if (loop->nfds == 0) {
+ assert(QUEUE_EMPTY(&loop->watcher_queue));
+ return;
+ }
+
+ memset(&e, 0, sizeof(e));
+
+ while (!QUEUE_EMPTY(&loop->watcher_queue)) {
+ q = QUEUE_HEAD(&loop->watcher_queue);
+ QUEUE_REMOVE(q);
+ QUEUE_INIT(q);
+
+ w = QUEUE_DATA(q, uv__io_t, watcher_queue);
+ assert(w->pevents != 0);
+ assert(w->fd >= 0);
+ assert(w->fd < (int) loop->nwatchers);
+
+ e.events = w->pevents;
+ e.data.fd = w->fd;
+
+ if (w->events == 0)
+ op = EPOLL_CTL_ADD;
+ else
+ op = EPOLL_CTL_MOD;
+
+ /* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching
+ * events, skip the syscall and squelch the events after epoll_wait().
+ */
+ if (epoll_ctl(loop->backend_fd, op, w->fd, &e)) {
+ if (errno != EEXIST)
+ abort();
+
+ assert(op == EPOLL_CTL_ADD);
+
+ /* We've reactivated a file descriptor that's been watched before. */
+ if (epoll_ctl(loop->backend_fd, EPOLL_CTL_MOD, w->fd, &e))
+ abort();
+ }
+
+ w->events = w->pevents;
+ }
+
+ sigmask = 0;
+ if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
+ sigemptyset(&sigset);
+ sigaddset(&sigset, SIGPROF);
+ sigmask |= 1 << (SIGPROF - 1);
+ }
+
+ assert(timeout >= -1);
+ base = loop->time;
+ count = 48; /* Benchmarks suggest this gives the best throughput. */
+ real_timeout = timeout;
+
+ if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
+ reset_timeout = 1;
+ user_timeout = timeout;
+ timeout = 0;
+ } else {
+ reset_timeout = 0;
+ user_timeout = 0;
+ }
+
+ /* You could argue there is a dependency between these two but
+ * ultimately we don't care about their ordering with respect
+ * to one another. Worst case, we make a few system calls that
+ * could have been avoided because another thread already knows
+ * they fail with ENOSYS. Hardly the end of the world.
+ */
+ no_epoll_pwait = uv__load_relaxed(&no_epoll_pwait_cached);
+ no_epoll_wait = uv__load_relaxed(&no_epoll_wait_cached);
+
+ for (;;) {
+ /* Only need to set the provider_entry_time if timeout != 0. The function
+ * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
+ */
+ if (timeout != 0)
+ uv__metrics_set_provider_entry_time(loop);
+
+ /* See the comment for max_safe_timeout for an explanation of why
+ * this is necessary. Executive summary: kernel bug workaround.
+ */
+ if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout)
+ timeout = max_safe_timeout;
+
+ if (sigmask != 0 && no_epoll_pwait != 0)
+ if (pthread_sigmask(SIG_BLOCK, &sigset, NULL))
+ abort();
+
+ if (no_epoll_wait != 0 || (sigmask != 0 && no_epoll_pwait == 0)) {
+ nfds = epoll_pwait(loop->backend_fd,
+ events,
+ ARRAY_SIZE(events),
+ timeout,
+ &sigset);
+ if (nfds == -1 && errno == ENOSYS) {
+ uv__store_relaxed(&no_epoll_pwait_cached, 1);
+ no_epoll_pwait = 1;
+ }
+ } else {
+ nfds = epoll_wait(loop->backend_fd,
+ events,
+ ARRAY_SIZE(events),
+ timeout);
+ if (nfds == -1 && errno == ENOSYS) {
+ uv__store_relaxed(&no_epoll_wait_cached, 1);
+ no_epoll_wait = 1;
+ }
+ }
+
+ if (sigmask != 0 && no_epoll_pwait != 0)
+ if (pthread_sigmask(SIG_UNBLOCK, &sigset, NULL))
+ abort();
+
+ /* Update loop->time unconditionally. It's tempting to skip the update when
+ * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
+ * operating system didn't reschedule our process while in the syscall.
+ */
+ SAVE_ERRNO(uv__update_time(loop));
+
+ if (nfds == 0) {
+ assert(timeout != -1);
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (timeout == -1)
+ continue;
+
+ if (timeout == 0)
+ return;
+
+ /* We may have been inside the system call for longer than |timeout|
+ * milliseconds so we need to update the timestamp to avoid drift.
+ */
+ goto update_timeout;
+ }
+
+ if (nfds == -1) {
+ if (errno == ENOSYS) {
+ /* epoll_wait() or epoll_pwait() failed, try the other system call. */
+ assert(no_epoll_wait == 0 || no_epoll_pwait == 0);
+ continue;
+ }
+
+ if (errno != EINTR)
+ abort();
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (timeout == -1)
+ continue;
+
+ if (timeout == 0)
+ return;
+
+ /* Interrupted by a signal. Update timeout and poll again. */
+ goto update_timeout;
+ }
+
+ have_signals = 0;
+ nevents = 0;
+
+ {
+ /* Squelch a -Waddress-of-packed-member warning with gcc >= 9. */
+ union {
+ struct epoll_event* events;
+ uv__io_t* watchers;
+ } x;
+
+ x.events = events;
+ assert(loop->watchers != NULL);
+ loop->watchers[loop->nwatchers] = x.watchers;
+ loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
+ }
+
+ for (i = 0; i < nfds; i++) {
+ pe = events + i;
+ fd = pe->data.fd;
+
+ /* Skip invalidated events, see uv__platform_invalidate_fd */
+ if (fd == -1)
+ continue;
+
+ assert(fd >= 0);
+ assert((unsigned) fd < loop->nwatchers);
+
+ w = loop->watchers[fd];
+
+ if (w == NULL) {
+ /* File descriptor that we've stopped watching, disarm it.
+ *
+ * Ignore all errors because we may be racing with another thread
+ * when the file descriptor is closed.
+ */
+ epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, pe);
+ continue;
+ }
+
+ /* Give users only events they're interested in. Prevents spurious
+ * callbacks when previous callback invocation in this loop has stopped
+ * the current watcher. Also filters out events that the user has not
+ * requested us to watch.
+ */
+ pe->events &= w->pevents | POLLERR | POLLHUP;
+
+ /* Work around an epoll quirk where it sometimes reports just the
+ * EPOLLERR or EPOLLHUP event. In order to force the event loop to
+ * move forward, we merge in the read/write events that the watcher
+ * is interested in; uv__read() and uv__write() will then deal with
+ * the error or hangup in the usual fashion.
+ *
+ * Note to self: happens when epoll reports EPOLLIN|EPOLLHUP, the user
+ * reads the available data, calls uv_read_stop(), then sometime later
+ * calls uv_read_start() again. By then, libuv has forgotten about the
+ * hangup and the kernel won't report EPOLLIN again because there's
+ * nothing left to read. If anything, libuv is to blame here. The
+ * current hack is just a quick bandaid; to properly fix it, libuv
+ * needs to remember the error/hangup event. We should get that for
+ * free when we switch over to edge-triggered I/O.
+ */
+ if (pe->events == POLLERR || pe->events == POLLHUP)
+ pe->events |=
+ w->pevents & (POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
+
+ if (pe->events != 0) {
+ /* Run signal watchers last. This also affects child process watchers
+ * because those are implemented in terms of signal watchers.
+ */
+ if (w == &loop->signal_io_watcher) {
+ have_signals = 1;
+ } else {
+ uv__metrics_update_idle_time(loop);
+ w->cb(loop, w, pe->events);
+ }
+
+ nevents++;
+ }
+ }
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (have_signals != 0) {
+ uv__metrics_update_idle_time(loop);
+ loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
+ }
+
+ loop->watchers[loop->nwatchers] = NULL;
+ loop->watchers[loop->nwatchers + 1] = NULL;
+
+ if (have_signals != 0)
+ return; /* Event loop should cycle now so don't poll again. */
+
+ if (nevents != 0) {
+ if (nfds == ARRAY_SIZE(events) && --count != 0) {
+ /* Poll for more events but don't block this time. */
+ timeout = 0;
+ continue;
+ }
+ return;
+ }
+
+ if (timeout == 0)
+ return;
+
+ if (timeout == -1)
+ continue;
+
+update_timeout:
+ assert(timeout > 0);
+
+ real_timeout -= (loop->time - base);
+ if (real_timeout <= 0)
+ return;
+
+ timeout = real_timeout;
+ }
+}
+
+
+uint64_t uv__hrtime(uv_clocktype_t type) {
+ static clock_t fast_clock_id = -1;
+ struct timespec t;
+ clock_t clock_id;
+
+ /* Prefer CLOCK_MONOTONIC_COARSE if available but only when it has
+ * millisecond granularity or better. CLOCK_MONOTONIC_COARSE is
+ * serviced entirely from the vDSO, whereas CLOCK_MONOTONIC may
+ * decide to make a costly system call.
+ */
+ /* TODO(bnoordhuis) Use CLOCK_MONOTONIC_COARSE for UV_CLOCK_PRECISE
+ * when it has microsecond granularity or better (unlikely).
+ */
+ clock_id = CLOCK_MONOTONIC;
+ if (type != UV_CLOCK_FAST)
+ goto done;
+
+ clock_id = uv__load_relaxed(&fast_clock_id);
+ if (clock_id != -1)
+ goto done;
+
+ clock_id = CLOCK_MONOTONIC;
+ if (0 == clock_getres(CLOCK_MONOTONIC_COARSE, &t))
+ if (t.tv_nsec <= 1 * 1000 * 1000)
+ clock_id = CLOCK_MONOTONIC_COARSE;
+
+ uv__store_relaxed(&fast_clock_id, clock_id);
+
+done:
+
+ if (clock_gettime(clock_id, &t))
+ return 0; /* Not really possible. */
+
+ return t.tv_sec * (uint64_t) 1e9 + t.tv_nsec;
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+ char buf[1024];
+ const char* s;
+ ssize_t n;
+ long val;
+ int fd;
+ int i;
+
+ do
+ fd = open("/proc/self/stat", O_RDONLY);
+ while (fd == -1 && errno == EINTR);
+
+ if (fd == -1)
+ return UV__ERR(errno);
+
+ do
+ n = read(fd, buf, sizeof(buf) - 1);
+ while (n == -1 && errno == EINTR);
+
+ uv__close(fd);
+ if (n == -1)
+ return UV__ERR(errno);
+ buf[n] = '\0';
+
+ s = strchr(buf, ' ');
+ if (s == NULL)
+ goto err;
+
+ s += 1;
+ if (*s != '(')
+ goto err;
+
+ s = strchr(s, ')');
+ if (s == NULL)
+ goto err;
+
+ for (i = 1; i <= 22; i++) {
+ s = strchr(s + 1, ' ');
+ if (s == NULL)
+ goto err;
+ }
+
+ errno = 0;
+ val = strtol(s, NULL, 10);
+ if (errno != 0)
+ goto err;
+ if (val < 0)
+ goto err;
+
+ *rss = val * getpagesize();
+ return 0;
+
+err:
+ return UV_EINVAL;
+}
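The parser above skips to the ')' that closes the comm field before counting spaces because the process name can itself contain spaces. A small standalone sketch of the same field-skipping logic on a made-up stat line (the line and the value 321 are illustrative, not real data):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void) {
  /* Field 24 (rss, here 321 pages) is what uv_resident_set_memory() wants. */
  const char buf[] =
      "1234 (my program) S 1 1 1 0 -1 0 0 0 0 0 0 0 0 0 20 0 1 0 2 4096 321 0";
  const char* s;
  int i;

  s = strchr(buf, ')');        /* jump past the space-laden comm field */
  for (i = 1; s != NULL && i <= 22; i++)
    s = strchr(s + 1, ' ');    /* each hop lands just before field i + 2 */
  if (s == NULL)
    return 1;

  printf("rss = %ld pages\n", strtol(s, NULL, 10));  /* prints 321 */
  return 0;
}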
+
+
+int uv_uptime(double* uptime) {
+ static volatile int no_clock_boottime;
+ struct timespec now;
+ int r;
+
+ /* Try CLOCK_BOOTTIME first, fall back to CLOCK_MONOTONIC if not available
+ * (pre-2.6.39 kernels). CLOCK_MONOTONIC doesn't increase when the system
+ * is suspended.
+ */
+ if (no_clock_boottime) {
+ retry: r = clock_gettime(CLOCK_MONOTONIC, &now);
+ }
+ else if ((r = clock_gettime(CLOCK_BOOTTIME, &now)) && errno == EINVAL) {
+ no_clock_boottime = 1;
+ goto retry;
+ }
+
+ if (r)
+ return UV__ERR(errno);
+
+ *uptime = now.tv_sec;
+ return 0;
+}
+
+
+static int uv__cpu_num(FILE* statfile_fp, unsigned int* numcpus) {
+ unsigned int num;
+ char buf[1024];
+
+ if (!fgets(buf, sizeof(buf), statfile_fp))
+ return UV_EIO;
+
+ num = 0;
+ while (fgets(buf, sizeof(buf), statfile_fp)) {
+ if (strncmp(buf, "cpu", 3))
+ break;
+ num++;
+ }
+
+ if (num == 0)
+ return UV_EIO;
+
+ *numcpus = num;
+ return 0;
+}
+
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+ unsigned int numcpus;
+ uv_cpu_info_t* ci;
+ int err;
+ FILE* statfile_fp;
+
+ *cpu_infos = NULL;
+ *count = 0;
+
+ statfile_fp = uv__open_file("/proc/stat");
+ if (statfile_fp == NULL)
+ return UV__ERR(errno);
+
+ err = uv__cpu_num(statfile_fp, &numcpus);
+ if (err < 0)
+ goto out;
+
+ err = UV_ENOMEM;
+ ci = uv__calloc(numcpus, sizeof(*ci));
+ if (ci == NULL)
+ goto out;
+
+ err = read_models(numcpus, ci);
+ if (err == 0)
+ err = read_times(statfile_fp, numcpus, ci);
+
+ if (err) {
+ uv_free_cpu_info(ci, numcpus);
+ goto out;
+ }
+
+ /* read_models() on x86 also reads the CPU speed from /proc/cpuinfo.
+ * We don't check for errors here. Worst case, the field is left zero.
+ */
+ if (ci[0].speed == 0)
+ read_speeds(numcpus, ci);
+
+ *cpu_infos = ci;
+ *count = numcpus;
+ err = 0;
+
+out:
+
+ if (fclose(statfile_fp))
+ if (errno != EINTR && errno != EINPROGRESS)
+ abort();
+
+ return err;
+}
+
+
+static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci) {
+ unsigned int num;
+
+ for (num = 0; num < numcpus; num++)
+ ci[num].speed = read_cpufreq(num) / 1000;
+}
+
+
+/* Also reads the CPU frequency on x86. The other architectures only have
+ * a BogoMIPS field, which may not be very accurate.
+ *
+ * Note: Simply returns on error, uv_cpu_info() takes care of the cleanup.
+ */
+static int read_models(unsigned int numcpus, uv_cpu_info_t* ci) {
+ static const char model_marker[] = "model name\t: ";
+ static const char speed_marker[] = "cpu MHz\t\t: ";
+ const char* inferred_model;
+ unsigned int model_idx;
+ unsigned int speed_idx;
+ char buf[1024];
+ char* model;
+ FILE* fp;
+
+ /* Most are unused on non-ARM, non-MIPS and non-x86 architectures. */
+ (void) &model_marker;
+ (void) &speed_marker;
+ (void) &speed_idx;
+ (void) &model;
+ (void) &buf;
+ (void) &fp;
+
+ model_idx = 0;
+ speed_idx = 0;
+
+#if defined(__arm__) || \
+ defined(__i386__) || \
+ defined(__mips__) || \
+ defined(__x86_64__)
+ fp = uv__open_file("/proc/cpuinfo");
+ if (fp == NULL)
+ return UV__ERR(errno);
+
+ while (fgets(buf, sizeof(buf), fp)) {
+ if (model_idx < numcpus) {
+ if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
+ model = buf + sizeof(model_marker) - 1;
+ model = uv__strndup(model, strlen(model) - 1); /* Strip newline. */
+ if (model == NULL) {
+ fclose(fp);
+ return UV_ENOMEM;
+ }
+ ci[model_idx++].model = model;
+ continue;
+ }
+ }
+#if defined(__arm__) || defined(__mips__)
+ if (model_idx < numcpus) {
+#if defined(__arm__)
+ /* Fallback for pre-3.8 kernels. */
+ static const char model_marker[] = "Processor\t: ";
+#else /* defined(__mips__) */
+ static const char model_marker[] = "cpu model\t\t: ";
+#endif
+ if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
+ model = buf + sizeof(model_marker) - 1;
+ model = uv__strndup(model, strlen(model) - 1); /* Strip newline. */
+ if (model == NULL) {
+ fclose(fp);
+ return UV_ENOMEM;
+ }
+ ci[model_idx++].model = model;
+ continue;
+ }
+ }
+#else /* !__arm__ && !__mips__ */
+ if (speed_idx < numcpus) {
+ if (strncmp(buf, speed_marker, sizeof(speed_marker) - 1) == 0) {
+ ci[speed_idx++].speed = atoi(buf + sizeof(speed_marker) - 1);
+ continue;
+ }
+ }
+#endif /* __arm__ || __mips__ */
+ }
+
+ fclose(fp);
+#endif /* __arm__ || __i386__ || __mips__ || __x86_64__ */
+
+ /* Now we want to make sure that all the models contain *something* because
+ * it's not safe to leave them as null. Copy the last entry unless there
+ * isn't one, in which case we simply put "unknown" into everything.
+ */
+ inferred_model = "unknown";
+ if (model_idx > 0)
+ inferred_model = ci[model_idx - 1].model;
+
+ while (model_idx < numcpus) {
+ model = uv__strndup(inferred_model, strlen(inferred_model));
+ if (model == NULL)
+ return UV_ENOMEM;
+ ci[model_idx++].model = model;
+ }
+
+ return 0;
+}
+
+
+static int read_times(FILE* statfile_fp,
+ unsigned int numcpus,
+ uv_cpu_info_t* ci) {
+ struct uv_cpu_times_s ts;
+ unsigned int ticks;
+ unsigned int multiplier;
+ uint64_t user;
+ uint64_t nice;
+ uint64_t sys;
+ uint64_t idle;
+ uint64_t dummy;
+ uint64_t irq;
+ uint64_t num;
+ uint64_t len;
+ char buf[1024];
+
+ ticks = (unsigned int)sysconf(_SC_CLK_TCK);
+ multiplier = ((uint64_t)1000L / ticks);
+ assert(ticks != (unsigned int) -1);
+ assert(ticks != 0);
+
+ rewind(statfile_fp);
+
+ if (!fgets(buf, sizeof(buf), statfile_fp))
+ abort();
+
+ num = 0;
+
+ while (fgets(buf, sizeof(buf), statfile_fp)) {
+ if (num >= numcpus)
+ break;
+
+ if (strncmp(buf, "cpu", 3))
+ break;
+
+ /* skip "cpu<num> " marker */
+ {
+ unsigned int n;
+ int r = sscanf(buf, "cpu%u ", &n);
+ assert(r == 1);
+ (void) r; /* silence build warning */
+ for (len = sizeof("cpu0"); n /= 10; len++);
+ }
+
+ /* Line contains user, nice, system, idle, iowait, irq, softirq, steal,
+ * guest, guest_nice but we're only interested in the first four + irq.
+ *
+ * Don't use %*s to skip fields or %ll to read straight into the uint64_t
+ * fields; they're not allowed in C89 mode.
+ */
+ if (6 != sscanf(buf + len,
+ "%" PRIu64 " %" PRIu64 " %" PRIu64
+ "%" PRIu64 " %" PRIu64 " %" PRIu64,
+ &user,
+ &nice,
+ &sys,
+ &idle,
+ &dummy,
+ &irq))
+ abort();
+
+ ts.user = user * multiplier;
+ ts.nice = nice * multiplier;
+ ts.sys = sys * multiplier;
+ ts.idle = idle * multiplier;
+ ts.irq = irq * multiplier;
+ ci[num++].cpu_times = ts;
+ }
+ assert(num == numcpus);
+
+ return 0;
+}
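To make the jiffies-to-milliseconds conversion above concrete, here is a self-contained sketch that parses one "cpuN" line the way read_times() does (the sample numbers are made up; SCNu64 is the scanf-direction counterpart of PRIu64):

#include <inttypes.h>
#include <stdio.h>
#include <unistd.h>

int main(void) {
  const char* line = "cpu0 4705 356 584 3699176 23 0 34 0 0 0";
  uint64_t user, nice, sys, idle, iowait, irq;
  unsigned int ticks = (unsigned int) sysconf(_SC_CLK_TCK);  /* typically 100 */
  uint64_t multiplier = 1000 / ticks;                        /* jiffies -> ms */

  if (sscanf(line, "cpu0 %" SCNu64 " %" SCNu64 " %" SCNu64
                   " %" SCNu64 " %" SCNu64 " %" SCNu64,
             &user, &nice, &sys, &idle, &iowait, &irq) != 6)
    return 1;

  printf("user=%" PRIu64 " ms, idle=%" PRIu64 " ms, irq=%" PRIu64 " ms\n",
         user * multiplier, idle * multiplier, irq * multiplier);
  return 0;
}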
+
+
+static uint64_t read_cpufreq(unsigned int cpunum) {
+ uint64_t val;
+ char buf[1024];
+ FILE* fp;
+
+ snprintf(buf,
+ sizeof(buf),
+ "/sys/devices/system/cpu/cpu%u/cpufreq/scaling_cur_freq",
+ cpunum);
+
+ fp = uv__open_file(buf);
+ if (fp == NULL)
+ return 0;
+
+ if (fscanf(fp, "%" PRIu64, &val) != 1)
+ val = 0;
+
+ fclose(fp);
+
+ return val;
+}
+
+
+static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) {
+ if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
+ return 1;
+ if (ent->ifa_addr == NULL)
+ return 1;
+ /*
+ * On Linux getifaddrs returns information related to the raw underlying
+ * devices. We're not interested in this information yet.
+ */
+ if (ent->ifa_addr->sa_family == PF_PACKET)
+ return exclude_type;
+ return !exclude_type;
+}
+
+int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
+#ifndef HAVE_IFADDRS_H
+ *count = 0;
+ *addresses = NULL;
+ return UV_ENOSYS;
+#else
+ struct ifaddrs *addrs, *ent;
+ uv_interface_address_t* address;
+ int i;
+ struct sockaddr_ll *sll;
+
+ *count = 0;
+ *addresses = NULL;
+
+ if (getifaddrs(&addrs))
+ return UV__ERR(errno);
+
+ /* Count the number of interfaces */
+ for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+ if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
+ continue;
+
+ (*count)++;
+ }
+
+ if (*count == 0) {
+ freeifaddrs(addrs);
+ return 0;
+ }
+
+ /* Make sure the memory is initialized to zero using calloc() */
+ *addresses = uv__calloc(*count, sizeof(**addresses));
+ if (!(*addresses)) {
+ freeifaddrs(addrs);
+ return UV_ENOMEM;
+ }
+
+ address = *addresses;
+
+ for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+ if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
+ continue;
+
+ address->name = uv__strdup(ent->ifa_name);
+
+ if (ent->ifa_addr->sa_family == AF_INET6) {
+ address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
+ } else {
+ address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
+ }
+
+ if (ent->ifa_netmask->sa_family == AF_INET6) {
+ address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
+ } else {
+ address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
+ }
+
+ address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK);
+
+ address++;
+ }
+
+ /* Fill in physical addresses for each interface */
+ for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+ if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFPHYS))
+ continue;
+
+ address = *addresses;
+
+ for (i = 0; i < (*count); i++) {
+ size_t namelen = strlen(ent->ifa_name);
+ /* Alias interfaces share the same physical address */
+ if (strncmp(address->name, ent->ifa_name, namelen) == 0 &&
+ (address->name[namelen] == 0 || address->name[namelen] == ':')) {
+ sll = (struct sockaddr_ll*)ent->ifa_addr;
+ memcpy(address->phys_addr, sll->sll_addr, sizeof(address->phys_addr));
+ }
+ address++;
+ }
+ }
+
+ freeifaddrs(addrs);
+
+ return 0;
+#endif
+}
+
+
+void uv_free_interface_addresses(uv_interface_address_t* addresses,
+ int count) {
+ int i;
+
+ for (i = 0; i < count; i++) {
+ uv__free(addresses[i].name);
+ }
+
+ uv__free(addresses);
+}
+
+
+void uv__set_process_title(const char* title) {
+#if defined(PR_SET_NAME)
+ prctl(PR_SET_NAME, title); /* Only copies first 16 characters. */
+#endif
+}
+
+
+static int uv__slurp(const char* filename, char* buf, size_t len) {
+ ssize_t n;
+ int fd;
+
+ assert(len > 0);
+
+ fd = uv__open_cloexec(filename, O_RDONLY);
+ if (fd < 0)
+ return fd;
+
+ do
+ n = read(fd, buf, len - 1);
+ while (n == -1 && errno == EINTR);
+
+ if (uv__close_nocheckstdio(fd))
+ abort();
+
+ if (n < 0)
+ return UV__ERR(errno);
+
+ buf[n] = '\0';
+
+ return 0;
+}
+
+
+static uint64_t uv__read_proc_meminfo(const char* what) {
+ uint64_t rc;
+ char* p;
+ char buf[4096]; /* Large enough to hold all of /proc/meminfo. */
+
+ if (uv__slurp("/proc/meminfo", buf, sizeof(buf)))
+ return 0;
+
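+  /* Lines in /proc/meminfo look like "MemTotal:       16303428 kB"; "what"
+   * is the label to match (e.g. "MemFree:") and the value is reported in kB.
+   */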
+ p = strstr(buf, what);
+
+ if (p == NULL)
+ return 0;
+
+ p += strlen(what);
+
+ rc = 0;
+ sscanf(p, "%" PRIu64 " kB", &rc);
+
+ return rc * 1024;
+}
+
+
+uint64_t uv_get_free_memory(void) {
+ struct sysinfo info;
+ uint64_t rc;
+
+ rc = uv__read_proc_meminfo("MemFree:");
+
+ if (rc != 0)
+ return rc;
+
+ if (0 == sysinfo(&info))
+ return (uint64_t) info.freeram * info.mem_unit;
+
+ return 0;
+}
+
+
+uint64_t uv_get_total_memory(void) {
+ struct sysinfo info;
+ uint64_t rc;
+
+ rc = uv__read_proc_meminfo("MemTotal:");
+
+ if (rc != 0)
+ return rc;
+
+ if (0 == sysinfo(&info))
+ return (uint64_t) info.totalram * info.mem_unit;
+
+ return 0;
+}
+
+
+static uint64_t uv__read_cgroups_uint64(const char* cgroup, const char* param) {
+ char filename[256];
+ char buf[32]; /* Large enough to hold an encoded uint64_t. */
+ uint64_t rc;
+
+ rc = 0;
+ snprintf(filename, sizeof(filename), "/sys/fs/cgroup/%s/%s", cgroup, param);
+ if (0 == uv__slurp(filename, buf, sizeof(buf)))
+ sscanf(buf, "%" PRIu64, &rc);
+
+ return rc;
+}
+
+
+uint64_t uv_get_constrained_memory(void) {
+ /*
+ * This might return 0 if there was a problem getting the memory limit from
+ * cgroups. This is OK because a return value of 0 signifies that the memory
+ * limit is unknown.
+ */
+ return uv__read_cgroups_uint64("memory", "memory.limit_in_bytes");
+}
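+
+
+/* Sketch of caller-side interpretation (not part of libuv): since zero means
+ * "no known limit", a consumer might fall back to the total memory, e.g.
+ *
+ *   uint64_t limit = uv_get_constrained_memory();
+ *   if (limit == 0)
+ *     limit = uv_get_total_memory();
+ */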
+
+
+void uv_loadavg(double avg[3]) {
+ struct sysinfo info;
+ char buf[128]; /* Large enough to hold all of /proc/loadavg. */
+
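+  /* /proc/loadavg starts with the 1-, 5- and 15-minute load averages, e.g.
+   * "0.52 0.48 0.40 1/321 12345"; only the first three fields are parsed.
+   */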
+ if (0 == uv__slurp("/proc/loadavg", buf, sizeof(buf)))
+ if (3 == sscanf(buf, "%lf %lf %lf", &avg[0], &avg[1], &avg[2]))
+ return;
+
+ if (sysinfo(&info) < 0)
+ return;
+
+ avg[0] = (double) info.loads[0] / 65536.0;
+ avg[1] = (double) info.loads[1] / 65536.0;
+ avg[2] = (double) info.loads[2] / 65536.0;
+}
diff --git a/Utilities/cmlibuv/src/unix/linux-inotify.c b/Utilities/cmlibuv/src/unix/linux-inotify.c
new file mode 100644
index 0000000..42b601a
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/linux-inotify.c
@@ -0,0 +1,327 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "uv/tree.h"
+#include "internal.h"
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+
+#include <sys/inotify.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+struct watcher_list {
+ RB_ENTRY(watcher_list) entry;
+ QUEUE watchers;
+ int iterating;
+ char* path;
+ int wd;
+};
+
+struct watcher_root {
+ struct watcher_list* rbh_root;
+};
+#define CAST(p) ((struct watcher_root*)(p))
+
+
+static int compare_watchers(const struct watcher_list* a,
+ const struct watcher_list* b) {
+ if (a->wd < b->wd) return -1;
+ if (a->wd > b->wd) return 1;
+ return 0;
+}
+
+
+RB_GENERATE_STATIC(watcher_root, watcher_list, entry, compare_watchers)
+
+
+static void uv__inotify_read(uv_loop_t* loop,
+ uv__io_t* w,
+ unsigned int revents);
+
+static void maybe_free_watcher_list(struct watcher_list* w,
+ uv_loop_t* loop);
+
+static int init_inotify(uv_loop_t* loop) {
+ int fd;
+
+ if (loop->inotify_fd != -1)
+ return 0;
+
+ fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
+ if (fd < 0)
+ return UV__ERR(errno);
+
+ loop->inotify_fd = fd;
+ uv__io_init(&loop->inotify_read_watcher, uv__inotify_read, loop->inotify_fd);
+ uv__io_start(loop, &loop->inotify_read_watcher, POLLIN);
+
+ return 0;
+}
+
+
+int uv__inotify_fork(uv_loop_t* loop, void* old_watchers) {
+ /* Open the inotify_fd, and re-arm all the inotify watchers. */
+ int err;
+ struct watcher_list* tmp_watcher_list_iter;
+ struct watcher_list* watcher_list;
+ struct watcher_list tmp_watcher_list;
+ QUEUE queue;
+ QUEUE* q;
+ uv_fs_event_t* handle;
+ char* tmp_path;
+
+ if (old_watchers != NULL) {
+ /* We must restore the old watcher list to be able to close items
+ * out of it.
+ */
+ loop->inotify_watchers = old_watchers;
+
+ QUEUE_INIT(&tmp_watcher_list.watchers);
+    /* Note that the queue we use is shared with the start() and stop()
+     * functions, making QUEUE_FOREACH unsafe to use. So we use the
+     * QUEUE_MOVE trick to iterate safely, and we don't free the watcher
+     * list until we're done iterating. Cf. uv__inotify_read.
+     */
+ RB_FOREACH_SAFE(watcher_list, watcher_root,
+ CAST(&old_watchers), tmp_watcher_list_iter) {
+ watcher_list->iterating = 1;
+ QUEUE_MOVE(&watcher_list->watchers, &queue);
+ while (!QUEUE_EMPTY(&queue)) {
+ q = QUEUE_HEAD(&queue);
+ handle = QUEUE_DATA(q, uv_fs_event_t, watchers);
+        /* It's critical to keep a copy of the path here, because it
+         * will be set to NULL by stop() and then deallocated by
+         * maybe_free_watcher_list().
+         */
+ tmp_path = uv__strdup(handle->path);
+ assert(tmp_path != NULL);
+ QUEUE_REMOVE(q);
+ QUEUE_INSERT_TAIL(&watcher_list->watchers, q);
+ uv_fs_event_stop(handle);
+
+ QUEUE_INSERT_TAIL(&tmp_watcher_list.watchers, &handle->watchers);
+ handle->path = tmp_path;
+ }
+ watcher_list->iterating = 0;
+ maybe_free_watcher_list(watcher_list, loop);
+ }
+
+ QUEUE_MOVE(&tmp_watcher_list.watchers, &queue);
+ while (!QUEUE_EMPTY(&queue)) {
+ q = QUEUE_HEAD(&queue);
+ QUEUE_REMOVE(q);
+ handle = QUEUE_DATA(q, uv_fs_event_t, watchers);
+ tmp_path = handle->path;
+ handle->path = NULL;
+ err = uv_fs_event_start(handle, handle->cb, tmp_path, 0);
+ uv__free(tmp_path);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+
+static struct watcher_list* find_watcher(uv_loop_t* loop, int wd) {
+ struct watcher_list w;
+ w.wd = wd;
+ return RB_FIND(watcher_root, CAST(&loop->inotify_watchers), &w);
+}
+
+static void maybe_free_watcher_list(struct watcher_list* w, uv_loop_t* loop) {
+ /* if the watcher_list->watchers is being iterated over, we can't free it. */
+ if ((!w->iterating) && QUEUE_EMPTY(&w->watchers)) {
+ /* No watchers left for this path. Clean up. */
+ RB_REMOVE(watcher_root, CAST(&loop->inotify_watchers), w);
+ inotify_rm_watch(loop->inotify_fd, w->wd);
+ uv__free(w);
+ }
+}
+
+static void uv__inotify_read(uv_loop_t* loop,
+ uv__io_t* dummy,
+ unsigned int events) {
+ const struct inotify_event* e;
+ struct watcher_list* w;
+ uv_fs_event_t* h;
+ QUEUE queue;
+ QUEUE* q;
+ const char* path;
+ ssize_t size;
+ const char *p;
+ /* needs to be large enough for sizeof(inotify_event) + strlen(path) */
+ char buf[4096];
+
+ while (1) {
+ do
+ size = read(loop->inotify_fd, buf, sizeof(buf));
+ while (size == -1 && errno == EINTR);
+
+ if (size == -1) {
+ assert(errno == EAGAIN || errno == EWOULDBLOCK);
+ break;
+ }
+
+    assert(size > 0); /* pre-2.6.21: size 0 means the buffer is too small */
+
+ /* Now we have one or more inotify_event structs. */
+ for (p = buf; p < buf + size; p += sizeof(*e) + e->len) {
+ e = (const struct inotify_event*) p;
+
+ events = 0;
+ if (e->mask & (IN_ATTRIB|IN_MODIFY))
+ events |= UV_CHANGE;
+ if (e->mask & ~(IN_ATTRIB|IN_MODIFY))
+ events |= UV_RENAME;
+
+ w = find_watcher(loop, e->wd);
+ if (w == NULL)
+ continue; /* Stale event, no watchers left. */
+
+ /* inotify does not return the filename when monitoring a single file
+ * for modifications. Repurpose the filename for API compatibility.
+ * I'm not convinced this is a good thing, maybe it should go.
+ */
+ path = e->len ? (const char*) (e + 1) : uv__basename_r(w->path);
+
+      /* We're about to iterate over the queue and call the user's callbacks.
+       * What can go wrong? A callback could call uv_fs_event_stop() and the
+       * queue could change under our feet. So we use the QUEUE_MOVE() trick
+       * to iterate over the queue safely, and we don't free the watcher_list
+       * until we're done iterating.
+       *
+       * First, tell uv_fs_event_stop() (which could be called from a user's
+       * callback) not to free the watcher_list.
+       */
+ w->iterating = 1;
+ QUEUE_MOVE(&w->watchers, &queue);
+ while (!QUEUE_EMPTY(&queue)) {
+ q = QUEUE_HEAD(&queue);
+ h = QUEUE_DATA(q, uv_fs_event_t, watchers);
+
+ QUEUE_REMOVE(q);
+ QUEUE_INSERT_TAIL(&w->watchers, q);
+
+ h->cb(h, path, events, 0);
+ }
+ /* done iterating, time to (maybe) free empty watcher_list */
+ w->iterating = 0;
+ maybe_free_watcher_list(w, loop);
+ }
+ }
+}
+
+
+int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
+ uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
+ return 0;
+}
+
+
+int uv_fs_event_start(uv_fs_event_t* handle,
+ uv_fs_event_cb cb,
+ const char* path,
+ unsigned int flags) {
+ struct watcher_list* w;
+ size_t len;
+ int events;
+ int err;
+ int wd;
+
+ if (uv__is_active(handle))
+ return UV_EINVAL;
+
+ err = init_inotify(handle->loop);
+ if (err)
+ return err;
+
+ events = IN_ATTRIB
+ | IN_CREATE
+ | IN_MODIFY
+ | IN_DELETE
+ | IN_DELETE_SELF
+ | IN_MOVE_SELF
+ | IN_MOVED_FROM
+ | IN_MOVED_TO;
+
+ wd = inotify_add_watch(handle->loop->inotify_fd, path, events);
+ if (wd == -1)
+ return UV__ERR(errno);
+
+ w = find_watcher(handle->loop, wd);
+ if (w)
+ goto no_insert;
+
+ len = strlen(path) + 1;
+ w = uv__malloc(sizeof(*w) + len);
+ if (w == NULL)
+ return UV_ENOMEM;
+
+ w->wd = wd;
+ w->path = memcpy(w + 1, path, len);
+ QUEUE_INIT(&w->watchers);
+ w->iterating = 0;
+ RB_INSERT(watcher_root, CAST(&handle->loop->inotify_watchers), w);
+
+no_insert:
+ uv__handle_start(handle);
+ QUEUE_INSERT_TAIL(&w->watchers, &handle->watchers);
+ handle->path = w->path;
+ handle->cb = cb;
+ handle->wd = wd;
+
+ return 0;
+}
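+
+
+/* Illustrative usage (not part of this file); uv_fs_event_init() and
+ * uv_fs_event_start() are the public entry points implemented here, while
+ * the callback name and watched path are made up and "loop" is assumed to
+ * be an initialized uv_loop_t*:
+ *
+ *   static void on_change(uv_fs_event_t* h, const char* fn, int ev, int st) {
+ *     ... UV_CHANGE: contents or attributes changed; UV_RENAME: created,
+ *         deleted or moved (see the mapping in uv__inotify_read) ...
+ *   }
+ *
+ *   uv_fs_event_t watcher;
+ *   uv_fs_event_init(loop, &watcher);
+ *   uv_fs_event_start(&watcher, on_change, "/path/to/watch", 0);
+ */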
+
+
+int uv_fs_event_stop(uv_fs_event_t* handle) {
+ struct watcher_list* w;
+
+ if (!uv__is_active(handle))
+ return 0;
+
+ w = find_watcher(handle->loop, handle->wd);
+ assert(w != NULL);
+
+ handle->wd = -1;
+ handle->path = NULL;
+ uv__handle_stop(handle);
+ QUEUE_REMOVE(&handle->watchers);
+
+ maybe_free_watcher_list(w, handle->loop);
+
+ return 0;
+}
+
+
+void uv__fs_event_close(uv_fs_event_t* handle) {
+ uv_fs_event_stop(handle);
+}
diff --git a/Utilities/cmlibuv/src/unix/linux-syscalls.c b/Utilities/cmlibuv/src/unix/linux-syscalls.c
new file mode 100644
index 0000000..44daaf1
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/linux-syscalls.c
@@ -0,0 +1,267 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "linux-syscalls.h"
+#include <unistd.h>
+#include <signal.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <errno.h>
+
+#if defined(__arm__)
+# if defined(__thumb__) || defined(__ARM_EABI__)
+# define UV_SYSCALL_BASE 0
+# else
+# define UV_SYSCALL_BASE 0x900000
+# endif
+#endif /* __arm__ */
+
+#ifndef __NR_recvmmsg
+# if defined(__x86_64__)
+# define __NR_recvmmsg 299
+# elif defined(__arm__)
+# define __NR_recvmmsg (UV_SYSCALL_BASE + 365)
+# endif
+#endif /* __NR_recvmmsg */
+
+#ifndef __NR_sendmmsg
+# if defined(__x86_64__)
+# define __NR_sendmmsg 307
+# elif defined(__arm__)
+# define __NR_sendmmsg (UV_SYSCALL_BASE + 374)
+# endif
+#endif /* __NR_sendmmsg */
+
+#ifndef __NR_utimensat
+# if defined(__x86_64__)
+# define __NR_utimensat 280
+# elif defined(__i386__)
+# define __NR_utimensat 320
+# elif defined(__arm__)
+# define __NR_utimensat (UV_SYSCALL_BASE + 348)
+# endif
+#endif /* __NR_utimensat */
+
+#ifndef __NR_preadv
+# if defined(__x86_64__)
+# define __NR_preadv 295
+# elif defined(__i386__)
+# define __NR_preadv 333
+# elif defined(__arm__)
+# define __NR_preadv (UV_SYSCALL_BASE + 361)
+# endif
+#endif /* __NR_preadv */
+
+#ifndef __NR_pwritev
+# if defined(__x86_64__)
+# define __NR_pwritev 296
+# elif defined(__i386__)
+# define __NR_pwritev 334
+# elif defined(__arm__)
+# define __NR_pwritev (UV_SYSCALL_BASE + 362)
+# endif
+#endif /* __NR_pwritev */
+
+#ifndef __NR_dup3
+# if defined(__x86_64__)
+# define __NR_dup3 292
+# elif defined(__i386__)
+# define __NR_dup3 330
+# elif defined(__arm__)
+# define __NR_dup3 (UV_SYSCALL_BASE + 358)
+# endif
+#endif /* __NR_dup3 */
+
+#ifndef __NR_copy_file_range
+# if defined(__x86_64__)
+# define __NR_copy_file_range 326
+# elif defined(__i386__)
+# define __NR_copy_file_range 377
+# elif defined(__s390__)
+# define __NR_copy_file_range 375
+# elif defined(__arm__)
+# define __NR_copy_file_range (UV_SYSCALL_BASE + 391)
+# elif defined(__aarch64__)
+# define __NR_copy_file_range 285
+# elif defined(__powerpc__)
+# define __NR_copy_file_range 379
+# elif defined(__arc__)
+# define __NR_copy_file_range 285
+# endif
+#endif /* __NR_copy_file_range */
+
+#ifndef __NR_statx
+# if defined(__x86_64__)
+# define __NR_statx 332
+# elif defined(__i386__)
+# define __NR_statx 383
+# elif defined(__aarch64__)
+# define __NR_statx 397
+# elif defined(__arm__)
+# define __NR_statx (UV_SYSCALL_BASE + 397)
+# elif defined(__ppc__)
+# define __NR_statx 383
+# elif defined(__s390__)
+# define __NR_statx 379
+# endif
+#endif /* __NR_statx */
+
+#ifndef __NR_getrandom
+# if defined(__x86_64__)
+# define __NR_getrandom 318
+# elif defined(__i386__)
+# define __NR_getrandom 355
+# elif defined(__aarch64__)
+# define __NR_getrandom 384
+# elif defined(__arm__)
+# define __NR_getrandom (UV_SYSCALL_BASE + 384)
+# elif defined(__ppc__)
+# define __NR_getrandom 359
+# elif defined(__s390__)
+# define __NR_getrandom 349
+# endif
+#endif /* __NR_getrandom */
+
+struct uv__mmsghdr;
+
+int uv__sendmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
+#if defined(__i386__)
+ unsigned long args[4];
+ int rc;
+
+ args[0] = (unsigned long) fd;
+ args[1] = (unsigned long) mmsg;
+ args[2] = (unsigned long) vlen;
+ args[3] = /* flags */ 0;
+
+ /* socketcall() raises EINVAL when SYS_SENDMMSG is not supported. */
+ rc = syscall(/* __NR_socketcall */ 102, 20 /* SYS_SENDMMSG */, args);
+ if (rc == -1)
+ if (errno == EINVAL)
+ errno = ENOSYS;
+
+ return rc;
+#elif defined(__NR_sendmmsg)
+ return syscall(__NR_sendmmsg, fd, mmsg, vlen, /* flags */ 0);
+#else
+ return errno = ENOSYS, -1;
+#endif
+}
+
+
+int uv__recvmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
+#if defined(__i386__)
+ unsigned long args[5];
+ int rc;
+
+ args[0] = (unsigned long) fd;
+ args[1] = (unsigned long) mmsg;
+ args[2] = (unsigned long) vlen;
+ args[3] = /* flags */ 0;
+ args[4] = /* timeout */ 0;
+
+ /* socketcall() raises EINVAL when SYS_RECVMMSG is not supported. */
+ rc = syscall(/* __NR_socketcall */ 102, 19 /* SYS_RECVMMSG */, args);
+ if (rc == -1)
+ if (errno == EINVAL)
+ errno = ENOSYS;
+
+ return rc;
+#elif defined(__NR_recvmmsg)
+ return syscall(__NR_recvmmsg, fd, mmsg, vlen, /* flags */ 0, /* timeout */ 0);
+#else
+ return errno = ENOSYS, -1;
+#endif
+}
+
+
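+/* The raw preadv/pwritev syscalls take the 64-bit file offset as two long
+ * arguments (low half, high half), which is what the casts below construct.
+ */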
+ssize_t uv__preadv(int fd, const struct iovec *iov, int iovcnt, int64_t offset) {
+#if defined(__NR_preadv)
+ return syscall(__NR_preadv, fd, iov, iovcnt, (long)offset, (long)(offset >> 32));
+#else
+ return errno = ENOSYS, -1;
+#endif
+}
+
+
+ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt, int64_t offset) {
+#if defined(__NR_pwritev)
+ return syscall(__NR_pwritev, fd, iov, iovcnt, (long)offset, (long)(offset >> 32));
+#else
+ return errno = ENOSYS, -1;
+#endif
+}
+
+
+int uv__dup3(int oldfd, int newfd, int flags) {
+#if defined(__NR_dup3)
+ return syscall(__NR_dup3, oldfd, newfd, flags);
+#else
+ return errno = ENOSYS, -1;
+#endif
+}
+
+
+ssize_t
+uv__fs_copy_file_range(int fd_in,
+ ssize_t* off_in,
+ int fd_out,
+ ssize_t* off_out,
+ size_t len,
+ unsigned int flags)
+{
+#ifdef __NR_copy_file_range
+ return syscall(__NR_copy_file_range,
+ fd_in,
+ off_in,
+ fd_out,
+ off_out,
+ len,
+ flags);
+#else
+ return errno = ENOSYS, -1;
+#endif
+}
+
+
+int uv__statx(int dirfd,
+ const char* path,
+ int flags,
+ unsigned int mask,
+ struct uv__statx* statxbuf) {
+  /* Calling __NR_statx on Android gets the process killed with SIGSYS.
+   * That looks like a seccomp2 sandbox filter rejecting the system call.
+   */
+#if defined(__NR_statx) && !defined(__ANDROID__)
+ return syscall(__NR_statx, dirfd, path, flags, mask, statxbuf);
+#else
+ return errno = ENOSYS, -1;
+#endif
+}
+
+
+ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags) {
+#if defined(__NR_getrandom)
+ return syscall(__NR_getrandom, buf, buflen, flags);
+#else
+ return errno = ENOSYS, -1;
+#endif
+}
diff --git a/Utilities/cmlibuv/src/unix/linux-syscalls.h b/Utilities/cmlibuv/src/unix/linux-syscalls.h
new file mode 100644
index 0000000..761ff32
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/linux-syscalls.h
@@ -0,0 +1,81 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_LINUX_SYSCALL_H_
+#define UV_LINUX_SYSCALL_H_
+
+#undef _GNU_SOURCE
+#define _GNU_SOURCE
+
+#include <stdint.h>
+#include <signal.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/socket.h>
+
+struct uv__statx_timestamp {
+ int64_t tv_sec;
+ uint32_t tv_nsec;
+ int32_t unused0;
+};
+
+struct uv__statx {
+ uint32_t stx_mask;
+ uint32_t stx_blksize;
+ uint64_t stx_attributes;
+ uint32_t stx_nlink;
+ uint32_t stx_uid;
+ uint32_t stx_gid;
+ uint16_t stx_mode;
+ uint16_t unused0;
+ uint64_t stx_ino;
+ uint64_t stx_size;
+ uint64_t stx_blocks;
+ uint64_t stx_attributes_mask;
+ struct uv__statx_timestamp stx_atime;
+ struct uv__statx_timestamp stx_btime;
+ struct uv__statx_timestamp stx_ctime;
+ struct uv__statx_timestamp stx_mtime;
+ uint32_t stx_rdev_major;
+ uint32_t stx_rdev_minor;
+ uint32_t stx_dev_major;
+ uint32_t stx_dev_minor;
+ uint64_t unused1[14];
+};
+
+ssize_t uv__preadv(int fd, const struct iovec *iov, int iovcnt, int64_t offset);
+ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt, int64_t offset);
+int uv__dup3(int oldfd, int newfd, int flags);
+ssize_t
+uv__fs_copy_file_range(int fd_in,
+ ssize_t* off_in,
+ int fd_out,
+ ssize_t* off_out,
+ size_t len,
+ unsigned int flags);
+int uv__statx(int dirfd,
+ const char* path,
+ int flags,
+ unsigned int mask,
+ struct uv__statx* statxbuf);
+ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags);
+
+#endif /* UV_LINUX_SYSCALL_H_ */
diff --git a/Utilities/cmlibuv/src/unix/loop-watcher.c b/Utilities/cmlibuv/src/unix/loop-watcher.c
new file mode 100644
index 0000000..b8c1c2a
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/loop-watcher.c
@@ -0,0 +1,68 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#define UV_LOOP_WATCHER_DEFINE(name, type) \
+ int uv_##name##_init(uv_loop_t* loop, uv_##name##_t* handle) { \
+ uv__handle_init(loop, (uv_handle_t*)handle, UV_##type); \
+ handle->name##_cb = NULL; \
+ return 0; \
+ } \
+ \
+ int uv_##name##_start(uv_##name##_t* handle, uv_##name##_cb cb) { \
+ if (uv__is_active(handle)) return 0; \
+ if (cb == NULL) return UV_EINVAL; \
+ QUEUE_INSERT_HEAD(&handle->loop->name##_handles, &handle->queue); \
+ handle->name##_cb = cb; \
+ uv__handle_start(handle); \
+ return 0; \
+ } \
+ \
+ int uv_##name##_stop(uv_##name##_t* handle) { \
+ if (!uv__is_active(handle)) return 0; \
+ QUEUE_REMOVE(&handle->queue); \
+ uv__handle_stop(handle); \
+ return 0; \
+ } \
+ \
+ void uv__run_##name(uv_loop_t* loop) { \
+ uv_##name##_t* h; \
+ QUEUE queue; \
+ QUEUE* q; \
+ QUEUE_MOVE(&loop->name##_handles, &queue); \
+ while (!QUEUE_EMPTY(&queue)) { \
+ q = QUEUE_HEAD(&queue); \
+ h = QUEUE_DATA(q, uv_##name##_t, queue); \
+ QUEUE_REMOVE(q); \
+ QUEUE_INSERT_TAIL(&loop->name##_handles, q); \
+ h->name##_cb(h); \
+ } \
+ } \
+ \
+ void uv__##name##_close(uv_##name##_t* handle) { \
+ uv_##name##_stop(handle); \
+ }
+
+UV_LOOP_WATCHER_DEFINE(prepare, PREPARE)
+UV_LOOP_WATCHER_DEFINE(check, CHECK)
+UV_LOOP_WATCHER_DEFINE(idle, IDLE)
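+
+
+/* Illustrative usage of one of the handle types generated by the macro above
+ * (not part of this file); uv_idle_init() and uv_idle_start() are the public
+ * libuv functions produced by the IDLE instantiation and on_idle is a
+ * made-up callback name:
+ *
+ *   static void on_idle(uv_idle_t* handle) { ... runs once per loop turn ... }
+ *
+ *   uv_idle_t idler;
+ *   uv_idle_init(uv_default_loop(), &idler);
+ *   uv_idle_start(&idler, on_idle);
+ */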
diff --git a/Utilities/cmlibuv/src/unix/loop.c b/Utilities/cmlibuv/src/unix/loop.c
new file mode 100644
index 0000000..a88e71c
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/loop.c
@@ -0,0 +1,228 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "uv/tree.h"
+#include "internal.h"
+#include "heap-inl.h"
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+int uv_loop_init(uv_loop_t* loop) {
+ uv__loop_internal_fields_t* lfields;
+ void* saved_data;
+ int err;
+
+
+ saved_data = loop->data;
+ memset(loop, 0, sizeof(*loop));
+ loop->data = saved_data;
+
+ lfields = (uv__loop_internal_fields_t*) uv__calloc(1, sizeof(*lfields));
+ if (lfields == NULL)
+ return UV_ENOMEM;
+ loop->internal_fields = lfields;
+
+ err = uv_mutex_init(&lfields->loop_metrics.lock);
+ if (err)
+ goto fail_metrics_mutex_init;
+
+ heap_init((struct heap*) &loop->timer_heap);
+ QUEUE_INIT(&loop->wq);
+ QUEUE_INIT(&loop->idle_handles);
+ QUEUE_INIT(&loop->async_handles);
+ QUEUE_INIT(&loop->check_handles);
+ QUEUE_INIT(&loop->prepare_handles);
+ QUEUE_INIT(&loop->handle_queue);
+
+ loop->active_handles = 0;
+ loop->active_reqs.count = 0;
+ loop->nfds = 0;
+ loop->watchers = NULL;
+ loop->nwatchers = 0;
+ QUEUE_INIT(&loop->pending_queue);
+ QUEUE_INIT(&loop->watcher_queue);
+
+ loop->closing_handles = NULL;
+ uv__update_time(loop);
+ loop->async_io_watcher.fd = -1;
+ loop->async_wfd = -1;
+ loop->signal_pipefd[0] = -1;
+ loop->signal_pipefd[1] = -1;
+ loop->backend_fd = -1;
+ loop->emfile_fd = -1;
+
+ loop->timer_counter = 0;
+ loop->stop_flag = 0;
+
+ err = uv__platform_loop_init(loop);
+ if (err)
+ goto fail_platform_init;
+
+ uv__signal_global_once_init();
+ err = uv_signal_init(loop, &loop->child_watcher);
+ if (err)
+ goto fail_signal_init;
+
+ uv__handle_unref(&loop->child_watcher);
+ loop->child_watcher.flags |= UV_HANDLE_INTERNAL;
+ QUEUE_INIT(&loop->process_handles);
+
+ err = uv_rwlock_init(&loop->cloexec_lock);
+ if (err)
+ goto fail_rwlock_init;
+
+ err = uv_mutex_init(&loop->wq_mutex);
+ if (err)
+ goto fail_mutex_init;
+
+ err = uv_async_init(loop, &loop->wq_async, uv__work_done);
+ if (err)
+ goto fail_async_init;
+
+ uv__handle_unref(&loop->wq_async);
+ loop->wq_async.flags |= UV_HANDLE_INTERNAL;
+
+ return 0;
+
+fail_async_init:
+ uv_mutex_destroy(&loop->wq_mutex);
+
+fail_mutex_init:
+ uv_rwlock_destroy(&loop->cloexec_lock);
+
+fail_rwlock_init:
+ uv__signal_loop_cleanup(loop);
+
+fail_signal_init:
+ uv__platform_loop_delete(loop);
+
+fail_platform_init:
+ uv_mutex_destroy(&lfields->loop_metrics.lock);
+
+fail_metrics_mutex_init:
+ uv__free(lfields);
+ loop->internal_fields = NULL;
+
+ uv__free(loop->watchers);
+ loop->nwatchers = 0;
+ return err;
+}
+
+
+int uv_loop_fork(uv_loop_t* loop) {
+ int err;
+ unsigned int i;
+ uv__io_t* w;
+
+ err = uv__io_fork(loop);
+ if (err)
+ return err;
+
+ err = uv__async_fork(loop);
+ if (err)
+ return err;
+
+ err = uv__signal_loop_fork(loop);
+ if (err)
+ return err;
+
+ /* Rearm all the watchers that aren't re-queued by the above. */
+ for (i = 0; i < loop->nwatchers; i++) {
+ w = loop->watchers[i];
+ if (w == NULL)
+ continue;
+
+ if (w->pevents != 0 && QUEUE_EMPTY(&w->watcher_queue)) {
+ w->events = 0; /* Force re-registration in uv__io_poll. */
+ QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
+ }
+ }
+
+ return 0;
+}
+
+
+void uv__loop_close(uv_loop_t* loop) {
+ uv__loop_internal_fields_t* lfields;
+
+ uv__signal_loop_cleanup(loop);
+ uv__platform_loop_delete(loop);
+ uv__async_stop(loop);
+
+ if (loop->emfile_fd != -1) {
+ uv__close(loop->emfile_fd);
+ loop->emfile_fd = -1;
+ }
+
+ if (loop->backend_fd != -1) {
+ uv__close(loop->backend_fd);
+ loop->backend_fd = -1;
+ }
+
+ uv_mutex_lock(&loop->wq_mutex);
+ assert(QUEUE_EMPTY(&loop->wq) && "thread pool work queue not empty!");
+ assert(!uv__has_active_reqs(loop));
+ uv_mutex_unlock(&loop->wq_mutex);
+ uv_mutex_destroy(&loop->wq_mutex);
+
+ /*
+ * Note that all thread pool stuff is finished at this point and
+ * it is safe to just destroy rw lock
+ */
+ uv_rwlock_destroy(&loop->cloexec_lock);
+
+#if 0
+ assert(QUEUE_EMPTY(&loop->pending_queue));
+ assert(QUEUE_EMPTY(&loop->watcher_queue));
+ assert(loop->nfds == 0);
+#endif
+
+ uv__free(loop->watchers);
+ loop->watchers = NULL;
+ loop->nwatchers = 0;
+
+ lfields = uv__get_internal_fields(loop);
+ uv_mutex_destroy(&lfields->loop_metrics.lock);
+ uv__free(lfields);
+ loop->internal_fields = NULL;
+}
+
+
+int uv__loop_configure(uv_loop_t* loop, uv_loop_option option, va_list ap) {
+ uv__loop_internal_fields_t* lfields;
+
+ lfields = uv__get_internal_fields(loop);
+ if (option == UV_METRICS_IDLE_TIME) {
+ lfields->flags |= UV_METRICS_IDLE_TIME;
+ return 0;
+ }
+
+ if (option != UV_LOOP_BLOCK_SIGNAL)
+ return UV_ENOSYS;
+
+ if (va_arg(ap, int) != SIGPROF)
+ return UV_EINVAL;
+
+ loop->flags |= UV_LOOP_BLOCK_SIGPROF;
+ return 0;
+}
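+
+
+/* Illustrative caller-side usage of the two options handled above (not part
+ * of this file), through the public uv_loop_configure() varargs wrapper:
+ *
+ *   uv_loop_t loop;
+ *   uv_loop_init(&loop);
+ *   uv_loop_configure(&loop, UV_METRICS_IDLE_TIME);
+ *   uv_loop_configure(&loop, UV_LOOP_BLOCK_SIGNAL, SIGPROF);
+ *
+ * Per the checks above, only SIGPROF is accepted for UV_LOOP_BLOCK_SIGNAL.
+ */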
diff --git a/Utilities/cmlibuv/src/unix/netbsd.c b/Utilities/cmlibuv/src/unix/netbsd.c
new file mode 100644
index 0000000..c66333f
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/netbsd.c
@@ -0,0 +1,259 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <assert.h>
+#include <string.h>
+#include <errno.h>
+
+#include <kvm.h>
+#include <paths.h>
+#include <unistd.h>
+#include <time.h>
+#include <stdlib.h>
+#include <fcntl.h>
+
+#include <sys/resource.h>
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#include <uvm/uvm_extern.h>
+
+#include <unistd.h>
+#include <time.h>
+
+
+int uv__platform_loop_init(uv_loop_t* loop) {
+ return uv__kqueue_init(loop);
+}
+
+
+void uv__platform_loop_delete(uv_loop_t* loop) {
+}
+
+
+void uv_loadavg(double avg[3]) {
+ struct loadavg info;
+ size_t size = sizeof(info);
+ int which[] = {CTL_VM, VM_LOADAVG};
+
+ if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0) == -1) return;
+
+ avg[0] = (double) info.ldavg[0] / info.fscale;
+ avg[1] = (double) info.ldavg[1] / info.fscale;
+ avg[2] = (double) info.ldavg[2] / info.fscale;
+}
+
+
+int uv_exepath(char* buffer, size_t* size) {
+  /* Use an intermediate buffer; retrieving a partial path name does not work.
+   * As of NetBSD-8 (beta), the vnode->path translator does not handle file
+   * names longer than 31 characters.
+   */
+ char int_buf[PATH_MAX];
+ size_t int_size;
+ int mib[4];
+
+ if (buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_PROC_ARGS;
+ mib[2] = -1;
+ mib[3] = KERN_PROC_PATHNAME;
+ int_size = ARRAY_SIZE(int_buf);
+
+ if (sysctl(mib, 4, int_buf, &int_size, NULL, 0))
+ return UV__ERR(errno);
+
+  /* Copy the string from the intermediate buffer to the output buffer,
+   * truncating it to the caller-supplied length.
+   */
+ /* TODO(bnoordhuis) Check uv__strscpy() return value. */
+ uv__strscpy(buffer, int_buf, *size);
+
+ /* Set new size. */
+ *size = strlen(buffer);
+
+ return 0;
+}
+
+
+uint64_t uv_get_free_memory(void) {
+ struct uvmexp info;
+ size_t size = sizeof(info);
+ int which[] = {CTL_VM, VM_UVMEXP};
+
+ if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
+ return UV__ERR(errno);
+
+ return (uint64_t) info.free * sysconf(_SC_PAGESIZE);
+}
+
+
+uint64_t uv_get_total_memory(void) {
+#if defined(HW_PHYSMEM64)
+ uint64_t info;
+ int which[] = {CTL_HW, HW_PHYSMEM64};
+#else
+ unsigned int info;
+ int which[] = {CTL_HW, HW_PHYSMEM};
+#endif
+ size_t size = sizeof(info);
+
+ if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
+ return UV__ERR(errno);
+
+ return (uint64_t) info;
+}
+
+
+uint64_t uv_get_constrained_memory(void) {
+ return 0; /* Memory constraints are unknown. */
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+ kvm_t *kd = NULL;
+ struct kinfo_proc2 *kinfo = NULL;
+ pid_t pid;
+ int nprocs;
+ int max_size = sizeof(struct kinfo_proc2);
+ int page_size;
+
+ page_size = getpagesize();
+ pid = getpid();
+
+ kd = kvm_open(NULL, NULL, NULL, KVM_NO_FILES, "kvm_open");
+
+ if (kd == NULL) goto error;
+
+ kinfo = kvm_getproc2(kd, KERN_PROC_PID, pid, max_size, &nprocs);
+ if (kinfo == NULL) goto error;
+
+ *rss = kinfo->p_vm_rssize * page_size;
+
+ kvm_close(kd);
+
+ return 0;
+
+error:
+ if (kd) kvm_close(kd);
+ return UV_EPERM;
+}
+
+
+int uv_uptime(double* uptime) {
+ time_t now;
+ struct timeval info;
+ size_t size = sizeof(info);
+ static int which[] = {CTL_KERN, KERN_BOOTTIME};
+
+ if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
+ return UV__ERR(errno);
+
+ now = time(NULL);
+
+ *uptime = (double)(now - info.tv_sec);
+ return 0;
+}
+
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+ unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK);
+ unsigned int multiplier = ((uint64_t)1000L / ticks);
+ unsigned int cur = 0;
+ uv_cpu_info_t* cpu_info;
+ u_int64_t* cp_times;
+ char model[512];
+ u_int64_t cpuspeed;
+ int numcpus;
+ size_t size;
+ int i;
+
+ size = sizeof(model);
+ if (sysctlbyname("machdep.cpu_brand", &model, &size, NULL, 0) &&
+ sysctlbyname("hw.model", &model, &size, NULL, 0)) {
+ return UV__ERR(errno);
+ }
+
+ size = sizeof(numcpus);
+ if (sysctlbyname("hw.ncpu", &numcpus, &size, NULL, 0))
+ return UV__ERR(errno);
+ *count = numcpus;
+
+ /* Only i386 and amd64 have machdep.tsc_freq */
+ size = sizeof(cpuspeed);
+ if (sysctlbyname("machdep.tsc_freq", &cpuspeed, &size, NULL, 0))
+ cpuspeed = 0;
+
+ size = numcpus * CPUSTATES * sizeof(*cp_times);
+ cp_times = uv__malloc(size);
+ if (cp_times == NULL)
+ return UV_ENOMEM;
+
+ if (sysctlbyname("kern.cp_time", cp_times, &size, NULL, 0))
+ return UV__ERR(errno);
+
+ *cpu_infos = uv__malloc(numcpus * sizeof(**cpu_infos));
+ if (!(*cpu_infos)) {
+ uv__free(cp_times);
+ uv__free(*cpu_infos);
+ return UV_ENOMEM;
+ }
+
+ for (i = 0; i < numcpus; i++) {
+ cpu_info = &(*cpu_infos)[i];
+ cpu_info->cpu_times.user = (uint64_t)(cp_times[CP_USER+cur]) * multiplier;
+ cpu_info->cpu_times.nice = (uint64_t)(cp_times[CP_NICE+cur]) * multiplier;
+ cpu_info->cpu_times.sys = (uint64_t)(cp_times[CP_SYS+cur]) * multiplier;
+ cpu_info->cpu_times.idle = (uint64_t)(cp_times[CP_IDLE+cur]) * multiplier;
+ cpu_info->cpu_times.irq = (uint64_t)(cp_times[CP_INTR+cur]) * multiplier;
+ cpu_info->model = uv__strdup(model);
+ cpu_info->speed = (int)(cpuspeed/(uint64_t) 1e6);
+ cur += CPUSTATES;
+ }
+ uv__free(cp_times);
+ return 0;
+}
+
+int uv__random_sysctl(void* buf, size_t len) {
+ static int name[] = {CTL_KERN, KERN_ARND};
+ size_t count, req;
+ unsigned char* p;
+
+ p = buf;
+ while (len) {
+ req = len < 32 ? len : 32;
+ count = req;
+
+ if (sysctl(name, ARRAY_SIZE(name), p, &count, NULL, 0) == -1)
+ return UV__ERR(errno);
+
+ if (count != req)
+ return UV_EIO; /* Can't happen. */
+
+ p += count;
+ len -= count;
+ }
+
+ return 0;
+}
diff --git a/Utilities/cmlibuv/src/unix/no-fsevents.c b/Utilities/cmlibuv/src/unix/no-fsevents.c
new file mode 100644
index 0000000..158643a
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/no-fsevents.c
@@ -0,0 +1,42 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <errno.h>
+
+int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
+ return UV_ENOSYS;
+}
+
+int uv_fs_event_start(uv_fs_event_t* handle, uv_fs_event_cb cb,
+ const char* filename, unsigned int flags) {
+ return UV_ENOSYS;
+}
+
+int uv_fs_event_stop(uv_fs_event_t* handle) {
+ return UV_ENOSYS;
+}
+
+void uv__fs_event_close(uv_fs_event_t* handle) {
+ UNREACHABLE();
+}
diff --git a/Utilities/cmlibuv/src/unix/no-proctitle.c b/Utilities/cmlibuv/src/unix/no-proctitle.c
new file mode 100644
index 0000000..32aa0af
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/no-proctitle.c
@@ -0,0 +1,45 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <errno.h>
+#include <stddef.h>
+
+char** uv_setup_args(int argc, char** argv) {
+ return argv;
+}
+
+void uv__process_title_cleanup(void) {
+}
+
+int uv_set_process_title(const char* title) {
+ return 0;
+}
+
+int uv_get_process_title(char* buffer, size_t size) {
+ if (buffer == NULL || size == 0)
+ return UV_EINVAL;
+
+ buffer[0] = '\0';
+ return 0;
+}
diff --git a/Utilities/cmlibuv/src/unix/openbsd.c b/Utilities/cmlibuv/src/unix/openbsd.c
new file mode 100644
index 0000000..f32a94d
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/openbsd.c
@@ -0,0 +1,240 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/resource.h>
+#include <sys/sched.h>
+#include <sys/time.h>
+#include <sys/sysctl.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <paths.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+
+int uv__platform_loop_init(uv_loop_t* loop) {
+ return uv__kqueue_init(loop);
+}
+
+
+void uv__platform_loop_delete(uv_loop_t* loop) {
+}
+
+
+void uv_loadavg(double avg[3]) {
+ struct loadavg info;
+ size_t size = sizeof(info);
+ int which[] = {CTL_VM, VM_LOADAVG};
+
+ if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0) < 0) return;
+
+ avg[0] = (double) info.ldavg[0] / info.fscale;
+ avg[1] = (double) info.ldavg[1] / info.fscale;
+ avg[2] = (double) info.ldavg[2] / info.fscale;
+}
+
+
+int uv_exepath(char* buffer, size_t* size) {
+ int mib[4];
+ char **argsbuf = NULL;
+ size_t argsbuf_size = 100U;
+ size_t exepath_size;
+ pid_t mypid;
+ int err;
+
+ if (buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+ mypid = getpid();
+ for (;;) {
+ err = UV_ENOMEM;
+ argsbuf = uv__reallocf(argsbuf, argsbuf_size);
+ if (argsbuf == NULL)
+ goto out;
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_PROC_ARGS;
+ mib[2] = mypid;
+ mib[3] = KERN_PROC_ARGV;
+ if (sysctl(mib, ARRAY_SIZE(mib), argsbuf, &argsbuf_size, NULL, 0) == 0) {
+ break;
+ }
+ if (errno != ENOMEM) {
+ err = UV__ERR(errno);
+ goto out;
+ }
+ argsbuf_size *= 2U;
+ }
+
+ if (argsbuf[0] == NULL) {
+ err = UV_EINVAL; /* FIXME(bnoordhuis) More appropriate error. */
+ goto out;
+ }
+
+ *size -= 1;
+ exepath_size = strlen(argsbuf[0]);
+ if (*size > exepath_size)
+ *size = exepath_size;
+
+ memcpy(buffer, argsbuf[0], *size);
+ buffer[*size] = '\0';
+ err = 0;
+
+out:
+ uv__free(argsbuf);
+
+ return err;
+}
+
+
+uint64_t uv_get_free_memory(void) {
+ struct uvmexp info;
+ size_t size = sizeof(info);
+ int which[] = {CTL_VM, VM_UVMEXP};
+
+ if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
+ return UV__ERR(errno);
+
+ return (uint64_t) info.free * sysconf(_SC_PAGESIZE);
+}
+
+
+uint64_t uv_get_total_memory(void) {
+ uint64_t info;
+ int which[] = {CTL_HW, HW_PHYSMEM64};
+ size_t size = sizeof(info);
+
+ if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
+ return UV__ERR(errno);
+
+ return (uint64_t) info;
+}
+
+
+uint64_t uv_get_constrained_memory(void) {
+ return 0; /* Memory constraints are unknown. */
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+ struct kinfo_proc kinfo;
+ size_t page_size = getpagesize();
+ size_t size = sizeof(struct kinfo_proc);
+ int mib[6];
+
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_PROC;
+ mib[2] = KERN_PROC_PID;
+ mib[3] = getpid();
+ mib[4] = sizeof(struct kinfo_proc);
+ mib[5] = 1;
+
+ if (sysctl(mib, ARRAY_SIZE(mib), &kinfo, &size, NULL, 0) < 0)
+ return UV__ERR(errno);
+
+ *rss = kinfo.p_vm_rssize * page_size;
+ return 0;
+}
+
+
+int uv_uptime(double* uptime) {
+ time_t now;
+ struct timeval info;
+ size_t size = sizeof(info);
+ static int which[] = {CTL_KERN, KERN_BOOTTIME};
+
+ if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
+ return UV__ERR(errno);
+
+ now = time(NULL);
+
+ *uptime = (double)(now - info.tv_sec);
+ return 0;
+}
+
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+ unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK),
+ multiplier = ((uint64_t)1000L / ticks), cpuspeed;
+ uint64_t info[CPUSTATES];
+ char model[512];
+ int numcpus = 1;
+ int which[] = {CTL_HW,HW_MODEL};
+ int percpu[] = {CTL_KERN,KERN_CPTIME2,0};
+ size_t size;
+ int i, j;
+ uv_cpu_info_t* cpu_info;
+
+ size = sizeof(model);
+ if (sysctl(which, ARRAY_SIZE(which), &model, &size, NULL, 0))
+ return UV__ERR(errno);
+
+ which[1] = HW_NCPUONLINE;
+ size = sizeof(numcpus);
+ if (sysctl(which, ARRAY_SIZE(which), &numcpus, &size, NULL, 0))
+ return UV__ERR(errno);
+
+ *cpu_infos = uv__malloc(numcpus * sizeof(**cpu_infos));
+ if (!(*cpu_infos))
+ return UV_ENOMEM;
+
+ i = 0;
+ *count = numcpus;
+
+ which[1] = HW_CPUSPEED;
+ size = sizeof(cpuspeed);
+ if (sysctl(which, ARRAY_SIZE(which), &cpuspeed, &size, NULL, 0))
+ goto error;
+
+ size = sizeof(info);
+ for (i = 0; i < numcpus; i++) {
+ percpu[2] = i;
+ if (sysctl(percpu, ARRAY_SIZE(percpu), &info, &size, NULL, 0))
+ goto error;
+
+ cpu_info = &(*cpu_infos)[i];
+
+ cpu_info->cpu_times.user = (uint64_t)(info[CP_USER]) * multiplier;
+ cpu_info->cpu_times.nice = (uint64_t)(info[CP_NICE]) * multiplier;
+ cpu_info->cpu_times.sys = (uint64_t)(info[CP_SYS]) * multiplier;
+ cpu_info->cpu_times.idle = (uint64_t)(info[CP_IDLE]) * multiplier;
+ cpu_info->cpu_times.irq = (uint64_t)(info[CP_INTR]) * multiplier;
+
+ cpu_info->model = uv__strdup(model);
+ cpu_info->speed = cpuspeed;
+ }
+
+ return 0;
+
+error:
+ *count = 0;
+ for (j = 0; j < i; j++)
+ uv__free((*cpu_infos)[j].model);
+
+ uv__free(*cpu_infos);
+ *cpu_infos = NULL;
+ return UV__ERR(errno);
+}
diff --git a/Utilities/cmlibuv/src/unix/os390-syscalls.c b/Utilities/cmlibuv/src/unix/os390-syscalls.c
new file mode 100644
index 0000000..491e950
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/os390-syscalls.c
@@ -0,0 +1,584 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+
+#include "os390-syscalls.h"
+#include <errno.h>
+#include <stdlib.h>
+#include <search.h>
+#include <termios.h>
+#include <sys/msg.h>
+
+#define CW_INTRPT 1
+#define CW_CONDVAR 32
+
+#pragma linkage(BPX4CTW, OS)
+#pragma linkage(BPX1CTW, OS)
+
+static QUEUE global_epoll_queue;
+static uv_mutex_t global_epoll_lock;
+static uv_once_t once = UV_ONCE_INIT;
+
+int scandir(const char* maindir, struct dirent*** namelist,
+ int (*filter)(const struct dirent*),
+ int (*compar)(const struct dirent**,
+ const struct dirent **)) {
+ struct dirent** nl;
+ struct dirent** nl_copy;
+ struct dirent* dirent;
+ unsigned count;
+ size_t allocated;
+ DIR* mdir;
+
+ nl = NULL;
+ count = 0;
+ allocated = 0;
+ mdir = opendir(maindir);
+ if (!mdir)
+ return -1;
+
+ while (1) {
+ dirent = readdir(mdir);
+ if (!dirent)
+ break;
+ if (!filter || filter(dirent)) {
+ struct dirent* copy;
+ copy = uv__malloc(sizeof(*copy));
+ if (!copy)
+ goto error;
+ memcpy(copy, dirent, sizeof(*copy));
+
+ nl_copy = uv__realloc(nl, sizeof(*copy) * (count + 1));
+ if (nl_copy == NULL) {
+ uv__free(copy);
+ goto error;
+ }
+
+ nl = nl_copy;
+ nl[count++] = copy;
+ }
+ }
+
+ qsort(nl, count, sizeof(struct dirent *),
+ (int (*)(const void *, const void *)) compar);
+
+ closedir(mdir);
+
+ *namelist = nl;
+ return count;
+
+error:
+ while (count > 0) {
+ dirent = nl[--count];
+ uv__free(dirent);
+ }
+ uv__free(nl);
+ closedir(mdir);
+ errno = ENOMEM;
+ return -1;
+}
+
+
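+/* Round "val" up to the nearest power of two (returning it unchanged if it
+ * already is one) by smearing its highest set bit into every lower position
+ * and then adding one, e.g. 5 -> 8, 16 -> 16.
+ */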
+static unsigned int next_power_of_two(unsigned int val) {
+ val -= 1;
+ val |= val >> 1;
+ val |= val >> 2;
+ val |= val >> 4;
+ val |= val >> 8;
+ val |= val >> 16;
+ val += 1;
+ return val;
+}
+
+
+static void maybe_resize(uv__os390_epoll* lst, unsigned int len) {
+ unsigned int newsize;
+ unsigned int i;
+ struct pollfd* newlst;
+ struct pollfd event;
+
+ if (len <= lst->size)
+ return;
+
+ if (lst->size == 0)
+ event.fd = -1;
+ else {
+ /* Extract the message queue at the end. */
+ event = lst->items[lst->size - 1];
+ lst->items[lst->size - 1].fd = -1;
+ }
+
+ newsize = next_power_of_two(len);
+ newlst = uv__reallocf(lst->items, newsize * sizeof(lst->items[0]));
+
+ if (newlst == NULL)
+ abort();
+ for (i = lst->size; i < newsize; ++i)
+ newlst[i].fd = -1;
+
+ /* Restore the message queue at the end */
+ newlst[newsize - 1] = event;
+
+ lst->items = newlst;
+ lst->size = newsize;
+}
+
+
+static void init_message_queue(uv__os390_epoll* lst) {
+ struct {
+ long int header;
+ char body;
+ } msg;
+
+ /* initialize message queue */
+ lst->msg_queue = msgget(IPC_PRIVATE, 0600 | IPC_CREAT);
+ if (lst->msg_queue == -1)
+ abort();
+
+ /*
+ On z/OS, the message queue will be affiliated with the process only
+ when a send is performed on it. Once this is done, the system
+ can be queried for all message queues belonging to our process id.
+ */
+ msg.header = 1;
+ if (msgsnd(lst->msg_queue, &msg, sizeof(msg.body), 0) != 0)
+ abort();
+
+ /* Clean up the dummy message sent above */
+ if (msgrcv(lst->msg_queue, &msg, sizeof(msg.body), 0, 0) != sizeof(msg.body))
+ abort();
+}
+
+
+static void before_fork(void) {
+ uv_mutex_lock(&global_epoll_lock);
+}
+
+
+static void after_fork(void) {
+ uv_mutex_unlock(&global_epoll_lock);
+}
+
+
+static void child_fork(void) {
+ QUEUE* q;
+ uv_once_t child_once = UV_ONCE_INIT;
+
+ /* reset once */
+ memcpy(&once, &child_once, sizeof(child_once));
+
+ /* reset epoll list */
+ while (!QUEUE_EMPTY(&global_epoll_queue)) {
+ uv__os390_epoll* lst;
+ q = QUEUE_HEAD(&global_epoll_queue);
+ QUEUE_REMOVE(q);
+ lst = QUEUE_DATA(q, uv__os390_epoll, member);
+ uv__free(lst->items);
+ lst->items = NULL;
+ lst->size = 0;
+ }
+
+ uv_mutex_unlock(&global_epoll_lock);
+ uv_mutex_destroy(&global_epoll_lock);
+}
+
+
+static void epoll_init(void) {
+ QUEUE_INIT(&global_epoll_queue);
+ if (uv_mutex_init(&global_epoll_lock))
+ abort();
+
+ if (pthread_atfork(&before_fork, &after_fork, &child_fork))
+ abort();
+}
+
+
+uv__os390_epoll* epoll_create1(int flags) {
+ uv__os390_epoll* lst;
+
+ lst = uv__malloc(sizeof(*lst));
+ if (lst != NULL) {
+ /* initialize list */
+ lst->size = 0;
+ lst->items = NULL;
+ init_message_queue(lst);
+ maybe_resize(lst, 1);
+ lst->items[lst->size - 1].fd = lst->msg_queue;
+ lst->items[lst->size - 1].events = POLLIN;
+ lst->items[lst->size - 1].revents = 0;
+ uv_once(&once, epoll_init);
+ uv_mutex_lock(&global_epoll_lock);
+ QUEUE_INSERT_TAIL(&global_epoll_queue, &lst->member);
+ uv_mutex_unlock(&global_epoll_lock);
+ }
+
+ return lst;
+}
+
+
+int epoll_ctl(uv__os390_epoll* lst,
+ int op,
+ int fd,
+ struct epoll_event *event) {
+ uv_mutex_lock(&global_epoll_lock);
+
+ if (op == EPOLL_CTL_DEL) {
+ if (fd >= lst->size || lst->items[fd].fd == -1) {
+ uv_mutex_unlock(&global_epoll_lock);
+ errno = ENOENT;
+ return -1;
+ }
+ lst->items[fd].fd = -1;
+ } else if (op == EPOLL_CTL_ADD) {
+
+ /* Resizing to 'fd + 1' would expand the list to contain at least
+ * 'fd'. But we need to guarantee that the last index on the list
+ * is reserved for the message queue. So specify 'fd + 2' instead.
+ */
+ maybe_resize(lst, fd + 2);
+ if (lst->items[fd].fd != -1) {
+ uv_mutex_unlock(&global_epoll_lock);
+ errno = EEXIST;
+ return -1;
+ }
+ lst->items[fd].fd = fd;
+ lst->items[fd].events = event->events;
+ lst->items[fd].revents = 0;
+ } else if (op == EPOLL_CTL_MOD) {
+ if (fd >= lst->size - 1 || lst->items[fd].fd == -1) {
+ uv_mutex_unlock(&global_epoll_lock);
+ errno = ENOENT;
+ return -1;
+ }
+ lst->items[fd].events = event->events;
+ lst->items[fd].revents = 0;
+ } else
+ abort();
+
+ uv_mutex_unlock(&global_epoll_lock);
+ return 0;
+}
+
+#define EP_MAX_PFDS (ULONG_MAX / sizeof(struct pollfd))
+#define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))
+
+int epoll_wait(uv__os390_epoll* lst, struct epoll_event* events,
+ int maxevents, int timeout) {
+ nmsgsfds_t size;
+ struct pollfd* pfds;
+ int pollret;
+ int reventcount;
+ int nevents;
+ struct pollfd msg_fd;
+ int i;
+
+ if (!lst || !lst->items || !events) {
+ errno = EFAULT;
+ return -1;
+ }
+
+ if (lst->size > EP_MAX_PFDS) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (maxevents <= 0 || maxevents > EP_MAX_EVENTS) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (lst->size > 0)
+ _SET_FDS_MSGS(size, 1, lst->size - 1);
+ else
+ _SET_FDS_MSGS(size, 0, 0);
+ pfds = lst->items;
+ pollret = poll(pfds, size, timeout);
+ if (pollret <= 0)
+ return pollret;
+
+ assert(lst->size > 0);
+
+ pollret = _NFDS(pollret) + _NMSGS(pollret);
+
+ reventcount = 0;
+ nevents = 0;
+ msg_fd = pfds[lst->size - 1];
+ for (i = 0;
+ i < lst->size && i < maxevents && reventcount < pollret; ++i) {
+ struct epoll_event ev;
+ struct pollfd* pfd;
+
+ pfd = &pfds[i];
+ if (pfd->fd == -1 || pfd->revents == 0)
+ continue;
+
+ ev.fd = pfd->fd;
+ ev.events = pfd->revents;
+ ev.is_msg = 0;
+ if (pfd->revents & POLLIN && pfd->revents & POLLOUT)
+ reventcount += 2;
+ else if (pfd->revents & (POLLIN | POLLOUT))
+ ++reventcount;
+
+ pfd->revents = 0;
+ events[nevents++] = ev;
+ }
+
+ if (msg_fd.revents != 0 && msg_fd.fd != -1)
+ if (i == lst->size)
+ events[nevents - 1].is_msg = 1;
+
+ return nevents;
+}
+
+
+int epoll_file_close(int fd) {
+ QUEUE* q;
+
+ uv_once(&once, epoll_init);
+ uv_mutex_lock(&global_epoll_lock);
+ QUEUE_FOREACH(q, &global_epoll_queue) {
+ uv__os390_epoll* lst;
+
+ lst = QUEUE_DATA(q, uv__os390_epoll, member);
+ if (fd < lst->size && lst->items != NULL && lst->items[fd].fd != -1)
+ lst->items[fd].fd = -1;
+ }
+
+ uv_mutex_unlock(&global_epoll_lock);
+ return 0;
+}
+
+void epoll_queue_close(uv__os390_epoll* lst) {
+ /* Remove epoll instance from global queue */
+ uv_mutex_lock(&global_epoll_lock);
+ QUEUE_REMOVE(&lst->member);
+ uv_mutex_unlock(&global_epoll_lock);
+
+ /* Free resources */
+ msgctl(lst->msg_queue, IPC_RMID, NULL);
+ lst->msg_queue = -1;
+ uv__free(lst->items);
+ lst->items = NULL;
+}
+
+
+int nanosleep(const struct timespec* req, struct timespec* rem) {
+ unsigned nano;
+ unsigned seconds;
+ unsigned events;
+ unsigned secrem;
+ unsigned nanorem;
+ int rv;
+ int err;
+ int rsn;
+
+ nano = (int)req->tv_nsec;
+ seconds = req->tv_sec;
+ events = CW_CONDVAR | CW_INTRPT;
+ secrem = 0;
+ nanorem = 0;
+
+#if defined(_LP64)
+ BPX4CTW(&seconds, &nano, &events, &secrem, &nanorem, &rv, &err, &rsn);
+#else
+ BPX1CTW(&seconds, &nano, &events, &secrem, &nanorem, &rv, &err, &rsn);
+#endif
+
+ /* Don't clobber errno unless BPX1CTW/BPX4CTW errored.
+ * Don't leak EAGAIN, that just means the timeout expired.
+ */
+ if (rv == -1)
+ if (err == EAGAIN)
+ rv = 0;
+ else
+ errno = err;
+
+ if (rem != NULL && (rv == 0 || err == EINTR)) {
+ rem->tv_nsec = nanorem;
+ rem->tv_sec = secrem;
+ }
+
+ return rv;
+}
+
+
+char* mkdtemp(char* path) {
+ static const char* tempchars =
+ "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
+ static const size_t num_chars = 62;
+ static const size_t num_x = 6;
+ char *ep, *cp;
+ unsigned int tries, i;
+ size_t len;
+ uint64_t v;
+ int fd;
+ int retval;
+ int saved_errno;
+
+ len = strlen(path);
+ ep = path + len;
+ if (len < num_x || strncmp(ep - num_x, "XXXXXX", num_x)) {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ fd = open("/dev/urandom", O_RDONLY);
+ if (fd == -1)
+ return NULL;
+
+ tries = TMP_MAX;
+ retval = -1;
+ do {
+ if (read(fd, &v, sizeof(v)) != sizeof(v))
+ break;
+
+ cp = ep - num_x;
+ for (i = 0; i < num_x; i++) {
+ *cp++ = tempchars[v % num_chars];
+ v /= num_chars;
+ }
+
+ if (mkdir(path, S_IRWXU) == 0) {
+ retval = 0;
+ break;
+ }
+ else if (errno != EEXIST)
+ break;
+ } while (--tries);
+
+ saved_errno = errno;
+ uv__close(fd);
+ if (tries == 0) {
+ errno = EEXIST;
+ return NULL;
+ }
+
+ if (retval == -1) {
+ errno = saved_errno;
+ return NULL;
+ }
+
+ return path;
+}
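+
+/* Usage sketch, for illustration only and guarded out of the build: the
+ * template prefix below is an arbitrary example; the trailing "XXXXXX" is
+ * required and is replaced in place with random characters.
+ */
+#if 0
+static void example_mkdtemp(void) {
+  char tmpl[] = "/tmp/uv-XXXXXX";
+
+  if (mkdtemp(tmpl) != NULL) {
+    /* 'tmpl' now names a freshly created directory with mode S_IRWXU. */
+  }
+}
+#endif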
+
+
+ssize_t os390_readlink(const char* path, char* buf, size_t len) {
+ ssize_t rlen;
+ ssize_t vlen;
+ ssize_t plen;
+ char* delimiter;
+ char old_delim;
+ char* tmpbuf;
+ char realpathstr[PATH_MAX + 1];
+
+ tmpbuf = uv__malloc(len + 1);
+ if (tmpbuf == NULL) {
+ errno = ENOMEM;
+ return -1;
+ }
+
+ rlen = readlink(path, tmpbuf, len);
+ if (rlen < 0) {
+ uv__free(tmpbuf);
+ return rlen;
+ }
+
+ if (rlen < 3 || strncmp("/$", tmpbuf, 2) != 0) {
+ /* Straightforward readlink. */
+ memcpy(buf, tmpbuf, rlen);
+ uv__free(tmpbuf);
+ return rlen;
+ }
+
+ /*
+ * There is a parmlib variable at the beginning
+ * which needs interpretation.
+ */
+ tmpbuf[rlen] = '\0';
+ delimiter = strchr(tmpbuf + 2, '/');
+ if (delimiter == NULL)
+    /* No slash after the variable; use the end of the string. */
+ delimiter = strchr(tmpbuf + 2, '\0');
+
+ /* Read real path of the variable. */
+ old_delim = *delimiter;
+ *delimiter = '\0';
+ if (realpath(tmpbuf, realpathstr) == NULL) {
+ uv__free(tmpbuf);
+ return -1;
+ }
+
+  /* realpathstr is not guaranteed to end with a null byte. */
+ realpathstr[PATH_MAX] = '\0';
+
+ /* Reset the delimiter and fill up the buffer. */
+ *delimiter = old_delim;
+ plen = strlen(delimiter);
+ vlen = strlen(realpathstr);
+ rlen = plen + vlen;
+ if (rlen > len) {
+ uv__free(tmpbuf);
+ errno = ENAMETOOLONG;
+ return -1;
+ }
+ memcpy(buf, realpathstr, vlen);
+ memcpy(buf + vlen, delimiter, plen);
+
+ /* Done using temporary buffer. */
+ uv__free(tmpbuf);
+
+ return rlen;
+}
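+
+/* Illustrative sketch, guarded out of the build: the "/$VERSION" symbol and
+ * the paths below are hypothetical; they only show how a link target that
+ * starts with a parmlib variable is rewritten by os390_readlink().
+ */
+#if 0
+static void example_os390_readlink(void) {
+  char target[PATH_MAX + 1];
+  ssize_t n;
+
+  /* Suppose "/u/lnk" points at "/$VERSION/usr/lpp" and realpath() resolves
+   * "/$VERSION" to "/SYS1"; the call then yields "/SYS1/usr/lpp".
+   */
+  n = os390_readlink("/u/lnk", target, sizeof(target) - 1);
+  if (n >= 0)
+    target[n] = '\0';  /* like readlink(), the result is not NUL-terminated */
+}
+#endif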
+
+
+size_t strnlen(const char* str, size_t maxlen) {
+ char* p = memchr(str, 0, maxlen);
+ if (p == NULL)
+ return maxlen;
+ else
+ return p - str;
+}
+
+
+int sem_init(UV_PLATFORM_SEM_T* semid, int pshared, unsigned int value) {
+ UNREACHABLE();
+}
+
+
+int sem_destroy(UV_PLATFORM_SEM_T* semid) {
+ UNREACHABLE();
+}
+
+
+int sem_post(UV_PLATFORM_SEM_T* semid) {
+ UNREACHABLE();
+}
+
+
+int sem_trywait(UV_PLATFORM_SEM_T* semid) {
+ UNREACHABLE();
+}
+
+
+int sem_wait(UV_PLATFORM_SEM_T* semid) {
+ UNREACHABLE();
+}
diff --git a/Utilities/cmlibuv/src/unix/os390-syscalls.h b/Utilities/cmlibuv/src/unix/os390-syscalls.h
new file mode 100644
index 0000000..86416bb
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/os390-syscalls.h
@@ -0,0 +1,74 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+
+#ifndef UV_OS390_SYSCALL_H_
+#define UV_OS390_SYSCALL_H_
+
+#include "uv.h"
+#include "internal.h"
+#include <dirent.h>
+#include <poll.h>
+#include <pthread.h>
+
+#define EPOLL_CTL_ADD 1
+#define EPOLL_CTL_DEL 2
+#define EPOLL_CTL_MOD 3
+#define MAX_EPOLL_INSTANCES 256
+#define MAX_ITEMS_PER_EPOLL 1024
+
+#define UV__O_CLOEXEC 0x80000
+
+struct epoll_event {
+ int events;
+ int fd;
+ int is_msg;
+};
+
+typedef struct {
+ QUEUE member;
+ struct pollfd* items;
+ unsigned long size;
+ int msg_queue;
+} uv__os390_epoll;
+
+/* epoll api */
+uv__os390_epoll* epoll_create1(int flags);
+int epoll_ctl(uv__os390_epoll* ep, int op, int fd, struct epoll_event *event);
+int epoll_wait(uv__os390_epoll* ep, struct epoll_event *events, int maxevents, int timeout);
+int epoll_file_close(int fd);
+
+/* utility functions */
+int nanosleep(const struct timespec* req, struct timespec* rem);
+int scandir(const char* maindir, struct dirent*** namelist,
+ int (*filter)(const struct dirent *),
+ int (*compar)(const struct dirent **,
+ const struct dirent **));
+char *mkdtemp(char* path);
+ssize_t os390_readlink(const char* path, char* buf, size_t len);
+size_t strnlen(const char* str, size_t maxlen);
+int sem_init(UV_PLATFORM_SEM_T* semid, int pshared, unsigned int value);
+int sem_destroy(UV_PLATFORM_SEM_T* semid);
+int sem_post(UV_PLATFORM_SEM_T* semid);
+int sem_trywait(UV_PLATFORM_SEM_T* semid);
+int sem_wait(UV_PLATFORM_SEM_T* semid);
+
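+/* Usage sketch, for illustration only and guarded out of the build.  It
+ * mirrors how src/unix/os390.c drives this emulation layer; the fd value
+ * and the timeout are arbitrary examples.
+ */
+#if 0
+static void example_epoll_usage(void) {
+  uv__os390_epoll* ep;
+  struct epoll_event ev;
+  struct epoll_event out[16];
+  int n;
+
+  ep = epoll_create1(0);                     /* allocate an emulated instance */
+  ev.fd = 0;
+  ev.events = POLLIN;
+  ev.is_msg = 0;
+  epoll_ctl(ep, EPOLL_CTL_ADD, ev.fd, &ev);  /* start watching the fd */
+  n = epoll_wait(ep, out, 16, 1000);         /* block for up to one second */
+  (void) n;
+  epoll_ctl(ep, EPOLL_CTL_DEL, ev.fd, &ev);
+}
+#endif  /* 0 */
+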
+#endif /* UV_OS390_SYSCALL_H_ */
diff --git a/Utilities/cmlibuv/src/unix/os390.c b/Utilities/cmlibuv/src/unix/os390.c
new file mode 100644
index 0000000..3bb4426
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/os390.c
@@ -0,0 +1,975 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "internal.h"
+#include <sys/ioctl.h>
+#include <net/if.h>
+#include <utmpx.h>
+#include <unistd.h>
+#include <sys/ps.h>
+#include <builtins.h>
+#include <termios.h>
+#include <sys/msg.h>
+#if defined(__clang__)
+#include "csrsic.h"
+#else
+#include "//'SYS1.SAMPLIB(CSRSIC)'"
+#endif
+
+#define CVT_PTR 0x10
+#define PSA_PTR 0x00
+#define CSD_OFFSET 0x294
+
+/*
+ Long-term average CPU service used by this logical partition,
+ in millions of service units per hour. If this value is above
+ the partition's defined capacity, the partition will be capped.
+ It is calculated using the physical CPU adjustment factor
+ (RCTPCPUA) so it may not match other measures of service which
+ are based on the logical CPU adjustment factor. It is available
+ if the hardware supports LPAR cluster.
+*/
+#define RCTLACS_OFFSET 0xC4
+
+/* 32-bit count of alive CPUs. This includes both CPs and IFAs */
+#define CSD_NUMBER_ONLINE_CPUS 0xD4
+
+/* Address of system resources manager (SRM) control table */
+#define CVTOPCTP_OFFSET 0x25C
+
+/* Address of the RCT table */
+#define RMCTRCT_OFFSET 0xE4
+
+/* Address of the rsm control and enumeration area. */
+#define CVTRCEP_OFFSET 0x490
+
+/*
+ Number of frames currently available to system.
+ Excluded are frames backing perm storage, frames offline, and bad frames.
+*/
+#define RCEPOOL_OFFSET 0x004
+
+/* Total number of frames currently on all available frame queues. */
+#define RCEAFC_OFFSET 0x088
+
+/* CPC model length from the CSRSI Service. */
+#define CPCMODEL_LENGTH 16
+
+/* Pointer to the home (current) ASCB. */
+#define PSAAOLD 0x224
+
+/* Pointer to rsm address space block extension. */
+#define ASCBRSME 0x16C
+
+/*
+  Number of frames currently in use by this address space.
+ It does not include 2G frames.
+*/
+#define RAXFMCT 0x2C
+
+/* Thread Entry constants */
+#define PGTH_CURRENT 1
+#define PGTH_LEN 26
+#define PGTHAPATH 0x20
+#pragma linkage(BPX4GTH, OS)
+#pragma linkage(BPX1GTH, OS)
+
+/* TOD Clock resolution in nanoseconds */
+#define TOD_RES 4.096
+
+typedef unsigned data_area_ptr_assign_type;
+
+typedef union {
+ struct {
+#if defined(_LP64)
+ data_area_ptr_assign_type lower;
+#endif
+ data_area_ptr_assign_type assign;
+ };
+ char* deref;
+} data_area_ptr;
+
+
+void uv_loadavg(double avg[3]) {
+ /* TODO: implement the following */
+ avg[0] = 0;
+ avg[1] = 0;
+ avg[2] = 0;
+}
+
+
+int uv__platform_loop_init(uv_loop_t* loop) {
+ uv__os390_epoll* ep;
+
+ ep = epoll_create1(0);
+ loop->ep = ep;
+ if (ep == NULL)
+ return UV__ERR(errno);
+
+ return 0;
+}
+
+
+void uv__platform_loop_delete(uv_loop_t* loop) {
+ if (loop->ep != NULL) {
+ epoll_queue_close(loop->ep);
+ loop->ep = NULL;
+ }
+}
+
+
+uint64_t uv__hrtime(uv_clocktype_t type) {
+ unsigned long long timestamp;
+ __stckf(&timestamp);
+ /* Convert to nanoseconds */
+ return timestamp / TOD_RES;
+}
+
+
+/*
+ Get the exe path using the thread entry information
+ in the address space.
+*/
+static int getexe(const int pid, char* buf, size_t len) {
+ struct {
+ int pid;
+ int thid[2];
+ char accesspid;
+ char accessthid;
+ char asid[2];
+ char loginname[8];
+ char flag;
+ char len;
+ } Input_data;
+
+ union {
+ struct {
+ char gthb[4];
+ int pid;
+ int thid[2];
+ char accesspid;
+ char accessthid[3];
+ int lenused;
+ int offsetProcess;
+ int offsetConTTY;
+ int offsetPath;
+ int offsetCommand;
+ int offsetFileData;
+ int offsetThread;
+ } Output_data;
+ char buf[2048];
+ } Output_buf;
+
+ struct Output_path_type {
+ char gthe[4];
+ short int len;
+ char path[1024];
+ };
+
+ int Input_length;
+ int Output_length;
+ void* Input_address;
+ void* Output_address;
+ struct Output_path_type* Output_path;
+ int rv;
+ int rc;
+ int rsn;
+
+ Input_length = PGTH_LEN;
+ Output_length = sizeof(Output_buf);
+ Output_address = &Output_buf;
+ Input_address = &Input_data;
+ memset(&Input_data, 0, sizeof Input_data);
+ Input_data.flag |= PGTHAPATH;
+ Input_data.pid = pid;
+ Input_data.accesspid = PGTH_CURRENT;
+
+#ifdef _LP64
+ BPX4GTH(&Input_length,
+ &Input_address,
+ &Output_length,
+ &Output_address,
+ &rv,
+ &rc,
+ &rsn);
+#else
+ BPX1GTH(&Input_length,
+ &Input_address,
+ &Output_length,
+ &Output_address,
+ &rv,
+ &rc,
+ &rsn);
+#endif
+
+ if (rv == -1) {
+ errno = rc;
+ return -1;
+ }
+
+ /* Check highest byte to ensure data availability */
+ assert(((Output_buf.Output_data.offsetPath >>24) & 0xFF) == 'A');
+
+ /* Get the offset from the lowest 3 bytes */
+ Output_path = (struct Output_path_type*) ((char*) (&Output_buf) +
+ (Output_buf.Output_data.offsetPath & 0x00FFFFFF));
+
+ if (Output_path->len >= len) {
+ errno = ENOBUFS;
+ return -1;
+ }
+
+ uv__strscpy(buf, Output_path->path, len);
+
+ return 0;
+}
+
+
+/*
+ * We could use a static buffer for the path manipulations needed outside of
+ * this function, but it may be called by multiple consumers and we don't
+ * want to risk a race condition in the use of snprintf.
+ * There is no direct way of getting the exe path on z/OS, either through
+ * /procfs or through libc APIs. The approach below is to parse argv[0] and
+ * use it in conjunction with the PATH environment variable to craft the path.
+ */
+int uv_exepath(char* buffer, size_t* size) {
+ int res;
+ char args[PATH_MAX];
+ int pid;
+
+ if (buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+ pid = getpid();
+ res = getexe(pid, args, sizeof(args));
+ if (res < 0)
+ return UV_EINVAL;
+
+ return uv__search_path(args, buffer, size);
+}
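+
+/* Usage sketch, for illustration only and guarded out of the build: the
+ * buffer size is an arbitrary example.
+ */
+#if 0
+static void example_exepath(void) {
+  char exe[PATH_MAX];
+  size_t size = sizeof(exe);
+
+  if (uv_exepath(exe, &size) == 0) {
+    /* 'exe' now holds the executable's path and 'size' its length. */
+  }
+}
+#endif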
+
+
+uint64_t uv_get_free_memory(void) {
+ uint64_t freeram;
+
+ data_area_ptr cvt = {0};
+ data_area_ptr rcep = {0};
+ cvt.assign = *(data_area_ptr_assign_type*)(CVT_PTR);
+ rcep.assign = *(data_area_ptr_assign_type*)(cvt.deref + CVTRCEP_OFFSET);
+ freeram = *((uint64_t*)(rcep.deref + RCEAFC_OFFSET)) * 4;
+ return freeram;
+}
+
+
+uint64_t uv_get_total_memory(void) {
+ uint64_t totalram;
+
+ data_area_ptr cvt = {0};
+ data_area_ptr rcep = {0};
+ cvt.assign = *(data_area_ptr_assign_type*)(CVT_PTR);
+ rcep.assign = *(data_area_ptr_assign_type*)(cvt.deref + CVTRCEP_OFFSET);
+ totalram = *((uint64_t*)(rcep.deref + RCEPOOL_OFFSET)) * 4;
+ return totalram;
+}
+
+
+uint64_t uv_get_constrained_memory(void) {
+ return 0; /* Memory constraints are unknown. */
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+ char* ascb;
+ char* rax;
+ size_t nframes;
+
+ ascb = *(char* __ptr32 *)(PSA_PTR + PSAAOLD);
+ rax = *(char* __ptr32 *)(ascb + ASCBRSME);
+ nframes = *(unsigned int*)(rax + RAXFMCT);
+
+ *rss = nframes * sysconf(_SC_PAGESIZE);
+ return 0;
+}
+
+
+int uv_uptime(double* uptime) {
+  struct utmpx u;
+ struct utmpx *v;
+ time64_t t;
+
+ u.ut_type = BOOT_TIME;
+ v = getutxid(&u);
+ if (v == NULL)
+ return -1;
+ *uptime = difftime64(time64(&t), v->ut_tv.tv_sec);
+ return 0;
+}
+
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+ uv_cpu_info_t* cpu_info;
+ int idx;
+ siv1v2 info;
+ data_area_ptr cvt = {0};
+ data_area_ptr csd = {0};
+ data_area_ptr rmctrct = {0};
+ data_area_ptr cvtopctp = {0};
+ int cpu_usage_avg;
+
+ cvt.assign = *(data_area_ptr_assign_type*)(CVT_PTR);
+
+ csd.assign = *((data_area_ptr_assign_type *) (cvt.deref + CSD_OFFSET));
+ cvtopctp.assign = *((data_area_ptr_assign_type *) (cvt.deref + CVTOPCTP_OFFSET));
+ rmctrct.assign = *((data_area_ptr_assign_type *) (cvtopctp.deref + RMCTRCT_OFFSET));
+
+ *count = *((int*) (csd.deref + CSD_NUMBER_ONLINE_CPUS));
+ cpu_usage_avg = *((unsigned short int*) (rmctrct.deref + RCTLACS_OFFSET));
+
+ *cpu_infos = uv__malloc(*count * sizeof(uv_cpu_info_t));
+ if (!*cpu_infos)
+ return UV_ENOMEM;
+
+ cpu_info = *cpu_infos;
+ idx = 0;
+ while (idx < *count) {
+ cpu_info->speed = *(int*)(info.siv1v2si22v1.si22v1cpucapability);
+ cpu_info->model = uv__malloc(CPCMODEL_LENGTH + 1);
+ memset(cpu_info->model, '\0', CPCMODEL_LENGTH + 1);
+ memcpy(cpu_info->model, info.siv1v2si11v1.si11v1cpcmodel, CPCMODEL_LENGTH);
+ cpu_info->cpu_times.user = cpu_usage_avg;
+ /* TODO: implement the following */
+ cpu_info->cpu_times.sys = 0;
+ cpu_info->cpu_times.idle = 0;
+ cpu_info->cpu_times.irq = 0;
+ cpu_info->cpu_times.nice = 0;
+ ++cpu_info;
+ ++idx;
+ }
+
+ return 0;
+}
+
+
+static int uv__interface_addresses_v6(uv_interface_address_t** addresses,
+ int* count) {
+ uv_interface_address_t* address;
+ int sockfd;
+ int maxsize;
+ __net_ifconf6header_t ifc;
+ __net_ifconf6entry_t* ifr;
+ __net_ifconf6entry_t* p;
+ __net_ifconf6entry_t flg;
+
+ *count = 0;
+ /* Assume maximum buffer size allowable */
+ maxsize = 16384;
+
+ if (0 > (sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP)))
+ return UV__ERR(errno);
+
+ ifc.__nif6h_version = 1;
+ ifc.__nif6h_buflen = maxsize;
+  ifc.__nif6h_buffer = uv__calloc(1, maxsize);
+
+ if (ioctl(sockfd, SIOCGIFCONF6, &ifc) == -1) {
+ uv__close(sockfd);
+ return UV__ERR(errno);
+ }
+
+
+ *count = 0;
+ ifr = (__net_ifconf6entry_t*)(ifc.__nif6h_buffer);
+ while ((char*)ifr < (char*)ifc.__nif6h_buffer + ifc.__nif6h_buflen) {
+ p = ifr;
+ ifr = (__net_ifconf6entry_t*)((char*)ifr + ifc.__nif6h_entrylen);
+
+ if (!(p->__nif6e_addr.sin6_family == AF_INET6 ||
+ p->__nif6e_addr.sin6_family == AF_INET))
+ continue;
+
+ if (!(p->__nif6e_flags & _NIF6E_FLAGS_ON_LINK_ACTIVE))
+ continue;
+
+ ++(*count);
+ }
+
+ /* Alloc the return interface structs */
+ *addresses = uv__malloc(*count * sizeof(uv_interface_address_t));
+ if (!(*addresses)) {
+ uv__close(sockfd);
+ return UV_ENOMEM;
+ }
+ address = *addresses;
+
+ ifr = (__net_ifconf6entry_t*)(ifc.__nif6h_buffer);
+ while ((char*)ifr < (char*)ifc.__nif6h_buffer + ifc.__nif6h_buflen) {
+ p = ifr;
+ ifr = (__net_ifconf6entry_t*)((char*)ifr + ifc.__nif6h_entrylen);
+
+ if (!(p->__nif6e_addr.sin6_family == AF_INET6 ||
+ p->__nif6e_addr.sin6_family == AF_INET))
+ continue;
+
+ if (!(p->__nif6e_flags & _NIF6E_FLAGS_ON_LINK_ACTIVE))
+ continue;
+
+ /* All conditions above must match count loop */
+
+ address->name = uv__strdup(p->__nif6e_name);
+
+ if (p->__nif6e_addr.sin6_family == AF_INET6)
+ address->address.address6 = *((struct sockaddr_in6*) &p->__nif6e_addr);
+ else
+ address->address.address4 = *((struct sockaddr_in*) &p->__nif6e_addr);
+
+ /* TODO: Retrieve netmask using SIOCGIFNETMASK ioctl */
+
+    address->is_internal = p->__nif6e_flags & _NIF6E_FLAGS_LOOPBACK ? 1 : 0;
+ memset(address->phys_addr, 0, sizeof(address->phys_addr));
+ address++;
+ }
+
+ uv__close(sockfd);
+ return 0;
+}
+
+
+int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
+ uv_interface_address_t* address;
+ int sockfd;
+ int maxsize;
+ struct ifconf ifc;
+ struct ifreq flg;
+ struct ifreq* ifr;
+ struct ifreq* p;
+ int count_v6;
+
+ *count = 0;
+ *addresses = NULL;
+
+ /* get the ipv6 addresses first */
+ uv_interface_address_t* addresses_v6;
+ uv__interface_addresses_v6(&addresses_v6, &count_v6);
+
+ /* now get the ipv4 addresses */
+
+ /* Assume maximum buffer size allowable */
+ maxsize = 16384;
+
+ sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
+ if (0 > sockfd)
+ return UV__ERR(errno);
+
+ ifc.ifc_req = uv__calloc(1, maxsize);
+ ifc.ifc_len = maxsize;
+ if (ioctl(sockfd, SIOCGIFCONF, &ifc) == -1) {
+ uv__close(sockfd);
+ return UV__ERR(errno);
+ }
+
+#define MAX(a,b) (((a)>(b))?(a):(b))
+#define ADDR_SIZE(p) MAX((p).sa_len, sizeof(p))
+
+ /* Count all up and running ipv4/ipv6 addresses */
+ ifr = ifc.ifc_req;
+ while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) {
+ p = ifr;
+ ifr = (struct ifreq*)
+ ((char*)ifr + sizeof(ifr->ifr_name) + ADDR_SIZE(ifr->ifr_addr));
+
+ if (!(p->ifr_addr.sa_family == AF_INET6 ||
+ p->ifr_addr.sa_family == AF_INET))
+ continue;
+
+ memcpy(flg.ifr_name, p->ifr_name, sizeof(flg.ifr_name));
+ if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1) {
+ uv__close(sockfd);
+ return UV__ERR(errno);
+ }
+
+ if (!(flg.ifr_flags & IFF_UP && flg.ifr_flags & IFF_RUNNING))
+ continue;
+
+ (*count)++;
+ }
+
+ if (*count == 0) {
+ uv__close(sockfd);
+ return 0;
+ }
+
+ /* Alloc the return interface structs */
+ *addresses = uv__malloc((*count + count_v6) *
+ sizeof(uv_interface_address_t));
+
+ if (!(*addresses)) {
+ uv__close(sockfd);
+ return UV_ENOMEM;
+ }
+ address = *addresses;
+
+ /* copy over the ipv6 addresses */
+ memcpy(address, addresses_v6, count_v6 * sizeof(uv_interface_address_t));
+ address += count_v6;
+ *count += count_v6;
+ uv__free(addresses_v6);
+
+ ifr = ifc.ifc_req;
+ while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) {
+ p = ifr;
+ ifr = (struct ifreq*)
+ ((char*)ifr + sizeof(ifr->ifr_name) + ADDR_SIZE(ifr->ifr_addr));
+
+ if (!(p->ifr_addr.sa_family == AF_INET6 ||
+ p->ifr_addr.sa_family == AF_INET))
+ continue;
+
+ memcpy(flg.ifr_name, p->ifr_name, sizeof(flg.ifr_name));
+ if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1) {
+ uv__close(sockfd);
+ return UV_ENOSYS;
+ }
+
+ if (!(flg.ifr_flags & IFF_UP && flg.ifr_flags & IFF_RUNNING))
+ continue;
+
+ /* All conditions above must match count loop */
+
+ address->name = uv__strdup(p->ifr_name);
+
+ if (p->ifr_addr.sa_family == AF_INET6) {
+ address->address.address6 = *((struct sockaddr_in6*) &p->ifr_addr);
+ } else {
+ address->address.address4 = *((struct sockaddr_in*) &p->ifr_addr);
+ }
+
+ address->is_internal = flg.ifr_flags & IFF_LOOPBACK ? 1 : 0;
+ memset(address->phys_addr, 0, sizeof(address->phys_addr));
+ address++;
+ }
+
+#undef ADDR_SIZE
+#undef MAX
+
+ uv__close(sockfd);
+ return 0;
+}
+
+
+void uv_free_interface_addresses(uv_interface_address_t* addresses,
+ int count) {
+ int i;
+ for (i = 0; i < count; ++i)
+ uv__free(addresses[i].name);
+ uv__free(addresses);
+}
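+
+/* Usage sketch, for illustration only and guarded out of the build: it shows
+ * the expected pairing of uv_interface_addresses() with
+ * uv_free_interface_addresses().
+ */
+#if 0
+static void example_interfaces(void) {
+  uv_interface_address_t* addrs;
+  int count;
+  int i;
+
+  if (uv_interface_addresses(&addrs, &count) == 0) {
+    for (i = 0; i < count; i++) {
+      /* addrs[i].name, addrs[i].is_internal and addrs[i].address describe
+       * one up-and-running interface address.
+       */
+    }
+    uv_free_interface_addresses(addrs, count);
+  }
+}
+#endif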
+
+
+void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
+ struct epoll_event* events;
+ struct epoll_event dummy;
+ uintptr_t i;
+ uintptr_t nfds;
+
+ assert(loop->watchers != NULL);
+ assert(fd >= 0);
+
+ events = (struct epoll_event*) loop->watchers[loop->nwatchers];
+ nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
+ if (events != NULL)
+ /* Invalidate events with same file descriptor */
+ for (i = 0; i < nfds; i++)
+ if ((int) events[i].fd == fd)
+ events[i].fd = -1;
+
+ /* Remove the file descriptor from the epoll. */
+ if (loop->ep != NULL)
+ epoll_ctl(loop->ep, EPOLL_CTL_DEL, fd, &dummy);
+}
+
+
+int uv__io_check_fd(uv_loop_t* loop, int fd) {
+ struct pollfd p[1];
+ int rv;
+
+ p[0].fd = fd;
+ p[0].events = POLLIN;
+
+ do
+ rv = poll(p, 1, 0);
+ while (rv == -1 && errno == EINTR);
+
+ if (rv == -1)
+ abort();
+
+ if (p[0].revents & POLLNVAL)
+ return -1;
+
+ return 0;
+}
+
+
+void uv__fs_event_close(uv_fs_event_t* handle) {
+ uv_fs_event_stop(handle);
+}
+
+
+int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
+ uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
+ return 0;
+}
+
+
+int uv_fs_event_start(uv_fs_event_t* handle, uv_fs_event_cb cb,
+ const char* filename, unsigned int flags) {
+ uv__os390_epoll* ep;
+ _RFIS reg_struct;
+ char* path;
+ int rc;
+
+ if (uv__is_active(handle))
+ return UV_EINVAL;
+
+ ep = handle->loop->ep;
+ assert(ep->msg_queue != -1);
+
+ reg_struct.__rfis_cmd = _RFIS_REG;
+ reg_struct.__rfis_qid = ep->msg_queue;
+ reg_struct.__rfis_type = 1;
+ memcpy(reg_struct.__rfis_utok, &handle, sizeof(handle));
+
+ path = uv__strdup(filename);
+ if (path == NULL)
+ return UV_ENOMEM;
+
+ rc = __w_pioctl(path, _IOCC_REGFILEINT, sizeof(reg_struct), &reg_struct);
+ if (rc != 0)
+ return UV__ERR(errno);
+
+ uv__handle_start(handle);
+ handle->path = path;
+ handle->cb = cb;
+ memcpy(handle->rfis_rftok, reg_struct.__rfis_rftok,
+ sizeof(handle->rfis_rftok));
+
+ return 0;
+}
+
+
+int uv_fs_event_stop(uv_fs_event_t* handle) {
+ uv__os390_epoll* ep;
+ _RFIS reg_struct;
+ int rc;
+
+ if (!uv__is_active(handle))
+ return 0;
+
+ ep = handle->loop->ep;
+ assert(ep->msg_queue != -1);
+
+ reg_struct.__rfis_cmd = _RFIS_UNREG;
+ reg_struct.__rfis_qid = ep->msg_queue;
+ reg_struct.__rfis_type = 1;
+ memcpy(reg_struct.__rfis_rftok, handle->rfis_rftok,
+ sizeof(handle->rfis_rftok));
+
+ /*
+   * Pass "/" as the path argument because the correct path is not
+   * needed for unregistration; the system simply ignores it.
+ */
+ rc = __w_pioctl("/", _IOCC_REGFILEINT, sizeof(reg_struct), &reg_struct);
+ if (rc != 0 && errno != EALREADY && errno != ENOENT)
+ abort();
+
+ uv__handle_stop(handle);
+
+ return 0;
+}
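+
+/* Usage sketch, for illustration only and guarded out of the build: the
+ * watched path and the on_change callback are placeholders.
+ */
+#if 0
+static void on_change(uv_fs_event_t* handle,
+                      const char* filename,
+                      int events,
+                      int status) {
+  /* events is UV_CHANGE or UV_RENAME; on this platform 'filename' is the
+   * basename of the watched path (see os390_message_queue_handler below).
+   */
+}
+
+static void example_fs_event(uv_loop_t* loop) {
+  static uv_fs_event_t handle;
+
+  uv_fs_event_init(loop, &handle);
+  uv_fs_event_start(&handle, on_change, "/tmp/watched-file", 0);
+  /* ... later: uv_fs_event_stop(&handle); */
+}
+#endif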
+
+
+static int os390_message_queue_handler(uv__os390_epoll* ep) {
+ uv_fs_event_t* handle;
+ int msglen;
+ int events;
+ _RFIM msg;
+
+ if (ep->msg_queue == -1)
+ return 0;
+
+ msglen = msgrcv(ep->msg_queue, &msg, sizeof(msg), 0, IPC_NOWAIT);
+
+ if (msglen == -1 && errno == ENOMSG)
+ return 0;
+
+ if (msglen == -1)
+ abort();
+
+ events = 0;
+ if (msg.__rfim_event == _RFIM_ATTR || msg.__rfim_event == _RFIM_WRITE)
+ events = UV_CHANGE;
+ else if (msg.__rfim_event == _RFIM_RENAME)
+ events = UV_RENAME;
+ else
+ /* Some event that we are not interested in. */
+ return 0;
+
+ handle = *(uv_fs_event_t**)(msg.__rfim_utok);
+ handle->cb(handle, uv__basename_r(handle->path), events, 0);
+ return 1;
+}
+
+
+void uv__io_poll(uv_loop_t* loop, int timeout) {
+ static const int max_safe_timeout = 1789569;
+ struct epoll_event events[1024];
+ struct epoll_event* pe;
+ struct epoll_event e;
+ uv__os390_epoll* ep;
+ int real_timeout;
+ QUEUE* q;
+ uv__io_t* w;
+ uint64_t base;
+ int count;
+ int nfds;
+ int fd;
+ int op;
+ int i;
+ int user_timeout;
+ int reset_timeout;
+
+ if (loop->nfds == 0) {
+ assert(QUEUE_EMPTY(&loop->watcher_queue));
+ return;
+ }
+
+ while (!QUEUE_EMPTY(&loop->watcher_queue)) {
+ uv_stream_t* stream;
+
+ q = QUEUE_HEAD(&loop->watcher_queue);
+ QUEUE_REMOVE(q);
+ QUEUE_INIT(q);
+ w = QUEUE_DATA(q, uv__io_t, watcher_queue);
+
+ assert(w->pevents != 0);
+ assert(w->fd >= 0);
+
+    stream = container_of(w, uv_stream_t, io_watcher);
+
+ assert(w->fd < (int) loop->nwatchers);
+
+ e.events = w->pevents;
+ e.fd = w->fd;
+
+ if (w->events == 0)
+ op = EPOLL_CTL_ADD;
+ else
+ op = EPOLL_CTL_MOD;
+
+ /* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching
+ * events, skip the syscall and squelch the events after epoll_wait().
+ */
+ if (epoll_ctl(loop->ep, op, w->fd, &e)) {
+ if (errno != EEXIST)
+ abort();
+
+ assert(op == EPOLL_CTL_ADD);
+
+ /* We've reactivated a file descriptor that's been watched before. */
+ if (epoll_ctl(loop->ep, EPOLL_CTL_MOD, w->fd, &e))
+ abort();
+ }
+
+ w->events = w->pevents;
+ }
+
+ assert(timeout >= -1);
+ base = loop->time;
+ count = 48; /* Benchmarks suggest this gives the best throughput. */
+ real_timeout = timeout;
+ int nevents = 0;
+
+ if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
+ reset_timeout = 1;
+ user_timeout = timeout;
+ timeout = 0;
+ } else {
+ reset_timeout = 0;
+ }
+
+ nfds = 0;
+ for (;;) {
+ /* Only need to set the provider_entry_time if timeout != 0. The function
+ * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
+ */
+ if (timeout != 0)
+ uv__metrics_set_provider_entry_time(loop);
+
+ if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout)
+ timeout = max_safe_timeout;
+
+ nfds = epoll_wait(loop->ep, events,
+ ARRAY_SIZE(events), timeout);
+
+ /* Update loop->time unconditionally. It's tempting to skip the update when
+ * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
+ * operating system didn't reschedule our process while in the syscall.
+ */
+ base = loop->time;
+ SAVE_ERRNO(uv__update_time(loop));
+ if (nfds == 0) {
+ assert(timeout != -1);
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (timeout == -1)
+ continue;
+
+ if (timeout == 0)
+ return;
+
+ /* We may have been inside the system call for longer than |timeout|
+ * milliseconds so we need to update the timestamp to avoid drift.
+ */
+ goto update_timeout;
+ }
+
+ if (nfds == -1) {
+
+ if (errno != EINTR)
+ abort();
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (timeout == -1)
+ continue;
+
+ if (timeout == 0)
+ return;
+
+ /* Interrupted by a signal. Update timeout and poll again. */
+ goto update_timeout;
+ }
+
+
+ assert(loop->watchers != NULL);
+ loop->watchers[loop->nwatchers] = (void*) events;
+ loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
+ for (i = 0; i < nfds; i++) {
+ pe = events + i;
+ fd = pe->fd;
+
+ /* Skip invalidated events, see uv__platform_invalidate_fd */
+ if (fd == -1)
+ continue;
+
+ ep = loop->ep;
+ if (pe->is_msg) {
+ os390_message_queue_handler(ep);
+ continue;
+ }
+
+ assert(fd >= 0);
+ assert((unsigned) fd < loop->nwatchers);
+
+ w = loop->watchers[fd];
+
+ if (w == NULL) {
+ /* File descriptor that we've stopped watching, disarm it.
+ *
+ * Ignore all errors because we may be racing with another thread
+ * when the file descriptor is closed.
+ */
+ epoll_ctl(loop->ep, EPOLL_CTL_DEL, fd, pe);
+ continue;
+ }
+
+      /* Give users only events they're interested in. This prevents spurious
+       * callbacks when a previous callback invocation in this loop has
+       * stopped the current watcher. It also filters out events that the
+       * user has not requested us to watch.
+ */
+ pe->events &= w->pevents | POLLERR | POLLHUP;
+
+ if (pe->events == POLLERR || pe->events == POLLHUP)
+ pe->events |= w->pevents & (POLLIN | POLLOUT);
+
+ if (pe->events != 0) {
+ uv__metrics_update_idle_time(loop);
+ w->cb(loop, w, pe->events);
+ nevents++;
+ }
+ }
+ loop->watchers[loop->nwatchers] = NULL;
+ loop->watchers[loop->nwatchers + 1] = NULL;
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (nevents != 0) {
+ if (nfds == ARRAY_SIZE(events) && --count != 0) {
+ /* Poll for more events but don't block this time. */
+ timeout = 0;
+ continue;
+ }
+ return;
+ }
+
+ if (timeout == 0)
+ return;
+
+ if (timeout == -1)
+ continue;
+
+update_timeout:
+ assert(timeout > 0);
+
+ real_timeout -= (loop->time - base);
+ if (real_timeout <= 0)
+ return;
+
+ timeout = real_timeout;
+ }
+}
+
+void uv__set_process_title(const char* title) {
+ /* do nothing */
+}
+
+int uv__io_fork(uv_loop_t* loop) {
+ /*
+ Nullify the msg queue but don't close it because
+ it is still being used by the parent.
+ */
+ loop->ep = NULL;
+
+ uv__platform_loop_delete(loop);
+ return uv__platform_loop_init(loop);
+}
diff --git a/Utilities/cmlibuv/src/unix/pipe.c b/Utilities/cmlibuv/src/unix/pipe.c
new file mode 100644
index 0000000..52a8fd5
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/pipe.c
@@ -0,0 +1,379 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <assert.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/un.h>
+#include <unistd.h>
+#include <stdlib.h>
+
+
+int uv_pipe_init(uv_loop_t* loop, uv_pipe_t* handle, int ipc) {
+ uv__stream_init(loop, (uv_stream_t*)handle, UV_NAMED_PIPE);
+ handle->shutdown_req = NULL;
+ handle->connect_req = NULL;
+ handle->pipe_fname = NULL;
+ handle->ipc = ipc;
+ return 0;
+}
+
+
+int uv_pipe_bind(uv_pipe_t* handle, const char* name) {
+ struct sockaddr_un saddr;
+ const char* pipe_fname = NULL;
+ int sockfd = -1;
+ int err;
+
+ /* Already bound? */
+ if (uv__stream_fd(handle) >= 0)
+ return UV_EINVAL;
+
+ /* Make a copy of the file name, it outlives this function's scope. */
+ pipe_fname = uv__strdup(name);
+ if (pipe_fname == NULL)
+ return UV_ENOMEM;
+
+ /* We've got a copy, don't touch the original any more. */
+ name = NULL;
+
+ err = uv__socket(AF_UNIX, SOCK_STREAM, 0);
+ if (err < 0)
+ goto err_socket;
+ sockfd = err;
+
+ memset(&saddr, 0, sizeof saddr);
+ uv__strscpy(saddr.sun_path, pipe_fname, sizeof(saddr.sun_path));
+ saddr.sun_family = AF_UNIX;
+
+ if (bind(sockfd, (struct sockaddr*)&saddr, sizeof saddr)) {
+ err = UV__ERR(errno);
+ /* Convert ENOENT to EACCES for compatibility with Windows. */
+ if (err == UV_ENOENT)
+ err = UV_EACCES;
+
+ uv__close(sockfd);
+ goto err_socket;
+ }
+
+ /* Success. */
+ handle->flags |= UV_HANDLE_BOUND;
+ handle->pipe_fname = pipe_fname; /* Is a strdup'ed copy. */
+ handle->io_watcher.fd = sockfd;
+ return 0;
+
+err_socket:
+ uv__free((void*)pipe_fname);
+ return err;
+}
+
+
+int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb) {
+ if (uv__stream_fd(handle) == -1)
+ return UV_EINVAL;
+
+ if (handle->ipc)
+ return UV_EINVAL;
+
+#if defined(__MVS__) || defined(__PASE__)
+ /* On zOS, backlog=0 has undefined behaviour */
+ /* On IBMi PASE, backlog=0 leads to "Connection refused" error */
+ if (backlog == 0)
+ backlog = 1;
+ else if (backlog < 0)
+ backlog = SOMAXCONN;
+#endif
+
+ if (listen(uv__stream_fd(handle), backlog))
+ return UV__ERR(errno);
+
+ handle->connection_cb = cb;
+ handle->io_watcher.cb = uv__server_io;
+ uv__io_start(handle->loop, &handle->io_watcher, POLLIN);
+ return 0;
+}
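+
+/* Usage sketch, for illustration only and guarded out of the build: the
+ * socket path and the on_connection callback are placeholders.  Applications
+ * normally reach uv_pipe_listen() through the generic uv_listen() call.
+ */
+#if 0
+static void on_connection(uv_stream_t* server, int status) {
+  /* Accept the client with uv_accept() and start reading here. */
+}
+
+static int example_pipe_server(uv_loop_t* loop) {
+  static uv_pipe_t server;
+  int err;
+
+  err = uv_pipe_init(loop, &server, 0);       /* 0: no IPC handle passing */
+  if (err == 0)
+    err = uv_pipe_bind(&server, "/tmp/example.sock");
+  if (err == 0)
+    err = uv_pipe_listen(&server, 128, on_connection);
+  return err;                                 /* 0 on success */
+}
+#endif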
+
+
+void uv__pipe_close(uv_pipe_t* handle) {
+ if (handle->pipe_fname) {
+ /*
+ * Unlink the file system entity before closing the file descriptor.
+ * Doing it the other way around introduces a race where our process
+ * unlinks a socket with the same name that's just been created by
+ * another thread or process.
+ */
+ unlink(handle->pipe_fname);
+ uv__free((void*)handle->pipe_fname);
+ handle->pipe_fname = NULL;
+ }
+
+ uv__stream_close((uv_stream_t*)handle);
+}
+
+
+int uv_pipe_open(uv_pipe_t* handle, uv_file fd) {
+ int flags;
+ int mode;
+ int err;
+ flags = 0;
+
+ if (uv__fd_exists(handle->loop, fd))
+ return UV_EEXIST;
+
+ do
+ mode = fcntl(fd, F_GETFL);
+ while (mode == -1 && errno == EINTR);
+
+ if (mode == -1)
+ return UV__ERR(errno); /* according to docs, must be EBADF */
+
+ err = uv__nonblock(fd, 1);
+ if (err)
+ return err;
+
+#if defined(__APPLE__) && !defined(CMAKE_BOOTSTRAP)
+ err = uv__stream_try_select((uv_stream_t*) handle, &fd);
+ if (err)
+ return err;
+#endif /* defined(__APPLE__) */
+
+ mode &= O_ACCMODE;
+ if (mode != O_WRONLY)
+ flags |= UV_HANDLE_READABLE;
+ if (mode != O_RDONLY)
+ flags |= UV_HANDLE_WRITABLE;
+
+ return uv__stream_open((uv_stream_t*)handle, fd, flags);
+}
+
+
+void uv_pipe_connect(uv_connect_t* req,
+ uv_pipe_t* handle,
+ const char* name,
+ uv_connect_cb cb) {
+ struct sockaddr_un saddr;
+ int new_sock;
+ int err;
+ int r;
+
+ new_sock = (uv__stream_fd(handle) == -1);
+
+ if (new_sock) {
+ err = uv__socket(AF_UNIX, SOCK_STREAM, 0);
+ if (err < 0)
+ goto out;
+ handle->io_watcher.fd = err;
+ }
+
+ memset(&saddr, 0, sizeof saddr);
+ uv__strscpy(saddr.sun_path, name, sizeof(saddr.sun_path));
+ saddr.sun_family = AF_UNIX;
+
+ do {
+ r = connect(uv__stream_fd(handle),
+ (struct sockaddr*)&saddr, sizeof saddr);
+ }
+ while (r == -1 && errno == EINTR);
+
+ if (r == -1 && errno != EINPROGRESS) {
+ err = UV__ERR(errno);
+#if defined(__CYGWIN__) || defined(__MSYS__)
+ /* EBADF is supposed to mean that the socket fd is bad, but
+ Cygwin reports EBADF instead of ENOTSOCK when the file is
+ not a socket. We do not expect to see a bad fd here
+ (e.g. due to new_sock), so translate the error. */
+ if (err == UV_EBADF)
+ err = UV_ENOTSOCK;
+#endif
+ goto out;
+ }
+
+ err = 0;
+ if (new_sock) {
+ err = uv__stream_open((uv_stream_t*)handle,
+ uv__stream_fd(handle),
+ UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
+ }
+
+ if (err == 0)
+ uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
+
+out:
+ handle->delayed_error = err;
+ handle->connect_req = req;
+
+ uv__req_init(handle->loop, req, UV_CONNECT);
+ req->handle = (uv_stream_t*)handle;
+ req->cb = cb;
+ QUEUE_INIT(&req->queue);
+
+ /* Force callback to run on next tick in case of error. */
+ if (err)
+ uv__io_feed(handle->loop, &handle->io_watcher);
+
+}
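+
+/* Usage sketch, for illustration only and guarded out of the build: the
+ * socket path and the on_connect callback are placeholders.  Connection
+ * errors are reported through the callback's status argument.
+ */
+#if 0
+static void on_connect(uv_connect_t* req, int status) {
+  /* status is 0 on success, otherwise a UV_* error code. */
+}
+
+static void example_pipe_client(uv_loop_t* loop) {
+  static uv_pipe_t client;
+  static uv_connect_t req;
+
+  uv_pipe_init(loop, &client, 0);
+  uv_pipe_connect(&req, &client, "/tmp/example.sock", on_connect);
+  /* Running the loop (uv_run) drives the connection to completion. */
+}
+#endif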
+
+
+static int uv__pipe_getsockpeername(const uv_pipe_t* handle,
+ uv__peersockfunc func,
+ char* buffer,
+ size_t* size) {
+ struct sockaddr_un sa;
+ socklen_t addrlen;
+ int err;
+
+ addrlen = sizeof(sa);
+ memset(&sa, 0, addrlen);
+ err = uv__getsockpeername((const uv_handle_t*) handle,
+ func,
+ (struct sockaddr*) &sa,
+ (int*) &addrlen);
+ if (err < 0) {
+ *size = 0;
+ return err;
+ }
+
+#if defined(__linux__)
+ if (sa.sun_path[0] == 0)
+ /* Linux abstract namespace */
+ addrlen -= offsetof(struct sockaddr_un, sun_path);
+ else
+#endif
+ addrlen = strlen(sa.sun_path);
+
+
+ if ((size_t)addrlen >= *size) {
+ *size = addrlen + 1;
+ return UV_ENOBUFS;
+ }
+
+ memcpy(buffer, sa.sun_path, addrlen);
+ *size = addrlen;
+
+ /* only null-terminate if it's not an abstract socket */
+ if (buffer[0] != '\0')
+ buffer[addrlen] = '\0';
+
+ return 0;
+}
+
+
+int uv_pipe_getsockname(const uv_pipe_t* handle, char* buffer, size_t* size) {
+ return uv__pipe_getsockpeername(handle, getsockname, buffer, size);
+}
+
+
+int uv_pipe_getpeername(const uv_pipe_t* handle, char* buffer, size_t* size) {
+ return uv__pipe_getsockpeername(handle, getpeername, buffer, size);
+}
+
+
+void uv_pipe_pending_instances(uv_pipe_t* handle, int count) {
+}
+
+
+int uv_pipe_pending_count(uv_pipe_t* handle) {
+ uv__stream_queued_fds_t* queued_fds;
+
+ if (!handle->ipc)
+ return 0;
+
+ if (handle->accepted_fd == -1)
+ return 0;
+
+ if (handle->queued_fds == NULL)
+ return 1;
+
+ queued_fds = handle->queued_fds;
+ return queued_fds->offset + 1;
+}
+
+
+uv_handle_type uv_pipe_pending_type(uv_pipe_t* handle) {
+ if (!handle->ipc)
+ return UV_UNKNOWN_HANDLE;
+
+ if (handle->accepted_fd == -1)
+ return UV_UNKNOWN_HANDLE;
+ else
+ return uv__handle_type(handle->accepted_fd);
+}
+
+
+int uv_pipe_chmod(uv_pipe_t* handle, int mode) {
+ unsigned desired_mode;
+ struct stat pipe_stat;
+ char* name_buffer;
+ size_t name_len;
+ int r;
+
+ if (handle == NULL || uv__stream_fd(handle) == -1)
+ return UV_EBADF;
+
+ if (mode != UV_READABLE &&
+ mode != UV_WRITABLE &&
+ mode != (UV_WRITABLE | UV_READABLE))
+ return UV_EINVAL;
+
+  /* Unfortunately fchmod does not work on all platforms, so we use chmod instead. */
+ name_len = 0;
+ r = uv_pipe_getsockname(handle, NULL, &name_len);
+ if (r != UV_ENOBUFS)
+ return r;
+
+ name_buffer = uv__malloc(name_len);
+ if (name_buffer == NULL)
+ return UV_ENOMEM;
+
+ r = uv_pipe_getsockname(handle, name_buffer, &name_len);
+ if (r != 0) {
+ uv__free(name_buffer);
+ return r;
+ }
+
+ /* stat must be used as fstat has a bug on Darwin */
+ if (stat(name_buffer, &pipe_stat) == -1) {
+ uv__free(name_buffer);
+ return -errno;
+ }
+
+ desired_mode = 0;
+ if (mode & UV_READABLE)
+ desired_mode |= S_IRUSR | S_IRGRP | S_IROTH;
+ if (mode & UV_WRITABLE)
+ desired_mode |= S_IWUSR | S_IWGRP | S_IWOTH;
+
+ /* Exit early if pipe already has desired mode. */
+ if ((pipe_stat.st_mode & desired_mode) == desired_mode) {
+ uv__free(name_buffer);
+ return 0;
+ }
+
+ pipe_stat.st_mode |= desired_mode;
+
+ r = chmod(name_buffer, pipe_stat.st_mode);
+ uv__free(name_buffer);
+
+ return r != -1 ? 0 : UV__ERR(errno);
+}
diff --git a/Utilities/cmlibuv/src/unix/poll.c b/Utilities/cmlibuv/src/unix/poll.c
new file mode 100644
index 0000000..3d5022b
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/poll.c
@@ -0,0 +1,150 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <unistd.h>
+#include <assert.h>
+#include <errno.h>
+
+
+static void uv__poll_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
+ uv_poll_t* handle;
+ int pevents;
+
+ handle = container_of(w, uv_poll_t, io_watcher);
+
+ /*
+   * As documented in the kernel source (fs/kernfs/file.c #780),
+   * poll() will return POLLERR|POLLPRI in the case of sysfs
+   * polling. This does not happen for out-of-band TCP messages.
+   *
+   * The above is the case on (at least) FreeBSD and Linux.
+   *
+   * So to properly tell a POLLPRI apart from a POLLERR we need
+   * to check for both.
+ */
+ if ((events & POLLERR) && !(events & UV__POLLPRI)) {
+ uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
+ uv__handle_stop(handle);
+ handle->poll_cb(handle, UV_EBADF, 0);
+ return;
+ }
+
+ pevents = 0;
+ if (events & POLLIN)
+ pevents |= UV_READABLE;
+ if (events & UV__POLLPRI)
+ pevents |= UV_PRIORITIZED;
+ if (events & POLLOUT)
+ pevents |= UV_WRITABLE;
+ if (events & UV__POLLRDHUP)
+ pevents |= UV_DISCONNECT;
+
+ handle->poll_cb(handle, 0, pevents);
+}
+
+
+int uv_poll_init(uv_loop_t* loop, uv_poll_t* handle, int fd) {
+ int err;
+
+ if (uv__fd_exists(loop, fd))
+ return UV_EEXIST;
+
+ err = uv__io_check_fd(loop, fd);
+ if (err)
+ return err;
+
+ /* If ioctl(FIONBIO) reports ENOTTY, try fcntl(F_GETFL) + fcntl(F_SETFL).
+ * Workaround for e.g. kqueue fds not supporting ioctls.
+ */
+ err = uv__nonblock(fd, 1);
+ if (err == UV_ENOTTY)
+ if (uv__nonblock == uv__nonblock_ioctl)
+ err = uv__nonblock_fcntl(fd, 1);
+
+ if (err)
+ return err;
+
+ uv__handle_init(loop, (uv_handle_t*) handle, UV_POLL);
+ uv__io_init(&handle->io_watcher, uv__poll_io, fd);
+ handle->poll_cb = NULL;
+ return 0;
+}
+
+
+int uv_poll_init_socket(uv_loop_t* loop, uv_poll_t* handle,
+ uv_os_sock_t socket) {
+ return uv_poll_init(loop, handle, socket);
+}
+
+
+static void uv__poll_stop(uv_poll_t* handle) {
+ uv__io_stop(handle->loop,
+ &handle->io_watcher,
+ POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
+ uv__handle_stop(handle);
+ uv__platform_invalidate_fd(handle->loop, handle->io_watcher.fd);
+}
+
+
+int uv_poll_stop(uv_poll_t* handle) {
+ assert(!uv__is_closing(handle));
+ uv__poll_stop(handle);
+ return 0;
+}
+
+
+int uv_poll_start(uv_poll_t* handle, int pevents, uv_poll_cb poll_cb) {
+ int events;
+
+ assert((pevents & ~(UV_READABLE | UV_WRITABLE | UV_DISCONNECT |
+ UV_PRIORITIZED)) == 0);
+ assert(!uv__is_closing(handle));
+
+ uv__poll_stop(handle);
+
+ if (pevents == 0)
+ return 0;
+
+ events = 0;
+ if (pevents & UV_READABLE)
+ events |= POLLIN;
+ if (pevents & UV_PRIORITIZED)
+ events |= UV__POLLPRI;
+ if (pevents & UV_WRITABLE)
+ events |= POLLOUT;
+ if (pevents & UV_DISCONNECT)
+ events |= UV__POLLRDHUP;
+
+ uv__io_start(handle->loop, &handle->io_watcher, events);
+ uv__handle_start(handle);
+ handle->poll_cb = poll_cb;
+
+ return 0;
+}
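+
+/* Usage sketch, for illustration only and guarded out of the build: the fd
+ * and the on_poll callback are placeholders.
+ */
+#if 0
+static void on_poll(uv_poll_t* handle, int status, int events) {
+  if (status == 0 && (events & UV_READABLE)) {
+    /* The fd is readable; read from it here. */
+  }
+}
+
+static void example_poll(uv_loop_t* loop, int fd) {
+  static uv_poll_t handle;
+
+  uv_poll_init(loop, &handle, fd);
+  uv_poll_start(&handle, UV_READABLE | UV_WRITABLE, on_poll);
+  /* ... later: uv_poll_stop(&handle); */
+}
+#endif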
+
+
+void uv__poll_close(uv_poll_t* handle) {
+ uv__poll_stop(handle);
+}
diff --git a/Utilities/cmlibuv/src/unix/posix-hrtime.c b/Utilities/cmlibuv/src/unix/posix-hrtime.c
new file mode 100644
index 0000000..870b45c
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/posix-hrtime.c
@@ -0,0 +1,74 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#if defined(__APPLE__)
+/* Special case for CMake bootstrap: no clock_gettime on macOS < 10.12 */
+
+#ifndef CMAKE_BOOTSTRAP
+#error "This code path meant only for use during CMake bootstrap."
+#endif
+
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+
+uint64_t uv__hrtime(uv_clocktype_t type) {
+ static mach_timebase_info_data_t info;
+
+ if ((ACCESS_ONCE(uint32_t, info.numer) == 0 ||
+ ACCESS_ONCE(uint32_t, info.denom) == 0) &&
+ mach_timebase_info(&info) != KERN_SUCCESS)
+ abort();
+
+ return mach_absolute_time() * info.numer / info.denom;
+}
+
+#elif defined(__hpux)
+/* Special case for CMake bootstrap: no CLOCK_MONOTONIC on HP-UX */
+
+#ifndef CMAKE_BOOTSTRAP
+#error "This code path meant only for use during CMake bootstrap."
+#endif
+
+#include <stdint.h>
+#include <time.h>
+
+uint64_t uv__hrtime(uv_clocktype_t type) {
+ return (uint64_t) gethrtime();
+}
+
+#else
+
+#include <stdint.h>
+#include <time.h>
+
+#undef NANOSEC
+#define NANOSEC ((uint64_t) 1e9)
+
+uint64_t uv__hrtime(uv_clocktype_t type) {
+ struct timespec ts;
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ return (((uint64_t) ts.tv_sec) * NANOSEC + ts.tv_nsec);
+}
+
+#endif
diff --git a/Utilities/cmlibuv/src/unix/posix-poll.c b/Utilities/cmlibuv/src/unix/posix-poll.c
new file mode 100644
index 0000000..0f4bf93
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/posix-poll.c
@@ -0,0 +1,374 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+/* POSIX defines poll() as a portable way to wait on file descriptors.
+ * Here we maintain a dynamically sized array of file descriptors and
+ * events to pass as the first argument to poll().
+ */
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <errno.h>
+#include <unistd.h>
+
+int uv__platform_loop_init(uv_loop_t* loop) {
+ loop->poll_fds = NULL;
+ loop->poll_fds_used = 0;
+ loop->poll_fds_size = 0;
+ loop->poll_fds_iterating = 0;
+ return 0;
+}
+
+void uv__platform_loop_delete(uv_loop_t* loop) {
+ uv__free(loop->poll_fds);
+ loop->poll_fds = NULL;
+}
+
+int uv__io_fork(uv_loop_t* loop) {
+ uv__platform_loop_delete(loop);
+ return uv__platform_loop_init(loop);
+}
+
+/* Allocate or dynamically resize our poll fds array. */
+static void uv__pollfds_maybe_resize(uv_loop_t* loop) {
+ size_t i;
+ size_t n;
+ struct pollfd* p;
+
+ if (loop->poll_fds_used < loop->poll_fds_size)
+ return;
+
+ n = loop->poll_fds_size ? loop->poll_fds_size * 2 : 64;
+ p = uv__reallocf(loop->poll_fds, n * sizeof(*loop->poll_fds));
+ if (p == NULL)
+ abort();
+
+ loop->poll_fds = p;
+ for (i = loop->poll_fds_size; i < n; i++) {
+ loop->poll_fds[i].fd = -1;
+ loop->poll_fds[i].events = 0;
+ loop->poll_fds[i].revents = 0;
+ }
+ loop->poll_fds_size = n;
+}
+
+/* Primitive swap operation on poll fds array elements. */
+static void uv__pollfds_swap(uv_loop_t* loop, size_t l, size_t r) {
+ struct pollfd pfd;
+ pfd = loop->poll_fds[l];
+ loop->poll_fds[l] = loop->poll_fds[r];
+ loop->poll_fds[r] = pfd;
+}
+
+/* Add a watcher's fd to our poll fds array with its pending events. */
+static void uv__pollfds_add(uv_loop_t* loop, uv__io_t* w) {
+ size_t i;
+ struct pollfd* pe;
+
+ /* If the fd is already in the set just update its events. */
+ assert(!loop->poll_fds_iterating);
+ for (i = 0; i < loop->poll_fds_used; ++i) {
+ if (loop->poll_fds[i].fd == w->fd) {
+ loop->poll_fds[i].events = w->pevents;
+ return;
+ }
+ }
+
+ /* Otherwise, allocate a new slot in the set for the fd. */
+ uv__pollfds_maybe_resize(loop);
+ pe = &loop->poll_fds[loop->poll_fds_used++];
+ pe->fd = w->fd;
+ pe->events = w->pevents;
+}
+
+/* Remove a watcher's fd from our poll fds array. */
+static void uv__pollfds_del(uv_loop_t* loop, int fd) {
+ size_t i;
+ assert(!loop->poll_fds_iterating);
+ for (i = 0; i < loop->poll_fds_used;) {
+ if (loop->poll_fds[i].fd == fd) {
+ /* swap to last position and remove */
+ --loop->poll_fds_used;
+ uv__pollfds_swap(loop, i, loop->poll_fds_used);
+ loop->poll_fds[loop->poll_fds_used].fd = -1;
+ loop->poll_fds[loop->poll_fds_used].events = 0;
+ loop->poll_fds[loop->poll_fds_used].revents = 0;
+      /* This function is called with an fd of -1 to purge the invalidated
+       * fds, so there may be multiple entries to remove.
+ */
+ if (-1 != fd)
+ return;
+ } else {
+ /* We must only increment the loop counter when the fds do not match.
+ * Otherwise, when we are purging an invalidated fd, the value just
+ * swapped here from the previous end of the array will be skipped.
+ */
+ ++i;
+ }
+ }
+}
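+
+/* Worked example of the fd == -1 purge above: given live entries with fds
+ * [7, -1, 9, -1], index 1 is swapped with the last slot and dropped, leaving
+ * [7, -1, 9]; the loop then re-examines index 1 (the -1 swapped in from the
+ * end), drops it the same way, and ends with [7, 9].
+ */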
+
+
+void uv__io_poll(uv_loop_t* loop, int timeout) {
+ sigset_t* pset;
+ sigset_t set;
+ uint64_t time_base;
+ uint64_t time_diff;
+ QUEUE* q;
+ uv__io_t* w;
+ size_t i;
+ unsigned int nevents;
+ int nfds;
+ int have_signals;
+ struct pollfd* pe;
+ int fd;
+ int user_timeout;
+ int reset_timeout;
+
+ if (loop->nfds == 0) {
+ assert(QUEUE_EMPTY(&loop->watcher_queue));
+ return;
+ }
+
+ /* Take queued watchers and add their fds to our poll fds array. */
+ while (!QUEUE_EMPTY(&loop->watcher_queue)) {
+ q = QUEUE_HEAD(&loop->watcher_queue);
+ QUEUE_REMOVE(q);
+ QUEUE_INIT(q);
+
+ w = QUEUE_DATA(q, uv__io_t, watcher_queue);
+ assert(w->pevents != 0);
+ assert(w->fd >= 0);
+ assert(w->fd < (int) loop->nwatchers);
+
+ uv__pollfds_add(loop, w);
+
+ w->events = w->pevents;
+ }
+
+ /* Prepare a set of signals to block around poll(), if any. */
+ pset = NULL;
+ if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
+ pset = &set;
+ sigemptyset(pset);
+ sigaddset(pset, SIGPROF);
+ }
+
+ assert(timeout >= -1);
+ time_base = loop->time;
+
+ if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
+ reset_timeout = 1;
+ user_timeout = timeout;
+ timeout = 0;
+ } else {
+ reset_timeout = 0;
+ }
+
+ /* Loop calls to poll() and processing of results. If we get some
+ * results from poll() but they turn out not to be interesting to
+ * our caller then we need to loop around and poll() again.
+ */
+ for (;;) {
+ /* Only need to set the provider_entry_time if timeout != 0. The function
+ * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
+ */
+ if (timeout != 0)
+ uv__metrics_set_provider_entry_time(loop);
+
+ if (pset != NULL)
+ if (pthread_sigmask(SIG_BLOCK, pset, NULL))
+ abort();
+ nfds = poll(loop->poll_fds, (nfds_t)loop->poll_fds_used, timeout);
+ if (pset != NULL)
+ if (pthread_sigmask(SIG_UNBLOCK, pset, NULL))
+ abort();
+
+ /* Update loop->time unconditionally. It's tempting to skip the update when
+ * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
+ * operating system didn't reschedule our process while in the syscall.
+ */
+ SAVE_ERRNO(uv__update_time(loop));
+
+ if (nfds == 0) {
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ if (timeout == -1)
+ continue;
+ if (timeout > 0)
+ goto update_timeout;
+ }
+
+ assert(timeout != -1);
+ return;
+ }
+
+ if (nfds == -1) {
+ if (errno != EINTR)
+ abort();
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (timeout == -1)
+ continue;
+
+ if (timeout == 0)
+ return;
+
+ /* Interrupted by a signal. Update timeout and poll again. */
+ goto update_timeout;
+ }
+
+ /* Tell uv__platform_invalidate_fd not to manipulate our array
+ * while we are iterating over it.
+ */
+ loop->poll_fds_iterating = 1;
+
+ /* Initialize a count of events that we care about. */
+ nevents = 0;
+ have_signals = 0;
+
+ /* Loop over the entire poll fds array looking for returned events. */
+ for (i = 0; i < loop->poll_fds_used; i++) {
+ pe = loop->poll_fds + i;
+ fd = pe->fd;
+
+ /* Skip invalidated events, see uv__platform_invalidate_fd. */
+ if (fd == -1)
+ continue;
+
+ assert(fd >= 0);
+ assert((unsigned) fd < loop->nwatchers);
+
+ w = loop->watchers[fd];
+
+ if (w == NULL) {
+ /* File descriptor that we've stopped watching, ignore. */
+ uv__platform_invalidate_fd(loop, fd);
+ continue;
+ }
+
+      /* Filter out events that the user has not requested us to watch
+ * (e.g. POLLNVAL).
+ */
+ pe->revents &= w->pevents | POLLERR | POLLHUP;
+
+ if (pe->revents != 0) {
+ /* Run signal watchers last. */
+ if (w == &loop->signal_io_watcher) {
+ have_signals = 1;
+ } else {
+ uv__metrics_update_idle_time(loop);
+ w->cb(loop, w, pe->revents);
+ }
+
+ nevents++;
+ }
+ }
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (have_signals != 0) {
+ uv__metrics_update_idle_time(loop);
+ loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
+ }
+
+ loop->poll_fds_iterating = 0;
+
+ /* Purge invalidated fds from our poll fds array. */
+ uv__pollfds_del(loop, -1);
+
+ if (have_signals != 0)
+ return; /* Event loop should cycle now so don't poll again. */
+
+ if (nevents != 0)
+ return;
+
+ if (timeout == 0)
+ return;
+
+ if (timeout == -1)
+ continue;
+
+update_timeout:
+ assert(timeout > 0);
+
+ time_diff = loop->time - time_base;
+ if (time_diff >= (uint64_t) timeout)
+ return;
+
+ timeout -= time_diff;
+ }
+}
+
+/* Remove the given fd from our poll fds array because no one
+ * is interested in its events anymore.
+ */
+void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
+ size_t i;
+
+ assert(fd >= 0);
+
+ if (loop->poll_fds_iterating) {
+ /* uv__io_poll is currently iterating. Just invalidate fd. */
+ for (i = 0; i < loop->poll_fds_used; i++)
+ if (loop->poll_fds[i].fd == fd) {
+ loop->poll_fds[i].fd = -1;
+ loop->poll_fds[i].events = 0;
+ loop->poll_fds[i].revents = 0;
+ }
+ } else {
+ /* uv__io_poll is not iterating. Delete fd from the set. */
+ uv__pollfds_del(loop, fd);
+ }
+}
+
+/* Check whether the given fd is supported by poll(). */
+int uv__io_check_fd(uv_loop_t* loop, int fd) {
+ struct pollfd p[1];
+ int rv;
+
+ p[0].fd = fd;
+ p[0].events = POLLIN;
+
+ do
+ rv = poll(p, 1, 0);
+ while (rv == -1 && (errno == EINTR || errno == EAGAIN));
+
+ if (rv == -1)
+ return UV__ERR(errno);
+
+ if (p[0].revents & POLLNVAL)
+ return UV_EINVAL;
+
+ return 0;
+}
diff --git a/Utilities/cmlibuv/src/unix/process.c b/Utilities/cmlibuv/src/unix/process.c
new file mode 100644
index 0000000..08aa2f3
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/process.c
@@ -0,0 +1,650 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <errno.h>
+
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <sched.h>
+
+#if defined(__APPLE__) && !TARGET_OS_IPHONE
+# include <crt_externs.h>
+# define environ (*_NSGetEnviron())
+#else
+extern char **environ;
+#endif
+
+#if defined(__linux__) || defined(__GLIBC__)
+# include <grp.h>
+#endif
+
+#ifndef CMAKE_BOOTSTRAP
+#if defined(__linux__)
+# define uv__cpu_set_t cpu_set_t
+#elif defined(__FreeBSD__)
+# include <sys/param.h>
+# include <sys/cpuset.h>
+# include <pthread_np.h>
+# define uv__cpu_set_t cpuset_t
+#endif
+#endif
+
+static void uv__chld(uv_signal_t* handle, int signum) {
+ uv_process_t* process;
+ uv_loop_t* loop;
+ int exit_status;
+ int term_signal;
+ int status;
+ pid_t pid;
+ QUEUE pending;
+ QUEUE* q;
+ QUEUE* h;
+
+ assert(signum == SIGCHLD);
+
+ QUEUE_INIT(&pending);
+ loop = handle->loop;
+
+ h = &loop->process_handles;
+ q = QUEUE_HEAD(h);
+ while (q != h) {
+ process = QUEUE_DATA(q, uv_process_t, queue);
+ q = QUEUE_NEXT(q);
+
+ do
+ pid = waitpid(process->pid, &status, WNOHANG);
+ while (pid == -1 && errno == EINTR);
+
+ if (pid == 0)
+ continue;
+
+ if (pid == -1) {
+ if (errno != ECHILD)
+ abort();
+ continue;
+ }
+
+ process->status = status;
+ QUEUE_REMOVE(&process->queue);
+ QUEUE_INSERT_TAIL(&pending, &process->queue);
+ }
+
+ h = &pending;
+ q = QUEUE_HEAD(h);
+ while (q != h) {
+ process = QUEUE_DATA(q, uv_process_t, queue);
+ q = QUEUE_NEXT(q);
+
+ QUEUE_REMOVE(&process->queue);
+ QUEUE_INIT(&process->queue);
+ uv__handle_stop(process);
+
+ if (process->exit_cb == NULL)
+ continue;
+
+ exit_status = 0;
+ if (WIFEXITED(process->status))
+ exit_status = WEXITSTATUS(process->status);
+
+ term_signal = 0;
+ if (WIFSIGNALED(process->status))
+ term_signal = WTERMSIG(process->status);
+
+ process->exit_cb(process, exit_status, term_signal);
+ }
+ assert(QUEUE_EMPTY(&pending));
+}
+
+
+static int uv__make_socketpair(int fds[2]) {
+#if defined(__FreeBSD__) || defined(__linux__)
+ if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, fds))
+ return UV__ERR(errno);
+
+ return 0;
+#else
+ int err;
+
+ if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds))
+ return UV__ERR(errno);
+
+ err = uv__cloexec(fds[0], 1);
+ if (err == 0)
+ err = uv__cloexec(fds[1], 1);
+
+ if (err != 0) {
+ uv__close(fds[0]);
+ uv__close(fds[1]);
+ return UV__ERR(errno);
+ }
+
+ return 0;
+#endif
+}
+
+
+int uv__make_pipe(int fds[2], int flags) {
+#if defined(__FreeBSD__) || defined(__linux__)
+ if (pipe2(fds, flags | O_CLOEXEC))
+ return UV__ERR(errno);
+
+ return 0;
+#else
+ if (pipe(fds))
+ return UV__ERR(errno);
+
+ if (uv__cloexec(fds[0], 1))
+ goto fail;
+
+ if (uv__cloexec(fds[1], 1))
+ goto fail;
+
+ if (flags & UV__F_NONBLOCK) {
+ if (uv__nonblock(fds[0], 1))
+ goto fail;
+
+ if (uv__nonblock(fds[1], 1))
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ uv__close(fds[0]);
+ uv__close(fds[1]);
+ return UV__ERR(errno);
+#endif
+}
+
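On platforms without pipe2(), the fallback above creates the pipe first and then marks each end close-on-exec one fd at a time. A rough standalone sketch of that fallback using plain fcntl(2) (make_cloexec_pipe is a hypothetical name, not a libuv function):

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

/* Create a pipe and mark both ends FD_CLOEXEC; returns 0 or -errno. */
static int make_cloexec_pipe(int fds[2]) {
  int flags;
  int err;
  int i;

  if (pipe(fds))
    return -errno;

  for (i = 0; i < 2; i++) {
    flags = fcntl(fds[i], F_GETFD);
    if (flags == -1 || fcntl(fds[i], F_SETFD, flags | FD_CLOEXEC) == -1) {
      err = -errno;
      close(fds[0]);
      close(fds[1]);
      return err;
    }
  }

  return 0;
}

Note the window between pipe() and fcntl(): a concurrent fork() could inherit the fds before FD_CLOEXEC is set, which is one reason uv_spawn later holds loop->cloexec_lock around fork().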
+
+/*
+ * Used for initializing the stdio containers passed in options.stdio. Returns
+ * zero on success. See also the cleanup section in uv_spawn().
+ */
+static int uv__process_init_stdio(uv_stdio_container_t* container, int fds[2]) {
+ int mask;
+ int fd;
+
+ mask = UV_IGNORE | UV_CREATE_PIPE | UV_INHERIT_FD | UV_INHERIT_STREAM;
+
+ switch (container->flags & mask) {
+ case UV_IGNORE:
+ return 0;
+
+ case UV_CREATE_PIPE:
+ assert(container->data.stream != NULL);
+ if (container->data.stream->type != UV_NAMED_PIPE)
+ return UV_EINVAL;
+ else
+ return uv__make_socketpair(fds);
+
+ case UV_INHERIT_FD:
+ case UV_INHERIT_STREAM:
+ if (container->flags & UV_INHERIT_FD)
+ fd = container->data.fd;
+ else
+ fd = uv__stream_fd(container->data.stream);
+
+ if (fd == -1)
+ return UV_EINVAL;
+
+ fds[1] = fd;
+ return 0;
+
+ default:
+ assert(0 && "Unexpected flags");
+ return UV_EINVAL;
+ }
+}
+
+
+static int uv__process_open_stream(uv_stdio_container_t* container,
+ int pipefds[2]) {
+ int flags;
+ int err;
+
+ if (!(container->flags & UV_CREATE_PIPE) || pipefds[0] < 0)
+ return 0;
+
+ err = uv__close(pipefds[1]);
+ if (err != 0)
+ abort();
+
+ pipefds[1] = -1;
+ uv__nonblock(pipefds[0], 1);
+
+ flags = 0;
+ if (container->flags & UV_WRITABLE_PIPE)
+ flags |= UV_HANDLE_READABLE;
+ if (container->flags & UV_READABLE_PIPE)
+ flags |= UV_HANDLE_WRITABLE;
+
+ return uv__stream_open(container->data.stream, pipefds[0], flags);
+}
+
+
+static void uv__process_close_stream(uv_stdio_container_t* container) {
+ if (!(container->flags & UV_CREATE_PIPE)) return;
+ uv__stream_close(container->data.stream);
+}
+
+
+static void uv__write_int(int fd, int val) {
+ ssize_t n;
+
+ do
+ n = write(fd, &val, sizeof(val));
+ while (n == -1 && errno == EINTR);
+
+ if (n == -1 && errno == EPIPE)
+ return; /* parent process has quit */
+
+ assert(n == sizeof(val));
+}
+
+
+#if !(defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH))
+/* execvp is marked __WATCHOS_PROHIBITED __TVOS_PROHIBITED, so must be
+ * avoided. Since this isn't called on those targets, the function
+ * doesn't even need to be defined for them.
+ */
+static void uv__process_child_init(const uv_process_options_t* options,
+ int stdio_count,
+ int (*pipes)[2],
+ int error_fd) {
+ sigset_t set;
+ int close_fd;
+ int use_fd;
+ int err;
+ int fd;
+ int n;
+#ifndef CMAKE_BOOTSTRAP
+#if defined(__linux__) || defined(__FreeBSD__)
+ int r;
+ int i;
+ int cpumask_size;
+ uv__cpu_set_t cpuset;
+#endif
+#endif
+
+ if (options->flags & UV_PROCESS_DETACHED)
+ setsid();
+
+  /* First duplicate any source fd that is lower than its target slot, moving
+   * it to a descriptor at or above stdio_count; otherwise a later dup2() into
+   * a low target fd could clobber a source fd we still need. Example: swapping
+   * stdout and stderr; without this, fd 2 (stderr) would be duplicated into
+   * fd 1, making both stdout and stderr go to the same fd, which was not the
+   * intention. */
+ for (fd = 0; fd < stdio_count; fd++) {
+ use_fd = pipes[fd][1];
+ if (use_fd < 0 || use_fd >= fd)
+ continue;
+ pipes[fd][1] = fcntl(use_fd, F_DUPFD, stdio_count);
+ if (pipes[fd][1] == -1) {
+ uv__write_int(error_fd, UV__ERR(errno));
+ _exit(127);
+ }
+ }
+
+ for (fd = 0; fd < stdio_count; fd++) {
+ close_fd = pipes[fd][0];
+ use_fd = pipes[fd][1];
+
+ if (use_fd < 0) {
+ if (fd >= 3)
+ continue;
+ else {
+ /* redirect stdin, stdout and stderr to /dev/null even if UV_IGNORE is
+ * set
+ */
+ use_fd = open("/dev/null", fd == 0 ? O_RDONLY : O_RDWR);
+ close_fd = use_fd;
+
+ if (use_fd < 0) {
+ uv__write_int(error_fd, UV__ERR(errno));
+ _exit(127);
+ }
+ }
+ }
+
+ if (fd == use_fd)
+ uv__cloexec_fcntl(use_fd, 0);
+ else
+ fd = dup2(use_fd, fd);
+
+ if (fd == -1) {
+ uv__write_int(error_fd, UV__ERR(errno));
+ _exit(127);
+ }
+
+ if (fd <= 2)
+ uv__nonblock_fcntl(fd, 0);
+
+ if (close_fd >= stdio_count)
+ uv__close(close_fd);
+ }
+
+ for (fd = 0; fd < stdio_count; fd++) {
+ use_fd = pipes[fd][1];
+
+ if (use_fd >= stdio_count)
+ uv__close(use_fd);
+ }
+
+ if (options->cwd != NULL && chdir(options->cwd)) {
+ uv__write_int(error_fd, UV__ERR(errno));
+ _exit(127);
+ }
+
+ if (options->flags & (UV_PROCESS_SETUID | UV_PROCESS_SETGID)) {
+ /* When dropping privileges from root, the `setgroups` call will
+ * remove any extraneous groups. If we don't call this, then
+ * even though our uid has dropped, we may still have groups
+ * that enable us to do super-user things. This will fail if we
+     * aren't root, so we don't bother checking the return value; this
+     * is just an optimistic attempt at dropping privileges.
+ */
+ SAVE_ERRNO(setgroups(0, NULL));
+ }
+
+ if ((options->flags & UV_PROCESS_SETGID) && setgid(options->gid)) {
+ uv__write_int(error_fd, UV__ERR(errno));
+ _exit(127);
+ }
+
+ if ((options->flags & UV_PROCESS_SETUID) && setuid(options->uid)) {
+ uv__write_int(error_fd, UV__ERR(errno));
+ _exit(127);
+ }
+
+#ifndef CMAKE_BOOTSTRAP
+#if defined(__linux__) || defined(__FreeBSD__)
+ if (options->cpumask != NULL) {
+ cpumask_size = uv_cpumask_size();
+ assert(options->cpumask_size >= (size_t)cpumask_size);
+
+ CPU_ZERO(&cpuset);
+ for (i = 0; i < cpumask_size; ++i) {
+ if (options->cpumask[i]) {
+ CPU_SET(i, &cpuset);
+ }
+ }
+
+ r = -pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
+ if (r != 0) {
+ uv__write_int(error_fd, r);
+ _exit(127);
+ }
+ }
+#endif
+#endif
+
+ if (options->env != NULL) {
+ environ = options->env;
+ }
+
+ /* Reset signal disposition. Use a hard-coded limit because NSIG
+ * is not fixed on Linux: it's either 32, 34 or 64, depending on
+ * whether RT signals are enabled. We are not allowed to touch
+ * RT signal handlers, glibc uses them internally.
+ */
+ for (n = 1; n < 32; n += 1) {
+ if (n == SIGKILL || n == SIGSTOP)
+ continue; /* Can't be changed. */
+
+#if defined(__HAIKU__)
+ if (n == SIGKILLTHR)
+ continue; /* Can't be changed. */
+#endif
+
+ if (SIG_ERR != signal(n, SIG_DFL))
+ continue;
+
+ uv__write_int(error_fd, UV__ERR(errno));
+ _exit(127);
+ }
+
+ /* Reset signal mask. */
+ sigemptyset(&set);
+ err = pthread_sigmask(SIG_SETMASK, &set, NULL);
+
+ if (err != 0) {
+ uv__write_int(error_fd, UV__ERR(err));
+ _exit(127);
+ }
+
+ execvp(options->file, options->args);
+ uv__write_int(error_fd, UV__ERR(errno));
+ _exit(127);
+}
+#endif
+
+
+int uv_spawn(uv_loop_t* loop,
+ uv_process_t* process,
+ const uv_process_options_t* options) {
+#if defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH)
+ /* fork is marked __WATCHOS_PROHIBITED __TVOS_PROHIBITED. */
+ return UV_ENOSYS;
+#else
+ int signal_pipe[2] = { -1, -1 };
+ int pipes_storage[8][2];
+ int (*pipes)[2];
+ int stdio_count;
+ ssize_t r;
+ pid_t pid;
+ int err;
+ int exec_errorno;
+ int i;
+ int status;
+
+ if (options->cpumask != NULL) {
+#ifndef CMAKE_BOOTSTRAP
+#if defined(__linux__) || defined(__FreeBSD__)
+ if (options->cpumask_size < (size_t)uv_cpumask_size()) {
+ return UV_EINVAL;
+ }
+#else
+ return UV_ENOTSUP;
+#endif
+#else
+ return UV_ENOTSUP;
+#endif
+ }
+
+ assert(options->file != NULL);
+ assert(!(options->flags & ~(UV_PROCESS_DETACHED |
+ UV_PROCESS_SETGID |
+ UV_PROCESS_SETUID |
+ UV_PROCESS_WINDOWS_HIDE |
+ UV_PROCESS_WINDOWS_HIDE_CONSOLE |
+ UV_PROCESS_WINDOWS_HIDE_GUI |
+ UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS)));
+
+ uv__handle_init(loop, (uv_handle_t*)process, UV_PROCESS);
+ QUEUE_INIT(&process->queue);
+
+ stdio_count = options->stdio_count;
+ if (stdio_count < 3)
+ stdio_count = 3;
+
+ err = UV_ENOMEM;
+ pipes = pipes_storage;
+ if (stdio_count > (int) ARRAY_SIZE(pipes_storage))
+ pipes = uv__malloc(stdio_count * sizeof(*pipes));
+
+ if (pipes == NULL)
+ goto error;
+
+ for (i = 0; i < stdio_count; i++) {
+ pipes[i][0] = -1;
+ pipes[i][1] = -1;
+ }
+
+ for (i = 0; i < options->stdio_count; i++) {
+ err = uv__process_init_stdio(options->stdio + i, pipes[i]);
+ if (err)
+ goto error;
+ }
+
+ /* This pipe is used by the parent to wait until
+ * the child has called `execve()`. We need this
+ * to avoid the following race condition:
+ *
+ * if ((pid = fork()) > 0) {
+ * kill(pid, SIGTERM);
+ * }
+ * else if (pid == 0) {
+ * execve("/bin/cat", argp, envp);
+ * }
+ *
+ * The parent sends a signal immediately after forking.
+ * Since the child may not have called `execve()` yet,
+   * there is no telling which process receives the signal:
+ * our fork or /bin/cat.
+ *
+ * To avoid ambiguity, we create a pipe with both ends
+ * marked close-on-exec. Then, after the call to `fork()`,
+ * the parent polls the read end until it EOFs or errors with EPIPE.
+ */
+ err = uv__make_pipe(signal_pipe, 0);
+ if (err)
+ goto error;
+
+ uv_signal_start(&loop->child_watcher, uv__chld, SIGCHLD);
+
+ /* Acquire write lock to prevent opening new fds in worker threads */
+ uv_rwlock_wrlock(&loop->cloexec_lock);
+ pid = fork();
+
+ if (pid == -1) {
+ err = UV__ERR(errno);
+ uv_rwlock_wrunlock(&loop->cloexec_lock);
+ uv__close(signal_pipe[0]);
+ uv__close(signal_pipe[1]);
+ goto error;
+ }
+
+ if (pid == 0) {
+ uv__process_child_init(options, stdio_count, pipes, signal_pipe[1]);
+ abort();
+ }
+
+ /* Release lock in parent process */
+ uv_rwlock_wrunlock(&loop->cloexec_lock);
+ uv__close(signal_pipe[1]);
+
+ process->status = 0;
+ exec_errorno = 0;
+ do
+ r = read(signal_pipe[0], &exec_errorno, sizeof(exec_errorno));
+ while (r == -1 && errno == EINTR);
+
+ if (r == 0)
+ ; /* okay, EOF */
+ else if (r == sizeof(exec_errorno)) {
+ do
+ err = waitpid(pid, &status, 0); /* okay, read errorno */
+ while (err == -1 && errno == EINTR);
+ assert(err == pid);
+ } else if (r == -1 && errno == EPIPE) {
+ do
+ err = waitpid(pid, &status, 0); /* okay, got EPIPE */
+ while (err == -1 && errno == EINTR);
+ assert(err == pid);
+ } else
+ abort();
+
+ uv__close_nocheckstdio(signal_pipe[0]);
+
+ for (i = 0; i < options->stdio_count; i++) {
+ err = uv__process_open_stream(options->stdio + i, pipes[i]);
+ if (err == 0)
+ continue;
+
+ while (i--)
+ uv__process_close_stream(options->stdio + i);
+
+ goto error;
+ }
+
+ /* Only activate this handle if exec() happened successfully */
+ if (exec_errorno == 0) {
+ QUEUE_INSERT_TAIL(&loop->process_handles, &process->queue);
+ uv__handle_start(process);
+ }
+
+ process->pid = pid;
+ process->exit_cb = options->exit_cb;
+
+ if (pipes != pipes_storage)
+ uv__free(pipes);
+
+ return exec_errorno;
+
+error:
+ if (pipes != NULL) {
+ for (i = 0; i < stdio_count; i++) {
+ if (i < options->stdio_count)
+ if (options->stdio[i].flags & (UV_INHERIT_FD | UV_INHERIT_STREAM))
+ continue;
+ if (pipes[i][0] != -1)
+ uv__close_nocheckstdio(pipes[i][0]);
+ if (pipes[i][1] != -1)
+ uv__close_nocheckstdio(pipes[i][1]);
+ }
+
+ if (pipes != pipes_storage)
+ uv__free(pipes);
+ }
+
+ return err;
+#endif
+}
+
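The signal_pipe handling in uv_spawn is the classic close-on-exec error pipe: the child only ever writes its errno to the pipe when exec fails, so EOF on the parent's read end means exec succeeded. A stripped-down sketch of the same pattern in plain POSIX, without libuv's stdio setup and locking machinery:

#include <errno.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* Returns 0 if execvp() succeeded in the child, otherwise the child's errno. */
static int spawn_and_report(char* const argv[], pid_t* out_pid) {
  int pipefd[2];
  int exec_errno;
  ssize_t n;
  pid_t pid;

  if (pipe(pipefd))
    return errno;
  fcntl(pipefd[0], F_SETFD, FD_CLOEXEC);
  fcntl(pipefd[1], F_SETFD, FD_CLOEXEC);

  pid = fork();
  if (pid == -1) {
    exec_errno = errno;
    close(pipefd[0]);
    close(pipefd[1]);
    return exec_errno;
  }

  if (pid == 0) {
    execvp(argv[0], argv);               /* On success, CLOEXEC closes the pipe. */
    exec_errno = errno;
    n = write(pipefd[1], &exec_errno, sizeof(exec_errno));
    (void) n;
    _exit(127);
  }

  close(pipefd[1]);
  exec_errno = 0;
  do
    n = read(pipefd[0], &exec_errno, sizeof(exec_errno));
  while (n == -1 && errno == EINTR);
  close(pipefd[0]);

  if (n != 0)                            /* Child reported an exec failure; */
    waitpid(pid, NULL, 0);               /* reap it so it doesn't linger. */

  *out_pid = pid;
  return n == 0 ? 0 : exec_errno;
}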
+
+int uv_process_kill(uv_process_t* process, int signum) {
+ return uv_kill(process->pid, signum);
+}
+
+
+int uv_kill(int pid, int signum) {
+ if (kill(pid, signum))
+ return UV__ERR(errno);
+ else
+ return 0;
+}
+
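For context, the public side of this file is driven roughly like the following (a minimal, hedged usage sketch with error handling trimmed):

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <uv.h>

static void on_exit_cb(uv_process_t* proc, int64_t exit_status, int term_signal) {
  fprintf(stderr, "child exited: status %lld, signal %d\n",
          (long long) exit_status, term_signal);
  uv_close((uv_handle_t*) proc, NULL);
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_process_t child;
  uv_process_options_t options;
  char* args[] = { "sleep", "10", NULL };

  memset(&options, 0, sizeof(options));
  options.file = "sleep";
  options.args = args;
  options.exit_cb = on_exit_cb;

  if (uv_spawn(loop, &child, &options) == 0)
    uv_process_kill(&child, SIGTERM);    /* same effect as uv_kill(child.pid, SIGTERM) */

  return uv_run(loop, UV_RUN_DEFAULT);
}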
+
+void uv__process_close(uv_process_t* handle) {
+ QUEUE_REMOVE(&handle->queue);
+ uv__handle_stop(handle);
+ if (QUEUE_EMPTY(&handle->loop->process_handles))
+ uv_signal_stop(&handle->loop->child_watcher);
+}
diff --git a/Utilities/cmlibuv/src/unix/procfs-exepath.c b/Utilities/cmlibuv/src/unix/procfs-exepath.c
new file mode 100644
index 0000000..00dc021
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/procfs-exepath.c
@@ -0,0 +1,45 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stddef.h>
+#include <unistd.h>
+
+int uv_exepath(char* buffer, size_t* size) {
+ ssize_t n;
+
+ if (buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+ n = *size - 1;
+ if (n > 0)
+ n = readlink("/proc/self/exe", buffer, n);
+
+ if (n == -1)
+ return UV__ERR(errno);
+
+ buffer[n] = '\0';
+ *size = n;
+
+ return 0;
+}
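A typical caller passes a buffer plus its size and reads back the actual length, something like:

#include <stdio.h>
#include <uv.h>

int main(void) {
  char path[4096];
  size_t size = sizeof(path);

  if (uv_exepath(path, &size) == 0)
    printf("executable: %s (%zu bytes)\n", path, size);
  else
    fprintf(stderr, "uv_exepath failed\n");

  return 0;
}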
diff --git a/Utilities/cmlibuv/src/unix/proctitle.c b/Utilities/cmlibuv/src/unix/proctitle.c
new file mode 100644
index 0000000..9ffe5b6
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/proctitle.c
@@ -0,0 +1,159 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+struct uv__process_title {
+ char* str;
+ size_t len; /* Length of the current process title. */
+ size_t cap; /* Maximum capacity. Computed once in uv_setup_args(). */
+};
+
+extern void uv__set_process_title(const char* title);
+
+static uv_mutex_t process_title_mutex;
+static uv_once_t process_title_mutex_once = UV_ONCE_INIT;
+static struct uv__process_title process_title;
+static void* args_mem;
+
+
+static void init_process_title_mutex_once(void) {
+ uv_mutex_init(&process_title_mutex);
+}
+
+
+char** uv_setup_args(int argc, char** argv) {
+ struct uv__process_title pt;
+ char** new_argv;
+ size_t size;
+ char* s;
+ int i;
+
+ if (argc <= 0)
+ return argv;
+
+ pt.str = argv[0];
+ pt.len = strlen(argv[0]);
+ pt.cap = pt.len + 1;
+
+ /* Calculate how much memory we need for the argv strings. */
+ size = pt.cap;
+ for (i = 1; i < argc; i++)
+ size += strlen(argv[i]) + 1;
+
+ /* Add space for the argv pointers. */
+ size += (argc + 1) * sizeof(char*);
+
+ new_argv = uv__malloc(size);
+ if (new_argv == NULL)
+ return argv;
+
+ /* Copy over the strings and set up the pointer table. */
+ i = 0;
+ s = (char*) &new_argv[argc + 1];
+ size = pt.cap;
+ goto loop;
+
+ for (/* empty */; i < argc; i++) {
+ size = strlen(argv[i]) + 1;
+ loop:
+ memcpy(s, argv[i], size);
+ new_argv[i] = s;
+ s += size;
+ }
+ new_argv[i] = NULL;
+
+  /* argv is not adjacent on z/OS; we use just argv[0] on that platform. */
+#ifndef __MVS__
+ pt.cap = argv[i - 1] + size - argv[0];
+#endif
+
+ args_mem = new_argv;
+ process_title = pt;
+
+ return new_argv;
+}
+
+
+int uv_set_process_title(const char* title) {
+ struct uv__process_title* pt;
+ size_t len;
+
+ /* If uv_setup_args wasn't called or failed, we can't continue. */
+ if (args_mem == NULL)
+ return UV_ENOBUFS;
+
+ pt = &process_title;
+ len = strlen(title);
+
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+
+ if (len >= pt->cap) {
+ len = 0;
+ if (pt->cap > 0)
+ len = pt->cap - 1;
+ }
+
+ memcpy(pt->str, title, len);
+ memset(pt->str + len, '\0', pt->cap - len);
+ pt->len = len;
+
+ uv_mutex_unlock(&process_title_mutex);
+
+ return 0;
+}
+
+
+int uv_get_process_title(char* buffer, size_t size) {
+ if (buffer == NULL || size == 0)
+ return UV_EINVAL;
+
+ /* If uv_setup_args wasn't called or failed, we can't continue. */
+ if (args_mem == NULL)
+ return UV_ENOBUFS;
+
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+
+ if (size <= process_title.len) {
+ uv_mutex_unlock(&process_title_mutex);
+ return UV_ENOBUFS;
+ }
+
+ if (process_title.len != 0)
+ memcpy(buffer, process_title.str, process_title.len + 1);
+
+ buffer[process_title.len] = '\0';
+
+ uv_mutex_unlock(&process_title_mutex);
+
+ return 0;
+}
+
+
+void uv__process_title_cleanup(void) {
+ uv__free(args_mem); /* Keep valgrind happy. */
+ args_mem = NULL;
+}
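The intended call order is uv_setup_args() first (it may relocate argv to make room for longer titles), then the get/set title functions. A small usage sketch:

#include <stdio.h>
#include <uv.h>

int main(int argc, char** argv) {
  char title[64];

  argv = uv_setup_args(argc, argv);    /* must happen before the title calls */

  if (uv_set_process_title("my-worker: idle") != 0)
    fprintf(stderr, "could not set the process title\n");

  if (uv_get_process_title(title, sizeof(title)) == 0)
    printf("current title: %s\n", title);

  return 0;
}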
diff --git a/Utilities/cmlibuv/src/unix/pthread-fixes.c b/Utilities/cmlibuv/src/unix/pthread-fixes.c
new file mode 100644
index 0000000..022d79c
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/pthread-fixes.c
@@ -0,0 +1,58 @@
+/* Copyright (c) 2013, Sony Mobile Communications AB
+ * Copyright (c) 2012, Google Inc.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Google Inc. nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/* Android versions < 4.1 have a broken pthread_sigmask. */
+#include "uv-common.h"
+
+#include <errno.h>
+#include <pthread.h>
+#include <signal.h>
+
+int uv__pthread_sigmask(int how, const sigset_t* set, sigset_t* oset) {
+ static int workaround;
+ int err;
+
+ if (uv__load_relaxed(&workaround)) {
+ return sigprocmask(how, set, oset);
+ } else {
+ err = pthread_sigmask(how, set, oset);
+ if (err) {
+ if (err == EINVAL && sigprocmask(how, set, oset) == 0) {
+ uv__store_relaxed(&workaround, 1);
+ return 0;
+ } else {
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
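uv__pthread_sigmask is a drop-in for pthread_sigmask on old Android releases; callers use it with the usual POSIX pattern of blocking a set of signals and restoring the saved mask afterwards, for example:

#include <pthread.h>
#include <signal.h>

/* Run fn with SIGPIPE blocked, then restore the previous signal mask. */
static int with_sigpipe_blocked(void (*fn)(void*), void* arg) {
  sigset_t block;
  sigset_t saved;

  sigemptyset(&block);
  sigaddset(&block, SIGPIPE);

  if (pthread_sigmask(SIG_BLOCK, &block, &saved))
    return -1;

  fn(arg);

  return pthread_sigmask(SIG_SETMASK, &saved, NULL) ? -1 : 0;
}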
diff --git a/Utilities/cmlibuv/src/unix/qnx.c b/Utilities/cmlibuv/src/unix/qnx.c
new file mode 100644
index 0000000..ca148d3
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/qnx.c
@@ -0,0 +1,137 @@
+/* Copyright libuv contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <string.h>
+#include <sys/process.h>
+#include <sys/neutrino.h>
+#include <sys/memmsg.h>
+#include <sys/syspage.h>
+#include <sys/procfs.h>
+
+static void
+get_mem_info(uint64_t* totalmem, uint64_t* freemem) {
+ mem_info_t msg;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.i.type = _MEM_INFO;
+ msg.i.fd = -1;
+
+ if (MsgSend(MEMMGR_COID, &msg.i, sizeof(msg.i), &msg.o, sizeof(msg.o))
+ != -1) {
+ *totalmem = msg.o.info.__posix_tmi_total;
+ *freemem = msg.o.info.posix_tmi_length;
+ } else {
+ *totalmem = 0;
+ *freemem = 0;
+ }
+}
+
+
+void uv_loadavg(double avg[3]) {
+ avg[0] = 0.0;
+ avg[1] = 0.0;
+ avg[2] = 0.0;
+}
+
+
+int uv_exepath(char* buffer, size_t* size) {
+ char path[PATH_MAX];
+ if (buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+ realpath(_cmdname(NULL), path);
+ strlcpy(buffer, path, *size);
+ *size = strlen(buffer);
+ return 0;
+}
+
+
+uint64_t uv_get_free_memory(void) {
+ uint64_t totalmem;
+ uint64_t freemem;
+ get_mem_info(&totalmem, &freemem);
+ return freemem;
+}
+
+
+uint64_t uv_get_total_memory(void) {
+ uint64_t totalmem;
+ uint64_t freemem;
+ get_mem_info(&totalmem, &freemem);
+ return totalmem;
+}
+
+
+uint64_t uv_get_constrained_memory(void) {
+ return 0;
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+ int fd;
+ procfs_asinfo asinfo;
+
+ fd = uv__open_cloexec("/proc/self/ctl", O_RDONLY);
+ if (fd == -1)
+ return UV__ERR(errno);
+
+ if (devctl(fd, DCMD_PROC_ASINFO, &asinfo, sizeof(asinfo), 0) == -1) {
+ uv__close(fd);
+ return UV__ERR(errno);
+ }
+
+ uv__close(fd);
+ *rss = asinfo.rss;
+ return 0;
+}
+
+
+int uv_uptime(double* uptime) {
+ struct qtime_entry* qtime = _SYSPAGE_ENTRY(_syspage_ptr, qtime);
+ *uptime = (qtime->nsec / 1000000000.0);
+ return 0;
+}
+
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+ struct cpuinfo_entry* cpuinfo =
+ (struct cpuinfo_entry*)_SYSPAGE_ENTRY(_syspage_ptr, new_cpuinfo);
+ size_t cpuinfo_size = _SYSPAGE_ELEMENT_SIZE(_syspage_ptr, cpuinfo);
+ struct strings_entry* strings = _SYSPAGE_ENTRY(_syspage_ptr, strings);
+ int num_cpus = _syspage_ptr->num_cpu;
+ int i;
+
+ *count = num_cpus;
+ *cpu_infos = uv__malloc(num_cpus * sizeof(**cpu_infos));
+ if (*cpu_infos == NULL)
+ return UV_ENOMEM;
+
+ for (i = 0; i < num_cpus; i++) {
+ (*cpu_infos)[i].model = strdup(&strings->data[cpuinfo->name]);
+ (*cpu_infos)[i].speed = cpuinfo->speed;
+ SYSPAGE_ARRAY_ADJ_OFFSET(cpuinfo, cpuinfo, cpuinfo_size);
+ }
+
+ return 0;
+}
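uv_cpu_info() allocates the array it returns, so callers always pair it with uv_free_cpu_info(); for example:

#include <stdio.h>
#include <uv.h>

int main(void) {
  uv_cpu_info_t* cpus;
  int count;
  int i;

  if (uv_cpu_info(&cpus, &count) != 0)
    return 1;

  for (i = 0; i < count; i++)
    printf("cpu %d: %s @ %d MHz\n", i, cpus[i].model, cpus[i].speed);

  uv_free_cpu_info(cpus, count);
  return 0;
}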
diff --git a/Utilities/cmlibuv/src/unix/random-devurandom.c b/Utilities/cmlibuv/src/unix/random-devurandom.c
new file mode 100644
index 0000000..05e52a5
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/random-devurandom.c
@@ -0,0 +1,93 @@
+/* Copyright libuv contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <sys/stat.h>
+#include <unistd.h>
+
+static uv_once_t once = UV_ONCE_INIT;
+static int status;
+
+
+int uv__random_readpath(const char* path, void* buf, size_t buflen) {
+ struct stat s;
+ size_t pos;
+ ssize_t n;
+ int fd;
+
+ fd = uv__open_cloexec(path, O_RDONLY);
+
+ if (fd < 0)
+ return fd;
+
+ if (fstat(fd, &s)) {
+ uv__close(fd);
+ return UV__ERR(errno);
+ }
+
+ if (!S_ISCHR(s.st_mode)) {
+ uv__close(fd);
+ return UV_EIO;
+ }
+
+ for (pos = 0; pos != buflen; pos += n) {
+ do
+ n = read(fd, (char*) buf + pos, buflen - pos);
+ while (n == -1 && errno == EINTR);
+
+ if (n == -1) {
+ uv__close(fd);
+ return UV__ERR(errno);
+ }
+
+ if (n == 0) {
+ uv__close(fd);
+ return UV_EIO;
+ }
+ }
+
+ uv__close(fd);
+ return 0;
+}
+
+
+static void uv__random_devurandom_init(void) {
+ char c;
+
+ /* Linux's random(4) man page suggests applications should read at least
+ * once from /dev/random before switching to /dev/urandom in order to seed
+ * the system RNG. Reads from /dev/random can of course block indefinitely
+ * until entropy is available but that's the point.
+ */
+ status = uv__random_readpath("/dev/random", &c, 1);
+}
+
+
+int uv__random_devurandom(void* buf, size_t buflen) {
+ uv_once(&once, uv__random_devurandom_init);
+
+ if (status != 0)
+ return status;
+
+ return uv__random_readpath("/dev/urandom", buf, buflen);
+}
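These backends sit behind the public uv_random() call. If memory serves, passing a NULL callback selects a synchronous path in which the loop and request arguments go unused, so a caller can do something like the following (treat the NULL loop/req detail as an assumption worth checking against the docs):

#include <stdio.h>
#include <uv.h>

int main(void) {
  unsigned char key[32];
  int r;

  /* Assumption: cb == NULL means synchronous completion; loop/req then unused. */
  r = uv_random(NULL, NULL, key, sizeof(key), 0, NULL);
  if (r != 0) {
    fprintf(stderr, "uv_random: %s\n", uv_strerror(r));
    return 1;
  }

  printf("filled %zu random bytes\n", sizeof(key));
  return 0;
}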
diff --git a/Utilities/cmlibuv/src/unix/random-getentropy.c b/Utilities/cmlibuv/src/unix/random-getentropy.c
new file mode 100644
index 0000000..c45d9fd
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/random-getentropy.c
@@ -0,0 +1,57 @@
+/* Copyright libuv contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stddef.h>
+#include <dlfcn.h>
+
+typedef int (*uv__getentropy_cb)(void *, size_t);
+
+static uv__getentropy_cb uv__getentropy;
+static uv_once_t once = UV_ONCE_INIT;
+
+
+static void uv__random_getentropy_init(void) {
+ uv__getentropy = (uv__getentropy_cb) dlsym(RTLD_DEFAULT, "getentropy");
+}
+
+
+int uv__random_getentropy(void* buf, size_t buflen) {
+ size_t pos;
+ size_t stride;
+
+ uv_once(&once, uv__random_getentropy_init);
+
+ if (uv__getentropy == NULL)
+ return UV_ENOSYS;
+
+ /* getentropy() returns an error for requests > 256 bytes. */
+ for (pos = 0, stride = 256; pos + stride < buflen; pos += stride)
+ if (uv__getentropy((char *) buf + pos, stride))
+ return UV__ERR(errno);
+
+ if (uv__getentropy((char *) buf + pos, buflen - pos))
+ return UV__ERR(errno);
+
+ return 0;
+}
diff --git a/Utilities/cmlibuv/src/unix/random-getrandom.c b/Utilities/cmlibuv/src/unix/random-getrandom.c
new file mode 100644
index 0000000..bcc9408
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/random-getrandom.c
@@ -0,0 +1,88 @@
+/* Copyright libuv contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#ifdef __linux__
+
+#include "linux-syscalls.h"
+
+#define uv__random_getrandom_init() 0
+
+#else /* !__linux__ */
+
+#include <stddef.h>
+#include <dlfcn.h>
+
+typedef ssize_t (*uv__getrandom_cb)(void *, size_t, unsigned);
+
+static uv__getrandom_cb uv__getrandom;
+static uv_once_t once = UV_ONCE_INIT;
+
+static void uv__random_getrandom_init_once(void) {
+ uv__getrandom = (uv__getrandom_cb) dlsym(RTLD_DEFAULT, "getrandom");
+}
+
+static int uv__random_getrandom_init(void) {
+ uv_once(&once, uv__random_getrandom_init_once);
+
+ if (uv__getrandom == NULL)
+ return UV_ENOSYS;
+
+ return 0;
+}
+
+#endif /* !__linux__ */
+
+int uv__random_getrandom(void* buf, size_t buflen) {
+ ssize_t n;
+ size_t pos;
+ int rc;
+
+ rc = uv__random_getrandom_init();
+ if (rc != 0)
+ return rc;
+
+ for (pos = 0; pos != buflen; pos += n) {
+ do {
+ n = buflen - pos;
+
+ /* Most getrandom() implementations promise that reads <= 256 bytes
+ * will always succeed and won't be interrupted by signals.
+       * It's therefore useful to split the request into smaller reads because
+ * one big read may, in theory, continuously fail with EINTR.
+ */
+ if (n > 256)
+ n = 256;
+
+ n = uv__getrandom((char *) buf + pos, n, 0);
+ } while (n == -1 && errno == EINTR);
+
+ if (n == -1)
+ return UV__ERR(errno);
+
+ if (n == 0)
+ return UV_EIO;
+ }
+
+ return 0;
+}
diff --git a/Utilities/cmlibuv/src/unix/random-sysctl-linux.c b/Utilities/cmlibuv/src/unix/random-sysctl-linux.c
new file mode 100644
index 0000000..66ba8d7
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/random-sysctl-linux.c
@@ -0,0 +1,99 @@
+/* Copyright libuv contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <errno.h>
+#include <string.h>
+
+#include <syscall.h>
+#include <unistd.h>
+
+
+struct uv__sysctl_args {
+ int* name;
+ int nlen;
+ void* oldval;
+ size_t* oldlenp;
+ void* newval;
+ size_t newlen;
+ unsigned long unused[4];
+};
+
+
+int uv__random_sysctl(void* buf, size_t buflen) {
+ static int name[] = {1 /*CTL_KERN*/, 40 /*KERN_RANDOM*/, 6 /*RANDOM_UUID*/};
+ struct uv__sysctl_args args;
+ char uuid[16];
+ char* p;
+ char* pe;
+ size_t n;
+
+ p = buf;
+ pe = p + buflen;
+
+ while (p < pe) {
+ memset(&args, 0, sizeof(args));
+
+ args.name = name;
+ args.nlen = ARRAY_SIZE(name);
+ args.oldval = uuid;
+ args.oldlenp = &n;
+ n = sizeof(uuid);
+
+ /* Emits a deprecation warning with some kernels but that seems like
+ * an okay trade-off for the fallback of the fallback: this function is
+ * only called when neither getrandom(2) nor /dev/urandom are available.
+ * Fails with ENOSYS on kernels configured without CONFIG_SYSCTL_SYSCALL.
+ * At least arm64 never had a _sysctl system call and therefore doesn't
+ * have a SYS__sysctl define either.
+ */
+#ifdef SYS__sysctl
+ if (syscall(SYS__sysctl, &args) == -1)
+ return UV__ERR(errno);
+#else
+ {
+ (void) &args;
+ return UV_ENOSYS;
+ }
+#endif
+
+ if (n != sizeof(uuid))
+ return UV_EIO; /* Can't happen. */
+
+ /* uuid[] is now a type 4 UUID. Bytes 6 and 8 (counting from zero) contain
+ * 4 and 5 bits of entropy, respectively. For ease of use, we skip those
+ * and only use 14 of the 16 bytes.
+ */
+ uuid[6] = uuid[14];
+ uuid[8] = uuid[15];
+
+ n = pe - p;
+ if (n > 14)
+ n = 14;
+
+ memcpy(p, uuid, n);
+ p += n;
+ }
+
+ return 0;
+}
diff --git a/Utilities/cmlibuv/src/unix/signal.c b/Utilities/cmlibuv/src/unix/signal.c
new file mode 100644
index 0000000..f40a3e5
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/signal.c
@@ -0,0 +1,558 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <assert.h>
+#include <errno.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#ifndef SA_RESTART
+# define SA_RESTART 0
+#endif
+
+typedef struct {
+ uv_signal_t* handle;
+ int signum;
+} uv__signal_msg_t;
+
+RB_HEAD(uv__signal_tree_s, uv_signal_s);
+
+
+static int uv__signal_unlock(void);
+static int uv__signal_start(uv_signal_t* handle,
+ uv_signal_cb signal_cb,
+ int signum,
+ int oneshot);
+static void uv__signal_event(uv_loop_t* loop, uv__io_t* w, unsigned int events);
+static int uv__signal_compare(uv_signal_t* w1, uv_signal_t* w2);
+static void uv__signal_stop(uv_signal_t* handle);
+static void uv__signal_unregister_handler(int signum);
+
+
+static uv_once_t uv__signal_global_init_guard = UV_ONCE_INIT;
+static struct uv__signal_tree_s uv__signal_tree =
+ RB_INITIALIZER(uv__signal_tree);
+static int uv__signal_lock_pipefd[2] = { -1, -1 };
+
+RB_GENERATE_STATIC(uv__signal_tree_s,
+ uv_signal_s, tree_entry,
+ uv__signal_compare)
+
+static void uv__signal_global_reinit(void);
+
+static void uv__signal_global_init(void) {
+ if (uv__signal_lock_pipefd[0] == -1)
+    /* pthread_atfork can register prepare, parent and child handlers; here we
+     * only register a child handler. That registration is both persistent and
+     * cumulative, so if we kept registering on every init the handler would be
+     * called multiple times after each fork. Thus we only want to do it once.
+     */
+ if (pthread_atfork(NULL, NULL, &uv__signal_global_reinit))
+ abort();
+
+ uv__signal_global_reinit();
+}
+
+
+void uv__signal_cleanup(void) {
+ /* We can only use signal-safe functions here.
+ * That includes read/write and close, fortunately.
+ * We do all of this directly here instead of resetting
+ * uv__signal_global_init_guard because
+ * uv__signal_global_once_init is only called from uv_loop_init
+ * and this needs to function in existing loops.
+ */
+ if (uv__signal_lock_pipefd[0] != -1) {
+ uv__close(uv__signal_lock_pipefd[0]);
+ uv__signal_lock_pipefd[0] = -1;
+ }
+
+ if (uv__signal_lock_pipefd[1] != -1) {
+ uv__close(uv__signal_lock_pipefd[1]);
+ uv__signal_lock_pipefd[1] = -1;
+ }
+}
+
+
+static void uv__signal_global_reinit(void) {
+ uv__signal_cleanup();
+
+ if (uv__make_pipe(uv__signal_lock_pipefd, 0))
+ abort();
+
+ if (uv__signal_unlock())
+ abort();
+}
+
+
+void uv__signal_global_once_init(void) {
+ uv_once(&uv__signal_global_init_guard, uv__signal_global_init);
+}
+
+
+static int uv__signal_lock(void) {
+ int r;
+ char data;
+
+ do {
+ r = read(uv__signal_lock_pipefd[0], &data, sizeof data);
+ } while (r < 0 && errno == EINTR);
+
+ return (r < 0) ? -1 : 0;
+}
+
+
+static int uv__signal_unlock(void) {
+ int r;
+ char data = 42;
+
+ do {
+ r = write(uv__signal_lock_pipefd[1], &data, sizeof data);
+ } while (r < 0 && errno == EINTR);
+
+ return (r < 0) ? -1 : 0;
+}
+
+
+static void uv__signal_block_and_lock(sigset_t* saved_sigmask) {
+ sigset_t new_mask;
+
+ if (sigfillset(&new_mask))
+ abort();
+
+ /* to shut up valgrind */
+ sigemptyset(saved_sigmask);
+ if (pthread_sigmask(SIG_SETMASK, &new_mask, saved_sigmask))
+ abort();
+
+ if (uv__signal_lock())
+ abort();
+}
+
+
+static void uv__signal_unlock_and_unblock(sigset_t* saved_sigmask) {
+ if (uv__signal_unlock())
+ abort();
+
+ if (pthread_sigmask(SIG_SETMASK, saved_sigmask, NULL))
+ abort();
+}
+
+
+static uv_signal_t* uv__signal_first_handle(int signum) {
+ /* This function must be called with the signal lock held. */
+ uv_signal_t lookup;
+ uv_signal_t* handle;
+
+ lookup.signum = signum;
+ lookup.flags = 0;
+ lookup.loop = NULL;
+
+ handle = RB_NFIND(uv__signal_tree_s, &uv__signal_tree, &lookup);
+
+ if (handle != NULL && handle->signum == signum)
+ return handle;
+
+ return NULL;
+}
+
+
+static void uv__signal_handler(int signum) {
+ uv__signal_msg_t msg;
+ uv_signal_t* handle;
+ int saved_errno;
+
+ saved_errno = errno;
+ memset(&msg, 0, sizeof msg);
+
+ if (uv__signal_lock()) {
+ errno = saved_errno;
+ return;
+ }
+
+ for (handle = uv__signal_first_handle(signum);
+ handle != NULL && handle->signum == signum;
+ handle = RB_NEXT(uv__signal_tree_s, &uv__signal_tree, handle)) {
+ int r;
+
+ msg.signum = signum;
+ msg.handle = handle;
+
+ /* write() should be atomic for small data chunks, so the entire message
+ * should be written at once. In theory the pipe could become full, in
+ * which case the user is out of luck.
+ */
+ do {
+ r = write(handle->loop->signal_pipefd[1], &msg, sizeof msg);
+ } while (r == -1 && errno == EINTR);
+
+ assert(r == sizeof msg ||
+ (r == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)));
+
+ if (r != -1)
+ handle->caught_signals++;
+ }
+
+ uv__signal_unlock();
+ errno = saved_errno;
+}
+
+
+static int uv__signal_register_handler(int signum, int oneshot) {
+ /* When this function is called, the signal lock must be held. */
+ struct sigaction sa;
+
+ /* XXX use a separate signal stack? */
+ memset(&sa, 0, sizeof(sa));
+ if (sigfillset(&sa.sa_mask))
+ abort();
+ sa.sa_handler = uv__signal_handler;
+ sa.sa_flags = SA_RESTART;
+ if (oneshot)
+ sa.sa_flags |= SA_RESETHAND;
+
+ /* XXX save old action so we can restore it later on? */
+ if (sigaction(signum, &sa, NULL))
+ return UV__ERR(errno);
+
+ return 0;
+}
+
+
+static void uv__signal_unregister_handler(int signum) {
+ /* When this function is called, the signal lock must be held. */
+ struct sigaction sa;
+
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_handler = SIG_DFL;
+
+ /* sigaction can only fail with EINVAL or EFAULT; an attempt to deregister a
+ * signal implies that it was successfully registered earlier, so EINVAL
+ * should never happen.
+ */
+ if (sigaction(signum, &sa, NULL))
+ abort();
+}
+
+
+static int uv__signal_loop_once_init(uv_loop_t* loop) {
+ int err;
+
+ /* Return if already initialized. */
+ if (loop->signal_pipefd[0] != -1)
+ return 0;
+
+ err = uv__make_pipe(loop->signal_pipefd, UV__F_NONBLOCK);
+ if (err)
+ return err;
+
+ uv__io_init(&loop->signal_io_watcher,
+ uv__signal_event,
+ loop->signal_pipefd[0]);
+ uv__io_start(loop, &loop->signal_io_watcher, POLLIN);
+
+ return 0;
+}
+
+
+int uv__signal_loop_fork(uv_loop_t* loop) {
+ uv__io_stop(loop, &loop->signal_io_watcher, POLLIN);
+ uv__close(loop->signal_pipefd[0]);
+ uv__close(loop->signal_pipefd[1]);
+ loop->signal_pipefd[0] = -1;
+ loop->signal_pipefd[1] = -1;
+ return uv__signal_loop_once_init(loop);
+}
+
+
+void uv__signal_loop_cleanup(uv_loop_t* loop) {
+ QUEUE* q;
+
+ /* Stop all the signal watchers that are still attached to this loop. This
+   * ensures that the (shared) signal tree doesn't contain any invalid
+   * entries, and that signal handlers are removed when appropriate.
+ * It's safe to use QUEUE_FOREACH here because the handles and the handle
+ * queue are not modified by uv__signal_stop().
+ */
+ QUEUE_FOREACH(q, &loop->handle_queue) {
+ uv_handle_t* handle = QUEUE_DATA(q, uv_handle_t, handle_queue);
+
+ if (handle->type == UV_SIGNAL)
+ uv__signal_stop((uv_signal_t*) handle);
+ }
+
+ if (loop->signal_pipefd[0] != -1) {
+ uv__close(loop->signal_pipefd[0]);
+ loop->signal_pipefd[0] = -1;
+ }
+
+ if (loop->signal_pipefd[1] != -1) {
+ uv__close(loop->signal_pipefd[1]);
+ loop->signal_pipefd[1] = -1;
+ }
+}
+
+
+int uv_signal_init(uv_loop_t* loop, uv_signal_t* handle) {
+ int err;
+
+ err = uv__signal_loop_once_init(loop);
+ if (err)
+ return err;
+
+ uv__handle_init(loop, (uv_handle_t*) handle, UV_SIGNAL);
+ handle->signum = 0;
+ handle->caught_signals = 0;
+ handle->dispatched_signals = 0;
+
+ return 0;
+}
+
+
+void uv__signal_close(uv_signal_t* handle) {
+ uv__signal_stop(handle);
+}
+
+
+int uv_signal_start(uv_signal_t* handle, uv_signal_cb signal_cb, int signum) {
+ return uv__signal_start(handle, signal_cb, signum, 0);
+}
+
+
+int uv_signal_start_oneshot(uv_signal_t* handle,
+ uv_signal_cb signal_cb,
+ int signum) {
+ return uv__signal_start(handle, signal_cb, signum, 1);
+}
+
+
+static int uv__signal_start(uv_signal_t* handle,
+ uv_signal_cb signal_cb,
+ int signum,
+ int oneshot) {
+ sigset_t saved_sigmask;
+ int err;
+ uv_signal_t* first_handle;
+
+ assert(!uv__is_closing(handle));
+
+  /* If the user supplies signum == 0, return an error right away. If the
+ * signum is otherwise invalid then uv__signal_register will find out
+ * eventually.
+ */
+ if (signum == 0)
+ return UV_EINVAL;
+
+ /* Short circuit: if the signal watcher is already watching {signum} don't
+ * go through the process of deregistering and registering the handler.
+ * Additionally, this avoids pending signals getting lost in the small
+ * time frame that handle->signum == 0.
+ */
+ if (signum == handle->signum) {
+ handle->signal_cb = signal_cb;
+ return 0;
+ }
+
+ /* If the signal handler was already active, stop it first. */
+ if (handle->signum != 0) {
+ uv__signal_stop(handle);
+ }
+
+ uv__signal_block_and_lock(&saved_sigmask);
+
+ /* If at this point there are no active signal watchers for this signum (in
+   * any of the loops), it's time to try and register a handler for it here.
+   * The same applies when only one-shot handlers are registered and a regular
+   * handler comes in.
+ */
+ first_handle = uv__signal_first_handle(signum);
+ if (first_handle == NULL ||
+ (!oneshot && (first_handle->flags & UV_SIGNAL_ONE_SHOT))) {
+ err = uv__signal_register_handler(signum, oneshot);
+ if (err) {
+ /* Registering the signal handler failed. Must be an invalid signal. */
+ uv__signal_unlock_and_unblock(&saved_sigmask);
+ return err;
+ }
+ }
+
+ handle->signum = signum;
+ if (oneshot)
+ handle->flags |= UV_SIGNAL_ONE_SHOT;
+
+ RB_INSERT(uv__signal_tree_s, &uv__signal_tree, handle);
+
+ uv__signal_unlock_and_unblock(&saved_sigmask);
+
+ handle->signal_cb = signal_cb;
+ uv__handle_start(handle);
+
+ return 0;
+}
+
+
+static void uv__signal_event(uv_loop_t* loop,
+ uv__io_t* w,
+ unsigned int events) {
+ uv__signal_msg_t* msg;
+ uv_signal_t* handle;
+ char buf[sizeof(uv__signal_msg_t) * 32];
+ size_t bytes, end, i;
+ int r;
+
+ bytes = 0;
+ end = 0;
+
+ do {
+ r = read(loop->signal_pipefd[0], buf + bytes, sizeof(buf) - bytes);
+
+ if (r == -1 && errno == EINTR)
+ continue;
+
+ if (r == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
+ /* If there are bytes in the buffer already (which really is extremely
+ * unlikely if possible at all) we can't exit the function here. We'll
+ * spin until more bytes are read instead.
+ */
+ if (bytes > 0)
+ continue;
+
+ /* Otherwise, there was nothing there. */
+ return;
+ }
+
+ /* Other errors really should never happen. */
+ if (r == -1)
+ abort();
+
+ bytes += r;
+
+ /* `end` is rounded down to a multiple of sizeof(uv__signal_msg_t). */
+ end = (bytes / sizeof(uv__signal_msg_t)) * sizeof(uv__signal_msg_t);
+
+ for (i = 0; i < end; i += sizeof(uv__signal_msg_t)) {
+ msg = (uv__signal_msg_t*) (buf + i);
+ handle = msg->handle;
+
+ if (msg->signum == handle->signum) {
+ assert(!(handle->flags & UV_HANDLE_CLOSING));
+ handle->signal_cb(handle, handle->signum);
+ }
+
+ handle->dispatched_signals++;
+
+ if (handle->flags & UV_SIGNAL_ONE_SHOT)
+ uv__signal_stop(handle);
+ }
+
+ bytes -= end;
+
+    /* If there are any "partial" messages left, move them to the start of
+     * the buffer, and spin. This should not happen.
+ */
+ if (bytes) {
+ memmove(buf, buf + end, bytes);
+ continue;
+ }
+ } while (end == sizeof buf);
+}
+
+
+static int uv__signal_compare(uv_signal_t* w1, uv_signal_t* w2) {
+ int f1;
+ int f2;
+  /* Compare signums first so all watchers with the same signum end up
+ * adjacent.
+ */
+ if (w1->signum < w2->signum) return -1;
+ if (w1->signum > w2->signum) return 1;
+
+ /* Handlers without UV_SIGNAL_ONE_SHOT set will come first, so if the first
+ * handler returned is a one-shot handler, the rest will be too.
+ */
+ f1 = w1->flags & UV_SIGNAL_ONE_SHOT;
+ f2 = w2->flags & UV_SIGNAL_ONE_SHOT;
+ if (f1 < f2) return -1;
+ if (f1 > f2) return 1;
+
+ /* Sort by loop pointer, so we can easily look up the first item after
+ * { .signum = x, .loop = NULL }.
+ */
+ if (w1->loop < w2->loop) return -1;
+ if (w1->loop > w2->loop) return 1;
+
+ if (w1 < w2) return -1;
+ if (w1 > w2) return 1;
+
+ return 0;
+}
+
+
+int uv_signal_stop(uv_signal_t* handle) {
+ assert(!uv__is_closing(handle));
+ uv__signal_stop(handle);
+ return 0;
+}
+
+
+static void uv__signal_stop(uv_signal_t* handle) {
+ uv_signal_t* removed_handle;
+ sigset_t saved_sigmask;
+ uv_signal_t* first_handle;
+ int rem_oneshot;
+ int first_oneshot;
+ int ret;
+
+ /* If the watcher wasn't started, this is a no-op. */
+ if (handle->signum == 0)
+ return;
+
+ uv__signal_block_and_lock(&saved_sigmask);
+
+ removed_handle = RB_REMOVE(uv__signal_tree_s, &uv__signal_tree, handle);
+ assert(removed_handle == handle);
+ (void) removed_handle;
+
+ /* Check if there are other active signal watchers observing this signal. If
+ * not, unregister the signal handler.
+ */
+ first_handle = uv__signal_first_handle(handle->signum);
+ if (first_handle == NULL) {
+ uv__signal_unregister_handler(handle->signum);
+ } else {
+ rem_oneshot = handle->flags & UV_SIGNAL_ONE_SHOT;
+ first_oneshot = first_handle->flags & UV_SIGNAL_ONE_SHOT;
+ if (first_oneshot && !rem_oneshot) {
+ ret = uv__signal_register_handler(handle->signum, 1);
+ assert(ret == 0);
+ (void)ret;
+ }
+ }
+
+ uv__signal_unlock_and_unblock(&saved_sigmask);
+
+ handle->signum = 0;
+ uv__handle_stop(handle);
+}
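From the public API side, the watchers managed by this file are created with uv_signal_init()/uv_signal_start(); a minimal SIGINT handler looks like:

#include <signal.h>
#include <stdio.h>
#include <uv.h>

static void on_sigint(uv_signal_t* handle, int signum) {
  printf("caught signal %d, stopping watcher\n", signum);
  uv_signal_stop(handle);
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_signal_t sig;

  uv_signal_init(loop, &sig);
  uv_signal_start(&sig, on_sigint, SIGINT);
  /* uv_signal_start_oneshot(&sig, on_sigint, SIGINT) would stop by itself
   * after the first delivery. */

  return uv_run(loop, UV_RUN_DEFAULT);
}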
diff --git a/Utilities/cmlibuv/src/unix/spinlock.h b/Utilities/cmlibuv/src/unix/spinlock.h
new file mode 100644
index 0000000..a20c83c
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/spinlock.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef UV_SPINLOCK_H_
+#define UV_SPINLOCK_H_
+
+#include "internal.h" /* ACCESS_ONCE, UV_UNUSED */
+#include "atomic-ops.h"
+
+#define UV_SPINLOCK_INITIALIZER { 0 }
+
+typedef struct {
+ int lock;
+} uv_spinlock_t;
+
+UV_UNUSED(static void uv_spinlock_init(uv_spinlock_t* spinlock));
+UV_UNUSED(static void uv_spinlock_lock(uv_spinlock_t* spinlock));
+UV_UNUSED(static void uv_spinlock_unlock(uv_spinlock_t* spinlock));
+UV_UNUSED(static int uv_spinlock_trylock(uv_spinlock_t* spinlock));
+
+UV_UNUSED(static void uv_spinlock_init(uv_spinlock_t* spinlock)) {
+ ACCESS_ONCE(int, spinlock->lock) = 0;
+}
+
+UV_UNUSED(static void uv_spinlock_lock(uv_spinlock_t* spinlock)) {
+ while (!uv_spinlock_trylock(spinlock)) cpu_relax();
+}
+
+UV_UNUSED(static void uv_spinlock_unlock(uv_spinlock_t* spinlock)) {
+ ACCESS_ONCE(int, spinlock->lock) = 0;
+}
+
+UV_UNUSED(static int uv_spinlock_trylock(uv_spinlock_t* spinlock)) {
+ /* TODO(bnoordhuis) Maybe change to a ticket lock to guarantee fair queueing.
+ * Not really critical until we have locks that are (frequently) contended
+ * for by several threads.
+ */
+ return 0 == cmpxchgi(&spinlock->lock, 0, 1);
+}
+
+#endif /* UV_SPINLOCK_H_ */
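The trylock above leans on libuv's internal cmpxchgi()/cpu_relax() helpers from atomic-ops.h; the same shape can be written with standard C11 atomics, roughly as follows (a sketch for comparison, not libuv code):

#include <stdatomic.h>

typedef struct {
  atomic_int lock;                  /* 0 = unlocked, 1 = locked */
} spinlock_c11;

static int spinlock_c11_trylock(spinlock_c11* s) {
  int expected = 0;
  return atomic_compare_exchange_strong_explicit(&s->lock, &expected, 1,
                                                 memory_order_acquire,
                                                 memory_order_relaxed);
}

static void spinlock_c11_lock(spinlock_c11* s) {
  while (!spinlock_c11_trylock(s))
    ;                               /* a real lock would add a pause/yield hint */
}

static void spinlock_c11_unlock(spinlock_c11* s) {
  atomic_store_explicit(&s->lock, 0, memory_order_release);
}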
diff --git a/Utilities/cmlibuv/src/unix/stream.c b/Utilities/cmlibuv/src/unix/stream.c
new file mode 100644
index 0000000..3b6da8d
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/stream.c
@@ -0,0 +1,1693 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/uio.h>
+#include <sys/un.h>
+#include <unistd.h>
+#include <limits.h> /* IOV_MAX */
+
+#if defined(__APPLE__)
+# include <sys/event.h>
+# include <sys/time.h>
+# include <sys/select.h>
+
+/* Forward declaration */
+typedef struct uv__stream_select_s uv__stream_select_t;
+
+struct uv__stream_select_s {
+ uv_stream_t* stream;
+ uv_thread_t thread;
+ uv_sem_t close_sem;
+ uv_sem_t async_sem;
+ uv_async_t async;
+ int events;
+ int fake_fd;
+ int int_fd;
+ int fd;
+ fd_set* sread;
+ size_t sread_sz;
+ fd_set* swrite;
+ size_t swrite_sz;
+};
+
+/* Due to a possible kernel bug at least in OS X 10.10 "Yosemite",
+ * EPROTOTYPE can be returned while trying to write to a socket that is
+ * shutting down. If we retry the write, we should get the expected EPIPE
+ * instead.
+ */
+# define RETRY_ON_WRITE_ERROR(errno) (errno == EINTR || errno == EPROTOTYPE)
+# define IS_TRANSIENT_WRITE_ERROR(errno, send_handle) \
+ (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS || \
+ (errno == EMSGSIZE && send_handle != NULL))
+#else
+# define RETRY_ON_WRITE_ERROR(errno) (errno == EINTR)
+# define IS_TRANSIENT_WRITE_ERROR(errno, send_handle) \
+ (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
+#endif /* defined(__APPLE__) */
+
+static void uv__stream_connect(uv_stream_t*);
+static void uv__write(uv_stream_t* stream);
+static void uv__read(uv_stream_t* stream);
+static void uv__stream_io(uv_loop_t* loop, uv__io_t* w, unsigned int events);
+static void uv__write_callbacks(uv_stream_t* stream);
+static size_t uv__write_req_size(uv_write_t* req);
+
+
+void uv__stream_init(uv_loop_t* loop,
+ uv_stream_t* stream,
+ uv_handle_type type) {
+ int err;
+
+ uv__handle_init(loop, (uv_handle_t*)stream, type);
+ stream->read_cb = NULL;
+ stream->alloc_cb = NULL;
+ stream->close_cb = NULL;
+ stream->connection_cb = NULL;
+ stream->connect_req = NULL;
+ stream->shutdown_req = NULL;
+ stream->accepted_fd = -1;
+ stream->queued_fds = NULL;
+ stream->delayed_error = 0;
+ QUEUE_INIT(&stream->write_queue);
+ QUEUE_INIT(&stream->write_completed_queue);
+ stream->write_queue_size = 0;
+
+ if (loop->emfile_fd == -1) {
+ err = uv__open_cloexec("/dev/null", O_RDONLY);
+ if (err < 0)
+      /* In the rare case that "/dev/null" isn't mounted, open "/"
+ * instead.
+ */
+ err = uv__open_cloexec("/", O_RDONLY);
+ if (err >= 0)
+ loop->emfile_fd = err;
+ }
+
+#if defined(__APPLE__) && !defined(CMAKE_BOOTSTRAP)
+ stream->select = NULL;
+#endif /* defined(__APPLE__) && !defined(CMAKE_BOOTSTRAP) */
+
+ uv__io_init(&stream->io_watcher, uv__stream_io, -1);
+}
+
+
+static void uv__stream_osx_interrupt_select(uv_stream_t* stream) {
+#if defined(__APPLE__) && !defined(CMAKE_BOOTSTRAP)
+ /* Notify select() thread about state change */
+ uv__stream_select_t* s;
+ int r;
+
+ s = stream->select;
+ if (s == NULL)
+ return;
+
+ /* Interrupt select() loop
+   * NOTE: fake_fd and int_fd are a socketpair(), so writing to one end
+   * raises a read event on the other side.
+ */
+ do
+ r = write(s->fake_fd, "x", 1);
+ while (r == -1 && errno == EINTR);
+
+ assert(r == 1);
+#else /* !defined(__APPLE__) */
+ /* No-op on any other platform */
+#endif /* !defined(__APPLE__) */
+}
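The interrupt helper above leans on the standard socketpair self-wake pattern: writing a single byte to one end of a connected pair makes the other end readable, which unblocks the select() call running in the worker thread. A minimal standalone sketch of that pattern, with illustrative names and not taken from libuv itself:

  #include <errno.h>
  #include <sys/socket.h>
  #include <unistd.h>

  /* wake_fds[1] is watched by a thread blocked in select()/poll();
   * calling wakeup() from any other thread makes that call return. */
  static int wake_fds[2];

  static int wake_init(void) {
    return socketpair(AF_UNIX, SOCK_STREAM, 0, wake_fds);
  }

  static int wakeup(void) {
    ssize_t r;
    do
      r = write(wake_fds[0], "x", 1);
    while (r == -1 && errno == EINTR);
    return r == 1 ? 0 : -1;
  }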
+
+
+#if defined(__APPLE__) && !defined(CMAKE_BOOTSTRAP)
+static void uv__stream_osx_select(void* arg) {
+ uv_stream_t* stream;
+ uv__stream_select_t* s;
+ char buf[1024];
+ int events;
+ int fd;
+ int r;
+ int max_fd;
+
+ stream = arg;
+ s = stream->select;
+ fd = s->fd;
+
+ if (fd > s->int_fd)
+ max_fd = fd;
+ else
+ max_fd = s->int_fd;
+
+ while (1) {
+ /* Terminate on semaphore */
+ if (uv_sem_trywait(&s->close_sem) == 0)
+ break;
+
+ /* Watch fd using select(2) */
+ memset(s->sread, 0, s->sread_sz);
+ memset(s->swrite, 0, s->swrite_sz);
+
+ if (uv__io_active(&stream->io_watcher, POLLIN))
+ FD_SET(fd, s->sread);
+ if (uv__io_active(&stream->io_watcher, POLLOUT))
+ FD_SET(fd, s->swrite);
+ FD_SET(s->int_fd, s->sread);
+
+ /* Wait indefinitely for fd events */
+ r = select(max_fd + 1, s->sread, s->swrite, NULL, NULL);
+ if (r == -1) {
+ if (errno == EINTR)
+ continue;
+
+ /* XXX: Possible?! */
+ abort();
+ }
+
+ /* Ignore timeouts */
+ if (r == 0)
+ continue;
+
+ /* Empty socketpair's buffer in case of interruption */
+ if (FD_ISSET(s->int_fd, s->sread))
+ while (1) {
+ r = read(s->int_fd, buf, sizeof(buf));
+
+ if (r == sizeof(buf))
+ continue;
+
+ if (r != -1)
+ break;
+
+ if (errno == EAGAIN || errno == EWOULDBLOCK)
+ break;
+
+ if (errno == EINTR)
+ continue;
+
+ abort();
+ }
+
+ /* Handle events */
+ events = 0;
+ if (FD_ISSET(fd, s->sread))
+ events |= POLLIN;
+ if (FD_ISSET(fd, s->swrite))
+ events |= POLLOUT;
+
+ assert(events != 0 || FD_ISSET(s->int_fd, s->sread));
+ if (events != 0) {
+ ACCESS_ONCE(int, s->events) = events;
+
+ uv_async_send(&s->async);
+ uv_sem_wait(&s->async_sem);
+
+ /* Should be processed at this stage */
+ assert((s->events == 0) || (stream->flags & UV_HANDLE_CLOSING));
+ }
+ }
+}
+
+
+static void uv__stream_osx_select_cb(uv_async_t* handle) {
+ uv__stream_select_t* s;
+ uv_stream_t* stream;
+ int events;
+
+ s = container_of(handle, uv__stream_select_t, async);
+ stream = s->stream;
+
+ /* Get and reset stream's events */
+ events = s->events;
+ ACCESS_ONCE(int, s->events) = 0;
+
+ assert(events != 0);
+ assert(events == (events & (POLLIN | POLLOUT)));
+
+ /* Invoke callback on event-loop */
+ if ((events & POLLIN) && uv__io_active(&stream->io_watcher, POLLIN))
+ uv__stream_io(stream->loop, &stream->io_watcher, POLLIN);
+
+ if ((events & POLLOUT) && uv__io_active(&stream->io_watcher, POLLOUT))
+ uv__stream_io(stream->loop, &stream->io_watcher, POLLOUT);
+
+ if (stream->flags & UV_HANDLE_CLOSING)
+ return;
+
+ /* NOTE: It is important to do it here, otherwise `select()` might be called
+   * before the actual `uv__read()`, leading to a blocking syscall.
+ */
+ uv_sem_post(&s->async_sem);
+}
+
+
+static void uv__stream_osx_cb_close(uv_handle_t* async) {
+ uv__stream_select_t* s;
+
+ s = container_of(async, uv__stream_select_t, async);
+ uv__free(s);
+}
+
+
+int uv__stream_try_select(uv_stream_t* stream, int* fd) {
+ /*
+   * kqueue doesn't work with some files from the /dev mount on OS X,
+   * so run select(2) in a separate thread for those fds.
+ */
+
+ struct kevent filter[1];
+ struct kevent events[1];
+ struct timespec timeout;
+ uv__stream_select_t* s;
+ int fds[2];
+ int err;
+ int ret;
+ int kq;
+ int old_fd;
+ int max_fd;
+ size_t sread_sz;
+ size_t swrite_sz;
+
+ kq = kqueue();
+ if (kq == -1) {
+ perror("(libuv) kqueue()");
+ return UV__ERR(errno);
+ }
+
+ EV_SET(&filter[0], *fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, 0);
+
+  /* Use a small timeout; we only want to capture EINVALs */
+ timeout.tv_sec = 0;
+ timeout.tv_nsec = 1;
+
+ do
+ ret = kevent(kq, filter, 1, events, 1, &timeout);
+ while (ret == -1 && errno == EINTR);
+
+ uv__close(kq);
+
+ if (ret == -1)
+ return UV__ERR(errno);
+
+ if (ret == 0 || (events[0].flags & EV_ERROR) == 0 || events[0].data != EINVAL)
+ return 0;
+
+ /* At this point we definitely know that this fd won't work with kqueue */
+
+ /*
+   * Create fds for the io watcher and for interrupting the select() loop.
+   * NOTE: do this before the malloc below so the fd_sets get enough space.
+ */
+ if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds))
+ return UV__ERR(errno);
+
+ max_fd = *fd;
+ if (fds[1] > max_fd)
+ max_fd = fds[1];
+
+ sread_sz = ROUND_UP(max_fd + 1, sizeof(uint32_t) * NBBY) / NBBY;
+ swrite_sz = sread_sz;
+
+ s = uv__malloc(sizeof(*s) + sread_sz + swrite_sz);
+ if (s == NULL) {
+ err = UV_ENOMEM;
+ goto failed_malloc;
+ }
+
+ s->events = 0;
+ s->fd = *fd;
+ s->sread = (fd_set*) ((char*) s + sizeof(*s));
+ s->sread_sz = sread_sz;
+ s->swrite = (fd_set*) ((char*) s->sread + sread_sz);
+ s->swrite_sz = swrite_sz;
+
+ err = uv_async_init(stream->loop, &s->async, uv__stream_osx_select_cb);
+ if (err)
+ goto failed_async_init;
+
+ s->async.flags |= UV_HANDLE_INTERNAL;
+ uv__handle_unref(&s->async);
+
+ err = uv_sem_init(&s->close_sem, 0);
+ if (err != 0)
+ goto failed_close_sem_init;
+
+ err = uv_sem_init(&s->async_sem, 0);
+ if (err != 0)
+ goto failed_async_sem_init;
+
+ s->fake_fd = fds[0];
+ s->int_fd = fds[1];
+
+ old_fd = *fd;
+ s->stream = stream;
+ stream->select = s;
+ *fd = s->fake_fd;
+
+ err = uv_thread_create(&s->thread, uv__stream_osx_select, stream);
+ if (err != 0)
+ goto failed_thread_create;
+
+ return 0;
+
+failed_thread_create:
+ s->stream = NULL;
+ stream->select = NULL;
+ *fd = old_fd;
+
+ uv_sem_destroy(&s->async_sem);
+
+failed_async_sem_init:
+ uv_sem_destroy(&s->close_sem);
+
+failed_close_sem_init:
+ uv__close(fds[0]);
+ uv__close(fds[1]);
+ uv_close((uv_handle_t*) &s->async, uv__stream_osx_cb_close);
+ return err;
+
+failed_async_init:
+ uv__free(s);
+
+failed_malloc:
+ uv__close(fds[0]);
+ uv__close(fds[1]);
+
+ return err;
+}
+#endif /* defined(__APPLE__) */
+
+
+int uv__stream_open(uv_stream_t* stream, int fd, int flags) {
+#if defined(__APPLE__)
+ int enable;
+#endif
+
+ if (!(stream->io_watcher.fd == -1 || stream->io_watcher.fd == fd))
+ return UV_EBUSY;
+
+ assert(fd >= 0);
+ stream->flags |= flags;
+
+ if (stream->type == UV_TCP) {
+ if ((stream->flags & UV_HANDLE_TCP_NODELAY) && uv__tcp_nodelay(fd, 1))
+ return UV__ERR(errno);
+
+    /* TODO Use the delay the user passed in. */
+ if ((stream->flags & UV_HANDLE_TCP_KEEPALIVE) &&
+ uv__tcp_keepalive(fd, 1, 60)) {
+ return UV__ERR(errno);
+ }
+ }
+
+#if defined(__APPLE__)
+ enable = 1;
+ if (setsockopt(fd, SOL_SOCKET, SO_OOBINLINE, &enable, sizeof(enable)) &&
+ errno != ENOTSOCK &&
+ errno != EINVAL) {
+ return UV__ERR(errno);
+ }
+#endif
+
+ stream->io_watcher.fd = fd;
+
+ return 0;
+}
+
+
+void uv__stream_flush_write_queue(uv_stream_t* stream, int error) {
+ uv_write_t* req;
+ QUEUE* q;
+ while (!QUEUE_EMPTY(&stream->write_queue)) {
+ q = QUEUE_HEAD(&stream->write_queue);
+ QUEUE_REMOVE(q);
+
+ req = QUEUE_DATA(q, uv_write_t, queue);
+ req->error = error;
+
+ QUEUE_INSERT_TAIL(&stream->write_completed_queue, &req->queue);
+ }
+}
+
+
+void uv__stream_destroy(uv_stream_t* stream) {
+ assert(!uv__io_active(&stream->io_watcher, POLLIN | POLLOUT));
+ assert(stream->flags & UV_HANDLE_CLOSED);
+
+ if (stream->connect_req) {
+ uv__req_unregister(stream->loop, stream->connect_req);
+ stream->connect_req->cb(stream->connect_req, UV_ECANCELED);
+ stream->connect_req = NULL;
+ }
+
+ uv__stream_flush_write_queue(stream, UV_ECANCELED);
+ uv__write_callbacks(stream);
+
+ if (stream->shutdown_req) {
+    /* The ECANCELED error code is a lie; the shutdown(2) syscall is a
+ * fait accompli at this point. Maybe we should revisit this in v0.11.
+ * A possible reason for leaving it unchanged is that it informs the
+ * callee that the handle has been destroyed.
+ */
+ uv__req_unregister(stream->loop, stream->shutdown_req);
+ stream->shutdown_req->cb(stream->shutdown_req, UV_ECANCELED);
+ stream->shutdown_req = NULL;
+ }
+
+ assert(stream->write_queue_size == 0);
+}
+
+
+/* Implements a best effort approach to mitigating accept() EMFILE errors.
+ * We have a spare file descriptor stashed away that we close to get below
+ * the EMFILE limit. Next, we accept all pending connections and close them
+ * immediately to signal the clients that we're overloaded - and we are, but
+ * we still keep on trucking.
+ *
+ * There is one caveat: it's not reliable in a multi-threaded environment.
+ * The file descriptor limit is per process. Our party trick fails if another
+ * thread opens a file or creates a socket in the time window between us
+ * calling close() and accept().
+ */
+static int uv__emfile_trick(uv_loop_t* loop, int accept_fd) {
+ int err;
+ int emfile_fd;
+
+ if (loop->emfile_fd == -1)
+ return UV_EMFILE;
+
+ uv__close(loop->emfile_fd);
+ loop->emfile_fd = -1;
+
+ do {
+ err = uv__accept(accept_fd);
+ if (err >= 0)
+ uv__close(err);
+ } while (err >= 0 || err == UV_EINTR);
+
+ emfile_fd = uv__open_cloexec("/", O_RDONLY);
+ if (emfile_fd >= 0)
+ loop->emfile_fd = emfile_fd;
+
+ return err;
+}
+
+
+#if defined(UV_HAVE_KQUEUE)
+# define UV_DEC_BACKLOG(w) w->rcount--;
+#else
+# define UV_DEC_BACKLOG(w) /* no-op */
+#endif /* defined(UV_HAVE_KQUEUE) */
+
+
+void uv__server_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
+ uv_stream_t* stream;
+ int err;
+
+ stream = container_of(w, uv_stream_t, io_watcher);
+ assert(events & POLLIN);
+ assert(stream->accepted_fd == -1);
+ assert(!(stream->flags & UV_HANDLE_CLOSING));
+
+ uv__io_start(stream->loop, &stream->io_watcher, POLLIN);
+
+ /* connection_cb can close the server socket while we're
+ * in the loop so check it on each iteration.
+ */
+ while (uv__stream_fd(stream) != -1) {
+ assert(stream->accepted_fd == -1);
+
+#if defined(UV_HAVE_KQUEUE)
+ if (w->rcount <= 0)
+ return;
+#endif /* defined(UV_HAVE_KQUEUE) */
+
+ err = uv__accept(uv__stream_fd(stream));
+ if (err < 0) {
+ if (err == UV_EAGAIN || err == UV__ERR(EWOULDBLOCK))
+ return; /* Not an error. */
+
+ if (err == UV_ECONNABORTED)
+ continue; /* Ignore. Nothing we can do about that. */
+
+ if (err == UV_EMFILE || err == UV_ENFILE) {
+ err = uv__emfile_trick(loop, uv__stream_fd(stream));
+ if (err == UV_EAGAIN || err == UV__ERR(EWOULDBLOCK))
+ break;
+ }
+
+ stream->connection_cb(stream, err);
+ continue;
+ }
+
+ UV_DEC_BACKLOG(w)
+ stream->accepted_fd = err;
+ stream->connection_cb(stream, 0);
+
+ if (stream->accepted_fd != -1) {
+      /* The user hasn't yet accepted the connection by calling uv_accept(). */
+ uv__io_stop(loop, &stream->io_watcher, POLLIN);
+ return;
+ }
+
+ if (stream->type == UV_TCP &&
+ (stream->flags & UV_HANDLE_TCP_SINGLE_ACCEPT)) {
+ /* Give other processes a chance to accept connections. */
+ struct timespec timeout = { 0, 1 };
+ nanosleep(&timeout, NULL);
+ }
+ }
+}
+
+
+#undef UV_DEC_BACKLOG
+
+
+int uv_accept(uv_stream_t* server, uv_stream_t* client) {
+ int err;
+
+ assert(server->loop == client->loop);
+
+ if (server->accepted_fd == -1)
+ return UV_EAGAIN;
+
+ switch (client->type) {
+ case UV_NAMED_PIPE:
+ case UV_TCP:
+ err = uv__stream_open(client,
+ server->accepted_fd,
+ UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
+ if (err) {
+ /* TODO handle error */
+ uv__close(server->accepted_fd);
+ goto done;
+ }
+ break;
+
+ case UV_UDP:
+ err = uv_udp_open((uv_udp_t*) client, server->accepted_fd);
+ if (err) {
+ uv__close(server->accepted_fd);
+ goto done;
+ }
+ break;
+
+ default:
+ return UV_EINVAL;
+ }
+
+ client->flags |= UV_HANDLE_BOUND;
+
+done:
+ /* Process queued fds */
+ if (server->queued_fds != NULL) {
+ uv__stream_queued_fds_t* queued_fds;
+
+ queued_fds = server->queued_fds;
+
+ /* Read first */
+ server->accepted_fd = queued_fds->fds[0];
+
+ /* All read, free */
+ assert(queued_fds->offset > 0);
+ if (--queued_fds->offset == 0) {
+ uv__free(queued_fds);
+ server->queued_fds = NULL;
+ } else {
+ /* Shift rest */
+ memmove(queued_fds->fds,
+ queued_fds->fds + 1,
+ queued_fds->offset * sizeof(*queued_fds->fds));
+ }
+ } else {
+ server->accepted_fd = -1;
+ if (err == 0)
+ uv__io_start(server->loop, &server->io_watcher, POLLIN);
+ }
+ return err;
+}
+
+
+int uv_listen(uv_stream_t* stream, int backlog, uv_connection_cb cb) {
+ int err;
+
+ switch (stream->type) {
+ case UV_TCP:
+ err = uv_tcp_listen((uv_tcp_t*)stream, backlog, cb);
+ break;
+
+ case UV_NAMED_PIPE:
+ err = uv_pipe_listen((uv_pipe_t*)stream, backlog, cb);
+ break;
+
+ default:
+ err = UV_EINVAL;
+ }
+
+ if (err == 0)
+ uv__handle_start(stream);
+
+ return err;
+}
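For orientation, this is roughly how uv_listen() and uv_accept() are driven from application code. A minimal TCP server sketch using only the public API this file implements; error handling is trimmed and the callback names are illustrative:

  #include <stdlib.h>
  #include "uv.h"

  static void on_close(uv_handle_t* handle) {
    free(handle);
  }

  static void on_connection(uv_stream_t* server, int status) {
    uv_tcp_t* client;

    if (status < 0)
      return;  /* accept failed inside the loop; nothing to pick up */

    client = malloc(sizeof(*client));
    uv_tcp_init(server->loop, client);
    if (uv_accept(server, (uv_stream_t*) client) == 0) {
      /* The connection is live; start reading/writing here. */
    } else {
      uv_close((uv_handle_t*) client, on_close);
    }
  }

  int start_server(uv_loop_t* loop, uv_tcp_t* server) {
    struct sockaddr_in addr;

    uv_tcp_init(loop, server);
    uv_ip4_addr("0.0.0.0", 7000, &addr);
    uv_tcp_bind(server, (const struct sockaddr*) &addr, 0);
    return uv_listen((uv_stream_t*) server, 128, on_connection);
  }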
+
+
+static void uv__drain(uv_stream_t* stream) {
+ uv_shutdown_t* req;
+ int err;
+
+ assert(QUEUE_EMPTY(&stream->write_queue));
+ uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT);
+ uv__stream_osx_interrupt_select(stream);
+
+ /* Shutdown? */
+ if ((stream->flags & UV_HANDLE_SHUTTING) &&
+ !(stream->flags & UV_HANDLE_CLOSING) &&
+ !(stream->flags & UV_HANDLE_SHUT)) {
+ assert(stream->shutdown_req);
+
+ req = stream->shutdown_req;
+ stream->shutdown_req = NULL;
+ stream->flags &= ~UV_HANDLE_SHUTTING;
+ uv__req_unregister(stream->loop, req);
+
+ err = 0;
+ if (shutdown(uv__stream_fd(stream), SHUT_WR))
+ err = UV__ERR(errno);
+
+ if (err == 0)
+ stream->flags |= UV_HANDLE_SHUT;
+
+ if (req->cb != NULL)
+ req->cb(req, err);
+ }
+}
+
+
+static ssize_t uv__writev(int fd, struct iovec* vec, size_t n) {
+ if (n == 1)
+ return write(fd, vec->iov_base, vec->iov_len);
+ else
+ return writev(fd, vec, n);
+}
+
+
+static size_t uv__write_req_size(uv_write_t* req) {
+ size_t size;
+
+ assert(req->bufs != NULL);
+ size = uv__count_bufs(req->bufs + req->write_index,
+ req->nbufs - req->write_index);
+ assert(req->handle->write_queue_size >= size);
+
+ return size;
+}
+
+
+/* Returns 1 if all write request data has been written, or 0 if there is still
+ * more data to write.
+ *
+ * Note: the return value only says something about the *current* request.
+ * There may still be other write requests sitting in the queue.
+ */
+static int uv__write_req_update(uv_stream_t* stream,
+ uv_write_t* req,
+ size_t n) {
+ uv_buf_t* buf;
+ size_t len;
+
+ assert(n <= stream->write_queue_size);
+ stream->write_queue_size -= n;
+
+ buf = req->bufs + req->write_index;
+
+ do {
+ len = n < buf->len ? n : buf->len;
+ buf->base += len;
+ buf->len -= len;
+ buf += (buf->len == 0); /* Advance to next buffer if this one is empty. */
+ n -= len;
+ } while (n > 0);
+
+ req->write_index = buf - req->bufs;
+
+ return req->write_index == req->nbufs;
+}
+
+
+static void uv__write_req_finish(uv_write_t* req) {
+ uv_stream_t* stream = req->handle;
+
+ /* Pop the req off tcp->write_queue. */
+ QUEUE_REMOVE(&req->queue);
+
+ /* Only free when there was no error. On error, we touch up write_queue_size
+ * right before making the callback. The reason we don't do that right away
+ * is that a write_queue_size > 0 is our only way to signal to the user that
+ * they should stop writing - which they should if we got an error. Something
+ * to revisit in future revisions of the libuv API.
+ */
+ if (req->error == 0) {
+ if (req->bufs != req->bufsml)
+ uv__free(req->bufs);
+ req->bufs = NULL;
+ }
+
+ /* Add it to the write_completed_queue where it will have its
+ * callback called in the near future.
+ */
+ QUEUE_INSERT_TAIL(&stream->write_completed_queue, &req->queue);
+ uv__io_feed(stream->loop, &stream->io_watcher);
+}
+
+
+static int uv__handle_fd(uv_handle_t* handle) {
+ switch (handle->type) {
+ case UV_NAMED_PIPE:
+ case UV_TCP:
+ return ((uv_stream_t*) handle)->io_watcher.fd;
+
+ case UV_UDP:
+ return ((uv_udp_t*) handle)->io_watcher.fd;
+
+ default:
+ return -1;
+ }
+}
+
+static void uv__write(uv_stream_t* stream) {
+ struct iovec* iov;
+ QUEUE* q;
+ uv_write_t* req;
+ int iovmax;
+ int iovcnt;
+ ssize_t n;
+ int err;
+
+start:
+
+ assert(uv__stream_fd(stream) >= 0);
+
+ if (QUEUE_EMPTY(&stream->write_queue))
+ return;
+
+ q = QUEUE_HEAD(&stream->write_queue);
+ req = QUEUE_DATA(q, uv_write_t, queue);
+ assert(req->handle == stream);
+
+ /*
+ * Cast to iovec. We had to have our own uv_buf_t instead of iovec
+ * because Windows's WSABUF is not an iovec.
+ */
+ assert(sizeof(uv_buf_t) == sizeof(struct iovec));
+ iov = (struct iovec*) &(req->bufs[req->write_index]);
+ iovcnt = req->nbufs - req->write_index;
+
+ iovmax = uv__getiovmax();
+
+ /* Limit iov count to avoid EINVALs from writev() */
+ if (iovcnt > iovmax)
+ iovcnt = iovmax;
+
+ /*
+ * Now do the actual writev. Note that we've been updating the pointers
+ * inside the iov each time we write. So there is no need to offset it.
+ */
+
+ if (req->send_handle) {
+ int fd_to_send;
+ struct msghdr msg;
+ struct cmsghdr *cmsg;
+ union {
+ char data[64];
+ struct cmsghdr alias;
+ } scratch;
+
+ if (uv__is_closing(req->send_handle)) {
+ err = UV_EBADF;
+ goto error;
+ }
+
+ fd_to_send = uv__handle_fd((uv_handle_t*) req->send_handle);
+
+ memset(&scratch, 0, sizeof(scratch));
+
+ assert(fd_to_send >= 0);
+
+ msg.msg_name = NULL;
+ msg.msg_namelen = 0;
+ msg.msg_iov = iov;
+ msg.msg_iovlen = iovcnt;
+ msg.msg_flags = 0;
+
+ msg.msg_control = &scratch.alias;
+ msg.msg_controllen = CMSG_SPACE(sizeof(fd_to_send));
+
+ cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_RIGHTS;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(fd_to_send));
+
+ /* silence aliasing warning */
+ {
+ void* pv = CMSG_DATA(cmsg);
+ int* pi = pv;
+ *pi = fd_to_send;
+ }
+
+ do
+ n = sendmsg(uv__stream_fd(stream), &msg, 0);
+ while (n == -1 && RETRY_ON_WRITE_ERROR(errno));
+
+ /* Ensure the handle isn't sent again in case this is a partial write. */
+ if (n >= 0)
+ req->send_handle = NULL;
+ } else {
+ do
+ n = uv__writev(uv__stream_fd(stream), iov, iovcnt);
+ while (n == -1 && RETRY_ON_WRITE_ERROR(errno));
+ }
+
+ if (n == -1 && !IS_TRANSIENT_WRITE_ERROR(errno, req->send_handle)) {
+ err = UV__ERR(errno);
+ goto error;
+ }
+
+ if (n >= 0 && uv__write_req_update(stream, req, n)) {
+ uv__write_req_finish(req);
+ return; /* TODO(bnoordhuis) Start trying to write the next request. */
+ }
+
+ /* If this is a blocking stream, try again. */
+ if (stream->flags & UV_HANDLE_BLOCKING_WRITES)
+ goto start;
+
+ /* We're not done. */
+ uv__io_start(stream->loop, &stream->io_watcher, POLLOUT);
+
+ /* Notify select() thread about state change */
+ uv__stream_osx_interrupt_select(stream);
+
+ return;
+
+error:
+ req->error = err;
+ uv__write_req_finish(req);
+ uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT);
+ if (!uv__io_active(&stream->io_watcher, POLLIN))
+ uv__handle_stop(stream);
+ uv__stream_osx_interrupt_select(stream);
+}
+
+
+static void uv__write_callbacks(uv_stream_t* stream) {
+ uv_write_t* req;
+ QUEUE* q;
+ QUEUE pq;
+
+ if (QUEUE_EMPTY(&stream->write_completed_queue))
+ return;
+
+ QUEUE_MOVE(&stream->write_completed_queue, &pq);
+
+ while (!QUEUE_EMPTY(&pq)) {
+ /* Pop a req off write_completed_queue. */
+ q = QUEUE_HEAD(&pq);
+ req = QUEUE_DATA(q, uv_write_t, queue);
+ QUEUE_REMOVE(q);
+ uv__req_unregister(stream->loop, req);
+
+ if (req->bufs != NULL) {
+ stream->write_queue_size -= uv__write_req_size(req);
+ if (req->bufs != req->bufsml)
+ uv__free(req->bufs);
+ req->bufs = NULL;
+ }
+
+ /* NOTE: call callback AFTER freeing the request data. */
+ if (req->cb)
+ req->cb(req, req->error);
+ }
+}
+
+
+uv_handle_type uv__handle_type(int fd) {
+ struct sockaddr_storage ss;
+ socklen_t sslen;
+ socklen_t len;
+ int type;
+
+ memset(&ss, 0, sizeof(ss));
+ sslen = sizeof(ss);
+
+ if (getsockname(fd, (struct sockaddr*)&ss, &sslen))
+ return UV_UNKNOWN_HANDLE;
+
+ len = sizeof type;
+
+ if (getsockopt(fd, SOL_SOCKET, SO_TYPE, &type, &len))
+ return UV_UNKNOWN_HANDLE;
+
+ if (type == SOCK_STREAM) {
+#if defined(_AIX) || defined(__DragonFly__)
+ /* on AIX/DragonFly the getsockname call returns an empty sa structure
+ * for sockets of type AF_UNIX. For all other types it will
+ * return a properly filled in structure.
+ */
+ if (sslen == 0)
+ return UV_NAMED_PIPE;
+#endif
+ switch (ss.ss_family) {
+ case AF_UNIX:
+ return UV_NAMED_PIPE;
+ case AF_INET:
+ case AF_INET6:
+ return UV_TCP;
+ }
+ }
+
+ if (type == SOCK_DGRAM &&
+ (ss.ss_family == AF_INET || ss.ss_family == AF_INET6))
+ return UV_UDP;
+
+ return UV_UNKNOWN_HANDLE;
+}
+
+
+static void uv__stream_eof(uv_stream_t* stream, const uv_buf_t* buf) {
+ stream->flags |= UV_HANDLE_READ_EOF;
+ stream->flags &= ~UV_HANDLE_READING;
+ uv__io_stop(stream->loop, &stream->io_watcher, POLLIN);
+ if (!uv__io_active(&stream->io_watcher, POLLOUT))
+ uv__handle_stop(stream);
+ uv__stream_osx_interrupt_select(stream);
+ stream->read_cb(stream, UV_EOF, buf);
+}
+
+
+static int uv__stream_queue_fd(uv_stream_t* stream, int fd) {
+ uv__stream_queued_fds_t* queued_fds;
+ unsigned int queue_size;
+
+ queued_fds = stream->queued_fds;
+ if (queued_fds == NULL) {
+ queue_size = 8;
+ queued_fds = uv__malloc((queue_size - 1) * sizeof(*queued_fds->fds) +
+ sizeof(*queued_fds));
+ if (queued_fds == NULL)
+ return UV_ENOMEM;
+ queued_fds->size = queue_size;
+ queued_fds->offset = 0;
+ stream->queued_fds = queued_fds;
+
+ /* Grow */
+ } else if (queued_fds->size == queued_fds->offset) {
+ queue_size = queued_fds->size + 8;
+ queued_fds = uv__realloc(queued_fds,
+ (queue_size - 1) * sizeof(*queued_fds->fds) +
+ sizeof(*queued_fds));
+
+ /*
+     * Allocation failure; report back.
+     * NOTE: if it is fatal, the sockets will be closed in uv__stream_close()
+ */
+ if (queued_fds == NULL)
+ return UV_ENOMEM;
+ queued_fds->size = queue_size;
+ stream->queued_fds = queued_fds;
+ }
+
+ /* Put fd in a queue */
+ queued_fds->fds[queued_fds->offset++] = fd;
+
+ return 0;
+}
+
+
+#if defined(__PASE__)
+/* On IBMi PASE the control message length cannot exceed 256 bytes. */
+# define UV__CMSG_FD_COUNT 60
+#else
+# define UV__CMSG_FD_COUNT 64
+#endif
+#define UV__CMSG_FD_SIZE (UV__CMSG_FD_COUNT * sizeof(int))
+
+
+static int uv__stream_recv_cmsg(uv_stream_t* stream, struct msghdr* msg) {
+ struct cmsghdr* cmsg;
+
+ for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) {
+ char* start;
+ char* end;
+ int err;
+ void* pv;
+ int* pi;
+ unsigned int i;
+ unsigned int count;
+
+ if (cmsg->cmsg_type != SCM_RIGHTS) {
+ fprintf(stderr, "ignoring non-SCM_RIGHTS ancillary data: %d\n",
+ cmsg->cmsg_type);
+ continue;
+ }
+
+ /* silence aliasing warning */
+ pv = CMSG_DATA(cmsg);
+ pi = pv;
+
+ /* Count available fds */
+ start = (char*) cmsg;
+ end = (char*) cmsg + cmsg->cmsg_len;
+ count = 0;
+ while (start + CMSG_LEN(count * sizeof(*pi)) < end)
+ count++;
+ assert(start + CMSG_LEN(count * sizeof(*pi)) == end);
+
+ for (i = 0; i < count; i++) {
+ /* Already has accepted fd, queue now */
+ if (stream->accepted_fd != -1) {
+ err = uv__stream_queue_fd(stream, pi[i]);
+ if (err != 0) {
+ /* Close rest */
+ for (; i < count; i++)
+ uv__close(pi[i]);
+ return err;
+ }
+ } else {
+ stream->accepted_fd = pi[i];
+ }
+ }
+ }
+
+ return 0;
+}
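The function above walks the SCM_RIGHTS control messages that recvmsg() attaches when a peer passes descriptors. For reference, the plain-POSIX shape of receiving a single descriptor looks roughly like this (a sketch, not libuv code):

  #include <string.h>
  #include <sys/socket.h>
  #include <sys/uio.h>

  /* Receive one byte of payload plus, optionally, one fd over a UNIX
   * domain socket. Returns the received fd or -1 if none was attached. */
  static int recv_fd(int sock) {
    char data;
    struct iovec iov;
    union { char buf[CMSG_SPACE(sizeof(int))]; struct cmsghdr align; } u;
    struct msghdr msg;
    struct cmsghdr* cmsg;
    int fd = -1;

    iov.iov_base = &data;
    iov.iov_len = 1;

    memset(&msg, 0, sizeof(msg));
    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;
    msg.msg_control = u.buf;
    msg.msg_controllen = sizeof(u.buf);

    if (recvmsg(sock, &msg, 0) <= 0)
      return -1;

    for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL; cmsg = CMSG_NXTHDR(&msg, cmsg))
      if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS)
        memcpy(&fd, CMSG_DATA(cmsg), sizeof(fd));

    return fd;
  }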
+
+
+#ifdef __clang__
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wgnu-folding-constant"
+# pragma clang diagnostic ignored "-Wvla-extension"
+#endif
+
+static void uv__read(uv_stream_t* stream) {
+ uv_buf_t buf;
+ ssize_t nread;
+ struct msghdr msg;
+ char cmsg_space[CMSG_SPACE(UV__CMSG_FD_SIZE)];
+ int count;
+ int err;
+ int is_ipc;
+
+ stream->flags &= ~UV_HANDLE_READ_PARTIAL;
+
+ /* Prevent loop starvation when the data comes in as fast as (or faster than)
+ * we can read it. XXX Need to rearm fd if we switch to edge-triggered I/O.
+ */
+ count = 32;
+
+ is_ipc = stream->type == UV_NAMED_PIPE && ((uv_pipe_t*) stream)->ipc;
+
+ /* XXX: Maybe instead of having UV_HANDLE_READING we just test if
+ * tcp->read_cb is NULL or not?
+ */
+ while (stream->read_cb
+ && (stream->flags & UV_HANDLE_READING)
+ && (count-- > 0)) {
+ assert(stream->alloc_cb != NULL);
+
+ buf = uv_buf_init(NULL, 0);
+ stream->alloc_cb((uv_handle_t*)stream, 64 * 1024, &buf);
+ if (buf.base == NULL || buf.len == 0) {
+ /* User indicates it can't or won't handle the read. */
+ stream->read_cb(stream, UV_ENOBUFS, &buf);
+ return;
+ }
+
+ assert(buf.base != NULL);
+ assert(uv__stream_fd(stream) >= 0);
+
+ if (!is_ipc) {
+ do {
+ nread = read(uv__stream_fd(stream), buf.base, buf.len);
+ }
+ while (nread < 0 && errno == EINTR);
+ } else {
+ /* ipc uses recvmsg */
+ msg.msg_flags = 0;
+ msg.msg_iov = (struct iovec*) &buf;
+ msg.msg_iovlen = 1;
+ msg.msg_name = NULL;
+ msg.msg_namelen = 0;
+ /* Set up to receive a descriptor even if one isn't in the message */
+ msg.msg_controllen = sizeof(cmsg_space);
+ msg.msg_control = cmsg_space;
+
+ do {
+ nread = uv__recvmsg(uv__stream_fd(stream), &msg, 0);
+ }
+ while (nread < 0 && errno == EINTR);
+ }
+
+ if (nread < 0) {
+ /* Error */
+ if (errno == EAGAIN || errno == EWOULDBLOCK) {
+ /* Wait for the next one. */
+ if (stream->flags & UV_HANDLE_READING) {
+ uv__io_start(stream->loop, &stream->io_watcher, POLLIN);
+ uv__stream_osx_interrupt_select(stream);
+ }
+ stream->read_cb(stream, 0, &buf);
+#if defined(__CYGWIN__) || defined(__MSYS__)
+ } else if (errno == ECONNRESET && stream->type == UV_NAMED_PIPE) {
+ uv__stream_eof(stream, &buf);
+ return;
+#endif
+ } else {
+ /* Error. User should call uv_close(). */
+ stream->read_cb(stream, UV__ERR(errno), &buf);
+ if (stream->flags & UV_HANDLE_READING) {
+ stream->flags &= ~UV_HANDLE_READING;
+ uv__io_stop(stream->loop, &stream->io_watcher, POLLIN);
+ if (!uv__io_active(&stream->io_watcher, POLLOUT))
+ uv__handle_stop(stream);
+ uv__stream_osx_interrupt_select(stream);
+ }
+ }
+ return;
+ } else if (nread == 0) {
+ uv__stream_eof(stream, &buf);
+ return;
+ } else {
+ /* Successful read */
+ ssize_t buflen = buf.len;
+
+ if (is_ipc) {
+ err = uv__stream_recv_cmsg(stream, &msg);
+ if (err != 0) {
+ stream->read_cb(stream, err, &buf);
+ return;
+ }
+ }
+
+#if defined(__MVS__)
+ if (is_ipc && msg.msg_controllen > 0) {
+ uv_buf_t blankbuf;
+ int nread;
+ struct iovec *old;
+
+ blankbuf.base = 0;
+ blankbuf.len = 0;
+ old = msg.msg_iov;
+ msg.msg_iov = (struct iovec*) &blankbuf;
+ nread = 0;
+ do {
+ nread = uv__recvmsg(uv__stream_fd(stream), &msg, 0);
+ err = uv__stream_recv_cmsg(stream, &msg);
+ if (err != 0) {
+ stream->read_cb(stream, err, &buf);
+ msg.msg_iov = old;
+ return;
+ }
+ } while (nread == 0 && msg.msg_controllen > 0);
+ msg.msg_iov = old;
+ }
+#endif
+ stream->read_cb(stream, nread, &buf);
+
+      /* Return if we didn't fill the buffer; there is no more data to read. */
+ if (nread < buflen) {
+ stream->flags |= UV_HANDLE_READ_PARTIAL;
+ return;
+ }
+ }
+ }
+}
+
+
+#ifdef __clang__
+# pragma clang diagnostic pop
+#endif
+
+#undef UV__CMSG_FD_COUNT
+#undef UV__CMSG_FD_SIZE
+
+
+int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) {
+ assert(stream->type == UV_TCP ||
+ stream->type == UV_TTY ||
+ stream->type == UV_NAMED_PIPE);
+
+ if (!(stream->flags & UV_HANDLE_WRITABLE) ||
+ stream->flags & UV_HANDLE_SHUT ||
+ stream->flags & UV_HANDLE_SHUTTING ||
+ uv__is_closing(stream)) {
+ return UV_ENOTCONN;
+ }
+
+ assert(uv__stream_fd(stream) >= 0);
+
+ /* Initialize request */
+ uv__req_init(stream->loop, req, UV_SHUTDOWN);
+ req->handle = stream;
+ req->cb = cb;
+ stream->shutdown_req = req;
+ stream->flags |= UV_HANDLE_SHUTTING;
+
+ uv__io_start(stream->loop, &stream->io_watcher, POLLOUT);
+ uv__stream_osx_interrupt_select(stream);
+
+ return 0;
+}
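From the caller's side, uv_shutdown() queues a request that completes once the write queue drains (see uv__drain() above). A short usage sketch; the callback and function names are illustrative:

  #include <stdlib.h>
  #include "uv.h"

  static void on_shutdown(uv_shutdown_t* req, int status) {
    /* status is 0 on success or a UV_* error code. */
    uv_close((uv_handle_t*) req->handle, NULL);
    free(req);
  }

  /* Call after the last uv_write() has been queued. */
  static int graceful_shutdown(uv_stream_t* stream) {
    uv_shutdown_t* req;
    int err;

    req = malloc(sizeof(*req));
    if (req == NULL)
      return UV_ENOMEM;

    err = uv_shutdown(req, stream, on_shutdown);
    if (err != 0)
      free(req);
    return err;
  }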
+
+
+static void uv__stream_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
+ uv_stream_t* stream;
+
+ stream = container_of(w, uv_stream_t, io_watcher);
+
+ assert(stream->type == UV_TCP ||
+ stream->type == UV_NAMED_PIPE ||
+ stream->type == UV_TTY);
+ assert(!(stream->flags & UV_HANDLE_CLOSING));
+
+ if (stream->connect_req) {
+ uv__stream_connect(stream);
+ return;
+ }
+
+ assert(uv__stream_fd(stream) >= 0);
+
+ /* Ignore POLLHUP here. Even if it's set, there may still be data to read. */
+ if (events & (POLLIN | POLLERR | POLLHUP))
+ uv__read(stream);
+
+ if (uv__stream_fd(stream) == -1)
+ return; /* read_cb closed stream. */
+
+ /* Short-circuit iff POLLHUP is set, the user is still interested in read
+ * events and uv__read() reported a partial read but not EOF. If the EOF
+ * flag is set, uv__read() called read_cb with err=UV_EOF and we don't
+ * have to do anything. If the partial read flag is not set, we can't
+ * report the EOF yet because there is still data to read.
+ */
+ if ((events & POLLHUP) &&
+ (stream->flags & UV_HANDLE_READING) &&
+ (stream->flags & UV_HANDLE_READ_PARTIAL) &&
+ !(stream->flags & UV_HANDLE_READ_EOF)) {
+ uv_buf_t buf = { NULL, 0 };
+ uv__stream_eof(stream, &buf);
+ }
+
+ if (uv__stream_fd(stream) == -1)
+ return; /* read_cb closed stream. */
+
+ if (events & (POLLOUT | POLLERR | POLLHUP)) {
+ uv__write(stream);
+ uv__write_callbacks(stream);
+
+ /* Write queue drained. */
+ if (QUEUE_EMPTY(&stream->write_queue))
+ uv__drain(stream);
+ }
+}
+
+
+/**
+ * We get called here directly following a call to connect(2).
+ * In order to determine whether we've errored out or succeeded, we must
+ * call getsockopt().
+ */
+static void uv__stream_connect(uv_stream_t* stream) {
+ int error;
+ uv_connect_t* req = stream->connect_req;
+ socklen_t errorsize = sizeof(int);
+
+ assert(stream->type == UV_TCP || stream->type == UV_NAMED_PIPE);
+ assert(req);
+
+ if (stream->delayed_error) {
+    /* To smooth over the differences between unixes, errors that were
+     * reported synchronously on the first connect() call can be delayed
+     * until the next tick, which is now.
+ */
+ error = stream->delayed_error;
+ stream->delayed_error = 0;
+ } else {
+ /* Normal situation: we need to get the socket error from the kernel. */
+ assert(uv__stream_fd(stream) >= 0);
+ getsockopt(uv__stream_fd(stream),
+ SOL_SOCKET,
+ SO_ERROR,
+ &error,
+ &errorsize);
+ error = UV__ERR(error);
+ }
+
+ if (error == UV__ERR(EINPROGRESS))
+ return;
+
+ stream->connect_req = NULL;
+ uv__req_unregister(stream->loop, req);
+
+ if (error < 0 || QUEUE_EMPTY(&stream->write_queue)) {
+ uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT);
+ }
+
+ if (req->cb)
+ req->cb(req, error);
+
+ if (uv__stream_fd(stream) == -1)
+ return;
+
+ if (error < 0) {
+ uv__stream_flush_write_queue(stream, UV_ECANCELED);
+ uv__write_callbacks(stream);
+ }
+}
+
+
+int uv_write2(uv_write_t* req,
+ uv_stream_t* stream,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ uv_stream_t* send_handle,
+ uv_write_cb cb) {
+ int empty_queue;
+
+ assert(nbufs > 0);
+ assert((stream->type == UV_TCP ||
+ stream->type == UV_NAMED_PIPE ||
+ stream->type == UV_TTY) &&
+ "uv_write (unix) does not yet support other types of streams");
+
+ if (uv__stream_fd(stream) < 0)
+ return UV_EBADF;
+
+ if (!(stream->flags & UV_HANDLE_WRITABLE))
+ return UV_EPIPE;
+
+ if (send_handle) {
+ if (stream->type != UV_NAMED_PIPE || !((uv_pipe_t*)stream)->ipc)
+ return UV_EINVAL;
+
+ /* XXX We abuse uv_write2() to send over UDP handles to child processes.
+ * Don't call uv__stream_fd() on those handles, it's a macro that on OS X
+ * evaluates to a function that operates on a uv_stream_t with a couple of
+ * OS X specific fields. On other Unices it does (handle)->io_watcher.fd,
+ * which works but only by accident.
+ */
+ if (uv__handle_fd((uv_handle_t*) send_handle) < 0)
+ return UV_EBADF;
+
+#if defined(__CYGWIN__) || defined(__MSYS__)
+ /* Cygwin recvmsg always sets msg_controllen to zero, so we cannot send it.
+ See https://github.com/mirror/newlib-cygwin/blob/86fc4bf0/winsup/cygwin/fhandler_socket.cc#L1736-L1743 */
+ return UV_ENOSYS;
+#endif
+ }
+
+ /* It's legal for write_queue_size > 0 even when the write_queue is empty;
+ * it means there are error-state requests in the write_completed_queue that
+ * will touch up write_queue_size later, see also uv__write_req_finish().
+ * We could check that write_queue is empty instead but that implies making
+ * a write() syscall when we know that the handle is in error mode.
+ */
+ empty_queue = (stream->write_queue_size == 0);
+
+ /* Initialize the req */
+ uv__req_init(stream->loop, req, UV_WRITE);
+ req->cb = cb;
+ req->handle = stream;
+ req->error = 0;
+ req->send_handle = send_handle;
+ QUEUE_INIT(&req->queue);
+
+ req->bufs = req->bufsml;
+ if (nbufs > ARRAY_SIZE(req->bufsml))
+ req->bufs = uv__malloc(nbufs * sizeof(bufs[0]));
+
+ if (req->bufs == NULL)
+ return UV_ENOMEM;
+
+ memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0]));
+ req->nbufs = nbufs;
+ req->write_index = 0;
+ stream->write_queue_size += uv__count_bufs(bufs, nbufs);
+
+ /* Append the request to write_queue. */
+ QUEUE_INSERT_TAIL(&stream->write_queue, &req->queue);
+
+ /* If the queue was empty when this function began, we should attempt to
+ * do the write immediately. Otherwise start the write_watcher and wait
+ * for the fd to become writable.
+ */
+ if (stream->connect_req) {
+ /* Still connecting, do nothing. */
+ }
+ else if (empty_queue) {
+ uv__write(stream);
+ }
+ else {
+ /*
+     * Blocking streams should never have anything in the queue.
+     * If this assert fires then somehow the blocking stream isn't being
+     * sufficiently flushed in uv__write().
+ */
+ assert(!(stream->flags & UV_HANDLE_BLOCKING_WRITES));
+ uv__io_start(stream->loop, &stream->io_watcher, POLLOUT);
+ uv__stream_osx_interrupt_select(stream);
+ }
+
+ return 0;
+}
+
+
+/* The buffers to be written must remain valid until the callback is called.
+ * This is not required for the uv_buf_t array.
+ */
+int uv_write(uv_write_t* req,
+ uv_stream_t* handle,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ uv_write_cb cb) {
+ return uv_write2(req, handle, bufs, nbufs, NULL, cb);
+}
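A short usage sketch of uv_write(); per the comment above, the bytes each uv_buf_t points at must stay valid until the write callback runs, so this sketch heap-copies the payload (names are illustrative, error paths trimmed):

  #include <stdlib.h>
  #include <string.h>
  #include "uv.h"

  static void on_write(uv_write_t* req, int status) {
    free(req->data);  /* the heap copy of the payload */
    free(req);
  }

  static int send_line(uv_stream_t* stream, const char* text) {
    size_t len = strlen(text);
    char* copy = malloc(len);
    uv_write_t* req = malloc(sizeof(*req));
    uv_buf_t buf;

    if (copy == NULL || req == NULL) {
      free(copy);
      free(req);
      return UV_ENOMEM;
    }

    memcpy(copy, text, len);
    req->data = copy;  /* stash the allocation for the callback */
    buf = uv_buf_init(copy, (unsigned int) len);
    return uv_write(req, stream, &buf, 1, on_write);
  }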
+
+
+void uv_try_write_cb(uv_write_t* req, int status) {
+ /* Should not be called */
+ abort();
+}
+
+
+int uv_try_write(uv_stream_t* stream,
+ const uv_buf_t bufs[],
+ unsigned int nbufs) {
+ int r;
+ int has_pollout;
+ size_t written;
+ size_t req_size;
+ uv_write_t req;
+
+ /* Connecting or already writing some data */
+ if (stream->connect_req != NULL || stream->write_queue_size != 0)
+ return UV_EAGAIN;
+
+ has_pollout = uv__io_active(&stream->io_watcher, POLLOUT);
+
+ r = uv_write(&req, stream, bufs, nbufs, uv_try_write_cb);
+ if (r != 0)
+ return r;
+
+  /* Remove the bytes that were not written from the write queue size */
+ written = uv__count_bufs(bufs, nbufs);
+ if (req.bufs != NULL)
+ req_size = uv__write_req_size(&req);
+ else
+ req_size = 0;
+ written -= req_size;
+ stream->write_queue_size -= req_size;
+
+  /* Unqueue the request regardless of whether it completed immediately */
+ QUEUE_REMOVE(&req.queue);
+ uv__req_unregister(stream->loop, &req);
+ if (req.bufs != req.bufsml)
+ uv__free(req.bufs);
+ req.bufs = NULL;
+
+  /* Do not poll for writable if we weren't polling before this call */
+ if (!has_pollout) {
+ uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT);
+ uv__stream_osx_interrupt_select(stream);
+ }
+
+ if (written == 0 && req_size != 0)
+ return req.error < 0 ? req.error : UV_EAGAIN;
+ else
+ return written;
+}
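uv_try_write() performs at most one synchronous write and returns the number of bytes it managed to push out, or UV_EAGAIN when nothing could be written without queuing. A common calling pattern is to try it first and fall back to uv_write() for the remainder; a sketch under those assumptions, with illustrative names:

  #include "uv.h"

  /* Returns 0 if everything was written synchronously (cb never runs),
   * otherwise queues the remainder via uv_write() or returns an error.
   * The bytes behind buf.base must stay valid until cb runs if queued. */
  static int write_some(uv_stream_t* stream, uv_buf_t buf,
                        uv_write_t* req, uv_write_cb cb) {
    int n = uv_try_write(stream, &buf, 1);

    if (n == (int) buf.len)
      return 0;        /* done, nothing left to queue */

    if (n == UV_EAGAIN)
      n = 0;           /* nothing written; queue the whole buffer */
    else if (n < 0)
      return n;        /* hard error */

    buf.base += n;     /* queue what did not fit */
    buf.len -= n;
    return uv_write(req, stream, &buf, 1, cb);
  }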
+
+
+int uv_read_start(uv_stream_t* stream,
+ uv_alloc_cb alloc_cb,
+ uv_read_cb read_cb) {
+ assert(stream->type == UV_TCP || stream->type == UV_NAMED_PIPE ||
+ stream->type == UV_TTY);
+
+ if (stream->flags & UV_HANDLE_CLOSING)
+ return UV_EINVAL;
+
+ if (!(stream->flags & UV_HANDLE_READABLE))
+ return UV_ENOTCONN;
+
+  /* The UV_HANDLE_READING flag is independent of the state of the tcp; it just
+ * expresses the desired state of the user.
+ */
+ stream->flags |= UV_HANDLE_READING;
+
+ /* TODO: try to do the read inline? */
+  /* TODO: keep track of tcp state. If we've gotten an EOF then we should
+ * not start the IO watcher.
+ */
+ assert(uv__stream_fd(stream) >= 0);
+ assert(alloc_cb);
+
+ stream->read_cb = read_cb;
+ stream->alloc_cb = alloc_cb;
+
+ uv__io_start(stream->loop, &stream->io_watcher, POLLIN);
+ uv__handle_start(stream);
+ uv__stream_osx_interrupt_select(stream);
+
+ return 0;
+}
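uv_read_start() requires both callbacks: the read loop above calls alloc_cb before every read and hands the result (or UV_EOF / an error code) to read_cb. A minimal pair assuming a malloc-per-read policy, with illustrative names:

  #include <stdlib.h>
  #include "uv.h"

  static void on_alloc(uv_handle_t* handle, size_t suggested, uv_buf_t* buf) {
    buf->base = malloc(suggested);
    buf->len = buf->base != NULL ? suggested : 0;  /* len 0 => UV_ENOBUFS */
  }

  static void on_read(uv_stream_t* stream, ssize_t nread, const uv_buf_t* buf) {
    if (nread > 0) {
      /* consume nread bytes starting at buf->base */
    } else if (nread < 0) {
      /* UV_EOF or an error: stop and close the handle */
      uv_close((uv_handle_t*) stream, NULL);
    }
    /* nread == 0 is the EAGAIN case; nothing to consume */
    free(buf->base);
  }

  /* uv_read_start(stream, on_alloc, on_read); */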
+
+
+int uv_read_stop(uv_stream_t* stream) {
+ if (!(stream->flags & UV_HANDLE_READING))
+ return 0;
+
+ stream->flags &= ~UV_HANDLE_READING;
+ uv__io_stop(stream->loop, &stream->io_watcher, POLLIN);
+ if (!uv__io_active(&stream->io_watcher, POLLOUT))
+ uv__handle_stop(stream);
+ uv__stream_osx_interrupt_select(stream);
+
+ stream->read_cb = NULL;
+ stream->alloc_cb = NULL;
+ return 0;
+}
+
+
+int uv_is_readable(const uv_stream_t* stream) {
+ return !!(stream->flags & UV_HANDLE_READABLE);
+}
+
+
+int uv_is_writable(const uv_stream_t* stream) {
+ return !!(stream->flags & UV_HANDLE_WRITABLE);
+}
+
+
+#if defined(__APPLE__) && !defined(CMAKE_BOOTSTRAP)
+int uv___stream_fd(const uv_stream_t* handle) {
+ const uv__stream_select_t* s;
+
+ assert(handle->type == UV_TCP ||
+ handle->type == UV_TTY ||
+ handle->type == UV_NAMED_PIPE);
+
+ s = handle->select;
+ if (s != NULL)
+ return s->fd;
+
+ return handle->io_watcher.fd;
+}
+#endif /* defined(__APPLE__) */
+
+
+void uv__stream_close(uv_stream_t* handle) {
+ unsigned int i;
+ uv__stream_queued_fds_t* queued_fds;
+
+#if defined(__APPLE__) && !defined(CMAKE_BOOTSTRAP)
+ /* Terminate select loop first */
+ if (handle->select != NULL) {
+ uv__stream_select_t* s;
+
+ s = handle->select;
+
+ uv_sem_post(&s->close_sem);
+ uv_sem_post(&s->async_sem);
+ uv__stream_osx_interrupt_select(handle);
+ uv_thread_join(&s->thread);
+ uv_sem_destroy(&s->close_sem);
+ uv_sem_destroy(&s->async_sem);
+ uv__close(s->fake_fd);
+ uv__close(s->int_fd);
+ uv_close((uv_handle_t*) &s->async, uv__stream_osx_cb_close);
+
+ handle->select = NULL;
+ }
+#endif /* defined(__APPLE__) */
+
+ uv__io_close(handle->loop, &handle->io_watcher);
+ uv_read_stop(handle);
+ uv__handle_stop(handle);
+ handle->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
+
+ if (handle->io_watcher.fd != -1) {
+ /* Don't close stdio file descriptors. Nothing good comes from it. */
+ if (handle->io_watcher.fd > STDERR_FILENO)
+ uv__close(handle->io_watcher.fd);
+ handle->io_watcher.fd = -1;
+ }
+
+ if (handle->accepted_fd != -1) {
+ uv__close(handle->accepted_fd);
+ handle->accepted_fd = -1;
+ }
+
+ /* Close all queued fds */
+ if (handle->queued_fds != NULL) {
+ queued_fds = handle->queued_fds;
+ for (i = 0; i < queued_fds->offset; i++)
+ uv__close(queued_fds->fds[i]);
+ uv__free(handle->queued_fds);
+ handle->queued_fds = NULL;
+ }
+
+ assert(!uv__io_active(&handle->io_watcher, POLLIN | POLLOUT));
+}
+
+
+int uv_stream_set_blocking(uv_stream_t* handle, int blocking) {
+  /* No need to check the file descriptor; uv__nonblock()
+ * will fail with EBADF if it's not valid.
+ */
+ return uv__nonblock(uv__stream_fd(handle), !blocking);
+}
diff --git a/Utilities/cmlibuv/src/unix/sunos.c b/Utilities/cmlibuv/src/unix/sunos.c
new file mode 100644
index 0000000..2166e8f
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/sunos.c
@@ -0,0 +1,871 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+
+#if !defined(SUNOS_NO_IFADDRS) && _XOPEN_SOURCE < 600
+#define SUNOS_NO_IFADDRS
+#endif
+
+#ifndef SUNOS_NO_IFADDRS
+# include <ifaddrs.h>
+#endif
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/if_arp.h>
+#include <sys/sockio.h>
+
+#include <sys/loadavg.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include <kstat.h>
+#include <fcntl.h>
+
+#include <sys/port.h>
+#include <port.h>
+
+#define PORT_FIRED 0x69
+#define PORT_UNUSED 0x0
+#define PORT_LOADED 0x99
+#define PORT_DELETED -1
+
+#if (!defined(_LP64)) && (_FILE_OFFSET_BITS - 0 == 64)
+#define PROCFS_FILE_OFFSET_BITS_HACK 1
+#undef _FILE_OFFSET_BITS
+#else
+#define PROCFS_FILE_OFFSET_BITS_HACK 0
+#endif
+
+#include <procfs.h>
+
+#if (PROCFS_FILE_OFFSET_BITS_HACK - 0 == 1)
+#define _FILE_OFFSET_BITS 64
+#endif
+
+
+int uv__platform_loop_init(uv_loop_t* loop) {
+ int err;
+ int fd;
+
+ loop->fs_fd = -1;
+ loop->backend_fd = -1;
+
+ fd = port_create();
+ if (fd == -1)
+ return UV__ERR(errno);
+
+ err = uv__cloexec(fd, 1);
+ if (err) {
+ uv__close(fd);
+ return err;
+ }
+ loop->backend_fd = fd;
+
+ return 0;
+}
+
+
+void uv__platform_loop_delete(uv_loop_t* loop) {
+ if (loop->fs_fd != -1) {
+ uv__close(loop->fs_fd);
+ loop->fs_fd = -1;
+ }
+
+ if (loop->backend_fd != -1) {
+ uv__close(loop->backend_fd);
+ loop->backend_fd = -1;
+ }
+}
+
+
+int uv__io_fork(uv_loop_t* loop) {
+#if defined(PORT_SOURCE_FILE)
+ if (loop->fs_fd != -1) {
+ /* stop the watcher before we blow away its fileno */
+ uv__io_stop(loop, &loop->fs_event_watcher, POLLIN);
+ }
+#endif
+ uv__platform_loop_delete(loop);
+ return uv__platform_loop_init(loop);
+}
+
+
+void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
+ struct port_event* events;
+ uintptr_t i;
+ uintptr_t nfds;
+
+ assert(loop->watchers != NULL);
+ assert(fd >= 0);
+
+ events = (struct port_event*) loop->watchers[loop->nwatchers];
+ nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
+ if (events == NULL)
+ return;
+
+ /* Invalidate events with same file descriptor */
+ for (i = 0; i < nfds; i++)
+ if ((int) events[i].portev_object == fd)
+ events[i].portev_object = -1;
+}
+
+
+int uv__io_check_fd(uv_loop_t* loop, int fd) {
+ if (port_associate(loop->backend_fd, PORT_SOURCE_FD, fd, POLLIN, 0))
+ return UV__ERR(errno);
+
+ if (port_dissociate(loop->backend_fd, PORT_SOURCE_FD, fd)) {
+ perror("(libuv) port_dissociate()");
+ abort();
+ }
+
+ return 0;
+}
+
+
+void uv__io_poll(uv_loop_t* loop, int timeout) {
+ struct port_event events[1024];
+ struct port_event* pe;
+ struct timespec spec;
+ QUEUE* q;
+ uv__io_t* w;
+ sigset_t* pset;
+ sigset_t set;
+ uint64_t base;
+ uint64_t diff;
+ uint64_t idle_poll;
+ unsigned int nfds;
+ unsigned int i;
+ int saved_errno;
+ int have_signals;
+ int nevents;
+ int count;
+ int err;
+ int fd;
+ int user_timeout;
+ int reset_timeout;
+
+ if (loop->nfds == 0) {
+ assert(QUEUE_EMPTY(&loop->watcher_queue));
+ return;
+ }
+
+ while (!QUEUE_EMPTY(&loop->watcher_queue)) {
+ q = QUEUE_HEAD(&loop->watcher_queue);
+ QUEUE_REMOVE(q);
+ QUEUE_INIT(q);
+
+ w = QUEUE_DATA(q, uv__io_t, watcher_queue);
+ assert(w->pevents != 0);
+
+ if (port_associate(loop->backend_fd,
+ PORT_SOURCE_FD,
+ w->fd,
+ w->pevents,
+ 0)) {
+ perror("(libuv) port_associate()");
+ abort();
+ }
+
+ w->events = w->pevents;
+ }
+
+ pset = NULL;
+ if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
+ pset = &set;
+ sigemptyset(pset);
+ sigaddset(pset, SIGPROF);
+ }
+
+ assert(timeout >= -1);
+ base = loop->time;
+ count = 48; /* Benchmarks suggest this gives the best throughput. */
+
+ if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
+ reset_timeout = 1;
+ user_timeout = timeout;
+ timeout = 0;
+ } else {
+ reset_timeout = 0;
+ }
+
+ for (;;) {
+ /* Only need to set the provider_entry_time if timeout != 0. The function
+ * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
+ */
+ if (timeout != 0)
+ uv__metrics_set_provider_entry_time(loop);
+
+ if (timeout != -1) {
+ spec.tv_sec = timeout / 1000;
+ spec.tv_nsec = (timeout % 1000) * 1000000;
+ }
+
+ /* Work around a kernel bug where nfds is not updated. */
+ events[0].portev_source = 0;
+
+ nfds = 1;
+ saved_errno = 0;
+
+ if (pset != NULL)
+ pthread_sigmask(SIG_BLOCK, pset, NULL);
+
+ err = port_getn(loop->backend_fd,
+ events,
+ ARRAY_SIZE(events),
+ &nfds,
+ timeout == -1 ? NULL : &spec);
+
+ if (pset != NULL)
+ pthread_sigmask(SIG_UNBLOCK, pset, NULL);
+
+ if (err) {
+ /* Work around another kernel bug: port_getn() may return events even
+ * on error.
+ */
+ if (errno == EINTR || errno == ETIME) {
+ saved_errno = errno;
+ } else {
+ perror("(libuv) port_getn()");
+ abort();
+ }
+ }
+
+ /* Update loop->time unconditionally. It's tempting to skip the update when
+ * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
+ * operating system didn't reschedule our process while in the syscall.
+ */
+ SAVE_ERRNO(uv__update_time(loop));
+
+ if (events[0].portev_source == 0) {
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (timeout == 0)
+ return;
+
+ if (timeout == -1)
+ continue;
+
+ goto update_timeout;
+ }
+
+ if (nfds == 0) {
+ assert(timeout != -1);
+ return;
+ }
+
+ have_signals = 0;
+ nevents = 0;
+
+ assert(loop->watchers != NULL);
+ loop->watchers[loop->nwatchers] = (void*) events;
+ loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
+ for (i = 0; i < nfds; i++) {
+ pe = events + i;
+ fd = pe->portev_object;
+
+ /* Skip invalidated events, see uv__platform_invalidate_fd */
+ if (fd == -1)
+ continue;
+
+ assert(fd >= 0);
+ assert((unsigned) fd < loop->nwatchers);
+
+ w = loop->watchers[fd];
+
+ /* File descriptor that we've stopped watching, ignore. */
+ if (w == NULL)
+ continue;
+
+ /* Run signal watchers last. This also affects child process watchers
+ * because those are implemented in terms of signal watchers.
+ */
+ if (w == &loop->signal_io_watcher) {
+ have_signals = 1;
+ } else {
+ uv__metrics_update_idle_time(loop);
+ w->cb(loop, w, pe->portev_events);
+ }
+
+ nevents++;
+
+ if (w != loop->watchers[fd])
+ continue; /* Disabled by callback. */
+
+      /* Event ports operate in oneshot mode; rearm the watcher on the next run. */
+ if (w->pevents != 0 && QUEUE_EMPTY(&w->watcher_queue))
+ QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
+ }
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (have_signals != 0) {
+ uv__metrics_update_idle_time(loop);
+ loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
+ }
+
+ loop->watchers[loop->nwatchers] = NULL;
+ loop->watchers[loop->nwatchers + 1] = NULL;
+
+ if (have_signals != 0)
+ return; /* Event loop should cycle now so don't poll again. */
+
+ if (nevents != 0) {
+ if (nfds == ARRAY_SIZE(events) && --count != 0) {
+ /* Poll for more events but don't block this time. */
+ timeout = 0;
+ continue;
+ }
+ return;
+ }
+
+ if (saved_errno == ETIME) {
+ assert(timeout != -1);
+ return;
+ }
+
+ if (timeout == 0)
+ return;
+
+ if (timeout == -1)
+ continue;
+
+update_timeout:
+ assert(timeout > 0);
+
+ diff = loop->time - base;
+ if (diff >= (uint64_t) timeout)
+ return;
+
+ timeout -= diff;
+ }
+}
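The poll loop above sits on top of Solaris event ports, which deliver each fd association exactly once, hence the re-association of still-active watchers at the top of every iteration. Stripped of libuv's bookkeeping, the underlying pattern looks roughly like this sketch with illustrative names:

  #include <poll.h>
  #include <port.h>
  #include <stdio.h>
  #include <unistd.h>

  /* Watch one fd for readability with an event port; re-associate after
   * every wakeup because event-port associations are oneshot. */
  static void watch_fd(int fd) {
    port_event_t pe;
    uint_t n;
    int port;

    port = port_create();
    if (port == -1)
      return;

    while (port_associate(port, PORT_SOURCE_FD, fd, POLLIN, NULL) == 0) {
      n = 1;
      if (port_getn(port, &pe, 1, &n, NULL) != 0 || n == 0)
        break;
      printf("fd %d: events %d\n", (int) pe.portev_object, pe.portev_events);
      /* handle the event, then loop to re-associate */
    }

    close(port);
  }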
+
+
+uint64_t uv__hrtime(uv_clocktype_t type) {
+ return gethrtime();
+}
+
+
+/*
+ * We could use a static buffer for the path manipulations that we need outside
+ * of the function, but this function could be called by multiple consumers and
+ * we don't want to potentially create a race condition in the use of snprintf.
+ */
+int uv_exepath(char* buffer, size_t* size) {
+ ssize_t res;
+ char buf[128];
+
+ if (buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+ snprintf(buf, sizeof(buf), "/proc/%lu/path/a.out", (unsigned long) getpid());
+
+ res = *size - 1;
+ if (res > 0)
+ res = readlink(buf, buffer, res);
+
+ if (res == -1)
+ return UV__ERR(errno);
+
+ buffer[res] = '\0';
+ *size = res;
+ return 0;
+}
+
+
+uint64_t uv_get_free_memory(void) {
+ return (uint64_t) sysconf(_SC_PAGESIZE) * sysconf(_SC_AVPHYS_PAGES);
+}
+
+
+uint64_t uv_get_total_memory(void) {
+ return (uint64_t) sysconf(_SC_PAGESIZE) * sysconf(_SC_PHYS_PAGES);
+}
+
+
+uint64_t uv_get_constrained_memory(void) {
+ return 0; /* Memory constraints are unknown. */
+}
+
+
+void uv_loadavg(double avg[3]) {
+ (void) getloadavg(avg, 3);
+}
+
+
+#if defined(PORT_SOURCE_FILE)
+
+static int uv__fs_event_rearm(uv_fs_event_t *handle) {
+ if (handle->fd == -1)
+ return UV_EBADF;
+
+ if (port_associate(handle->loop->fs_fd,
+ PORT_SOURCE_FILE,
+ (uintptr_t) &handle->fo,
+ FILE_ATTRIB | FILE_MODIFIED,
+ handle) == -1) {
+ return UV__ERR(errno);
+ }
+ handle->fd = PORT_LOADED;
+
+ return 0;
+}
+
+
+static void uv__fs_event_read(uv_loop_t* loop,
+ uv__io_t* w,
+ unsigned int revents) {
+ uv_fs_event_t *handle = NULL;
+ timespec_t timeout;
+ port_event_t pe;
+ int events;
+ int r;
+
+ (void) w;
+ (void) revents;
+
+ do {
+ uint_t n = 1;
+
+ /*
+ * Note that our use of port_getn() here (and not port_get()) is deliberate:
+ * there is a bug in event ports (Sun bug 6456558) whereby a zeroed timeout
+ * causes port_get() to return success instead of ETIME when there aren't
+ * actually any events (!); by using port_getn() in lieu of port_get(),
+     * we can at least work around the bug by checking for zero returned events
+ * and treating it as we would ETIME.
+ */
+ do {
+ memset(&timeout, 0, sizeof timeout);
+ r = port_getn(loop->fs_fd, &pe, 1, &n, &timeout);
+ }
+ while (r == -1 && errno == EINTR);
+
+ if ((r == -1 && errno == ETIME) || n == 0)
+ break;
+
+ handle = (uv_fs_event_t*) pe.portev_user;
+ assert((r == 0) && "unexpected port_get() error");
+
+ events = 0;
+ if (pe.portev_events & (FILE_ATTRIB | FILE_MODIFIED))
+ events |= UV_CHANGE;
+ if (pe.portev_events & ~(FILE_ATTRIB | FILE_MODIFIED))
+ events |= UV_RENAME;
+ assert(events != 0);
+ handle->fd = PORT_FIRED;
+ handle->cb(handle, NULL, events, 0);
+
+ if (handle->fd != PORT_DELETED) {
+ r = uv__fs_event_rearm(handle);
+ if (r != 0)
+ handle->cb(handle, NULL, 0, r);
+ }
+ }
+ while (handle->fd != PORT_DELETED);
+}
+
+
+int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
+ uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
+ return 0;
+}
+
+
+int uv_fs_event_start(uv_fs_event_t* handle,
+ uv_fs_event_cb cb,
+ const char* path,
+ unsigned int flags) {
+ int portfd;
+ int first_run;
+ int err;
+
+ if (uv__is_active(handle))
+ return UV_EINVAL;
+
+ first_run = 0;
+ if (handle->loop->fs_fd == -1) {
+ portfd = port_create();
+ if (portfd == -1)
+ return UV__ERR(errno);
+ handle->loop->fs_fd = portfd;
+ first_run = 1;
+ }
+
+ uv__handle_start(handle);
+ handle->path = uv__strdup(path);
+ handle->fd = PORT_UNUSED;
+ handle->cb = cb;
+
+ memset(&handle->fo, 0, sizeof handle->fo);
+ handle->fo.fo_name = handle->path;
+ err = uv__fs_event_rearm(handle);
+ if (err != 0) {
+ uv_fs_event_stop(handle);
+ return err;
+ }
+
+ if (first_run) {
+ uv__io_init(&handle->loop->fs_event_watcher, uv__fs_event_read, portfd);
+ uv__io_start(handle->loop, &handle->loop->fs_event_watcher, POLLIN);
+ }
+
+ return 0;
+}
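From application code, the machinery above is driven through the normal handle lifecycle. Note that this backend passes NULL as the filename argument to the callback (see uv__fs_event_read() above). A brief usage sketch with illustrative names:

  static void on_fs_event(uv_fs_event_t* handle,
                          const char* filename,
                          int events,
                          int status) {
    if (status < 0)
      return;  /* e.g. a re-arm failure reported by uv__fs_event_read() */
    if (events & UV_CHANGE) {
      /* file contents or attributes changed */
    }
    if (events & UV_RENAME) {
      /* file was renamed or removed */
    }
    (void) handle;
    (void) filename;  /* NULL on this backend */
  }

  /* uv_fs_event_t fe;                                      */
  /* uv_fs_event_init(loop, &fe);                           */
  /* uv_fs_event_start(&fe, on_fs_event, "/some/path", 0);  */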
+
+
+int uv_fs_event_stop(uv_fs_event_t* handle) {
+ if (!uv__is_active(handle))
+ return 0;
+
+ if (handle->fd == PORT_FIRED || handle->fd == PORT_LOADED) {
+ port_dissociate(handle->loop->fs_fd,
+ PORT_SOURCE_FILE,
+ (uintptr_t) &handle->fo);
+ }
+
+ handle->fd = PORT_DELETED;
+ uv__free(handle->path);
+ handle->path = NULL;
+ handle->fo.fo_name = NULL;
+ uv__handle_stop(handle);
+
+ return 0;
+}
+
+void uv__fs_event_close(uv_fs_event_t* handle) {
+ uv_fs_event_stop(handle);
+}
+
+#else /* !defined(PORT_SOURCE_FILE) */
+
+int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
+ return UV_ENOSYS;
+}
+
+
+int uv_fs_event_start(uv_fs_event_t* handle,
+ uv_fs_event_cb cb,
+ const char* filename,
+ unsigned int flags) {
+ return UV_ENOSYS;
+}
+
+
+int uv_fs_event_stop(uv_fs_event_t* handle) {
+ return UV_ENOSYS;
+}
+
+
+void uv__fs_event_close(uv_fs_event_t* handle) {
+ UNREACHABLE();
+}
+
+#endif /* defined(PORT_SOURCE_FILE) */
+
+
+int uv_resident_set_memory(size_t* rss) {
+ psinfo_t psinfo;
+ int err;
+ int fd;
+
+ fd = open("/proc/self/psinfo", O_RDONLY);
+ if (fd == -1)
+ return UV__ERR(errno);
+
+ /* FIXME(bnoordhuis) Handle EINTR. */
+ err = UV_EINVAL;
+ if (read(fd, &psinfo, sizeof(psinfo)) == sizeof(psinfo)) {
+ *rss = (size_t)psinfo.pr_rssize * 1024;
+ err = 0;
+ }
+ uv__close(fd);
+
+ return err;
+}
+
+
+int uv_uptime(double* uptime) {
+ kstat_ctl_t *kc;
+ kstat_t *ksp;
+ kstat_named_t *knp;
+
+ long hz = sysconf(_SC_CLK_TCK);
+
+ kc = kstat_open();
+ if (kc == NULL)
+ return UV_EPERM;
+
+ ksp = kstat_lookup(kc, (char*) "unix", 0, (char*) "system_misc");
+ if (kstat_read(kc, ksp, NULL) == -1) {
+ *uptime = -1;
+ } else {
+ knp = (kstat_named_t*) kstat_data_lookup(ksp, (char*) "clk_intr");
+ *uptime = knp->value.ul / hz;
+ }
+ kstat_close(kc);
+
+ return 0;
+}
+
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+ int lookup_instance;
+ kstat_ctl_t *kc;
+ kstat_t *ksp;
+ kstat_named_t *knp;
+ uv_cpu_info_t* cpu_info;
+
+ kc = kstat_open();
+ if (kc == NULL)
+ return UV_EPERM;
+
+ /* Get count of cpus */
+ lookup_instance = 0;
+ while ((ksp = kstat_lookup(kc, (char*) "cpu_info", lookup_instance, NULL))) {
+ lookup_instance++;
+ }
+
+ *cpu_infos = uv__malloc(lookup_instance * sizeof(**cpu_infos));
+ if (!(*cpu_infos)) {
+ kstat_close(kc);
+ return UV_ENOMEM;
+ }
+
+ *count = lookup_instance;
+
+ cpu_info = *cpu_infos;
+ lookup_instance = 0;
+ while ((ksp = kstat_lookup(kc, (char*) "cpu_info", lookup_instance, NULL))) {
+ if (kstat_read(kc, ksp, NULL) == -1) {
+ cpu_info->speed = 0;
+ cpu_info->model = NULL;
+ } else {
+ knp = kstat_data_lookup(ksp, (char*) "clock_MHz");
+ assert(knp->data_type == KSTAT_DATA_INT32 ||
+ knp->data_type == KSTAT_DATA_INT64);
+ cpu_info->speed = (knp->data_type == KSTAT_DATA_INT32) ? knp->value.i32
+ : knp->value.i64;
+
+ knp = kstat_data_lookup(ksp, (char*) "brand");
+ assert(knp->data_type == KSTAT_DATA_STRING);
+ cpu_info->model = uv__strdup(KSTAT_NAMED_STR_PTR(knp));
+ }
+
+ lookup_instance++;
+ cpu_info++;
+ }
+
+ cpu_info = *cpu_infos;
+ lookup_instance = 0;
+ for (;;) {
+ ksp = kstat_lookup(kc, (char*) "cpu", lookup_instance, (char*) "sys");
+
+ if (ksp == NULL)
+ break;
+
+ if (kstat_read(kc, ksp, NULL) == -1) {
+ cpu_info->cpu_times.user = 0;
+ cpu_info->cpu_times.nice = 0;
+ cpu_info->cpu_times.sys = 0;
+ cpu_info->cpu_times.idle = 0;
+ cpu_info->cpu_times.irq = 0;
+ } else {
+ knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_user");
+ assert(knp->data_type == KSTAT_DATA_UINT64);
+ cpu_info->cpu_times.user = knp->value.ui64;
+
+ knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_kernel");
+ assert(knp->data_type == KSTAT_DATA_UINT64);
+ cpu_info->cpu_times.sys = knp->value.ui64;
+
+ knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_idle");
+ assert(knp->data_type == KSTAT_DATA_UINT64);
+ cpu_info->cpu_times.idle = knp->value.ui64;
+
+ knp = kstat_data_lookup(ksp, (char*) "intr");
+ assert(knp->data_type == KSTAT_DATA_UINT64);
+ cpu_info->cpu_times.irq = knp->value.ui64;
+ cpu_info->cpu_times.nice = 0;
+ }
+
+ lookup_instance++;
+ cpu_info++;
+ }
+
+ kstat_close(kc);
+
+ return 0;
+}
+
+
+#ifdef SUNOS_NO_IFADDRS
+int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
+ *count = 0;
+ *addresses = NULL;
+ return UV_ENOSYS;
+}
+#else /* SUNOS_NO_IFADDRS */
+/*
+ * Inspired By:
+ * https://blogs.oracle.com/paulie/entry/retrieving_mac_address_in_solaris
+ * http://www.pauliesworld.org/project/getmac.c
+ */
+static int uv__set_phys_addr(uv_interface_address_t* address,
+ struct ifaddrs* ent) {
+
+ struct sockaddr_dl* sa_addr;
+ int sockfd;
+ size_t i;
+ struct arpreq arpreq;
+
+ /* This appears to only work as root */
+ sa_addr = (struct sockaddr_dl*)(ent->ifa_addr);
+ memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr));
+ for (i = 0; i < sizeof(address->phys_addr); i++) {
+ /* Check that all bytes of phys_addr are zero. */
+ if (address->phys_addr[i] != 0)
+ return 0;
+ }
+ memset(&arpreq, 0, sizeof(arpreq));
+ if (address->address.address4.sin_family == AF_INET) {
+ struct sockaddr_in* sin = ((struct sockaddr_in*)&arpreq.arp_pa);
+ sin->sin_addr.s_addr = address->address.address4.sin_addr.s_addr;
+ } else if (address->address.address4.sin_family == AF_INET6) {
+ struct sockaddr_in6* sin = ((struct sockaddr_in6*)&arpreq.arp_pa);
+ memcpy(sin->sin6_addr.s6_addr,
+ address->address.address6.sin6_addr.s6_addr,
+ sizeof(address->address.address6.sin6_addr.s6_addr));
+ } else {
+ return 0;
+ }
+
+ sockfd = socket(AF_INET, SOCK_DGRAM, 0);
+ if (sockfd < 0)
+ return UV__ERR(errno);
+
+ if (ioctl(sockfd, SIOCGARP, (char*)&arpreq) == -1) {
+ uv__close(sockfd);
+ return UV__ERR(errno);
+ }
+ memcpy(address->phys_addr, arpreq.arp_ha.sa_data, sizeof(address->phys_addr));
+ uv__close(sockfd);
+ return 0;
+}
+
+
+static int uv__ifaddr_exclude(struct ifaddrs *ent) {
+ if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
+ return 1;
+ if (ent->ifa_addr == NULL)
+ return 1;
+ if (ent->ifa_addr->sa_family != AF_INET &&
+ ent->ifa_addr->sa_family != AF_INET6)
+ return 1;
+ return 0;
+}
+
+int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
+ uv_interface_address_t* address;
+ struct ifaddrs* addrs;
+ struct ifaddrs* ent;
+
+ *count = 0;
+ *addresses = NULL;
+
+ if (getifaddrs(&addrs))
+ return UV__ERR(errno);
+
+ /* Count the number of interfaces */
+ for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+ if (uv__ifaddr_exclude(ent))
+ continue;
+ (*count)++;
+ }
+
+ if (*count == 0) {
+ freeifaddrs(addrs);
+ return 0;
+ }
+
+ *addresses = uv__malloc(*count * sizeof(**addresses));
+ if (!(*addresses)) {
+ freeifaddrs(addrs);
+ return UV_ENOMEM;
+ }
+
+ address = *addresses;
+
+ for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+ if (uv__ifaddr_exclude(ent))
+ continue;
+
+ address->name = uv__strdup(ent->ifa_name);
+
+ if (ent->ifa_addr->sa_family == AF_INET6) {
+ address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
+ } else {
+ address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
+ }
+
+ if (ent->ifa_netmask->sa_family == AF_INET6) {
+ address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
+ } else {
+ address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
+ }
+
+ address->is_internal = !!((ent->ifa_flags & IFF_PRIVATE) ||
+ (ent->ifa_flags & IFF_LOOPBACK));
+
+ uv__set_phys_addr(address, ent);
+ address++;
+ }
+
+ freeifaddrs(addrs);
+
+ return 0;
+}
+#endif /* SUNOS_NO_IFADDRS */
+
+void uv_free_interface_addresses(uv_interface_address_t* addresses,
+ int count) {
+ int i;
+
+ for (i = 0; i < count; i++) {
+ uv__free(addresses[i].name);
+ }
+
+ uv__free(addresses);
+}
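
A short usage sketch for the interface enumeration above, built on the public uv_interface_addresses() / uv_free_interface_addresses() pair plus libuv's uv_ip4_name()/uv_ip6_name() helpers (a 64-byte buffer is assumed to be large enough for the textual address):

#include <stdio.h>
#include <uv.h>

static void print_interfaces(void) {
  uv_interface_address_t* addrs;
  int count;
  int i;
  char ip[64];

  if (uv_interface_addresses(&addrs, &count) != 0)
    return;

  for (i = 0; i < count; i++) {
    if (addrs[i].address.address4.sin_family == AF_INET)
      uv_ip4_name(&addrs[i].address.address4, ip, sizeof(ip));
    else
      uv_ip6_name(&addrs[i].address.address6, ip, sizeof(ip));
    printf("%s%s: %s\n",
           addrs[i].name,
           addrs[i].is_internal ? " (internal)" : "",
           ip);
  }

  uv_free_interface_addresses(addrs, count);
}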
diff --git a/Utilities/cmlibuv/src/unix/sysinfo-loadavg.c b/Utilities/cmlibuv/src/unix/sysinfo-loadavg.c
new file mode 100644
index 0000000..ebad0e8
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/sysinfo-loadavg.c
@@ -0,0 +1,36 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdint.h>
+#include <sys/sysinfo.h>
+
+void uv_loadavg(double avg[3]) {
+ struct sysinfo info;
+
+ if (sysinfo(&info) < 0) return;
+
+ avg[0] = (double) info.loads[0] / 65536.0;
+ avg[1] = (double) info.loads[1] / 65536.0;
+ avg[2] = (double) info.loads[2] / 65536.0;
+}
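
Callers of uv_loadavg() should note that, as implemented above, the output array is left untouched when sysinfo() fails, so it is worth initializing it first; a minimal sketch:

#include <stdio.h>
#include <uv.h>

int main(void) {
  double avg[3] = { 0.0, 0.0, 0.0 };  /* stays zeroed if sysinfo() fails */

  uv_loadavg(avg);
  printf("load averages: %.2f %.2f %.2f\n", avg[0], avg[1], avg[2]);
  return 0;
}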
diff --git a/Utilities/cmlibuv/src/unix/sysinfo-memory.c b/Utilities/cmlibuv/src/unix/sysinfo-memory.c
new file mode 100644
index 0000000..23b4fc6
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/sysinfo-memory.c
@@ -0,0 +1,42 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdint.h>
+#include <sys/sysinfo.h>
+
+uint64_t uv_get_free_memory(void) {
+ struct sysinfo info;
+
+ if (sysinfo(&info) == 0)
+ return (uint64_t) info.freeram * info.mem_unit;
+ return 0;
+}
+
+uint64_t uv_get_total_memory(void) {
+ struct sysinfo info;
+
+ if (sysinfo(&info) == 0)
+ return (uint64_t) info.totalram * info.mem_unit;
+ return 0;
+}
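
Both getters above return 0 when sysinfo() fails, so a zero result is ambiguous between "failure" and "nothing reported"; a small usage sketch:

#include <stdio.h>
#include <inttypes.h>
#include <uv.h>

int main(void) {
  uint64_t free_mem = uv_get_free_memory();
  uint64_t total_mem = uv_get_total_memory();

  printf("memory: %" PRIu64 " of %" PRIu64 " bytes free\n",
         free_mem, total_mem);
  return 0;
}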
diff --git a/Utilities/cmlibuv/src/unix/tcp.c b/Utilities/cmlibuv/src/unix/tcp.c
new file mode 100644
index 0000000..18acd20
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/tcp.c
@@ -0,0 +1,461 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <assert.h>
+#include <errno.h>
+
+
+static int new_socket(uv_tcp_t* handle, int domain, unsigned long flags) {
+ struct sockaddr_storage saddr;
+ socklen_t slen;
+ int sockfd;
+ int err;
+
+ err = uv__socket(domain, SOCK_STREAM, 0);
+ if (err < 0)
+ return err;
+ sockfd = err;
+
+ err = uv__stream_open((uv_stream_t*) handle, sockfd, flags);
+ if (err) {
+ uv__close(sockfd);
+ return err;
+ }
+
+ if (flags & UV_HANDLE_BOUND) {
+ /* Bind this new socket to an arbitrary port */
+ slen = sizeof(saddr);
+ memset(&saddr, 0, sizeof(saddr));
+ if (getsockname(uv__stream_fd(handle), (struct sockaddr*) &saddr, &slen)) {
+ uv__close(sockfd);
+ return UV__ERR(errno);
+ }
+
+ if (bind(uv__stream_fd(handle), (struct sockaddr*) &saddr, slen)) {
+ uv__close(sockfd);
+ return UV__ERR(errno);
+ }
+ }
+
+ return 0;
+}
+
+
+static int maybe_new_socket(uv_tcp_t* handle, int domain, unsigned long flags) {
+ struct sockaddr_storage saddr;
+ socklen_t slen;
+
+ if (domain == AF_UNSPEC) {
+ handle->flags |= flags;
+ return 0;
+ }
+
+ if (uv__stream_fd(handle) != -1) {
+
+ if (flags & UV_HANDLE_BOUND) {
+
+ if (handle->flags & UV_HANDLE_BOUND) {
+ /* It is already bound to a port. */
+ handle->flags |= flags;
+ return 0;
+ }
+
+ /* Query to see if tcp socket is bound. */
+ slen = sizeof(saddr);
+ memset(&saddr, 0, sizeof(saddr));
+ if (getsockname(uv__stream_fd(handle), (struct sockaddr*) &saddr, &slen))
+ return UV__ERR(errno);
+
+ if ((saddr.ss_family == AF_INET6 &&
+ ((struct sockaddr_in6*) &saddr)->sin6_port != 0) ||
+ (saddr.ss_family == AF_INET &&
+ ((struct sockaddr_in*) &saddr)->sin_port != 0)) {
+ /* Handle is already bound to a port. */
+ handle->flags |= flags;
+ return 0;
+ }
+
+ /* Bind to arbitrary port */
+ if (bind(uv__stream_fd(handle), (struct sockaddr*) &saddr, slen))
+ return UV__ERR(errno);
+ }
+
+ handle->flags |= flags;
+ return 0;
+ }
+
+ return new_socket(handle, domain, flags);
+}
+
+
+int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* tcp, unsigned int flags) {
+ int domain;
+
+ /* Use the lower 8 bits for the domain */
+ domain = flags & 0xFF;
+ if (domain != AF_INET && domain != AF_INET6 && domain != AF_UNSPEC)
+ return UV_EINVAL;
+
+ if (flags & ~0xFF)
+ return UV_EINVAL;
+
+ uv__stream_init(loop, (uv_stream_t*)tcp, UV_TCP);
+
+ /* If anything fails beyond this point we need to remove the handle from
+ * the handle queue, since it was added by uv__handle_init in uv_stream_init.
+ */
+
+ if (domain != AF_UNSPEC) {
+ int err = maybe_new_socket(tcp, domain, 0);
+ if (err) {
+ QUEUE_REMOVE(&tcp->handle_queue);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+
+int uv_tcp_init(uv_loop_t* loop, uv_tcp_t* tcp) {
+ return uv_tcp_init_ex(loop, tcp, AF_UNSPEC);
+}
+
+
+int uv__tcp_bind(uv_tcp_t* tcp,
+ const struct sockaddr* addr,
+ unsigned int addrlen,
+ unsigned int flags) {
+ int err;
+ int on;
+
+ /* Cannot set IPv6-only mode on non-IPv6 socket. */
+ if ((flags & UV_TCP_IPV6ONLY) && addr->sa_family != AF_INET6)
+ return UV_EINVAL;
+
+ err = maybe_new_socket(tcp, addr->sa_family, 0);
+ if (err)
+ return err;
+
+ on = 1;
+ if (setsockopt(tcp->io_watcher.fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)))
+ return UV__ERR(errno);
+
+#ifndef __OpenBSD__
+#ifdef IPV6_V6ONLY
+ if (addr->sa_family == AF_INET6) {
+ on = (flags & UV_TCP_IPV6ONLY) != 0;
+ if (setsockopt(tcp->io_watcher.fd,
+ IPPROTO_IPV6,
+ IPV6_V6ONLY,
+ &on,
+ sizeof on) == -1) {
+#if defined(__MVS__)
+ if (errno == EOPNOTSUPP)
+ return UV_EINVAL;
+#endif
+ return UV__ERR(errno);
+ }
+ }
+#endif
+#endif
+
+ errno = 0;
+ if (bind(tcp->io_watcher.fd, addr, addrlen) && errno != EADDRINUSE) {
+ if (errno == EAFNOSUPPORT)
+ /* OSX, other BSDs and SunOS fail with EAFNOSUPPORT when binding a
+ * socket created with AF_INET to an AF_INET6 address or vice versa. */
+ return UV_EINVAL;
+ return UV__ERR(errno);
+ }
+ tcp->delayed_error = UV__ERR(errno);
+
+ tcp->flags |= UV_HANDLE_BOUND;
+ if (addr->sa_family == AF_INET6)
+ tcp->flags |= UV_HANDLE_IPV6;
+
+ return 0;
+}
+
+
+int uv__tcp_connect(uv_connect_t* req,
+ uv_tcp_t* handle,
+ const struct sockaddr* addr,
+ unsigned int addrlen,
+ uv_connect_cb cb) {
+ int err;
+ int r;
+
+ assert(handle->type == UV_TCP);
+
+ if (handle->connect_req != NULL)
+ return UV_EALREADY; /* FIXME(bnoordhuis) UV_EINVAL or maybe UV_EBUSY. */
+
+ err = maybe_new_socket(handle,
+ addr->sa_family,
+ UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
+ if (err)
+ return err;
+
+ handle->delayed_error = 0;
+
+ do {
+ errno = 0;
+ r = connect(uv__stream_fd(handle), addr, addrlen);
+ } while (r == -1 && errno == EINTR);
+
+ /* We check not only the return value but also errno, because in rare
+ * cases connect() returns -1 while errno is 0 (seen e.g. on Android 4.3,
+ * OnePlus phone A0001_12_150227) and the TCP three-way handshake has in
+ * fact completed.
+ */
+ if (r == -1 && errno != 0) {
+ if (errno == EINPROGRESS)
+ ; /* not an error */
+ else if (errno == ECONNREFUSED
+#if defined(__OpenBSD__)
+ || errno == EINVAL
+#endif
+ )
+ /* If we get ECONNREFUSED (Solaris) or EINVAL (OpenBSD), wait until the
+ * next tick to report the error. Solaris and OpenBSD want to report it
+ * immediately -- other unixes want to wait.
+ */
+ handle->delayed_error = UV__ERR(ECONNREFUSED);
+ else
+ return UV__ERR(errno);
+ }
+
+ uv__req_init(handle->loop, req, UV_CONNECT);
+ req->cb = cb;
+ req->handle = (uv_stream_t*) handle;
+ QUEUE_INIT(&req->queue);
+ handle->connect_req = req;
+
+ uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
+
+ if (handle->delayed_error)
+ uv__io_feed(handle->loop, &handle->io_watcher);
+
+ return 0;
+}
+
+
+int uv_tcp_open(uv_tcp_t* handle, uv_os_sock_t sock) {
+ int err;
+
+ if (uv__fd_exists(handle->loop, sock))
+ return UV_EEXIST;
+
+ err = uv__nonblock(sock, 1);
+ if (err)
+ return err;
+
+ return uv__stream_open((uv_stream_t*)handle,
+ sock,
+ UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
+}
+
+
+int uv_tcp_getsockname(const uv_tcp_t* handle,
+ struct sockaddr* name,
+ int* namelen) {
+
+ if (handle->delayed_error)
+ return handle->delayed_error;
+
+ return uv__getsockpeername((const uv_handle_t*) handle,
+ getsockname,
+ name,
+ namelen);
+}
+
+
+int uv_tcp_getpeername(const uv_tcp_t* handle,
+ struct sockaddr* name,
+ int* namelen) {
+
+ if (handle->delayed_error)
+ return handle->delayed_error;
+
+ return uv__getsockpeername((const uv_handle_t*) handle,
+ getpeername,
+ name,
+ namelen);
+}
+
+
+int uv_tcp_close_reset(uv_tcp_t* handle, uv_close_cb close_cb) {
+ int fd;
+ struct linger l = { 1, 0 };
+
+ /* Disallow setting SO_LINGER to zero due to some platform inconsistencies */
+ if (handle->flags & UV_HANDLE_SHUTTING)
+ return UV_EINVAL;
+
+ fd = uv__stream_fd(handle);
+ if (0 != setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof(l)))
+ return UV__ERR(errno);
+
+ uv_close((uv_handle_t*) handle, close_cb);
+ return 0;
+}
+
+
+int uv_tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb) {
+ static int single_accept_cached = -1;
+ unsigned long flags;
+ int single_accept;
+ int err;
+
+ if (tcp->delayed_error)
+ return tcp->delayed_error;
+
+ single_accept = uv__load_relaxed(&single_accept_cached);
+ if (single_accept == -1) {
+ const char* val = getenv("UV_TCP_SINGLE_ACCEPT");
+ single_accept = (val != NULL && atoi(val) != 0); /* Off by default. */
+ uv__store_relaxed(&single_accept_cached, single_accept);
+ }
+
+ if (single_accept)
+ tcp->flags |= UV_HANDLE_TCP_SINGLE_ACCEPT;
+
+ flags = 0;
+#if defined(__MVS__)
+ /* On z/OS the listen call does not bind automatically if the socket is
+ unbound, so we have to bind it to an arbitrary port here.
+ */
+ flags |= UV_HANDLE_BOUND;
+#endif
+ err = maybe_new_socket(tcp, AF_INET, flags);
+ if (err)
+ return err;
+
+ if (listen(tcp->io_watcher.fd, backlog))
+ return UV__ERR(errno);
+
+ tcp->connection_cb = cb;
+ tcp->flags |= UV_HANDLE_BOUND;
+
+ /* Start listening for connections. */
+ tcp->io_watcher.cb = uv__server_io;
+ uv__io_start(tcp->loop, &tcp->io_watcher, POLLIN);
+
+ return 0;
+}
+
+
+int uv__tcp_nodelay(int fd, int on) {
+ if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on)))
+ return UV__ERR(errno);
+ return 0;
+}
+
+
+int uv__tcp_keepalive(int fd, int on, unsigned int delay) {
+ if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)))
+ return UV__ERR(errno);
+
+#ifdef TCP_KEEPIDLE
+ if (on) {
+ int intvl = 1; /* 1 second; same as default on Win32 */
+ int cnt = 10; /* 10 retries; same as hardcoded on Win32 */
+ if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &delay, sizeof(delay)))
+ return UV__ERR(errno);
+ if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl)))
+ return UV__ERR(errno);
+ if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt)))
+ return UV__ERR(errno);
+ }
+#endif
+
+ /* Solaris/SmartOS advertise TCP_KEEPALIVE in their system headers but do
+ * not actually support it, so skip it there.
+ */
+ /* FIXME(bnoordhuis) That's possibly because sizeof(delay) should be 1. */
+#if defined(TCP_KEEPALIVE) && !defined(__sun)
+ if (on && setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &delay, sizeof(delay)))
+ return UV__ERR(errno);
+#endif
+
+ return 0;
+}
+
+
+int uv_tcp_nodelay(uv_tcp_t* handle, int on) {
+ int err;
+
+ if (uv__stream_fd(handle) != -1) {
+ err = uv__tcp_nodelay(uv__stream_fd(handle), on);
+ if (err)
+ return err;
+ }
+
+ if (on)
+ handle->flags |= UV_HANDLE_TCP_NODELAY;
+ else
+ handle->flags &= ~UV_HANDLE_TCP_NODELAY;
+
+ return 0;
+}
+
+
+int uv_tcp_keepalive(uv_tcp_t* handle, int on, unsigned int delay) {
+ int err;
+
+ if (uv__stream_fd(handle) != -1) {
+ err = uv__tcp_keepalive(uv__stream_fd(handle), on, delay);
+ if (err)
+ return err;
+ }
+
+ if (on)
+ handle->flags |= UV_HANDLE_TCP_KEEPALIVE;
+ else
+ handle->flags &= ~UV_HANDLE_TCP_KEEPALIVE;
+
+ /* TODO Store delay if uv__stream_fd(handle) == -1 but don't want to enlarge
+ * uv_tcp_t with an int that's almost never used...
+ */
+
+ return 0;
+}
+
+
+int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) {
+ if (enable)
+ handle->flags &= ~UV_HANDLE_TCP_SINGLE_ACCEPT;
+ else
+ handle->flags |= UV_HANDLE_TCP_SINGLE_ACCEPT;
+ return 0;
+}
+
+
+void uv__tcp_close(uv_tcp_t* handle) {
+ uv__stream_close((uv_stream_t*)handle);
+}
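
To tie the pieces of this file together, a hedged sketch of a caller setting up a listening socket through the public wrappers; uv_tcp_bind() and uv_listen() are the public entry points that end up in uv__tcp_bind() and uv_tcp_listen() above. The callback names and port are illustrative and error handling is abbreviated:

#include <stdlib.h>
#include <uv.h>

static void on_client_closed(uv_handle_t* handle) {
  free(handle);
}

static void on_connection(uv_stream_t* server, int status) {
  uv_tcp_t* client;

  if (status < 0)
    return;

  client = malloc(sizeof(*client));
  uv_tcp_init(server->loop, client);
  if (uv_accept(server, (uv_stream_t*) client) != 0)
    uv_close((uv_handle_t*) client, on_client_closed);
  /* else: start reading from the client, enable uv_tcp_nodelay(), etc. */
}

static int listen_on_port(uv_loop_t* loop, uv_tcp_t* server, int port) {
  struct sockaddr_in addr;
  int err;

  uv_tcp_init(loop, server);
  uv_ip4_addr("0.0.0.0", port, &addr);

  err = uv_tcp_bind(server, (const struct sockaddr*) &addr, 0);
  if (err != 0)
    return err;

  return uv_listen((uv_stream_t*) server, 128, on_connection);
}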
diff --git a/Utilities/cmlibuv/src/unix/thread.c b/Utilities/cmlibuv/src/unix/thread.c
new file mode 100644
index 0000000..8aeb0ca
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/thread.c
@@ -0,0 +1,844 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <pthread.h>
+#include <assert.h>
+#include <errno.h>
+
+#include <sys/time.h>
+#include <sys/resource.h> /* getrlimit() */
+#include <unistd.h> /* getpagesize() */
+
+#include <limits.h>
+
+#ifdef __MVS__
+#include <sys/ipc.h>
+#include <sys/sem.h>
+#endif
+
+#if defined(__GLIBC__) && !defined(__UCLIBC__)
+#include <gnu/libc-version.h> /* gnu_get_libc_version() */
+#endif
+
+#undef NANOSEC
+#define NANOSEC ((uint64_t) 1e9)
+
+#if defined(PTHREAD_BARRIER_SERIAL_THREAD)
+STATIC_ASSERT(sizeof(uv_barrier_t) == sizeof(pthread_barrier_t));
+#endif
+
+/* Note: guard clauses should match uv_barrier_t's in include/uv/unix.h. */
+#if defined(_AIX) || \
+ defined(__OpenBSD__) || \
+ !defined(PTHREAD_BARRIER_SERIAL_THREAD)
+int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
+ struct _uv_barrier* b;
+ int rc;
+
+ if (barrier == NULL || count == 0)
+ return UV_EINVAL;
+
+ b = uv__malloc(sizeof(*b));
+ if (b == NULL)
+ return UV_ENOMEM;
+
+ b->in = 0;
+ b->out = 0;
+ b->threshold = count;
+
+ rc = uv_mutex_init(&b->mutex);
+ if (rc != 0)
+ goto error2;
+
+ rc = uv_cond_init(&b->cond);
+ if (rc != 0)
+ goto error;
+
+ barrier->b = b;
+ return 0;
+
+error:
+ uv_mutex_destroy(&b->mutex);
+error2:
+ uv__free(b);
+ return rc;
+}
+
+
+int uv_barrier_wait(uv_barrier_t* barrier) {
+ struct _uv_barrier* b;
+ int last;
+
+ if (barrier == NULL || barrier->b == NULL)
+ return UV_EINVAL;
+
+ b = barrier->b;
+ uv_mutex_lock(&b->mutex);
+
+ if (++b->in == b->threshold) {
+ b->in = 0;
+ b->out = b->threshold;
+ uv_cond_signal(&b->cond);
+ } else {
+ do
+ uv_cond_wait(&b->cond, &b->mutex);
+ while (b->in != 0);
+ }
+
+ last = (--b->out == 0);
+ if (!last)
+ uv_cond_signal(&b->cond); /* Not needed for last thread. */
+
+ uv_mutex_unlock(&b->mutex);
+ return last;
+}
+
+
+void uv_barrier_destroy(uv_barrier_t* barrier) {
+ struct _uv_barrier* b;
+
+ b = barrier->b;
+ uv_mutex_lock(&b->mutex);
+
+ assert(b->in == 0);
+ assert(b->out == 0);
+
+ if (b->in != 0 || b->out != 0)
+ abort();
+
+ uv_mutex_unlock(&b->mutex);
+ uv_mutex_destroy(&b->mutex);
+ uv_cond_destroy(&b->cond);
+
+ uv__free(barrier->b);
+ barrier->b = NULL;
+}
+
+#else
+
+int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
+ return UV__ERR(pthread_barrier_init(barrier, NULL, count));
+}
+
+
+int uv_barrier_wait(uv_barrier_t* barrier) {
+ int rc;
+
+ rc = pthread_barrier_wait(barrier);
+ if (rc != 0)
+ if (rc != PTHREAD_BARRIER_SERIAL_THREAD)
+ abort();
+
+ return rc == PTHREAD_BARRIER_SERIAL_THREAD;
+}
+
+
+void uv_barrier_destroy(uv_barrier_t* barrier) {
+ if (pthread_barrier_destroy(barrier))
+ abort();
+}
+
+#endif
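
Both barrier implementations above follow the same contract: every participating thread calls uv_barrier_wait(), and exactly one of them receives a non-zero return value and may destroy the barrier. A minimal sketch (the worker body and thread count are illustrative):

#include <uv.h>

static uv_barrier_t barrier;

static void worker(void* arg) {
  /* ... per-thread setup ... */
  if (uv_barrier_wait(&barrier) > 0)
    uv_barrier_destroy(&barrier);  /* only the "serializing" waiter cleans up */
}

static void run_workers(void) {
  uv_thread_t threads[3];
  int i;

  uv_barrier_init(&barrier, 3);
  for (i = 0; i < 3; i++)
    uv_thread_create(&threads[i], worker, NULL);
  for (i = 0; i < 3; i++)
    uv_thread_join(&threads[i]);
}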
+
+
+/* On MacOS, threads other than the main thread are created with a reduced
+ * stack size by default. Adjust to RLIMIT_STACK aligned to the page size.
+ *
+ * On Linux, threads created by musl have a much smaller stack than threads
+ * created by glibc (80 vs. 2048 or 4096 kB.) Follow glibc for consistency.
+ */
+static size_t thread_stack_size(void) {
+#if defined(__APPLE__) || defined(__linux__)
+ struct rlimit lim;
+
+ /* getrlimit() can fail on some aarch64 systems due to a glibc bug where
+ * the system call wrapper invokes the wrong system call. Don't treat
+ * that as fatal, just use the default stack size instead.
+ */
+ if (0 == getrlimit(RLIMIT_STACK, &lim) && lim.rlim_cur != RLIM_INFINITY) {
+ /* pthread_attr_setstacksize() expects page-aligned values. */
+ lim.rlim_cur -= lim.rlim_cur % (rlim_t) getpagesize();
+
+ /* Musl's PTHREAD_STACK_MIN is 2 KB on all architectures, which is
+ * too small to safely receive signals on.
+ *
+ * Musl's PTHREAD_STACK_MIN + MINSIGSTKSZ == 8192 on arm64 (which has
+ * the largest MINSIGSTKSZ of the architectures that musl supports) so
+ * let's use that as a lower bound.
+ *
+ * We use a hardcoded value because PTHREAD_STACK_MIN + MINSIGSTKSZ
+ * is between 28 and 133 KB when compiling against glibc, depending
+ * on the architecture.
+ */
+ if (lim.rlim_cur >= 8192)
+ if (lim.rlim_cur >= PTHREAD_STACK_MIN)
+ return lim.rlim_cur;
+ }
+#endif
+
+#if !defined(__linux__)
+ return 0;
+#elif defined(__PPC__) || defined(__ppc__) || defined(__powerpc__)
+ return 4 << 20; /* glibc default. */
+#else
+ return 2 << 20; /* glibc default. */
+#endif
+}
+
+
+int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) {
+ uv_thread_options_t params;
+ params.flags = UV_THREAD_NO_FLAGS;
+ return uv_thread_create_ex(tid, &params, entry, arg);
+}
+
+int uv_thread_create_ex(uv_thread_t* tid,
+ const uv_thread_options_t* params,
+ void (*entry)(void *arg),
+ void *arg) {
+ int err;
+ pthread_attr_t* attr;
+ pthread_attr_t attr_storage;
+ size_t pagesize;
+ size_t stack_size;
+
+ /* Used to squelch a -Wcast-function-type warning. */
+ union {
+ void (*in)(void*);
+ void* (*out)(void*);
+ } f;
+
+ stack_size =
+ params->flags & UV_THREAD_HAS_STACK_SIZE ? params->stack_size : 0;
+
+ attr = NULL;
+ if (stack_size == 0) {
+ stack_size = thread_stack_size();
+ } else {
+ pagesize = (size_t)getpagesize();
+ /* Round up to the nearest page boundary. */
+ stack_size = (stack_size + pagesize - 1) &~ (pagesize - 1);
+#ifdef PTHREAD_STACK_MIN
+ if (stack_size < PTHREAD_STACK_MIN)
+ stack_size = PTHREAD_STACK_MIN;
+#endif
+ }
+
+ if (stack_size > 0) {
+ attr = &attr_storage;
+
+ if (pthread_attr_init(attr))
+ abort();
+
+ if (pthread_attr_setstacksize(attr, stack_size))
+ abort();
+ }
+
+ f.in = entry;
+ err = pthread_create(tid, attr, f.out, arg);
+
+ if (attr != NULL)
+ pthread_attr_destroy(attr);
+
+ return UV__ERR(err);
+}
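
A sketch of requesting a specific stack size through the options struct handled above; the 4 MB figure is arbitrary and, as the code shows, it is rounded up to a page boundary and clamped to PTHREAD_STACK_MIN:

#include <uv.h>

static void worker(void* arg) {
  /* thread body */
}

static int spawn_with_big_stack(uv_thread_t* tid) {
  uv_thread_options_t options;

  options.flags = UV_THREAD_HAS_STACK_SIZE;
  options.stack_size = 4 * 1024 * 1024;  /* rounded/clamped by the code above */

  return uv_thread_create_ex(tid, &options, worker, NULL);
}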
+
+
+uv_thread_t uv_thread_self(void) {
+ return pthread_self();
+}
+
+int uv_thread_join(uv_thread_t *tid) {
+ return UV__ERR(pthread_join(*tid, NULL));
+}
+
+
+int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) {
+ return pthread_equal(*t1, *t2);
+}
+
+
+int uv_mutex_init(uv_mutex_t* mutex) {
+#if defined(NDEBUG) || !defined(PTHREAD_MUTEX_ERRORCHECK)
+ return UV__ERR(pthread_mutex_init(mutex, NULL));
+#else
+ pthread_mutexattr_t attr;
+ int err;
+
+ if (pthread_mutexattr_init(&attr))
+ abort();
+
+ if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK))
+ abort();
+
+ err = pthread_mutex_init(mutex, &attr);
+
+ if (pthread_mutexattr_destroy(&attr))
+ abort();
+
+ return UV__ERR(err);
+#endif
+}
+
+
+int uv_mutex_init_recursive(uv_mutex_t* mutex) {
+ pthread_mutexattr_t attr;
+ int err;
+
+ if (pthread_mutexattr_init(&attr))
+ abort();
+
+ if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE))
+ abort();
+
+ err = pthread_mutex_init(mutex, &attr);
+
+ if (pthread_mutexattr_destroy(&attr))
+ abort();
+
+ return UV__ERR(err);
+}
+
+
+void uv_mutex_destroy(uv_mutex_t* mutex) {
+ if (pthread_mutex_destroy(mutex))
+ abort();
+}
+
+
+void uv_mutex_lock(uv_mutex_t* mutex) {
+ if (pthread_mutex_lock(mutex))
+ abort();
+}
+
+
+int uv_mutex_trylock(uv_mutex_t* mutex) {
+ int err;
+
+ err = pthread_mutex_trylock(mutex);
+ if (err) {
+ if (err != EBUSY && err != EAGAIN)
+ abort();
+ return UV_EBUSY;
+ }
+
+ return 0;
+}
+
+
+void uv_mutex_unlock(uv_mutex_t* mutex) {
+ if (pthread_mutex_unlock(mutex))
+ abort();
+}
+
+
+int uv_rwlock_init(uv_rwlock_t* rwlock) {
+ return UV__ERR(pthread_rwlock_init(rwlock, NULL));
+}
+
+
+void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
+ if (pthread_rwlock_destroy(rwlock))
+ abort();
+}
+
+
+void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
+ if (pthread_rwlock_rdlock(rwlock))
+ abort();
+}
+
+
+int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) {
+ int err;
+
+ err = pthread_rwlock_tryrdlock(rwlock);
+ if (err) {
+ if (err != EBUSY && err != EAGAIN)
+ abort();
+ return UV_EBUSY;
+ }
+
+ return 0;
+}
+
+
+void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
+ if (pthread_rwlock_unlock(rwlock))
+ abort();
+}
+
+
+void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
+ if (pthread_rwlock_wrlock(rwlock))
+ abort();
+}
+
+
+int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {
+ int err;
+
+ err = pthread_rwlock_trywrlock(rwlock);
+ if (err) {
+ if (err != EBUSY && err != EAGAIN)
+ abort();
+ return UV_EBUSY;
+ }
+
+ return 0;
+}
+
+
+void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
+ if (pthread_rwlock_unlock(rwlock))
+ abort();
+}
+
+
+void uv_once(uv_once_t* guard, void (*callback)(void)) {
+ if (pthread_once(guard, callback))
+ abort();
+}
+
+#if defined(__APPLE__) && defined(__MACH__)
+
+int uv_sem_init(uv_sem_t* sem, unsigned int value) {
+ kern_return_t err;
+
+ err = semaphore_create(mach_task_self(), sem, SYNC_POLICY_FIFO, value);
+ if (err == KERN_SUCCESS)
+ return 0;
+ if (err == KERN_INVALID_ARGUMENT)
+ return UV_EINVAL;
+ if (err == KERN_RESOURCE_SHORTAGE)
+ return UV_ENOMEM;
+
+ abort();
+ return UV_EINVAL; /* Satisfy the compiler. */
+}
+
+
+void uv_sem_destroy(uv_sem_t* sem) {
+ if (semaphore_destroy(mach_task_self(), *sem))
+ abort();
+}
+
+
+void uv_sem_post(uv_sem_t* sem) {
+ if (semaphore_signal(*sem))
+ abort();
+}
+
+
+void uv_sem_wait(uv_sem_t* sem) {
+ int r;
+
+ do
+ r = semaphore_wait(*sem);
+ while (r == KERN_ABORTED);
+
+ if (r != KERN_SUCCESS)
+ abort();
+}
+
+
+int uv_sem_trywait(uv_sem_t* sem) {
+ mach_timespec_t interval;
+ kern_return_t err;
+
+ interval.tv_sec = 0;
+ interval.tv_nsec = 0;
+
+ err = semaphore_timedwait(*sem, interval);
+ if (err == KERN_SUCCESS)
+ return 0;
+ if (err == KERN_OPERATION_TIMED_OUT)
+ return UV_EAGAIN;
+
+ abort();
+ return UV_EINVAL; /* Satisfy the compiler. */
+}
+
+#else /* !(defined(__APPLE__) && defined(__MACH__)) */
+
+#if defined(__GLIBC__) && !defined(__UCLIBC__)
+
+/* Hack around https://sourceware.org/bugzilla/show_bug.cgi?id=12674
+ * by providing a custom implementation for glibc < 2.21 in terms of other
+ * concurrency primitives.
+ * Refs: https://github.com/nodejs/node/issues/19903 */
+
+/* To preserve ABI compatibility, we treat the uv_sem_t as storage for
+ * a pointer to the actual struct we're using underneath. */
+
+static uv_once_t glibc_version_check_once = UV_ONCE_INIT;
+static int platform_needs_custom_semaphore = 0;
+
+static void glibc_version_check(void) {
+ const char* version = gnu_get_libc_version();
+ platform_needs_custom_semaphore =
+ version[0] == '2' && version[1] == '.' &&
+ atoi(version + 2) < 21;
+}
+
+#elif defined(__MVS__)
+
+#define platform_needs_custom_semaphore 1
+
+#else /* !defined(__GLIBC__) && !defined(__MVS__) */
+
+#define platform_needs_custom_semaphore 0
+
+#endif
+
+typedef struct uv_semaphore_s {
+ uv_mutex_t mutex;
+ uv_cond_t cond;
+ unsigned int value;
+} uv_semaphore_t;
+
+#if (defined(__GLIBC__) && !defined(__UCLIBC__)) || \
+ platform_needs_custom_semaphore
+STATIC_ASSERT(sizeof(uv_sem_t) >= sizeof(uv_semaphore_t*));
+#endif
+
+static int uv__custom_sem_init(uv_sem_t* sem_, unsigned int value) {
+ int err;
+ uv_semaphore_t* sem;
+
+ sem = uv__malloc(sizeof(*sem));
+ if (sem == NULL)
+ return UV_ENOMEM;
+
+ if ((err = uv_mutex_init(&sem->mutex)) != 0) {
+ uv__free(sem);
+ return err;
+ }
+
+ if ((err = uv_cond_init(&sem->cond)) != 0) {
+ uv_mutex_destroy(&sem->mutex);
+ uv__free(sem);
+ return err;
+ }
+
+ sem->value = value;
+ *(uv_semaphore_t**)sem_ = sem;
+ return 0;
+}
+
+
+static void uv__custom_sem_destroy(uv_sem_t* sem_) {
+ uv_semaphore_t* sem;
+
+ sem = *(uv_semaphore_t**)sem_;
+ uv_cond_destroy(&sem->cond);
+ uv_mutex_destroy(&sem->mutex);
+ uv__free(sem);
+}
+
+
+static void uv__custom_sem_post(uv_sem_t* sem_) {
+ uv_semaphore_t* sem;
+
+ sem = *(uv_semaphore_t**)sem_;
+ uv_mutex_lock(&sem->mutex);
+ sem->value++;
+ if (sem->value == 1)
+ uv_cond_signal(&sem->cond);
+ uv_mutex_unlock(&sem->mutex);
+}
+
+
+static void uv__custom_sem_wait(uv_sem_t* sem_) {
+ uv_semaphore_t* sem;
+
+ sem = *(uv_semaphore_t**)sem_;
+ uv_mutex_lock(&sem->mutex);
+ while (sem->value == 0)
+ uv_cond_wait(&sem->cond, &sem->mutex);
+ sem->value--;
+ uv_mutex_unlock(&sem->mutex);
+}
+
+
+static int uv__custom_sem_trywait(uv_sem_t* sem_) {
+ uv_semaphore_t* sem;
+
+ sem = *(uv_semaphore_t**)sem_;
+ if (uv_mutex_trylock(&sem->mutex) != 0)
+ return UV_EAGAIN;
+
+ if (sem->value == 0) {
+ uv_mutex_unlock(&sem->mutex);
+ return UV_EAGAIN;
+ }
+
+ sem->value--;
+ uv_mutex_unlock(&sem->mutex);
+
+ return 0;
+}
+
+static int uv__sem_init(uv_sem_t* sem, unsigned int value) {
+ if (sem_init(sem, 0, value))
+ return UV__ERR(errno);
+ return 0;
+}
+
+
+static void uv__sem_destroy(uv_sem_t* sem) {
+ if (sem_destroy(sem))
+ abort();
+}
+
+
+static void uv__sem_post(uv_sem_t* sem) {
+ if (sem_post(sem))
+ abort();
+}
+
+
+static void uv__sem_wait(uv_sem_t* sem) {
+ int r;
+
+ do
+ r = sem_wait(sem);
+ while (r == -1 && errno == EINTR);
+
+ if (r)
+ abort();
+}
+
+
+static int uv__sem_trywait(uv_sem_t* sem) {
+ int r;
+
+ do
+ r = sem_trywait(sem);
+ while (r == -1 && errno == EINTR);
+
+ if (r) {
+ if (errno == EAGAIN)
+ return UV_EAGAIN;
+ abort();
+ }
+
+ return 0;
+}
+
+int uv_sem_init(uv_sem_t* sem, unsigned int value) {
+#if defined(__GLIBC__) && !defined(__UCLIBC__)
+ uv_once(&glibc_version_check_once, glibc_version_check);
+#endif
+
+ if (platform_needs_custom_semaphore)
+ return uv__custom_sem_init(sem, value);
+ else
+ return uv__sem_init(sem, value);
+}
+
+
+void uv_sem_destroy(uv_sem_t* sem) {
+ if (platform_needs_custom_semaphore)
+ uv__custom_sem_destroy(sem);
+ else
+ uv__sem_destroy(sem);
+}
+
+
+void uv_sem_post(uv_sem_t* sem) {
+ if (platform_needs_custom_semaphore)
+ uv__custom_sem_post(sem);
+ else
+ uv__sem_post(sem);
+}
+
+
+void uv_sem_wait(uv_sem_t* sem) {
+ if (platform_needs_custom_semaphore)
+ uv__custom_sem_wait(sem);
+ else
+ uv__sem_wait(sem);
+}
+
+
+int uv_sem_trywait(uv_sem_t* sem) {
+ if (platform_needs_custom_semaphore)
+ return uv__custom_sem_trywait(sem);
+ else
+ return uv__sem_trywait(sem);
+}
+
+#endif /* defined(__APPLE__) && defined(__MACH__) */
+
+
+#if defined(__APPLE__) && defined(__MACH__) || defined(__MVS__)
+
+int uv_cond_init(uv_cond_t* cond) {
+ return UV__ERR(pthread_cond_init(cond, NULL));
+}
+
+#else /* !(defined(__APPLE__) && defined(__MACH__)) */
+
+int uv_cond_init(uv_cond_t* cond) {
+ pthread_condattr_t attr;
+ int err;
+
+ err = pthread_condattr_init(&attr);
+ if (err)
+ return UV__ERR(err);
+
+#if !defined(__hpux)
+ err = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
+ if (err)
+ goto error2;
+#endif
+
+ err = pthread_cond_init(cond, &attr);
+ if (err)
+ goto error2;
+
+ err = pthread_condattr_destroy(&attr);
+ if (err)
+ goto error;
+
+ return 0;
+
+error:
+ pthread_cond_destroy(cond);
+error2:
+ pthread_condattr_destroy(&attr);
+ return UV__ERR(err);
+}
+
+#endif /* defined(__APPLE__) && defined(__MACH__) */
+
+void uv_cond_destroy(uv_cond_t* cond) {
+#if defined(__APPLE__) && defined(__MACH__)
+ /* It has been reported that destroying condition variables that have been
+ * signalled but not waited on can sometimes result in application crashes.
+ * See https://codereview.chromium.org/1323293005.
+ */
+ pthread_mutex_t mutex;
+ struct timespec ts;
+ int err;
+
+ if (pthread_mutex_init(&mutex, NULL))
+ abort();
+
+ if (pthread_mutex_lock(&mutex))
+ abort();
+
+ ts.tv_sec = 0;
+ ts.tv_nsec = 1;
+
+ err = pthread_cond_timedwait_relative_np(cond, &mutex, &ts);
+ if (err != 0 && err != ETIMEDOUT)
+ abort();
+
+ if (pthread_mutex_unlock(&mutex))
+ abort();
+
+ if (pthread_mutex_destroy(&mutex))
+ abort();
+#endif /* defined(__APPLE__) && defined(__MACH__) */
+
+ if (pthread_cond_destroy(cond))
+ abort();
+}
+
+void uv_cond_signal(uv_cond_t* cond) {
+ if (pthread_cond_signal(cond))
+ abort();
+}
+
+void uv_cond_broadcast(uv_cond_t* cond) {
+ if (pthread_cond_broadcast(cond))
+ abort();
+}
+
+void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
+ if (pthread_cond_wait(cond, mutex))
+ abort();
+}
+
+
+int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
+ int r;
+ struct timespec ts;
+#if defined(__MVS__)
+ struct timeval tv;
+#endif
+
+#if defined(__APPLE__) && defined(__MACH__)
+ ts.tv_sec = timeout / NANOSEC;
+ ts.tv_nsec = timeout % NANOSEC;
+ r = pthread_cond_timedwait_relative_np(cond, mutex, &ts);
+#else
+#if defined(__MVS__)
+ if (gettimeofday(&tv, NULL))
+ abort();
+ timeout += tv.tv_sec * NANOSEC + tv.tv_usec * 1e3;
+#else
+ timeout += uv__hrtime(UV_CLOCK_PRECISE);
+#endif
+ ts.tv_sec = timeout / NANOSEC;
+ ts.tv_nsec = timeout % NANOSEC;
+ r = pthread_cond_timedwait(cond, mutex, &ts);
+#endif
+
+
+ if (r == 0)
+ return 0;
+
+ if (r == ETIMEDOUT)
+ return UV_ETIMEDOUT;
+
+ abort();
+#ifndef __SUNPRO_C
+ return UV_EINVAL; /* Satisfy the compiler. */
+#endif
+}
+
+
+int uv_key_create(uv_key_t* key) {
+ return UV__ERR(pthread_key_create(key, NULL));
+}
+
+
+void uv_key_delete(uv_key_t* key) {
+ if (pthread_key_delete(*key))
+ abort();
+}
+
+
+void* uv_key_get(uv_key_t* key) {
+ return pthread_getspecific(*key);
+}
+
+
+void uv_key_set(uv_key_t* key, void* value) {
+ if (pthread_setspecific(*key, value))
+ abort();
+}
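
As a usage note for uv_cond_timedwait() above: the timeout is relative and given in nanoseconds, and the only expected return values are 0 and UV_ETIMEDOUT. A hedged sketch of the usual wait loop (the `ready` flag is illustrative):

#include <stdint.h>
#include <uv.h>

/* Wait up to one second for *ready to become non-zero. */
static int wait_until_ready(uv_mutex_t* mutex, uv_cond_t* cond,
                            const int* ready) {
  int rc = 0;

  uv_mutex_lock(mutex);
  while (*ready == 0 && rc == 0)
    rc = uv_cond_timedwait(cond, mutex, (uint64_t) 1e9);  /* 1e9 ns == 1 s */
  uv_mutex_unlock(mutex);

  return rc;  /* 0 on success, UV_ETIMEDOUT on timeout */
}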
diff --git a/Utilities/cmlibuv/src/unix/tty.c b/Utilities/cmlibuv/src/unix/tty.c
new file mode 100644
index 0000000..82cd723
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/tty.c
@@ -0,0 +1,399 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+#include "spinlock.h"
+
+#include <stdlib.h>
+#include <assert.h>
+#include <unistd.h>
+#include <termios.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+
+#if defined(__MVS__) && !defined(IMAXBEL)
+#define IMAXBEL 0
+#endif
+
+#if defined(__PASE__)
+/* On IBM i PASE, for better compatibility with running interactive programs in
+ * a 5250 environment, isatty() will return true for the stdin/stdout/stderr
+ * streams created by QSH/QP2TERM.
+ *
+ * For more, see docs on PASE_STDIO_ISATTY in
+ * https://www.ibm.com/support/knowledgecenter/ssw_ibm_i_74/apis/pase_environ.htm
+ *
+ * This behavior causes problems for Node, which expects that if isatty()
+ * returns true, TTY ioctls will be supported by that fd (not an unreasonable
+ * expectation) and crashes with assertion errors when they are not.
+ *
+ * Here, we create our own version of isatty() that uses ioctl() to identify
+ * whether the fd is *really* a TTY or not.
+ */
+static int isreallyatty(int file) {
+ int rc;
+
+ rc = !ioctl(file, TXISATTY + 0x81, NULL);
+ if (!rc && errno != EBADF)
+ errno = ENOTTY;
+
+ return rc;
+}
+#define isatty(fd) isreallyatty(fd)
+#endif
+
+static int orig_termios_fd = -1;
+static struct termios orig_termios;
+static uv_spinlock_t termios_spinlock = UV_SPINLOCK_INITIALIZER;
+
+static int uv__tty_is_slave(const int fd) {
+ int result;
+#if defined(__linux__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
+ int dummy;
+
+ result = ioctl(fd, TIOCGPTN, &dummy) != 0;
+#elif defined(__APPLE__)
+ char dummy[256];
+
+ result = ioctl(fd, TIOCPTYGNAME, &dummy) != 0;
+#elif defined(__NetBSD__)
+ /*
+ * As an extension, NetBSD's ptsname(3) and ptsname_r(3) return the slave
+ * device name for both descriptors, the master one and the slave one.
+ *
+ * Instead, compare the descriptor's major device number with that of the
+ * pts driver.
+ *
+ * The major numbers are machine-dependent; on NetBSD/amd64 they are:
+ * - master tty: ptc - major 6
+ * - slave tty: pts - major 5
+ */
+
+ struct stat sb;
+ /* Lookup device's major for the pts driver and cache it. */
+ static devmajor_t pts = NODEVMAJOR;
+
+ if (pts == NODEVMAJOR) {
+ pts = getdevmajor("pts", S_IFCHR);
+ if (pts == NODEVMAJOR)
+ abort();
+ }
+
+ /* Lookup stat structure behind the file descriptor. */
+ if (fstat(fd, &sb) != 0)
+ abort();
+
+ /* Assert character device. */
+ if (!S_ISCHR(sb.st_mode))
+ abort();
+
+ /* Assert valid major. */
+ if (major(sb.st_rdev) == NODEVMAJOR)
+ abort();
+
+ result = (pts == major(sb.st_rdev));
+#else
+ /* Fall back to ptsname(3). */
+ result = ptsname(fd) == NULL;
+#endif
+ return result;
+}
+
+int uv_tty_init(uv_loop_t* loop, uv_tty_t* tty, int fd, int unused) {
+ uv_handle_type type;
+ int flags = 0;
+ int newfd = -1;
+ int r;
+ int saved_flags;
+ int mode;
+ char path[256];
+ (void)unused; /* deprecated parameter is no longer needed */
+
+ /* File descriptors that refer to files cannot be monitored with epoll.
+ * That restriction also applies to character devices like /dev/random
+ * (but obviously not /dev/tty.)
+ */
+ type = uv_guess_handle(fd);
+ if (type == UV_FILE || type == UV_UNKNOWN_HANDLE)
+ return UV_EINVAL;
+
+ /* Save the fd flags in case we need to restore them due to an error. */
+ do
+ saved_flags = fcntl(fd, F_GETFL);
+ while (saved_flags == -1 && errno == EINTR);
+
+ if (saved_flags == -1)
+ return UV__ERR(errno);
+ mode = saved_flags & O_ACCMODE;
+
+ /* Reopen the file descriptor when it refers to a tty. This lets us put the
+ * tty in non-blocking mode without affecting other processes that share it
+ * with us.
+ *
+ * Example: `node | cat` - if we put our fd 0 in non-blocking mode, it also
+ * affects fd 1 of `cat` because both file descriptors refer to the same
+ * struct file in the kernel. When we reopen our fd 0, it points to a
+ * different struct file, hence changing its properties doesn't affect
+ * other processes.
+ */
+ if (type == UV_TTY) {
+ /* Reopening a pty in master mode won't work either because the reopened
+ * pty will be in slave mode (*BSD) or reopening will allocate a new
+ * master/slave pair (Linux). Therefore check if the fd points to a
+ * slave device.
+ */
+ if (uv__tty_is_slave(fd) && ttyname_r(fd, path, sizeof(path)) == 0)
+ r = uv__open_cloexec(path, mode | O_NOCTTY);
+ else
+ r = -1;
+
+ if (r < 0) {
+ /* fallback to using blocking writes */
+ if (mode != O_RDONLY)
+ flags |= UV_HANDLE_BLOCKING_WRITES;
+ goto skip;
+ }
+
+ newfd = r;
+
+ r = uv__dup2_cloexec(newfd, fd);
+ if (r < 0 && r != UV_EINVAL) {
+ /* EINVAL means newfd == fd which could conceivably happen if another
+ * thread called close(fd) between our calls to isatty() and open().
+ * That's a rather unlikely event but let's handle it anyway.
+ */
+ uv__close(newfd);
+ return r;
+ }
+
+ fd = newfd;
+ }
+
+skip:
+ uv__stream_init(loop, (uv_stream_t*) tty, UV_TTY);
+
+ /* If anything fails beyond this point we need to remove the handle from
+ * the handle queue, since it was added by uv__handle_init in uv_stream_init.
+ */
+
+ if (!(flags & UV_HANDLE_BLOCKING_WRITES))
+ uv__nonblock(fd, 1);
+
+#if defined(__APPLE__)
+ r = uv__stream_try_select((uv_stream_t*) tty, &fd);
+ if (r) {
+ int rc = r;
+ if (newfd != -1)
+ uv__close(newfd);
+ QUEUE_REMOVE(&tty->handle_queue);
+ do
+ r = fcntl(fd, F_SETFL, saved_flags);
+ while (r == -1 && errno == EINTR);
+ return rc;
+ }
+#endif
+
+ if (mode != O_WRONLY)
+ flags |= UV_HANDLE_READABLE;
+ if (mode != O_RDONLY)
+ flags |= UV_HANDLE_WRITABLE;
+
+ uv__stream_open((uv_stream_t*) tty, fd, flags);
+ tty->mode = UV_TTY_MODE_NORMAL;
+
+ return 0;
+}
+
+static void uv__tty_make_raw(struct termios* tio) {
+ assert(tio != NULL);
+
+#if defined __sun || defined __MVS__ || defined __hpux
+ /*
+ * This implementation of cfmakeraw for Solaris and derivatives is taken from
+ * http://www.perkin.org.uk/posts/solaris-portability-cfmakeraw.html.
+ */
+ tio->c_iflag &= ~(IMAXBEL | IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR |
+ IGNCR | ICRNL | IXON);
+ tio->c_oflag &= ~OPOST;
+ tio->c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN);
+ tio->c_cflag &= ~(CSIZE | PARENB);
+ tio->c_cflag |= CS8;
+#else
+ cfmakeraw(tio);
+#endif /* #ifdef __sun */
+}
+
+int uv_tty_set_mode(uv_tty_t* tty, uv_tty_mode_t mode) {
+ struct termios tmp;
+ int fd;
+
+ if (tty->mode == (int) mode)
+ return 0;
+
+ fd = uv__stream_fd(tty);
+ if (tty->mode == UV_TTY_MODE_NORMAL && mode != UV_TTY_MODE_NORMAL) {
+ if (tcgetattr(fd, &tty->orig_termios))
+ return UV__ERR(errno);
+
+ /* This is used for uv_tty_reset_mode() */
+ uv_spinlock_lock(&termios_spinlock);
+ if (orig_termios_fd == -1) {
+ orig_termios = tty->orig_termios;
+ orig_termios_fd = fd;
+ }
+ uv_spinlock_unlock(&termios_spinlock);
+ }
+
+ tmp = tty->orig_termios;
+ switch (mode) {
+ case UV_TTY_MODE_NORMAL:
+ break;
+ case UV_TTY_MODE_RAW:
+ tmp.c_iflag &= ~(BRKINT | ICRNL | INPCK | ISTRIP | IXON);
+ tmp.c_oflag |= (ONLCR);
+ tmp.c_cflag |= (CS8);
+ tmp.c_lflag &= ~(ECHO | ICANON | IEXTEN | ISIG);
+ tmp.c_cc[VMIN] = 1;
+ tmp.c_cc[VTIME] = 0;
+ break;
+ case UV_TTY_MODE_IO:
+ uv__tty_make_raw(&tmp);
+ break;
+ }
+
+ /* Apply changes after draining */
+ if (tcsetattr(fd, TCSADRAIN, &tmp))
+ return UV__ERR(errno);
+
+ tty->mode = mode;
+ return 0;
+}
+
+
+int uv_tty_get_winsize(uv_tty_t* tty, int* width, int* height) {
+ struct winsize ws;
+ int err;
+
+ do
+ err = ioctl(uv__stream_fd(tty), TIOCGWINSZ, &ws);
+ while (err == -1 && errno == EINTR);
+
+ if (err == -1)
+ return UV__ERR(errno);
+
+ *width = ws.ws_col;
+ *height = ws.ws_row;
+
+ return 0;
+}
+
+
+uv_handle_type uv_guess_handle(uv_file file) {
+ struct sockaddr sa;
+ struct stat s;
+ socklen_t len;
+ int type;
+
+ if (file < 0)
+ return UV_UNKNOWN_HANDLE;
+
+ if (isatty(file))
+ return UV_TTY;
+
+ if (fstat(file, &s))
+ return UV_UNKNOWN_HANDLE;
+
+ if (S_ISREG(s.st_mode))
+ return UV_FILE;
+
+ if (S_ISCHR(s.st_mode))
+ return UV_FILE; /* XXX UV_NAMED_PIPE? */
+
+ if (S_ISFIFO(s.st_mode))
+ return UV_NAMED_PIPE;
+
+ if (!S_ISSOCK(s.st_mode))
+ return UV_UNKNOWN_HANDLE;
+
+ len = sizeof(type);
+ if (getsockopt(file, SOL_SOCKET, SO_TYPE, &type, &len))
+ return UV_UNKNOWN_HANDLE;
+
+ len = sizeof(sa);
+ if (getsockname(file, &sa, &len))
+ return UV_UNKNOWN_HANDLE;
+
+ if (type == SOCK_DGRAM)
+ if (sa.sa_family == AF_INET || sa.sa_family == AF_INET6)
+ return UV_UDP;
+
+ if (type == SOCK_STREAM) {
+#if defined(_AIX) || defined(__DragonFly__)
+ /* on AIX/DragonFly the getsockname call returns an empty sa structure
+ * for sockets of type AF_UNIX. For all other types it will
+ * return a properly filled in structure.
+ */
+ if (len == 0)
+ return UV_NAMED_PIPE;
+#endif /* defined(_AIX) || defined(__DragonFly__) */
+
+ if (sa.sa_family == AF_INET || sa.sa_family == AF_INET6)
+ return UV_TCP;
+ if (sa.sa_family == AF_UNIX)
+ return UV_NAMED_PIPE;
+ }
+
+ return UV_UNKNOWN_HANDLE;
+}
+
+
+/* This function is async signal-safe, meaning that it's safe to call from
+ * inside a signal handler _unless_ execution was inside uv_tty_set_mode()'s
+ * critical section when the signal was raised.
+ */
+int uv_tty_reset_mode(void) {
+ int saved_errno;
+ int err;
+
+ saved_errno = errno;
+ if (!uv_spinlock_trylock(&termios_spinlock))
+ return UV_EBUSY; /* In uv_tty_set_mode(). */
+
+ err = 0;
+ if (orig_termios_fd != -1)
+ if (tcsetattr(orig_termios_fd, TCSANOW, &orig_termios))
+ err = UV__ERR(errno);
+
+ uv_spinlock_unlock(&termios_spinlock);
+ errno = saved_errno;
+
+ return err;
+}
+
+void uv_tty_set_vterm_state(uv_tty_vtermstate_t state) {
+}
+
+int uv_tty_get_vterm_state(uv_tty_vtermstate_t* state) {
+ return UV_ENOTSUP;
+}
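
A small sketch of the intended call sequence for the TTY mode functions above: put stdin into raw mode, then rely on uv_tty_reset_mode(), which uses the orig_termios saved by uv_tty_set_mode(), to restore the terminal afterwards. Error handling is abbreviated:

#include <uv.h>

static int with_raw_stdin(uv_loop_t* loop, uv_tty_t* tty) {
  int err;

  err = uv_tty_init(loop, tty, 0 /* stdin */, 0 /* unused */);
  if (err != 0)
    return err;

  err = uv_tty_set_mode(tty, UV_TTY_MODE_RAW);
  if (err != 0)
    return err;

  /* ... read key presses via uv_read_start() on (uv_stream_t*) tty ... */

  return uv_tty_reset_mode();  /* restore the saved termios settings */
}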
diff --git a/Utilities/cmlibuv/src/unix/udp.c b/Utilities/cmlibuv/src/unix/udp.c
new file mode 100644
index 0000000..7d699a1
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/udp.c
@@ -0,0 +1,1332 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <assert.h>
+#include <string.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <unistd.h>
+#if defined(__MVS__)
+#include <xti.h>
+#endif
+#include <sys/un.h>
+
+#define UV__UDP_DGRAM_MAXSIZE (64 * 1024)
+
+#if defined(IPV6_JOIN_GROUP) && !defined(IPV6_ADD_MEMBERSHIP)
+# define IPV6_ADD_MEMBERSHIP IPV6_JOIN_GROUP
+#endif
+
+#if defined(IPV6_LEAVE_GROUP) && !defined(IPV6_DROP_MEMBERSHIP)
+# define IPV6_DROP_MEMBERSHIP IPV6_LEAVE_GROUP
+#endif
+
+union uv__sockaddr {
+ struct sockaddr_in6 in6;
+ struct sockaddr_in in;
+ struct sockaddr addr;
+};
+
+static void uv__udp_run_completed(uv_udp_t* handle);
+static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents);
+static void uv__udp_recvmsg(uv_udp_t* handle);
+static void uv__udp_sendmsg(uv_udp_t* handle);
+static int uv__udp_maybe_deferred_bind(uv_udp_t* handle,
+ int domain,
+ unsigned int flags);
+
+#if HAVE_MMSG
+
+#define UV__MMSG_MAXWIDTH 20
+
+static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf);
+static void uv__udp_sendmmsg(uv_udp_t* handle);
+
+static int uv__recvmmsg_avail;
+static int uv__sendmmsg_avail;
+static uv_once_t once = UV_ONCE_INIT;
+
+static void uv__udp_mmsg_init(void) {
+ int ret;
+ int s;
+ s = uv__socket(AF_INET, SOCK_DGRAM, 0);
+ if (s < 0)
+ return;
+ ret = uv__sendmmsg(s, NULL, 0);
+ if (ret == 0 || errno != ENOSYS) {
+ uv__sendmmsg_avail = 1;
+ uv__recvmmsg_avail = 1;
+ } else {
+ ret = uv__recvmmsg(s, NULL, 0);
+ if (ret == 0 || errno != ENOSYS)
+ uv__recvmmsg_avail = 1;
+ }
+ uv__close(s);
+}
+
+#endif
+
+void uv__udp_close(uv_udp_t* handle) {
+ uv__io_close(handle->loop, &handle->io_watcher);
+ uv__handle_stop(handle);
+
+ if (handle->io_watcher.fd != -1) {
+ uv__close(handle->io_watcher.fd);
+ handle->io_watcher.fd = -1;
+ }
+}
+
+
+void uv__udp_finish_close(uv_udp_t* handle) {
+ uv_udp_send_t* req;
+ QUEUE* q;
+
+ assert(!uv__io_active(&handle->io_watcher, POLLIN | POLLOUT));
+ assert(handle->io_watcher.fd == -1);
+
+ while (!QUEUE_EMPTY(&handle->write_queue)) {
+ q = QUEUE_HEAD(&handle->write_queue);
+ QUEUE_REMOVE(q);
+
+ req = QUEUE_DATA(q, uv_udp_send_t, queue);
+ req->status = UV_ECANCELED;
+ QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
+ }
+
+ uv__udp_run_completed(handle);
+
+ assert(handle->send_queue_size == 0);
+ assert(handle->send_queue_count == 0);
+
+ /* Now tear down the handle. */
+ handle->recv_cb = NULL;
+ handle->alloc_cb = NULL;
+ /* but _do not_ touch close_cb */
+}
+
+
+static void uv__udp_run_completed(uv_udp_t* handle) {
+ uv_udp_send_t* req;
+ QUEUE* q;
+
+ assert(!(handle->flags & UV_HANDLE_UDP_PROCESSING));
+ handle->flags |= UV_HANDLE_UDP_PROCESSING;
+
+ while (!QUEUE_EMPTY(&handle->write_completed_queue)) {
+ q = QUEUE_HEAD(&handle->write_completed_queue);
+ QUEUE_REMOVE(q);
+
+ req = QUEUE_DATA(q, uv_udp_send_t, queue);
+ uv__req_unregister(handle->loop, req);
+
+ handle->send_queue_size -= uv__count_bufs(req->bufs, req->nbufs);
+ handle->send_queue_count--;
+
+ if (req->bufs != req->bufsml)
+ uv__free(req->bufs);
+ req->bufs = NULL;
+
+ if (req->send_cb == NULL)
+ continue;
+
+ /* req->status >= 0 == bytes written
+ * req->status < 0 == errno
+ */
+ if (req->status >= 0)
+ req->send_cb(req, 0);
+ else
+ req->send_cb(req, req->status);
+ }
+
+ if (QUEUE_EMPTY(&handle->write_queue)) {
+ /* Pending queue and completion queue empty, stop watcher. */
+ uv__io_stop(handle->loop, &handle->io_watcher, POLLOUT);
+ if (!uv__io_active(&handle->io_watcher, POLLIN))
+ uv__handle_stop(handle);
+ }
+
+ handle->flags &= ~UV_HANDLE_UDP_PROCESSING;
+}
+
+
+static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents) {
+ uv_udp_t* handle;
+
+ handle = container_of(w, uv_udp_t, io_watcher);
+ assert(handle->type == UV_UDP);
+
+ if (revents & POLLIN)
+ uv__udp_recvmsg(handle);
+
+ if (revents & POLLOUT) {
+ uv__udp_sendmsg(handle);
+ uv__udp_run_completed(handle);
+ }
+}
+
+#if HAVE_MMSG
+static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf) {
+ struct sockaddr_in6 peers[UV__MMSG_MAXWIDTH];
+ struct iovec iov[UV__MMSG_MAXWIDTH];
+ struct uv__mmsghdr msgs[UV__MMSG_MAXWIDTH];
+ ssize_t nread;
+ uv_buf_t chunk_buf;
+ size_t chunks;
+ int flags;
+ size_t k;
+
+ /* prepare structures for recvmmsg */
+ chunks = buf->len / UV__UDP_DGRAM_MAXSIZE;
+ if (chunks > ARRAY_SIZE(iov))
+ chunks = ARRAY_SIZE(iov);
+ for (k = 0; k < chunks; ++k) {
+ iov[k].iov_base = buf->base + k * UV__UDP_DGRAM_MAXSIZE;
+ iov[k].iov_len = UV__UDP_DGRAM_MAXSIZE;
+ msgs[k].msg_hdr.msg_iov = iov + k;
+ msgs[k].msg_hdr.msg_iovlen = 1;
+ msgs[k].msg_hdr.msg_name = peers + k;
+ msgs[k].msg_hdr.msg_namelen = sizeof(peers[0]);
+ msgs[k].msg_hdr.msg_control = NULL;
+ msgs[k].msg_hdr.msg_controllen = 0;
+ msgs[k].msg_hdr.msg_flags = 0;
+ }
+
+ do
+ nread = uv__recvmmsg(handle->io_watcher.fd, msgs, chunks);
+ while (nread == -1 && errno == EINTR);
+
+ if (nread < 1) {
+ if (nread == 0 || errno == EAGAIN || errno == EWOULDBLOCK)
+ handle->recv_cb(handle, 0, buf, NULL, 0);
+ else
+ handle->recv_cb(handle, UV__ERR(errno), buf, NULL, 0);
+ } else {
+ /* pass each chunk to the application */
+ for (k = 0; k < (size_t) nread && handle->recv_cb != NULL; k++) {
+ flags = UV_UDP_MMSG_CHUNK;
+ if (msgs[k].msg_hdr.msg_flags & MSG_TRUNC)
+ flags |= UV_UDP_PARTIAL;
+
+ chunk_buf = uv_buf_init(iov[k].iov_base, iov[k].iov_len);
+ handle->recv_cb(handle,
+ msgs[k].msg_len,
+ &chunk_buf,
+ msgs[k].msg_hdr.msg_name,
+ flags);
+ }
+
+ /* one last callback so the original buffer is freed */
+ if (handle->recv_cb != NULL)
+ handle->recv_cb(handle, 0, buf, NULL, UV_UDP_MMSG_FREE);
+ }
+ return nread;
+}
+#endif
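
The chunked delivery above implies a specific ownership rule in the receive callback: buffers flagged UV_UDP_MMSG_CHUNK point into the allocation handed out by alloc_cb and must not be freed; the allocation is released only on the final callback without that flag (the UV_UDP_MMSG_FREE call above, or the plain nread == 0 case). A sketch, assuming alloc_cb hands out malloc'd buffers:

#include <stdlib.h>
#include <uv.h>

static void on_recv(uv_udp_t* handle,
                    ssize_t nread,
                    const uv_buf_t* buf,
                    const struct sockaddr* addr,
                    unsigned flags) {
  if (nread > 0 && addr != NULL) {
    /* process nread bytes at buf->base; UV_UDP_PARTIAL means truncation */
  }

  /* Free the alloc_cb buffer only when this is not an MMSG chunk. */
  if (!(flags & UV_UDP_MMSG_CHUNK))
    free(buf->base);
}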
+
+static void uv__udp_recvmsg(uv_udp_t* handle) {
+ struct sockaddr_storage peer;
+ struct msghdr h;
+ ssize_t nread;
+ uv_buf_t buf;
+ int flags;
+ int count;
+
+ assert(handle->recv_cb != NULL);
+ assert(handle->alloc_cb != NULL);
+
+ /* Prevent loop starvation when the data comes in as fast as (or faster than)
+ * we can read it. XXX Need to rearm fd if we switch to edge-triggered I/O.
+ */
+ count = 32;
+
+ do {
+ buf = uv_buf_init(NULL, 0);
+ handle->alloc_cb((uv_handle_t*) handle, UV__UDP_DGRAM_MAXSIZE, &buf);
+ if (buf.base == NULL || buf.len == 0) {
+ handle->recv_cb(handle, UV_ENOBUFS, &buf, NULL, 0);
+ return;
+ }
+ assert(buf.base != NULL);
+
+#if HAVE_MMSG
+ if (uv_udp_using_recvmmsg(handle)) {
+ nread = uv__udp_recvmmsg(handle, &buf);
+ if (nread > 0)
+ count -= nread;
+ continue;
+ }
+#endif
+
+ memset(&h, 0, sizeof(h));
+ memset(&peer, 0, sizeof(peer));
+ h.msg_name = &peer;
+ h.msg_namelen = sizeof(peer);
+ h.msg_iov = (void*) &buf;
+ h.msg_iovlen = 1;
+
+ do {
+ nread = recvmsg(handle->io_watcher.fd, &h, 0);
+ }
+ while (nread == -1 && errno == EINTR);
+
+ if (nread == -1) {
+ if (errno == EAGAIN || errno == EWOULDBLOCK)
+ handle->recv_cb(handle, 0, &buf, NULL, 0);
+ else
+ handle->recv_cb(handle, UV__ERR(errno), &buf, NULL, 0);
+ }
+ else {
+ flags = 0;
+ if (h.msg_flags & MSG_TRUNC)
+ flags |= UV_UDP_PARTIAL;
+
+ handle->recv_cb(handle, nread, &buf, (const struct sockaddr*) &peer, flags);
+ }
+ count--;
+ }
+ /* recv_cb callback may decide to pause or close the handle */
+ while (nread != -1
+ && count > 0
+ && handle->io_watcher.fd != -1
+ && handle->recv_cb != NULL);
+}
+
+#if HAVE_MMSG
+static void uv__udp_sendmmsg(uv_udp_t* handle) {
+ uv_udp_send_t* req;
+ struct uv__mmsghdr h[UV__MMSG_MAXWIDTH];
+ struct uv__mmsghdr *p;
+ QUEUE* q;
+ ssize_t npkts;
+ size_t pkts;
+ size_t i;
+
+ if (QUEUE_EMPTY(&handle->write_queue))
+ return;
+
+write_queue_drain:
+ for (pkts = 0, q = QUEUE_HEAD(&handle->write_queue);
+ pkts < UV__MMSG_MAXWIDTH && q != &handle->write_queue;
+ ++pkts, q = QUEUE_HEAD(q)) {
+ assert(q != NULL);
+ req = QUEUE_DATA(q, uv_udp_send_t, queue);
+ assert(req != NULL);
+
+ p = &h[pkts];
+ memset(p, 0, sizeof(*p));
+ if (req->addr.ss_family == AF_UNSPEC) {
+ p->msg_hdr.msg_name = NULL;
+ p->msg_hdr.msg_namelen = 0;
+ } else {
+ p->msg_hdr.msg_name = &req->addr;
+ if (req->addr.ss_family == AF_INET6)
+ p->msg_hdr.msg_namelen = sizeof(struct sockaddr_in6);
+ else if (req->addr.ss_family == AF_INET)
+ p->msg_hdr.msg_namelen = sizeof(struct sockaddr_in);
+ else if (req->addr.ss_family == AF_UNIX)
+ p->msg_hdr.msg_namelen = sizeof(struct sockaddr_un);
+ else {
+ assert(0 && "unsupported address family");
+ abort();
+ }
+ }
+ h[pkts].msg_hdr.msg_iov = (struct iovec*) req->bufs;
+ h[pkts].msg_hdr.msg_iovlen = req->nbufs;
+ }
+
+ do
+ npkts = uv__sendmmsg(handle->io_watcher.fd, h, pkts);
+ while (npkts == -1 && errno == EINTR);
+
+ if (npkts < 1) {
+ if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
+ return;
+ for (i = 0, q = QUEUE_HEAD(&handle->write_queue);
+ i < pkts && q != &handle->write_queue;
+ ++i, q = QUEUE_HEAD(&handle->write_queue)) {
+ assert(q != NULL);
+ req = QUEUE_DATA(q, uv_udp_send_t, queue);
+ assert(req != NULL);
+
+ req->status = UV__ERR(errno);
+ QUEUE_REMOVE(&req->queue);
+ QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
+ }
+ uv__io_feed(handle->loop, &handle->io_watcher);
+ return;
+ }
+
+ for (i = 0, q = QUEUE_HEAD(&handle->write_queue);
+ i < pkts && q != &handle->write_queue;
+ ++i, q = QUEUE_HEAD(&handle->write_queue)) {
+ assert(q != NULL);
+ req = QUEUE_DATA(q, uv_udp_send_t, queue);
+ assert(req != NULL);
+
+ req->status = req->bufs[0].len;
+
+ /* Sending a datagram is an atomic operation: either all data
+ * is written or nothing is (and EMSGSIZE is raised). That is
+ * why we don't handle partial writes. Just pop the request
+ * off the write queue and onto the completed queue, done.
+ */
+ QUEUE_REMOVE(&req->queue);
+ QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
+ }
+
+ /* couldn't batch everything, continue sending (jump to avoid stack growth) */
+ if (!QUEUE_EMPTY(&handle->write_queue))
+ goto write_queue_drain;
+ uv__io_feed(handle->loop, &handle->io_watcher);
+ return;
+}
+#endif
+
+static void uv__udp_sendmsg(uv_udp_t* handle) {
+ uv_udp_send_t* req;
+ struct msghdr h;
+ QUEUE* q;
+ ssize_t size;
+
+#if HAVE_MMSG
+ uv_once(&once, uv__udp_mmsg_init);
+ if (uv__sendmmsg_avail) {
+ uv__udp_sendmmsg(handle);
+ return;
+ }
+#endif
+
+ while (!QUEUE_EMPTY(&handle->write_queue)) {
+ q = QUEUE_HEAD(&handle->write_queue);
+ assert(q != NULL);
+
+ req = QUEUE_DATA(q, uv_udp_send_t, queue);
+ assert(req != NULL);
+
+ memset(&h, 0, sizeof h);
+ if (req->addr.ss_family == AF_UNSPEC) {
+ h.msg_name = NULL;
+ h.msg_namelen = 0;
+ } else {
+ h.msg_name = &req->addr;
+ if (req->addr.ss_family == AF_INET6)
+ h.msg_namelen = sizeof(struct sockaddr_in6);
+ else if (req->addr.ss_family == AF_INET)
+ h.msg_namelen = sizeof(struct sockaddr_in);
+ else if (req->addr.ss_family == AF_UNIX)
+ h.msg_namelen = sizeof(struct sockaddr_un);
+ else {
+ assert(0 && "unsupported address family");
+ abort();
+ }
+ }
+ h.msg_iov = (struct iovec*) req->bufs;
+ h.msg_iovlen = req->nbufs;
+
+ do {
+ size = sendmsg(handle->io_watcher.fd, &h, 0);
+ } while (size == -1 && errno == EINTR);
+
+ if (size == -1) {
+ if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
+ break;
+ }
+
+ req->status = (size == -1 ? UV__ERR(errno) : size);
+
+ /* Sending a datagram is an atomic operation: either all data
+ * is written or nothing is (and EMSGSIZE is raised). That is
+ * why we don't handle partial writes. Just pop the request
+ * off the write queue and onto the completed queue, done.
+ */
+ QUEUE_REMOVE(&req->queue);
+ QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
+ uv__io_feed(handle->loop, &handle->io_watcher);
+ }
+}
+
+/* On the BSDs, SO_REUSEPORT implies SO_REUSEADDR but with some additional
+ * refinements for programs that use multicast.
+ *
+ * Linux as of 3.9 has a SO_REUSEPORT socket option but with semantics that
+ * are different from the BSDs: it _shares_ the port rather than stealing it
+ * from the current listener. While useful, it's not something we can emulate
+ * on other platforms so we don't enable it.
+ *
+ * z/OS does not support getsockname with the SO_REUSEPORT option when using
+ * AF_UNIX.
+ */
+static int uv__set_reuse(int fd) {
+ int yes;
+ yes = 1;
+
+#if defined(SO_REUSEPORT) && defined(__MVS__)
+ struct sockaddr_in sockfd;
+ unsigned int sockfd_len = sizeof(sockfd);
+ if (getsockname(fd, (struct sockaddr*) &sockfd, &sockfd_len) == -1)
+ return UV__ERR(errno);
+ if (sockfd.sin_family == AF_UNIX) {
+ if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)))
+ return UV__ERR(errno);
+ } else {
+ if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)))
+ return UV__ERR(errno);
+ }
+#elif defined(SO_REUSEPORT) && !defined(__linux__)
+ if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)))
+ return UV__ERR(errno);
+#else
+ if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)))
+ return UV__ERR(errno);
+#endif
+
+ return 0;
+}
+
+
+int uv__udp_bind(uv_udp_t* handle,
+ const struct sockaddr* addr,
+ unsigned int addrlen,
+ unsigned int flags) {
+ int err;
+ int yes;
+ int fd;
+
+ /* Check for bad flags. */
+ if (flags & ~(UV_UDP_IPV6ONLY | UV_UDP_REUSEADDR))
+ return UV_EINVAL;
+
+ /* Cannot set IPv6-only mode on non-IPv6 socket. */
+ if ((flags & UV_UDP_IPV6ONLY) && addr->sa_family != AF_INET6)
+ return UV_EINVAL;
+
+ fd = handle->io_watcher.fd;
+ if (fd == -1) {
+ err = uv__socket(addr->sa_family, SOCK_DGRAM, 0);
+ if (err < 0)
+ return err;
+ fd = err;
+ handle->io_watcher.fd = fd;
+ }
+
+ if (flags & UV_UDP_REUSEADDR) {
+ err = uv__set_reuse(fd);
+ if (err)
+ return err;
+ }
+
+ if (flags & UV_UDP_IPV6ONLY) {
+#ifdef IPV6_V6ONLY
+ yes = 1;
+ if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &yes, sizeof yes) == -1) {
+ err = UV__ERR(errno);
+ return err;
+ }
+#else
+ err = UV_ENOTSUP;
+ return err;
+#endif
+ }
+
+ if (bind(fd, addr, addrlen)) {
+ err = UV__ERR(errno);
+ if (errno == EAFNOSUPPORT)
+ /* OS X, other BSDs and SunOS fail with EAFNOSUPPORT when binding a
+ * socket created with AF_INET to an AF_INET6 address or vice versa. */
+ err = UV_EINVAL;
+ return err;
+ }
+
+ if (addr->sa_family == AF_INET6)
+ handle->flags |= UV_HANDLE_IPV6;
+
+ handle->flags |= UV_HANDLE_BOUND;
+ return 0;
+}
+
+
+static int uv__udp_maybe_deferred_bind(uv_udp_t* handle,
+ int domain,
+ unsigned int flags) {
+ union uv__sockaddr taddr;
+ socklen_t addrlen;
+
+ if (handle->io_watcher.fd != -1)
+ return 0;
+
+ switch (domain) {
+ case AF_INET:
+ {
+ struct sockaddr_in* addr = &taddr.in;
+ memset(addr, 0, sizeof *addr);
+ addr->sin_family = AF_INET;
+ addr->sin_addr.s_addr = INADDR_ANY;
+ addrlen = sizeof *addr;
+ break;
+ }
+ case AF_INET6:
+ {
+ struct sockaddr_in6* addr = &taddr.in6;
+ memset(addr, 0, sizeof *addr);
+ addr->sin6_family = AF_INET6;
+ addr->sin6_addr = in6addr_any;
+ addrlen = sizeof *addr;
+ break;
+ }
+ default:
+ assert(0 && "unsupported address family");
+ abort();
+ }
+
+ return uv__udp_bind(handle, &taddr.addr, addrlen, flags);
+}
+
+
+int uv__udp_connect(uv_udp_t* handle,
+ const struct sockaddr* addr,
+ unsigned int addrlen) {
+ int err;
+
+ err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
+ if (err)
+ return err;
+
+ do {
+ errno = 0;
+ err = connect(handle->io_watcher.fd, addr, addrlen);
+ } while (err == -1 && errno == EINTR);
+
+ if (err)
+ return UV__ERR(errno);
+
+ handle->flags |= UV_HANDLE_UDP_CONNECTED;
+
+ return 0;
+}
+
+
+int uv__udp_disconnect(uv_udp_t* handle) {
+ int r;
+ struct sockaddr addr;
+
+ memset(&addr, 0, sizeof(addr));
+
+ addr.sa_family = AF_UNSPEC;
+
+ do {
+ errno = 0;
+ r = connect(handle->io_watcher.fd, &addr, sizeof(addr));
+ } while (r == -1 && errno == EINTR);
+
+ if (r == -1 && errno != EAFNOSUPPORT)
+ return UV__ERR(errno);
+
+ handle->flags &= ~UV_HANDLE_UDP_CONNECTED;
+ return 0;
+}
+
+
+int uv__udp_send(uv_udp_send_t* req,
+ uv_udp_t* handle,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ const struct sockaddr* addr,
+ unsigned int addrlen,
+ uv_udp_send_cb send_cb) {
+ int err;
+ int empty_queue;
+
+ assert(nbufs > 0);
+
+ if (addr) {
+ err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
+ if (err)
+ return err;
+ }
+
+ /* It's legal for send_queue_count > 0 even when the write_queue is empty;
+ * it means there are error-state requests in the write_completed_queue that
+ * will touch up send_queue_size/count later.
+ */
+ empty_queue = (handle->send_queue_count == 0);
+
+ uv__req_init(handle->loop, req, UV_UDP_SEND);
+ assert(addrlen <= sizeof(req->addr));
+ if (addr == NULL)
+ req->addr.ss_family = AF_UNSPEC;
+ else
+ memcpy(&req->addr, addr, addrlen);
+ req->send_cb = send_cb;
+ req->handle = handle;
+ req->nbufs = nbufs;
+
+ req->bufs = req->bufsml;
+ if (nbufs > ARRAY_SIZE(req->bufsml))
+ req->bufs = uv__malloc(nbufs * sizeof(bufs[0]));
+
+ if (req->bufs == NULL) {
+ uv__req_unregister(handle->loop, req);
+ return UV_ENOMEM;
+ }
+
+ memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0]));
+ handle->send_queue_size += uv__count_bufs(req->bufs, req->nbufs);
+ handle->send_queue_count++;
+ QUEUE_INSERT_TAIL(&handle->write_queue, &req->queue);
+ uv__handle_start(handle);
+
+ if (empty_queue && !(handle->flags & UV_HANDLE_UDP_PROCESSING)) {
+ uv__udp_sendmsg(handle);
+
+ /* `uv__udp_sendmsg` may not be able to complete the write immediately
+ * without blocking. In such cases the `io_watcher` has to be queued for
+ * an asynchronous write.
+ */
+ if (!QUEUE_EMPTY(&handle->write_queue))
+ uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
+ } else {
+ uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
+ }
+
+ return 0;
+}
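
For context, a hypothetical caller reaches uv__udp_send() through the public uv_udp_send() API roughly as sketched below (send_hello and on_send are made-up names; error handling is abbreviated):

#include <uv.h>

static void on_send(uv_udp_send_t* req, int status) {
  (void) req;
  (void) status;   /* 0 on success, or a UV_* error translated from errno */
}

int send_hello(uv_loop_t* loop, const char* ip, int port) {
  static uv_udp_t udp;           /* must outlive the request */
  static uv_udp_send_t req;
  static char msg[] = "hello";   /* buffer must stay valid until on_send runs */
  struct sockaddr_in dest;
  uv_buf_t buf;
  int rc;

  rc = uv_udp_init(loop, &udp);
  if (rc != 0)
    return rc;

  rc = uv_ip4_addr(ip, port, &dest);
  if (rc != 0)
    return rc;

  buf = uv_buf_init(msg, sizeof(msg) - 1);
  return uv_udp_send(&req, &udp, &buf, 1, (const struct sockaddr*) &dest, on_send);
}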
+
+
+int uv__udp_try_send(uv_udp_t* handle,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ const struct sockaddr* addr,
+ unsigned int addrlen) {
+ int err;
+ struct msghdr h;
+ ssize_t size;
+
+ assert(nbufs > 0);
+
+ /* already sending a message */
+ if (handle->send_queue_count != 0)
+ return UV_EAGAIN;
+
+ if (addr) {
+ err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
+ if (err)
+ return err;
+ } else {
+ assert(handle->flags & UV_HANDLE_UDP_CONNECTED);
+ }
+
+ memset(&h, 0, sizeof h);
+ h.msg_name = (struct sockaddr*) addr;
+ h.msg_namelen = addrlen;
+ h.msg_iov = (struct iovec*) bufs;
+ h.msg_iovlen = nbufs;
+
+ do {
+ size = sendmsg(handle->io_watcher.fd, &h, 0);
+ } while (size == -1 && errno == EINTR);
+
+ if (size == -1) {
+ if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
+ return UV_EAGAIN;
+ else
+ return UV__ERR(errno);
+ }
+
+ return size;
+}
+
+
+static int uv__udp_set_membership4(uv_udp_t* handle,
+ const struct sockaddr_in* multicast_addr,
+ const char* interface_addr,
+ uv_membership membership) {
+ struct ip_mreq mreq;
+ int optname;
+ int err;
+
+ memset(&mreq, 0, sizeof mreq);
+
+ if (interface_addr) {
+ err = uv_inet_pton(AF_INET, interface_addr, &mreq.imr_interface.s_addr);
+ if (err)
+ return err;
+ } else {
+ mreq.imr_interface.s_addr = htonl(INADDR_ANY);
+ }
+
+ mreq.imr_multiaddr.s_addr = multicast_addr->sin_addr.s_addr;
+
+ switch (membership) {
+ case UV_JOIN_GROUP:
+ optname = IP_ADD_MEMBERSHIP;
+ break;
+ case UV_LEAVE_GROUP:
+ optname = IP_DROP_MEMBERSHIP;
+ break;
+ default:
+ return UV_EINVAL;
+ }
+
+ if (setsockopt(handle->io_watcher.fd,
+ IPPROTO_IP,
+ optname,
+ &mreq,
+ sizeof(mreq))) {
+#if defined(__MVS__)
+ if (errno == ENXIO)
+ return UV_ENODEV;
+#endif
+ return UV__ERR(errno);
+ }
+
+ return 0;
+}
+
+
+static int uv__udp_set_membership6(uv_udp_t* handle,
+ const struct sockaddr_in6* multicast_addr,
+ const char* interface_addr,
+ uv_membership membership) {
+ int optname;
+ struct ipv6_mreq mreq;
+ struct sockaddr_in6 addr6;
+
+ memset(&mreq, 0, sizeof mreq);
+
+ if (interface_addr) {
+ if (uv_ip6_addr(interface_addr, 0, &addr6))
+ return UV_EINVAL;
+ mreq.ipv6mr_interface = addr6.sin6_scope_id;
+ } else {
+ mreq.ipv6mr_interface = 0;
+ }
+
+ mreq.ipv6mr_multiaddr = multicast_addr->sin6_addr;
+
+ switch (membership) {
+ case UV_JOIN_GROUP:
+ optname = IPV6_ADD_MEMBERSHIP;
+ break;
+ case UV_LEAVE_GROUP:
+ optname = IPV6_DROP_MEMBERSHIP;
+ break;
+ default:
+ return UV_EINVAL;
+ }
+
+ if (setsockopt(handle->io_watcher.fd,
+ IPPROTO_IPV6,
+ optname,
+ &mreq,
+ sizeof(mreq))) {
+#if defined(__MVS__)
+ if (errno == ENXIO)
+ return UV_ENODEV;
+#endif
+ return UV__ERR(errno);
+ }
+
+ return 0;
+}
+
+
+#if !defined(__OpenBSD__) && \
+ !defined(__NetBSD__) && \
+ !defined(__ANDROID__) && \
+ !defined(__DragonFly__) && \
+ !defined(__QNX__)
+static int uv__udp_set_source_membership4(uv_udp_t* handle,
+ const struct sockaddr_in* multicast_addr,
+ const char* interface_addr,
+ const struct sockaddr_in* source_addr,
+ uv_membership membership) {
+ struct ip_mreq_source mreq;
+ int optname;
+ int err;
+
+ err = uv__udp_maybe_deferred_bind(handle, AF_INET, UV_UDP_REUSEADDR);
+ if (err)
+ return err;
+
+ memset(&mreq, 0, sizeof(mreq));
+
+ if (interface_addr != NULL) {
+ err = uv_inet_pton(AF_INET, interface_addr, &mreq.imr_interface.s_addr);
+ if (err)
+ return err;
+ } else {
+ mreq.imr_interface.s_addr = htonl(INADDR_ANY);
+ }
+
+ mreq.imr_multiaddr.s_addr = multicast_addr->sin_addr.s_addr;
+ mreq.imr_sourceaddr.s_addr = source_addr->sin_addr.s_addr;
+
+ if (membership == UV_JOIN_GROUP)
+ optname = IP_ADD_SOURCE_MEMBERSHIP;
+ else if (membership == UV_LEAVE_GROUP)
+ optname = IP_DROP_SOURCE_MEMBERSHIP;
+ else
+ return UV_EINVAL;
+
+ if (setsockopt(handle->io_watcher.fd,
+ IPPROTO_IP,
+ optname,
+ &mreq,
+ sizeof(mreq))) {
+ return UV__ERR(errno);
+ }
+
+ return 0;
+}
+
+
+static int uv__udp_set_source_membership6(uv_udp_t* handle,
+ const struct sockaddr_in6* multicast_addr,
+ const char* interface_addr,
+ const struct sockaddr_in6* source_addr,
+ uv_membership membership) {
+ struct group_source_req mreq;
+ struct sockaddr_in6 addr6;
+ int optname;
+ int err;
+
+ err = uv__udp_maybe_deferred_bind(handle, AF_INET6, UV_UDP_REUSEADDR);
+ if (err)
+ return err;
+
+ memset(&mreq, 0, sizeof(mreq));
+
+ if (interface_addr != NULL) {
+ err = uv_ip6_addr(interface_addr, 0, &addr6);
+ if (err)
+ return err;
+ mreq.gsr_interface = addr6.sin6_scope_id;
+ } else {
+ mreq.gsr_interface = 0;
+ }
+
+ STATIC_ASSERT(sizeof(mreq.gsr_group) >= sizeof(*multicast_addr));
+ STATIC_ASSERT(sizeof(mreq.gsr_source) >= sizeof(*source_addr));
+ memcpy(&mreq.gsr_group, multicast_addr, sizeof(*multicast_addr));
+ memcpy(&mreq.gsr_source, source_addr, sizeof(*source_addr));
+
+ if (membership == UV_JOIN_GROUP)
+ optname = MCAST_JOIN_SOURCE_GROUP;
+ else if (membership == UV_LEAVE_GROUP)
+ optname = MCAST_LEAVE_SOURCE_GROUP;
+ else
+ return UV_EINVAL;
+
+ if (setsockopt(handle->io_watcher.fd,
+ IPPROTO_IPV6,
+ optname,
+ &mreq,
+ sizeof(mreq))) {
+ return UV__ERR(errno);
+ }
+
+ return 0;
+}
+#endif
+
+
+int uv__udp_init_ex(uv_loop_t* loop,
+ uv_udp_t* handle,
+ unsigned flags,
+ int domain) {
+ int fd;
+
+ fd = -1;
+ if (domain != AF_UNSPEC) {
+ fd = uv__socket(domain, SOCK_DGRAM, 0);
+ if (fd < 0)
+ return fd;
+ }
+
+ uv__handle_init(loop, (uv_handle_t*)handle, UV_UDP);
+ handle->alloc_cb = NULL;
+ handle->recv_cb = NULL;
+ handle->send_queue_size = 0;
+ handle->send_queue_count = 0;
+ uv__io_init(&handle->io_watcher, uv__udp_io, fd);
+ QUEUE_INIT(&handle->write_queue);
+ QUEUE_INIT(&handle->write_completed_queue);
+
+ return 0;
+}
+
+
+int uv_udp_using_recvmmsg(const uv_udp_t* handle) {
+#if HAVE_MMSG
+ if (handle->flags & UV_HANDLE_UDP_RECVMMSG) {
+ uv_once(&once, uv__udp_mmsg_init);
+ return uv__recvmmsg_avail;
+ }
+#endif
+ return 0;
+}
+
+
+int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock) {
+ int err;
+
+ /* Check for already active socket. */
+ if (handle->io_watcher.fd != -1)
+ return UV_EBUSY;
+
+ if (uv__fd_exists(handle->loop, sock))
+ return UV_EEXIST;
+
+ err = uv__nonblock(sock, 1);
+ if (err)
+ return err;
+
+ err = uv__set_reuse(sock);
+ if (err)
+ return err;
+
+ handle->io_watcher.fd = sock;
+ if (uv__udp_is_connected(handle))
+ handle->flags |= UV_HANDLE_UDP_CONNECTED;
+
+ return 0;
+}
+
+
+int uv_udp_set_membership(uv_udp_t* handle,
+ const char* multicast_addr,
+ const char* interface_addr,
+ uv_membership membership) {
+ int err;
+ struct sockaddr_in addr4;
+ struct sockaddr_in6 addr6;
+
+ if (uv_ip4_addr(multicast_addr, 0, &addr4) == 0) {
+ err = uv__udp_maybe_deferred_bind(handle, AF_INET, UV_UDP_REUSEADDR);
+ if (err)
+ return err;
+ return uv__udp_set_membership4(handle, &addr4, interface_addr, membership);
+ } else if (uv_ip6_addr(multicast_addr, 0, &addr6) == 0) {
+ err = uv__udp_maybe_deferred_bind(handle, AF_INET6, UV_UDP_REUSEADDR);
+ if (err)
+ return err;
+ return uv__udp_set_membership6(handle, &addr6, interface_addr, membership);
+ } else {
+ return UV_EINVAL;
+ }
+}
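
As an illustration of the deferred-bind-plus-setsockopt flow above, a hypothetical receiver joining an IPv4 group might do the following (join_group, the group address and the port are example values, not part of the diff):

#include <uv.h>

int join_group(uv_udp_t* udp) {
  struct sockaddr_in addr;
  int rc;

  /* Bind to the port the group traffic arrives on; UV_UDP_REUSEADDR lets
   * several listeners share it, mirroring the deferred-bind flags above. */
  rc = uv_ip4_addr("0.0.0.0", 9123, &addr);
  if (rc != 0)
    return rc;
  rc = uv_udp_bind(udp, (const struct sockaddr*) &addr, UV_UDP_REUSEADDR);
  if (rc != 0)
    return rc;

  /* NULL interface_addr selects the default interface (INADDR_ANY). */
  return uv_udp_set_membership(udp, "239.255.0.1", NULL, UV_JOIN_GROUP);
}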
+
+
+int uv_udp_set_source_membership(uv_udp_t* handle,
+ const char* multicast_addr,
+ const char* interface_addr,
+ const char* source_addr,
+ uv_membership membership) {
+#if !defined(__OpenBSD__) && \
+ !defined(__NetBSD__) && \
+ !defined(__ANDROID__) && \
+ !defined(__DragonFly__) && \
+ !defined(__QNX__)
+ int err;
+ union uv__sockaddr mcast_addr;
+ union uv__sockaddr src_addr;
+
+ err = uv_ip4_addr(multicast_addr, 0, &mcast_addr.in);
+ if (err) {
+ err = uv_ip6_addr(multicast_addr, 0, &mcast_addr.in6);
+ if (err)
+ return err;
+ err = uv_ip6_addr(source_addr, 0, &src_addr.in6);
+ if (err)
+ return err;
+ return uv__udp_set_source_membership6(handle,
+ &mcast_addr.in6,
+ interface_addr,
+ &src_addr.in6,
+ membership);
+ }
+
+ err = uv_ip4_addr(source_addr, 0, &src_addr.in);
+ if (err)
+ return err;
+ return uv__udp_set_source_membership4(handle,
+ &mcast_addr.in,
+ interface_addr,
+ &src_addr.in,
+ membership);
+#else
+ return UV_ENOSYS;
+#endif
+}
+
+
+static int uv__setsockopt(uv_udp_t* handle,
+ int option4,
+ int option6,
+ const void* val,
+ socklen_t size) {
+ int r;
+
+ if (handle->flags & UV_HANDLE_IPV6)
+ r = setsockopt(handle->io_watcher.fd,
+ IPPROTO_IPV6,
+ option6,
+ val,
+ size);
+ else
+ r = setsockopt(handle->io_watcher.fd,
+ IPPROTO_IP,
+ option4,
+ val,
+ size);
+ if (r)
+ return UV__ERR(errno);
+
+ return 0;
+}
+
+static int uv__setsockopt_maybe_char(uv_udp_t* handle,
+ int option4,
+ int option6,
+ int val) {
+#if defined(__sun) || defined(_AIX) || defined(__MVS__)
+ char arg = val;
+#elif defined(__OpenBSD__)
+ unsigned char arg = val;
+#else
+ int arg = val;
+#endif
+
+ if (val < 0 || val > 255)
+ return UV_EINVAL;
+
+ return uv__setsockopt(handle, option4, option6, &arg, sizeof(arg));
+}
+
+
+int uv_udp_set_broadcast(uv_udp_t* handle, int on) {
+ if (setsockopt(handle->io_watcher.fd,
+ SOL_SOCKET,
+ SO_BROADCAST,
+ &on,
+ sizeof(on))) {
+ return UV__ERR(errno);
+ }
+
+ return 0;
+}
+
+
+int uv_udp_set_ttl(uv_udp_t* handle, int ttl) {
+ if (ttl < 1 || ttl > 255)
+ return UV_EINVAL;
+
+#if defined(__MVS__)
+ if (!(handle->flags & UV_HANDLE_IPV6))
+ return UV_ENOTSUP; /* zOS does not support setting ttl for IPv4 */
+#endif
+
+/*
+ * On Solaris and derivatives such as SmartOS, the length of socket options
+ * is sizeof(int) for IP_TTL and IPV6_UNICAST_HOPS,
+ * so hardcode the size of these options on this platform,
+ * and use the general uv__setsockopt_maybe_char call on other platforms.
+ */
+#if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
+ defined(__MVS__) || defined(__QNX__)
+
+ return uv__setsockopt(handle,
+ IP_TTL,
+ IPV6_UNICAST_HOPS,
+ &ttl,
+ sizeof(ttl));
+
+#else /* !(defined(__sun) || defined(_AIX) || defined (__OpenBSD__) ||
+ defined(__MVS__) || defined(__QNX__)) */
+
+ return uv__setsockopt_maybe_char(handle,
+ IP_TTL,
+ IPV6_UNICAST_HOPS,
+ ttl);
+
+#endif /* defined(__sun) || defined(_AIX) || defined (__OpenBSD__) ||
+ defined(__MVS__) || defined(__QNX__) */
+}
+
+
+int uv_udp_set_multicast_ttl(uv_udp_t* handle, int ttl) {
+/*
+ * On Solaris and derivatives such as SmartOS, the length of socket options
+ * is sizeof(int) for IPV6_MULTICAST_HOPS and sizeof(char) for
+ * IP_MULTICAST_TTL, so hardcode the size of the option in the IPv6 case,
+ * and use the general uv__setsockopt_maybe_char call otherwise.
+ */
+#if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
+ defined(__MVS__) || defined(__QNX__)
+ if (handle->flags & UV_HANDLE_IPV6)
+ return uv__setsockopt(handle,
+ IP_MULTICAST_TTL,
+ IPV6_MULTICAST_HOPS,
+ &ttl,
+ sizeof(ttl));
+#endif /* defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
+ defined(__MVS__) || defined(__QNX__) */
+
+ return uv__setsockopt_maybe_char(handle,
+ IP_MULTICAST_TTL,
+ IPV6_MULTICAST_HOPS,
+ ttl);
+}
+
+
+int uv_udp_set_multicast_loop(uv_udp_t* handle, int on) {
+/*
+ * On Solaris and derivatives such as SmartOS, the length of socket options
+ * is sizeof(int) for IPV6_MULTICAST_LOOP and sizeof(char) for
+ * IP_MULTICAST_LOOP, so hardcode the size of the option in the IPv6 case,
+ * and use the general uv__setsockopt_maybe_char call otherwise.
+ */
+#if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
+ defined(__MVS__) || defined(__QNX__)
+ if (handle->flags & UV_HANDLE_IPV6)
+ return uv__setsockopt(handle,
+ IP_MULTICAST_LOOP,
+ IPV6_MULTICAST_LOOP,
+ &on,
+ sizeof(on));
+#endif /* defined(__sun) || defined(_AIX) || defined(__OpenBSD__) ||
+ defined(__MVS__) || defined(__QNX__) */
+
+ return uv__setsockopt_maybe_char(handle,
+ IP_MULTICAST_LOOP,
+ IPV6_MULTICAST_LOOP,
+ on);
+}
+
+int uv_udp_set_multicast_interface(uv_udp_t* handle, const char* interface_addr) {
+ struct sockaddr_storage addr_st;
+ struct sockaddr_in* addr4;
+ struct sockaddr_in6* addr6;
+
+ addr4 = (struct sockaddr_in*) &addr_st;
+ addr6 = (struct sockaddr_in6*) &addr_st;
+
+ if (!interface_addr) {
+ memset(&addr_st, 0, sizeof addr_st);
+ if (handle->flags & UV_HANDLE_IPV6) {
+ addr_st.ss_family = AF_INET6;
+ addr6->sin6_scope_id = 0;
+ } else {
+ addr_st.ss_family = AF_INET;
+ addr4->sin_addr.s_addr = htonl(INADDR_ANY);
+ }
+ } else if (uv_ip4_addr(interface_addr, 0, addr4) == 0) {
+ /* nothing, address was parsed */
+ } else if (uv_ip6_addr(interface_addr, 0, addr6) == 0) {
+ /* nothing, address was parsed */
+ } else {
+ return UV_EINVAL;
+ }
+
+ if (addr_st.ss_family == AF_INET) {
+ if (setsockopt(handle->io_watcher.fd,
+ IPPROTO_IP,
+ IP_MULTICAST_IF,
+ (void*) &addr4->sin_addr,
+ sizeof(addr4->sin_addr)) == -1) {
+ return UV__ERR(errno);
+ }
+ } else if (addr_st.ss_family == AF_INET6) {
+ if (setsockopt(handle->io_watcher.fd,
+ IPPROTO_IPV6,
+ IPV6_MULTICAST_IF,
+ &addr6->sin6_scope_id,
+ sizeof(addr6->sin6_scope_id)) == -1) {
+ return UV__ERR(errno);
+ }
+ } else {
+ assert(0 && "unexpected address family");
+ abort();
+ }
+
+ return 0;
+}
+
+int uv_udp_getpeername(const uv_udp_t* handle,
+ struct sockaddr* name,
+ int* namelen) {
+
+ return uv__getsockpeername((const uv_handle_t*) handle,
+ getpeername,
+ name,
+ namelen);
+}
+
+int uv_udp_getsockname(const uv_udp_t* handle,
+ struct sockaddr* name,
+ int* namelen) {
+
+ return uv__getsockpeername((const uv_handle_t*) handle,
+ getsockname,
+ name,
+ namelen);
+}
+
+
+int uv__udp_recv_start(uv_udp_t* handle,
+ uv_alloc_cb alloc_cb,
+ uv_udp_recv_cb recv_cb) {
+ int err;
+
+ if (alloc_cb == NULL || recv_cb == NULL)
+ return UV_EINVAL;
+
+ if (uv__io_active(&handle->io_watcher, POLLIN))
+ return UV_EALREADY; /* FIXME(bnoordhuis) Should be UV_EBUSY. */
+
+ err = uv__udp_maybe_deferred_bind(handle, AF_INET, 0);
+ if (err)
+ return err;
+
+ handle->alloc_cb = alloc_cb;
+ handle->recv_cb = recv_cb;
+
+ uv__io_start(handle->loop, &handle->io_watcher, POLLIN);
+ uv__handle_start(handle);
+
+ return 0;
+}
+
+
+int uv__udp_recv_stop(uv_udp_t* handle) {
+ uv__io_stop(handle->loop, &handle->io_watcher, POLLIN);
+
+ if (!uv__io_active(&handle->io_watcher, POLLOUT))
+ uv__handle_stop(handle);
+
+ handle->alloc_cb = NULL;
+ handle->recv_cb = NULL;
+
+ return 0;
+}
diff --git a/Utilities/cmlibuv/src/uv-common.c b/Utilities/cmlibuv/src/uv-common.c
new file mode 100644
index 0000000..f986d75
--- /dev/null
+++ b/Utilities/cmlibuv/src/uv-common.c
@@ -0,0 +1,930 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "uv-common.h"
+
+#include <assert.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <stddef.h> /* NULL */
+#include <stdio.h>
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+#if defined(_WIN32)
+# include <malloc.h> /* malloc */
+#else
+# include <net/if.h> /* if_nametoindex */
+# include <sys/un.h> /* AF_UNIX, sockaddr_un */
+#endif
+
+
+typedef struct {
+ uv_malloc_func local_malloc;
+ uv_realloc_func local_realloc;
+ uv_calloc_func local_calloc;
+ uv_free_func local_free;
+} uv__allocator_t;
+
+static uv__allocator_t uv__allocator = {
+ malloc,
+ realloc,
+ calloc,
+ free,
+};
+
+char* uv__strdup(const char* s) {
+ size_t len = strlen(s) + 1;
+ char* m = uv__malloc(len);
+ if (m == NULL)
+ return NULL;
+ return memcpy(m, s, len);
+}
+
+char* uv__strndup(const char* s, size_t n) {
+ char* m;
+ size_t len = strlen(s);
+ if (n < len)
+ len = n;
+ m = uv__malloc(len + 1);
+ if (m == NULL)
+ return NULL;
+ m[len] = '\0';
+ return memcpy(m, s, len);
+}
+
+void* uv__malloc(size_t size) {
+ if (size > 0)
+ return uv__allocator.local_malloc(size);
+ return NULL;
+}
+
+void uv__free(void* ptr) {
+ int saved_errno;
+
+ /* Libuv expects that free() does not clobber errno. The system allocator
+ * honors that assumption but custom allocators may not be so careful.
+ */
+ saved_errno = errno;
+ uv__allocator.local_free(ptr);
+ errno = saved_errno;
+}
+
+void* uv__calloc(size_t count, size_t size) {
+ return uv__allocator.local_calloc(count, size);
+}
+
+void* uv__realloc(void* ptr, size_t size) {
+ if (size > 0)
+ return uv__allocator.local_realloc(ptr, size);
+ uv__free(ptr);
+ return NULL;
+}
+
+void* uv__reallocf(void* ptr, size_t size) {
+ void* newptr;
+
+ newptr = uv__realloc(ptr, size);
+ if (newptr == NULL)
+ if (size > 0)
+ uv__free(ptr);
+
+ return newptr;
+}
+
+int uv_replace_allocator(uv_malloc_func malloc_func,
+ uv_realloc_func realloc_func,
+ uv_calloc_func calloc_func,
+ uv_free_func free_func) {
+ if (malloc_func == NULL || realloc_func == NULL ||
+ calloc_func == NULL || free_func == NULL) {
+ return UV_EINVAL;
+ }
+
+ uv__allocator.local_malloc = malloc_func;
+ uv__allocator.local_realloc = realloc_func;
+ uv__allocator.local_calloc = calloc_func;
+ uv__allocator.local_free = free_func;
+
+ return 0;
+}
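
A minimal sketch of how a caller might use uv_replace_allocator(), assuming it is installed before libuv allocates anything; my_malloc and friends are placeholder pass-through wrappers (not part of the diff) where instrumentation could be added:

#include <stdlib.h>
#include <uv.h>

/* Pass-through wrappers; real instrumentation (counters, tracing) would go
 * inside them.  They should be thread safe and must not clobber errno. */
static void* my_malloc(size_t size) { return malloc(size); }
static void* my_realloc(void* ptr, size_t size) { return realloc(ptr, size); }
static void* my_calloc(size_t count, size_t size) { return calloc(count, size); }
static void my_free(void* ptr) { free(ptr); }

int install_allocator(void) {
  /* All four functions are required; NULL entries yield UV_EINVAL. */
  return uv_replace_allocator(my_malloc, my_realloc, my_calloc, my_free);
}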
+
+#define XX(uc, lc) case UV_##uc: return sizeof(uv_##lc##_t);
+
+size_t uv_handle_size(uv_handle_type type) {
+ switch (type) {
+ UV_HANDLE_TYPE_MAP(XX)
+ default:
+ return -1;
+ }
+}
+
+size_t uv_req_size(uv_req_type type) {
+ switch(type) {
+ UV_REQ_TYPE_MAP(XX)
+ default:
+ return -1;
+ }
+}
+
+#undef XX
+
+
+size_t uv_loop_size(void) {
+ return sizeof(uv_loop_t);
+}
+
+
+uv_buf_t uv_buf_init(char* base, unsigned int len) {
+ uv_buf_t buf;
+ buf.base = base;
+ buf.len = len;
+ return buf;
+}
+
+
+static const char* uv__unknown_err_code(int err) {
+ char buf[32];
+ char* copy;
+
+ snprintf(buf, sizeof(buf), "Unknown system error %d", err);
+ copy = uv__strdup(buf);
+
+ return copy != NULL ? copy : "Unknown system error";
+}
+
+#define UV_ERR_NAME_GEN_R(name, _) \
+case UV_## name: \
+ uv__strscpy(buf, #name, buflen); break;
+char* uv_err_name_r(int err, char* buf, size_t buflen) {
+ switch (err) {
+ UV_ERRNO_MAP(UV_ERR_NAME_GEN_R)
+ default: snprintf(buf, buflen, "Unknown system error %d", err);
+ }
+ return buf;
+}
+#undef UV_ERR_NAME_GEN_R
+
+
+#define UV_ERR_NAME_GEN(name, _) case UV_ ## name: return #name;
+const char* uv_err_name(int err) {
+ switch (err) {
+ UV_ERRNO_MAP(UV_ERR_NAME_GEN)
+ }
+ return uv__unknown_err_code(err);
+}
+#undef UV_ERR_NAME_GEN
+
+
+#define UV_STRERROR_GEN_R(name, msg) \
+case UV_ ## name: \
+ snprintf(buf, buflen, "%s", msg); break;
+char* uv_strerror_r(int err, char* buf, size_t buflen) {
+ switch (err) {
+ UV_ERRNO_MAP(UV_STRERROR_GEN_R)
+ default: snprintf(buf, buflen, "Unknown system error %d", err);
+ }
+ return buf;
+}
+#undef UV_STRERROR_GEN_R
+
+
+#define UV_STRERROR_GEN(name, msg) case UV_ ## name: return msg;
+const char* uv_strerror(int err) {
+ switch (err) {
+ UV_ERRNO_MAP(UV_STRERROR_GEN)
+ }
+ return uv__unknown_err_code(err);
+}
+#undef UV_STRERROR_GEN
+
+#if !defined(CMAKE_BOOTSTRAP) || defined(_WIN32)
+
+int uv_ip4_addr(const char* ip, int port, struct sockaddr_in* addr) {
+ memset(addr, 0, sizeof(*addr));
+ addr->sin_family = AF_INET;
+ addr->sin_port = htons(port);
+#ifdef SIN6_LEN
+ addr->sin_len = sizeof(*addr);
+#endif
+ return uv_inet_pton(AF_INET, ip, &(addr->sin_addr.s_addr));
+}
+
+
+int uv_ip6_addr(const char* ip, int port, struct sockaddr_in6* addr) {
+ char address_part[40];
+ size_t address_part_size;
+ const char* zone_index;
+
+ memset(addr, 0, sizeof(*addr));
+ addr->sin6_family = AF_INET6;
+ addr->sin6_port = htons(port);
+#ifdef SIN6_LEN
+ addr->sin6_len = sizeof(*addr);
+#endif
+
+ zone_index = strchr(ip, '%');
+ if (zone_index != NULL) {
+ address_part_size = zone_index - ip;
+ if (address_part_size >= sizeof(address_part))
+ address_part_size = sizeof(address_part) - 1;
+
+ memcpy(address_part, ip, address_part_size);
+ address_part[address_part_size] = '\0';
+ ip = address_part;
+
+ zone_index++; /* skip '%' */
+ /* NOTE: unknown interface (id=0) is silently ignored */
+#ifdef _WIN32
+ addr->sin6_scope_id = atoi(zone_index);
+#else
+ addr->sin6_scope_id = if_nametoindex(zone_index);
+#endif
+ }
+
+ return uv_inet_pton(AF_INET6, ip, &addr->sin6_addr);
+}
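
The zone-index branch above means a scoped link-local address parses directly; a small hypothetical helper (parse_link_local, with "eth0" as an example interface name) illustrates it:

#include <uv.h>

int parse_link_local(struct sockaddr_in6* out) {
  /* The "%eth0" zone is resolved with if_nametoindex() on Unix and
   * atoi() on Windows; "eth0" is only an example interface name. */
  return uv_ip6_addr("fe80::1%eth0", 8080, out);
}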
+
+
+int uv_ip4_name(const struct sockaddr_in* src, char* dst, size_t size) {
+ return uv_inet_ntop(AF_INET, &src->sin_addr, dst, size);
+}
+
+
+int uv_ip6_name(const struct sockaddr_in6* src, char* dst, size_t size) {
+ return uv_inet_ntop(AF_INET6, &src->sin6_addr, dst, size);
+}
+
+
+int uv_tcp_bind(uv_tcp_t* handle,
+ const struct sockaddr* addr,
+ unsigned int flags) {
+ unsigned int addrlen;
+
+ if (handle->type != UV_TCP)
+ return UV_EINVAL;
+
+ if (addr->sa_family == AF_INET)
+ addrlen = sizeof(struct sockaddr_in);
+ else if (addr->sa_family == AF_INET6)
+ addrlen = sizeof(struct sockaddr_in6);
+ else
+ return UV_EINVAL;
+
+ return uv__tcp_bind(handle, addr, addrlen, flags);
+}
+
+
+int uv_udp_init_ex(uv_loop_t* loop, uv_udp_t* handle, unsigned flags) {
+ unsigned extra_flags;
+ int domain;
+ int rc;
+
+ /* Use the lower 8 bits for the domain. */
+ domain = flags & 0xFF;
+ if (domain != AF_INET && domain != AF_INET6 && domain != AF_UNSPEC)
+ return UV_EINVAL;
+
+ /* Use the higher bits for extra flags. */
+ extra_flags = flags & ~0xFF;
+ if (extra_flags & ~UV_UDP_RECVMMSG)
+ return UV_EINVAL;
+
+ rc = uv__udp_init_ex(loop, handle, flags, domain);
+
+ if (rc == 0)
+ if (extra_flags & UV_UDP_RECVMMSG)
+ handle->flags |= UV_HANDLE_UDP_RECVMMSG;
+
+ return rc;
+}
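
To make the flag layout concrete: the low byte selects the address family and the remaining bits carry extras such as UV_UDP_RECVMMSG, so a hypothetical helper (make_mmsg_udp, illustrative only) opting into recvmmsg batching is just:

#include <uv.h>

int make_mmsg_udp(uv_loop_t* loop, uv_udp_t* udp) {
  /* AF_INET occupies the low byte; UV_UDP_RECVMMSG is one of the extra
   * flag bits above it, enabling the recvmmsg receive path when available. */
  return uv_udp_init_ex(loop, udp, AF_INET | UV_UDP_RECVMMSG);
}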
+
+
+int uv_udp_init(uv_loop_t* loop, uv_udp_t* handle) {
+ return uv_udp_init_ex(loop, handle, AF_UNSPEC);
+}
+
+
+int uv_udp_bind(uv_udp_t* handle,
+ const struct sockaddr* addr,
+ unsigned int flags) {
+ unsigned int addrlen;
+
+ if (handle->type != UV_UDP)
+ return UV_EINVAL;
+
+ if (addr->sa_family == AF_INET)
+ addrlen = sizeof(struct sockaddr_in);
+ else if (addr->sa_family == AF_INET6)
+ addrlen = sizeof(struct sockaddr_in6);
+ else
+ return UV_EINVAL;
+
+ return uv__udp_bind(handle, addr, addrlen, flags);
+}
+
+
+int uv_tcp_connect(uv_connect_t* req,
+ uv_tcp_t* handle,
+ const struct sockaddr* addr,
+ uv_connect_cb cb) {
+ unsigned int addrlen;
+
+ if (handle->type != UV_TCP)
+ return UV_EINVAL;
+
+ if (addr->sa_family == AF_INET)
+ addrlen = sizeof(struct sockaddr_in);
+ else if (addr->sa_family == AF_INET6)
+ addrlen = sizeof(struct sockaddr_in6);
+ else
+ return UV_EINVAL;
+
+ return uv__tcp_connect(req, handle, addr, addrlen, cb);
+}
+
+
+int uv_udp_connect(uv_udp_t* handle, const struct sockaddr* addr) {
+ unsigned int addrlen;
+
+ if (handle->type != UV_UDP)
+ return UV_EINVAL;
+
+ /* Disconnect the handle */
+ if (addr == NULL) {
+ if (!(handle->flags & UV_HANDLE_UDP_CONNECTED))
+ return UV_ENOTCONN;
+
+ return uv__udp_disconnect(handle);
+ }
+
+ if (addr->sa_family == AF_INET)
+ addrlen = sizeof(struct sockaddr_in);
+ else if (addr->sa_family == AF_INET6)
+ addrlen = sizeof(struct sockaddr_in6);
+ else
+ return UV_EINVAL;
+
+ if (handle->flags & UV_HANDLE_UDP_CONNECTED)
+ return UV_EISCONN;
+
+ return uv__udp_connect(handle, addr, addrlen);
+}
+
+
+int uv__udp_is_connected(uv_udp_t* handle) {
+ struct sockaddr_storage addr;
+ int addrlen;
+ if (handle->type != UV_UDP)
+ return 0;
+
+ addrlen = sizeof(addr);
+ if (uv_udp_getpeername(handle, (struct sockaddr*) &addr, &addrlen) != 0)
+ return 0;
+
+ return addrlen > 0;
+}
+
+
+int uv__udp_check_before_send(uv_udp_t* handle, const struct sockaddr* addr) {
+ unsigned int addrlen;
+
+ if (handle->type != UV_UDP)
+ return UV_EINVAL;
+
+ if (addr != NULL && (handle->flags & UV_HANDLE_UDP_CONNECTED))
+ return UV_EISCONN;
+
+ if (addr == NULL && !(handle->flags & UV_HANDLE_UDP_CONNECTED))
+ return UV_EDESTADDRREQ;
+
+ if (addr != NULL) {
+ if (addr->sa_family == AF_INET)
+ addrlen = sizeof(struct sockaddr_in);
+ else if (addr->sa_family == AF_INET6)
+ addrlen = sizeof(struct sockaddr_in6);
+#if defined(AF_UNIX) && !defined(_WIN32)
+ else if (addr->sa_family == AF_UNIX)
+ addrlen = sizeof(struct sockaddr_un);
+#endif
+ else
+ return UV_EINVAL;
+ } else {
+ addrlen = 0;
+ }
+
+ return addrlen;
+}
+
+
+int uv_udp_send(uv_udp_send_t* req,
+ uv_udp_t* handle,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ const struct sockaddr* addr,
+ uv_udp_send_cb send_cb) {
+ int addrlen;
+
+ addrlen = uv__udp_check_before_send(handle, addr);
+ if (addrlen < 0)
+ return addrlen;
+
+ return uv__udp_send(req, handle, bufs, nbufs, addr, addrlen, send_cb);
+}
+
+
+int uv_udp_try_send(uv_udp_t* handle,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ const struct sockaddr* addr) {
+ int addrlen;
+
+ addrlen = uv__udp_check_before_send(handle, addr);
+ if (addrlen < 0)
+ return addrlen;
+
+ return uv__udp_try_send(handle, bufs, nbufs, addr, addrlen);
+}
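
A common pattern built on the check above is to attempt a synchronous send and fall back to the queued path on UV_EAGAIN; a sketch (try_then_queue is a made-up name, not part of the diff) might look like:

#include <uv.h>

int try_then_queue(uv_udp_t* udp,
                   uv_udp_send_t* req,
                   const uv_buf_t* buf,
                   const struct sockaddr* dest,
                   uv_udp_send_cb cb) {
  int rc;

  rc = uv_udp_try_send(udp, buf, 1, dest);
  if (rc >= 0)
    return 0;                 /* rc bytes went out synchronously */
  if (rc != UV_EAGAIN)
    return rc;                /* hard error */

  /* Would have blocked (or the queue is non-empty): queue it instead. */
  return uv_udp_send(req, udp, buf, 1, dest, cb);
}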
+
+
+int uv_udp_recv_start(uv_udp_t* handle,
+ uv_alloc_cb alloc_cb,
+ uv_udp_recv_cb recv_cb) {
+ if (handle->type != UV_UDP || alloc_cb == NULL || recv_cb == NULL)
+ return UV_EINVAL;
+ else
+ return uv__udp_recv_start(handle, alloc_cb, recv_cb);
+}
+
+
+int uv_udp_recv_stop(uv_udp_t* handle) {
+ if (handle->type != UV_UDP)
+ return UV_EINVAL;
+ else
+ return uv__udp_recv_stop(handle);
+}
+
+#endif
+
+void uv_walk(uv_loop_t* loop, uv_walk_cb walk_cb, void* arg) {
+ QUEUE queue;
+ QUEUE* q;
+ uv_handle_t* h;
+
+ QUEUE_MOVE(&loop->handle_queue, &queue);
+ while (!QUEUE_EMPTY(&queue)) {
+ q = QUEUE_HEAD(&queue);
+ h = QUEUE_DATA(q, uv_handle_t, handle_queue);
+
+ QUEUE_REMOVE(q);
+ QUEUE_INSERT_TAIL(&loop->handle_queue, q);
+
+ if (h->flags & UV_HANDLE_INTERNAL) continue;
+ walk_cb(h, arg);
+ }
+}
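
A typical use of uv_walk() is the shutdown idiom of closing every remaining handle; a hypothetical sketch (close_one and close_all_handles are illustrative names, not part of the diff):

#include <uv.h>

static void close_one(uv_handle_t* handle, void* arg) {
  (void) arg;
  if (!uv_is_closing(handle))
    uv_close(handle, NULL);
}

void close_all_handles(uv_loop_t* loop) {
  uv_walk(loop, close_one, NULL);
  /* Run the loop once more so the close callbacks actually fire. */
  uv_run(loop, UV_RUN_DEFAULT);
}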
+
+
+static void uv__print_handles(uv_loop_t* loop, int only_active, FILE* stream) {
+ const char* type;
+ QUEUE* q;
+ uv_handle_t* h;
+
+ if (loop == NULL)
+ loop = uv_default_loop();
+
+ QUEUE_FOREACH(q, &loop->handle_queue) {
+ h = QUEUE_DATA(q, uv_handle_t, handle_queue);
+
+ if (only_active && !uv__is_active(h))
+ continue;
+
+ switch (h->type) {
+#define X(uc, lc) case UV_##uc: type = #lc; break;
+ UV_HANDLE_TYPE_MAP(X)
+#undef X
+ default: type = "<unknown>";
+ }
+
+ fprintf(stream,
+ "[%c%c%c] %-8s %p\n",
+ "R-"[!(h->flags & UV_HANDLE_REF)],
+ "A-"[!(h->flags & UV_HANDLE_ACTIVE)],
+ "I-"[!(h->flags & UV_HANDLE_INTERNAL)],
+ type,
+ (void*)h);
+ }
+}
+
+
+void uv_print_all_handles(uv_loop_t* loop, FILE* stream) {
+ uv__print_handles(loop, 0, stream);
+}
+
+
+void uv_print_active_handles(uv_loop_t* loop, FILE* stream) {
+ uv__print_handles(loop, 1, stream);
+}
+
+
+void uv_ref(uv_handle_t* handle) {
+ uv__handle_ref(handle);
+}
+
+
+void uv_unref(uv_handle_t* handle) {
+ uv__handle_unref(handle);
+}
+
+
+int uv_has_ref(const uv_handle_t* handle) {
+ return uv__has_ref(handle);
+}
+
+
+void uv_stop(uv_loop_t* loop) {
+ loop->stop_flag = 1;
+}
+
+
+uint64_t uv_now(const uv_loop_t* loop) {
+ return loop->time;
+}
+
+
+
+size_t uv__count_bufs(const uv_buf_t bufs[], unsigned int nbufs) {
+ unsigned int i;
+ size_t bytes;
+
+ bytes = 0;
+ for (i = 0; i < nbufs; i++)
+ bytes += (size_t) bufs[i].len;
+
+ return bytes;
+}
+
+int uv_recv_buffer_size(uv_handle_t* handle, int* value) {
+ return uv__socket_sockopt(handle, SO_RCVBUF, value);
+}
+
+int uv_send_buffer_size(uv_handle_t* handle, int *value) {
+ return uv__socket_sockopt(handle, SO_SNDBUF, value);
+}
+
+int uv_fs_event_getpath(uv_fs_event_t* handle, char* buffer, size_t* size) {
+ size_t required_len;
+
+ if (!uv__is_active(handle)) {
+ *size = 0;
+ return UV_EINVAL;
+ }
+
+ required_len = strlen(handle->path);
+ if (required_len >= *size) {
+ *size = required_len + 1;
+ return UV_ENOBUFS;
+ }
+
+ memcpy(buffer, handle->path, required_len);
+ *size = required_len;
+ buffer[required_len] = '\0';
+
+ return 0;
+}
+
+/* The windows implementation does not have the same structure layout as
+ * the unix implementation (nbufs is not directly inside req but is
+ * contained in a nested union/struct) so this function locates it.
+*/
+static unsigned int* uv__get_nbufs(uv_fs_t* req) {
+#ifdef _WIN32
+ return &req->fs.info.nbufs;
+#else
+ return &req->nbufs;
+#endif
+}
+
+/* uv_fs_scandir() uses the system allocator to allocate memory on non-Windows
+ * systems. So, the memory should be released using free(). On Windows,
+ * uv__malloc() is used, so use uv__free() to free memory.
+*/
+#ifdef _WIN32
+# define uv__fs_scandir_free uv__free
+#else
+# define uv__fs_scandir_free free
+#endif
+
+void uv__fs_scandir_cleanup(uv_fs_t* req) {
+ uv__dirent_t** dents;
+
+ unsigned int* nbufs = uv__get_nbufs(req);
+
+ dents = req->ptr;
+ if (*nbufs > 0 && *nbufs != (unsigned int) req->result)
+ (*nbufs)--;
+ for (; *nbufs < (unsigned int) req->result; (*nbufs)++)
+ uv__fs_scandir_free(dents[*nbufs]);
+
+ uv__fs_scandir_free(req->ptr);
+ req->ptr = NULL;
+}
+
+
+int uv_fs_scandir_next(uv_fs_t* req, uv_dirent_t* ent) {
+ uv__dirent_t** dents;
+ uv__dirent_t* dent;
+ unsigned int* nbufs;
+
+ /* Check to see if the request succeeded */
+ if (req->result < 0)
+ return req->result;
+
+ /* Ptr will be null if req was canceled or no files found */
+ if (!req->ptr)
+ return UV_EOF;
+
+ nbufs = uv__get_nbufs(req);
+ assert(nbufs);
+
+ dents = req->ptr;
+
+ /* Free previous entity */
+ if (*nbufs > 0)
+ uv__fs_scandir_free(dents[*nbufs - 1]);
+
+ /* End was already reached */
+ if (*nbufs == (unsigned int) req->result) {
+ uv__fs_scandir_free(dents);
+ req->ptr = NULL;
+ return UV_EOF;
+ }
+
+ dent = dents[(*nbufs)++];
+
+ ent->name = dent->d_name;
+ ent->type = uv__fs_get_dirent_type(dent);
+
+ return 0;
+}
+
+uv_dirent_type_t uv__fs_get_dirent_type(uv__dirent_t* dent) {
+ uv_dirent_type_t type;
+
+#ifdef HAVE_DIRENT_TYPES
+ switch (dent->d_type) {
+ case UV__DT_DIR:
+ type = UV_DIRENT_DIR;
+ break;
+ case UV__DT_FILE:
+ type = UV_DIRENT_FILE;
+ break;
+ case UV__DT_LINK:
+ type = UV_DIRENT_LINK;
+ break;
+ case UV__DT_FIFO:
+ type = UV_DIRENT_FIFO;
+ break;
+ case UV__DT_SOCKET:
+ type = UV_DIRENT_SOCKET;
+ break;
+ case UV__DT_CHAR:
+ type = UV_DIRENT_CHAR;
+ break;
+ case UV__DT_BLOCK:
+ type = UV_DIRENT_BLOCK;
+ break;
+ default:
+ type = UV_DIRENT_UNKNOWN;
+ }
+#else
+ type = UV_DIRENT_UNKNOWN;
+#endif
+
+ return type;
+}
+
+void uv__fs_readdir_cleanup(uv_fs_t* req) {
+ uv_dir_t* dir;
+ uv_dirent_t* dirents;
+ int i;
+
+ if (req->ptr == NULL)
+ return;
+
+ dir = req->ptr;
+ dirents = dir->dirents;
+ req->ptr = NULL;
+
+ if (dirents == NULL)
+ return;
+
+ for (i = 0; i < req->result; ++i) {
+ uv__free((char*) dirents[i].name);
+ dirents[i].name = NULL;
+ }
+}
+
+
+int uv_loop_configure(uv_loop_t* loop, uv_loop_option option, ...) {
+ va_list ap;
+ int err;
+
+ va_start(ap, option);
+ /* Any platform-agnostic options should be handled here. */
+ err = uv__loop_configure(loop, option, ap);
+ va_end(ap);
+
+ return err;
+}
+
+
+static uv_loop_t default_loop_struct;
+static uv_loop_t* default_loop_ptr;
+
+
+uv_loop_t* uv_default_loop(void) {
+ if (default_loop_ptr != NULL)
+ return default_loop_ptr;
+
+ if (uv_loop_init(&default_loop_struct))
+ return NULL;
+
+ default_loop_ptr = &default_loop_struct;
+ return default_loop_ptr;
+}
+
+
+uv_loop_t* uv_loop_new(void) {
+ uv_loop_t* loop;
+
+ loop = uv__malloc(sizeof(*loop));
+ if (loop == NULL)
+ return NULL;
+
+ if (uv_loop_init(loop)) {
+ uv__free(loop);
+ return NULL;
+ }
+
+ return loop;
+}
+
+
+int uv_loop_close(uv_loop_t* loop) {
+ QUEUE* q;
+ uv_handle_t* h;
+#ifndef NDEBUG
+ void* saved_data;
+#endif
+
+ if (uv__has_active_reqs(loop))
+ return UV_EBUSY;
+
+ QUEUE_FOREACH(q, &loop->handle_queue) {
+ h = QUEUE_DATA(q, uv_handle_t, handle_queue);
+ if (!(h->flags & UV_HANDLE_INTERNAL))
+ return UV_EBUSY;
+ }
+
+ uv__loop_close(loop);
+
+#ifndef NDEBUG
+ saved_data = loop->data;
+ memset(loop, -1, sizeof(*loop));
+ loop->data = saved_data;
+#endif
+ if (loop == default_loop_ptr)
+ default_loop_ptr = NULL;
+
+ return 0;
+}
+
+
+void uv_loop_delete(uv_loop_t* loop) {
+ uv_loop_t* default_loop;
+ int err;
+
+ default_loop = default_loop_ptr;
+
+ err = uv_loop_close(loop);
+ (void) err; /* Squelch compiler warnings. */
+ assert(err == 0);
+ if (loop != default_loop)
+ uv__free(loop);
+}
+
+
+void uv_os_free_environ(uv_env_item_t* envitems, int count) {
+ int i;
+
+ for (i = 0; i < count; i++) {
+ uv__free(envitems[i].name);
+ }
+
+ uv__free(envitems);
+}
+
+
+void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
+ int i;
+
+ for (i = 0; i < count; i++)
+ uv__free(cpu_infos[i].model);
+
+ uv__free(cpu_infos);
+}
+
+
+#ifdef __GNUC__ /* Also covers __clang__ and __INTEL_COMPILER. */
+__attribute__((destructor))
+#endif
+void uv_library_shutdown(void) {
+ static int was_shutdown;
+
+ if (uv__load_relaxed(&was_shutdown))
+ return;
+
+ uv__process_title_cleanup();
+ uv__signal_cleanup();
+ uv__threadpool_cleanup();
+ uv__store_relaxed(&was_shutdown, 1);
+}
+
+
+void uv__metrics_update_idle_time(uv_loop_t* loop) {
+ uv__loop_metrics_t* loop_metrics;
+ uint64_t entry_time;
+ uint64_t exit_time;
+
+ if (!(uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME))
+ return;
+
+ loop_metrics = uv__get_loop_metrics(loop);
+
+ /* The thread running uv__metrics_update_idle_time() is always the same
+ * thread that sets provider_entry_time. So it's unnecessary to lock before
+ * retrieving this value.
+ */
+ if (loop_metrics->provider_entry_time == 0)
+ return;
+
+ exit_time = uv_hrtime();
+
+ uv_mutex_lock(&loop_metrics->lock);
+ entry_time = loop_metrics->provider_entry_time;
+ loop_metrics->provider_entry_time = 0;
+ loop_metrics->provider_idle_time += exit_time - entry_time;
+ uv_mutex_unlock(&loop_metrics->lock);
+}
+
+
+void uv__metrics_set_provider_entry_time(uv_loop_t* loop) {
+ uv__loop_metrics_t* loop_metrics;
+ uint64_t now;
+
+ if (!(uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME))
+ return;
+
+ now = uv_hrtime();
+ loop_metrics = uv__get_loop_metrics(loop);
+ uv_mutex_lock(&loop_metrics->lock);
+ loop_metrics->provider_entry_time = now;
+ uv_mutex_unlock(&loop_metrics->lock);
+}
+
+
+uint64_t uv_metrics_idle_time(uv_loop_t* loop) {
+ uv__loop_metrics_t* loop_metrics;
+ uint64_t entry_time;
+ uint64_t idle_time;
+
+ loop_metrics = uv__get_loop_metrics(loop);
+ uv_mutex_lock(&loop_metrics->lock);
+ idle_time = loop_metrics->provider_idle_time;
+ entry_time = loop_metrics->provider_entry_time;
+ uv_mutex_unlock(&loop_metrics->lock);
+
+ if (entry_time > 0)
+ idle_time += uv_hrtime() - entry_time;
+ return idle_time;
+}
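
Putting the metrics plumbing above together, a hypothetical driver that enables UV_METRICS_IDLE_TIME and reads the accumulated idle time might look like this (run_with_idle_metrics is a made-up name, not part of the diff):

#include <stdio.h>
#include <uv.h>

int run_with_idle_metrics(uv_loop_t* loop) {
  int rc;

  /* Typically enabled before the loop runs so provider entry/exit times
   * are recorded from the first iteration onward. */
  rc = uv_loop_configure(loop, UV_METRICS_IDLE_TIME);
  if (rc != 0)
    return rc;

  rc = uv_run(loop, UV_RUN_DEFAULT);
  printf("idle time: %llu ns\n",
         (unsigned long long) uv_metrics_idle_time(loop));
  return rc;
}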
diff --git a/Utilities/cmlibuv/src/uv-common.h b/Utilities/cmlibuv/src/uv-common.h
new file mode 100644
index 0000000..e851291
--- /dev/null
+++ b/Utilities/cmlibuv/src/uv-common.h
@@ -0,0 +1,368 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/*
+ * This file is private to libuv. It provides common functionality to both
+ * Windows and Unix backends.
+ */
+
+#ifndef UV_COMMON_H_
+#define UV_COMMON_H_
+
+#include <assert.h>
+#include <stdarg.h>
+#include <stddef.h>
+
+#if defined(_MSC_VER) && _MSC_VER < 1600
+# include "uv/stdint-msvc2008.h"
+#else
+# include <stdint.h>
+#endif
+
+#include "uv.h"
+#include "uv/tree.h"
+#include "queue.h"
+#include "strscpy.h"
+
+#if EDOM > 0
+# define UV__ERR(x) (-(x))
+#else
+# define UV__ERR(x) (x)
+#endif
+
+#if !defined(snprintf) && defined(_MSC_VER) && _MSC_VER < 1900
+extern int snprintf(char*, size_t, const char*, ...);
+#endif
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+#define container_of(ptr, type, member) \
+ ((type *) ((char *) (ptr) - offsetof(type, member)))
+
+#define STATIC_ASSERT(expr) \
+ void uv__static_assert(int static_assert_failed[1 - 2 * !(expr)])
+
+#if defined(__GNUC__) && (__GNUC__ > 4 || __GNUC__ == 4 && __GNUC_MINOR__ >= 7)
+#define uv__load_relaxed(p) __atomic_load_n(p, __ATOMIC_RELAXED)
+#define uv__store_relaxed(p, v) __atomic_store_n(p, v, __ATOMIC_RELAXED)
+#else
+#define uv__load_relaxed(p) (*p)
+#define uv__store_relaxed(p, v) do *p = v; while (0)
+#endif
+
+/* Handle flags. Some flags are specific to Windows or UNIX. */
+enum {
+ /* Used by all handles. */
+ UV_HANDLE_CLOSING = 0x00000001,
+ UV_HANDLE_CLOSED = 0x00000002,
+ UV_HANDLE_ACTIVE = 0x00000004,
+ UV_HANDLE_REF = 0x00000008,
+ UV_HANDLE_INTERNAL = 0x00000010,
+ UV_HANDLE_ENDGAME_QUEUED = 0x00000020,
+
+ /* Used by streams. */
+ UV_HANDLE_LISTENING = 0x00000040,
+ UV_HANDLE_CONNECTION = 0x00000080,
+ UV_HANDLE_SHUTTING = 0x00000100,
+ UV_HANDLE_SHUT = 0x00000200,
+ UV_HANDLE_READ_PARTIAL = 0x00000400,
+ UV_HANDLE_READ_EOF = 0x00000800,
+
+ /* Used by streams and UDP handles. */
+ UV_HANDLE_READING = 0x00001000,
+ UV_HANDLE_BOUND = 0x00002000,
+ UV_HANDLE_READABLE = 0x00004000,
+ UV_HANDLE_WRITABLE = 0x00008000,
+ UV_HANDLE_READ_PENDING = 0x00010000,
+ UV_HANDLE_SYNC_BYPASS_IOCP = 0x00020000,
+ UV_HANDLE_ZERO_READ = 0x00040000,
+ UV_HANDLE_EMULATE_IOCP = 0x00080000,
+ UV_HANDLE_BLOCKING_WRITES = 0x00100000,
+ UV_HANDLE_CANCELLATION_PENDING = 0x00200000,
+
+ /* Used by uv_tcp_t and uv_udp_t handles */
+ UV_HANDLE_IPV6 = 0x00400000,
+
+ /* Only used by uv_tcp_t handles. */
+ UV_HANDLE_TCP_NODELAY = 0x01000000,
+ UV_HANDLE_TCP_KEEPALIVE = 0x02000000,
+ UV_HANDLE_TCP_SINGLE_ACCEPT = 0x04000000,
+ UV_HANDLE_TCP_ACCEPT_STATE_CHANGING = 0x08000000,
+ UV_HANDLE_TCP_SOCKET_CLOSED = 0x10000000,
+ UV_HANDLE_SHARED_TCP_SOCKET = 0x20000000,
+
+ /* Only used by uv_udp_t handles. */
+ UV_HANDLE_UDP_PROCESSING = 0x01000000,
+ UV_HANDLE_UDP_CONNECTED = 0x02000000,
+ UV_HANDLE_UDP_RECVMMSG = 0x04000000,
+
+ /* Only used by uv_pipe_t handles. */
+ UV_HANDLE_NON_OVERLAPPED_PIPE = 0x01000000,
+ UV_HANDLE_PIPESERVER = 0x02000000,
+
+ /* Only used by uv_tty_t handles. */
+ UV_HANDLE_TTY_READABLE = 0x01000000,
+ UV_HANDLE_TTY_RAW = 0x02000000,
+ UV_HANDLE_TTY_SAVED_POSITION = 0x04000000,
+ UV_HANDLE_TTY_SAVED_ATTRIBUTES = 0x08000000,
+
+ /* Only used by uv_signal_t handles. */
+ UV_SIGNAL_ONE_SHOT_DISPATCHED = 0x01000000,
+ UV_SIGNAL_ONE_SHOT = 0x02000000,
+
+ /* Only used by uv_poll_t handles. */
+ UV_HANDLE_POLL_SLOW = 0x01000000
+};
+
+int uv__loop_configure(uv_loop_t* loop, uv_loop_option option, va_list ap);
+
+void uv__loop_close(uv_loop_t* loop);
+
+int uv__tcp_bind(uv_tcp_t* tcp,
+ const struct sockaddr* addr,
+ unsigned int addrlen,
+ unsigned int flags);
+
+int uv__tcp_connect(uv_connect_t* req,
+ uv_tcp_t* handle,
+ const struct sockaddr* addr,
+ unsigned int addrlen,
+ uv_connect_cb cb);
+
+int uv__udp_init_ex(uv_loop_t* loop,
+ uv_udp_t* handle,
+ unsigned flags,
+ int domain);
+
+int uv__udp_bind(uv_udp_t* handle,
+ const struct sockaddr* addr,
+ unsigned int addrlen,
+ unsigned int flags);
+
+int uv__udp_connect(uv_udp_t* handle,
+ const struct sockaddr* addr,
+ unsigned int addrlen);
+
+int uv__udp_disconnect(uv_udp_t* handle);
+
+int uv__udp_is_connected(uv_udp_t* handle);
+
+int uv__udp_send(uv_udp_send_t* req,
+ uv_udp_t* handle,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ const struct sockaddr* addr,
+ unsigned int addrlen,
+ uv_udp_send_cb send_cb);
+
+int uv__udp_try_send(uv_udp_t* handle,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ const struct sockaddr* addr,
+ unsigned int addrlen);
+
+int uv__udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloccb,
+ uv_udp_recv_cb recv_cb);
+
+int uv__udp_recv_stop(uv_udp_t* handle);
+
+void uv__fs_poll_close(uv_fs_poll_t* handle);
+
+int uv__getaddrinfo_translate_error(int sys_err); /* EAI_* error. */
+
+enum uv__work_kind {
+ UV__WORK_CPU,
+ UV__WORK_FAST_IO,
+ UV__WORK_SLOW_IO
+};
+
+void uv__work_submit(uv_loop_t* loop,
+ struct uv__work *w,
+ enum uv__work_kind kind,
+ void (*work)(struct uv__work *w),
+ void (*done)(struct uv__work *w, int status));
+
+void uv__work_done(uv_async_t* handle);
+
+size_t uv__count_bufs(const uv_buf_t bufs[], unsigned int nbufs);
+
+int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value);
+
+void uv__fs_scandir_cleanup(uv_fs_t* req);
+void uv__fs_readdir_cleanup(uv_fs_t* req);
+uv_dirent_type_t uv__fs_get_dirent_type(uv__dirent_t* dent);
+
+int uv__next_timeout(const uv_loop_t* loop);
+void uv__run_timers(uv_loop_t* loop);
+void uv__timer_close(uv_timer_t* handle);
+
+void uv__process_title_cleanup(void);
+void uv__signal_cleanup(void);
+void uv__threadpool_cleanup(void);
+
+#define uv__has_active_reqs(loop) \
+ ((loop)->active_reqs.count > 0)
+
+#define uv__req_register(loop, req) \
+ do { \
+ (loop)->active_reqs.count++; \
+ } \
+ while (0)
+
+#define uv__req_unregister(loop, req) \
+ do { \
+ assert(uv__has_active_reqs(loop)); \
+ (loop)->active_reqs.count--; \
+ } \
+ while (0)
+
+#define uv__has_active_handles(loop) \
+ ((loop)->active_handles > 0)
+
+#define uv__active_handle_add(h) \
+ do { \
+ (h)->loop->active_handles++; \
+ } \
+ while (0)
+
+#define uv__active_handle_rm(h) \
+ do { \
+ (h)->loop->active_handles--; \
+ } \
+ while (0)
+
+#define uv__is_active(h) \
+ (((h)->flags & UV_HANDLE_ACTIVE) != 0)
+
+#define uv__is_closing(h) \
+ (((h)->flags & (UV_HANDLE_CLOSING | UV_HANDLE_CLOSED)) != 0)
+
+#define uv__handle_start(h) \
+ do { \
+ if (((h)->flags & UV_HANDLE_ACTIVE) != 0) break; \
+ (h)->flags |= UV_HANDLE_ACTIVE; \
+ if (((h)->flags & UV_HANDLE_REF) != 0) uv__active_handle_add(h); \
+ } \
+ while (0)
+
+#define uv__handle_stop(h) \
+ do { \
+ if (((h)->flags & UV_HANDLE_ACTIVE) == 0) break; \
+ (h)->flags &= ~UV_HANDLE_ACTIVE; \
+ if (((h)->flags & UV_HANDLE_REF) != 0) uv__active_handle_rm(h); \
+ } \
+ while (0)
+
+#define uv__handle_ref(h) \
+ do { \
+ if (((h)->flags & UV_HANDLE_REF) != 0) break; \
+ (h)->flags |= UV_HANDLE_REF; \
+ if (((h)->flags & UV_HANDLE_CLOSING) != 0) break; \
+ if (((h)->flags & UV_HANDLE_ACTIVE) != 0) uv__active_handle_add(h); \
+ } \
+ while (0)
+
+#define uv__handle_unref(h) \
+ do { \
+ if (((h)->flags & UV_HANDLE_REF) == 0) break; \
+ (h)->flags &= ~UV_HANDLE_REF; \
+ if (((h)->flags & UV_HANDLE_CLOSING) != 0) break; \
+ if (((h)->flags & UV_HANDLE_ACTIVE) != 0) uv__active_handle_rm(h); \
+ } \
+ while (0)
+
+#define uv__has_ref(h) \
+ (((h)->flags & UV_HANDLE_REF) != 0)
+
+#if defined(_WIN32)
+# define uv__handle_platform_init(h) ((h)->u.fd = -1)
+#else
+# define uv__handle_platform_init(h) ((h)->next_closing = NULL)
+#endif
+
+#define uv__handle_init(loop_, h, type_) \
+ do { \
+ (h)->loop = (loop_); \
+ (h)->type = (type_); \
+ (h)->flags = UV_HANDLE_REF; /* Ref the loop when active. */ \
+ QUEUE_INSERT_TAIL(&(loop_)->handle_queue, &(h)->handle_queue); \
+ uv__handle_platform_init(h); \
+ } \
+ while (0)
+
+/* Note: uses an open-coded version of SET_REQ_SUCCESS() because of
+ * a circular dependency between src/uv-common.h and src/win/internal.h.
+ */
+#if defined(_WIN32)
+# define UV_REQ_INIT(req, typ) \
+ do { \
+ (req)->type = (typ); \
+ (req)->u.io.overlapped.Internal = 0; /* SET_REQ_SUCCESS() */ \
+ } \
+ while (0)
+#else
+# define UV_REQ_INIT(req, typ) \
+ do { \
+ (req)->type = (typ); \
+ } \
+ while (0)
+#endif
+
+#define uv__req_init(loop, req, typ) \
+ do { \
+ UV_REQ_INIT(req, typ); \
+ uv__req_register(loop, req); \
+ } \
+ while (0)
+
+#define uv__get_internal_fields(loop) \
+ ((uv__loop_internal_fields_t*) loop->internal_fields)
+
+#define uv__get_loop_metrics(loop) \
+ (&uv__get_internal_fields(loop)->loop_metrics)
+
+/* Allocator prototypes */
+void *uv__calloc(size_t count, size_t size);
+char *uv__strdup(const char* s);
+char *uv__strndup(const char* s, size_t n);
+void* uv__malloc(size_t size);
+void uv__free(void* ptr);
+void* uv__realloc(void* ptr, size_t size);
+void* uv__reallocf(void* ptr, size_t size);
+
+typedef struct uv__loop_metrics_s uv__loop_metrics_t;
+typedef struct uv__loop_internal_fields_s uv__loop_internal_fields_t;
+
+struct uv__loop_metrics_s {
+ uint64_t provider_entry_time;
+ uint64_t provider_idle_time;
+ uv_mutex_t lock;
+};
+
+void uv__metrics_update_idle_time(uv_loop_t* loop);
+void uv__metrics_set_provider_entry_time(uv_loop_t* loop);
+
+struct uv__loop_internal_fields_s {
+ unsigned int flags;
+ uv__loop_metrics_t loop_metrics;
+};
+
+#endif /* UV_COMMON_H_ */
diff --git a/Utilities/cmlibuv/src/uv-data-getter-setters.c b/Utilities/cmlibuv/src/uv-data-getter-setters.c
new file mode 100644
index 0000000..0bd0448
--- /dev/null
+++ b/Utilities/cmlibuv/src/uv-data-getter-setters.c
@@ -0,0 +1,119 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+
+const char* uv_handle_type_name(uv_handle_type type) {
+ switch (type) {
+#define XX(uc,lc) case UV_##uc: return #lc;
+ UV_HANDLE_TYPE_MAP(XX)
+#undef XX
+ case UV_FILE: return "file";
+ case UV_HANDLE_TYPE_MAX:
+ case UV_UNKNOWN_HANDLE: return NULL;
+ }
+ return NULL;
+}
+
+uv_handle_type uv_handle_get_type(const uv_handle_t* handle) {
+ return handle->type;
+}
+
+void* uv_handle_get_data(const uv_handle_t* handle) {
+ return handle->data;
+}
+
+uv_loop_t* uv_handle_get_loop(const uv_handle_t* handle) {
+ return handle->loop;
+}
+
+void uv_handle_set_data(uv_handle_t* handle, void* data) {
+ handle->data = data;
+}
+
+const char* uv_req_type_name(uv_req_type type) {
+ switch (type) {
+#define XX(uc,lc) case UV_##uc: return #lc;
+ UV_REQ_TYPE_MAP(XX)
+#undef XX
+ case UV_REQ_TYPE_MAX:
+ case UV_UNKNOWN_REQ:
+ default: /* UV_REQ_TYPE_PRIVATE */
+ break;
+ }
+ return NULL;
+}
+
+uv_req_type uv_req_get_type(const uv_req_t* req) {
+ return req->type;
+}
+
+void* uv_req_get_data(const uv_req_t* req) {
+ return req->data;
+}
+
+void uv_req_set_data(uv_req_t* req, void* data) {
+ req->data = data;
+}
+
+size_t uv_stream_get_write_queue_size(const uv_stream_t* stream) {
+ return stream->write_queue_size;
+}
+
+size_t uv_udp_get_send_queue_size(const uv_udp_t* handle) {
+ return handle->send_queue_size;
+}
+
+size_t uv_udp_get_send_queue_count(const uv_udp_t* handle) {
+ return handle->send_queue_count;
+}
+
+uv_pid_t uv_process_get_pid(const uv_process_t* proc) {
+ return proc->pid;
+}
+
+uv_fs_type uv_fs_get_type(const uv_fs_t* req) {
+ return req->fs_type;
+}
+
+ssize_t uv_fs_get_result(const uv_fs_t* req) {
+ return req->result;
+}
+
+void* uv_fs_get_ptr(const uv_fs_t* req) {
+ return req->ptr;
+}
+
+const char* uv_fs_get_path(const uv_fs_t* req) {
+ return req->path;
+}
+
+uv_stat_t* uv_fs_get_statbuf(uv_fs_t* req) {
+ return &req->statbuf;
+}
+
+void* uv_loop_get_data(const uv_loop_t* loop) {
+ return loop->data;
+}
+
+void uv_loop_set_data(uv_loop_t* loop, void* data) {
+ loop->data = data;
+}
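
Illustrative aside (not part of the imported file above): the functions in uv-data-getter-setters.c are libuv's public accessor layer. A minimal sketch of how a caller typically stashes per-handle context and reads it back inside a callback; the my_ctx struct and names are hypothetical, and a timer is used only as a convenient concrete handle type.

  #include <stdio.h>
  #include <uv.h>

  struct my_ctx { int hits; };

  static void on_timer(uv_timer_t* timer) {
    /* Read back the pointer stored with uv_handle_set_data(). */
    struct my_ctx* ctx = uv_handle_get_data((uv_handle_t*) timer);
    ctx->hits++;
    printf("type=%s hits=%d\n",
           uv_handle_type_name(uv_handle_get_type((uv_handle_t*) timer)),
           ctx->hits);
    uv_close((uv_handle_t*) timer, NULL);  /* let uv_run() return */
  }

  int main(void) {
    uv_loop_t loop;
    uv_timer_t timer;
    struct my_ctx ctx = { 0 };

    uv_loop_init(&loop);
    uv_timer_init(&loop, &timer);
    uv_handle_set_data((uv_handle_t*) &timer, &ctx);  /* same slot read in on_timer */
    uv_timer_start(&timer, on_timer, 10, 0);          /* fire once after 10 ms */
    uv_run(&loop, UV_RUN_DEFAULT);
    uv_loop_close(&loop);
    return 0;
  }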
diff --git a/Utilities/cmlibuv/src/version.c b/Utilities/cmlibuv/src/version.c
new file mode 100644
index 0000000..686dedd
--- /dev/null
+++ b/Utilities/cmlibuv/src/version.c
@@ -0,0 +1,45 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+
+#define UV_STRINGIFY(v) UV_STRINGIFY_HELPER(v)
+#define UV_STRINGIFY_HELPER(v) #v
+
+#define UV_VERSION_STRING_BASE UV_STRINGIFY(UV_VERSION_MAJOR) "." \
+ UV_STRINGIFY(UV_VERSION_MINOR) "." \
+ UV_STRINGIFY(UV_VERSION_PATCH)
+
+#if UV_VERSION_IS_RELEASE
+# define UV_VERSION_STRING UV_VERSION_STRING_BASE
+#else
+# define UV_VERSION_STRING UV_VERSION_STRING_BASE "-" UV_VERSION_SUFFIX
+#endif
+
+
+unsigned int uv_version(void) {
+ return UV_VERSION_HEX;
+}
+
+
+const char* uv_version_string(void) {
+ return UV_VERSION_STRING;
+}
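
Illustrative aside (not part of the imported file above): a short sketch of consuming the two version functions. Decoding the hex form assumes the (major << 16 | minor << 8 | patch) layout that uv/version.h uses for UV_VERSION_HEX.

  #include <stdio.h>
  #include <uv.h>

  int main(void) {
    unsigned int v = uv_version();   /* UV_VERSION_HEX */
    printf("libuv %s (%u.%u.%u)\n",
           uv_version_string(),
           (v >> 16) & 0xff, (v >> 8) & 0xff, v & 0xff);
    return 0;
  }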
diff --git a/Utilities/cmlibuv/src/win/async.c b/Utilities/cmlibuv/src/win/async.c
new file mode 100644
index 0000000..d787f66
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/async.c
@@ -0,0 +1,98 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+
+#include "uv.h"
+#include "internal.h"
+#include "atomicops-inl.h"
+#include "handle-inl.h"
+#include "req-inl.h"
+
+
+void uv_async_endgame(uv_loop_t* loop, uv_async_t* handle) {
+ if (handle->flags & UV_HANDLE_CLOSING &&
+ !handle->async_sent) {
+ assert(!(handle->flags & UV_HANDLE_CLOSED));
+ uv__handle_close(handle);
+ }
+}
+
+
+int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
+ uv_req_t* req;
+
+ uv__handle_init(loop, (uv_handle_t*) handle, UV_ASYNC);
+ handle->async_sent = 0;
+ handle->async_cb = async_cb;
+
+ req = &handle->async_req;
+ UV_REQ_INIT(req, UV_WAKEUP);
+ req->data = handle;
+
+ uv__handle_start(handle);
+
+ return 0;
+}
+
+
+void uv_async_close(uv_loop_t* loop, uv_async_t* handle) {
+ if (!((uv_async_t*)handle)->async_sent) {
+ uv_want_endgame(loop, (uv_handle_t*) handle);
+ }
+
+ uv__handle_closing(handle);
+}
+
+
+int uv_async_send(uv_async_t* handle) {
+ uv_loop_t* loop = handle->loop;
+
+ if (handle->type != UV_ASYNC) {
+ /* Can't set errno because that's not thread-safe. */
+ return -1;
+ }
+
+  /* The user should make sure never to call uv_async_send on a closing or
+   * closed handle. */
+ assert(!(handle->flags & UV_HANDLE_CLOSING));
+
+ if (!uv__atomic_exchange_set(&handle->async_sent)) {
+ POST_COMPLETION_FOR_REQ(loop, &handle->async_req);
+ }
+
+ return 0;
+}
+
+
+void uv_process_async_wakeup_req(uv_loop_t* loop, uv_async_t* handle,
+ uv_req_t* req) {
+ assert(handle->type == UV_ASYNC);
+ assert(req->type == UV_WAKEUP);
+
+ handle->async_sent = 0;
+
+ if (handle->flags & UV_HANDLE_CLOSING) {
+ uv_want_endgame(loop, (uv_handle_t*)handle);
+ } else if (handle->async_cb != NULL) {
+ handle->async_cb(handle);
+ }
+}
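
Illustrative aside (not part of the imported file above): the cross-thread wake-up pattern this file implements. A worker thread calls uv_async_send(); the callback then runs on the loop thread. Handle and function names are hypothetical; note that multiple sends may be coalesced into a single callback invocation, which is exactly what the async_sent flag above provides.

  #include <stdio.h>
  #include <uv.h>

  static uv_async_t wakeup;

  static void on_wakeup(uv_async_t* handle) {
    printf("woken on the loop thread\n");
    uv_close((uv_handle_t*) handle, NULL);   /* let uv_run() return */
  }

  static void worker(void* arg) {
    (void) arg;
    uv_async_send(&wakeup);                  /* safe to call from any thread */
  }

  int main(void) {
    uv_thread_t tid;
    uv_async_init(uv_default_loop(), &wakeup, on_wakeup);
    uv_thread_create(&tid, worker, NULL);
    uv_run(uv_default_loop(), UV_RUN_DEFAULT);
    uv_thread_join(&tid);
    uv_loop_close(uv_default_loop());
    return 0;
  }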
diff --git a/Utilities/cmlibuv/src/win/atomicops-inl.h b/Utilities/cmlibuv/src/win/atomicops-inl.h
new file mode 100644
index 0000000..52713cf
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/atomicops-inl.h
@@ -0,0 +1,57 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_WIN_ATOMICOPS_INL_H_
+#define UV_WIN_ATOMICOPS_INL_H_
+
+#include "uv.h"
+#include "internal.h"
+
+
+/* Atomic set operation on char */
+#ifdef _MSC_VER /* MSVC */
+
+/* _InterlockedOr8 is supported by MSVC on x32 and x64. It is slightly less
+ * efficient than InterlockedExchange, but InterlockedExchange8 does not exist,
+ * and interlocked operations on larger targets might require the target to be
+ * aligned. */
+#pragma intrinsic(_InterlockedOr8)
+
+static char INLINE uv__atomic_exchange_set(char volatile* target) {
+ return _InterlockedOr8(target, 1);
+}
+
+#else /* GCC */
+
+/* Mingw-32 version, hopefully this works for 64-bit gcc as well. */
+static inline char uv__atomic_exchange_set(char volatile* target) {
+ const char one = 1;
+ char old_value;
+ __asm__ __volatile__ ("lock xchgb %0, %1\n\t"
+ : "=r"(old_value), "=m"(*target)
+ : "0"(one), "m"(*target)
+ : "memory");
+ return old_value;
+}
+
+#endif
+
+#endif /* UV_WIN_ATOMICOPS_INL_H_ */
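
Illustrative aside (not part of the imported file above): the same one-byte test-and-set idea expressed with C11 <stdatomic.h>. This header keeps the intrinsic/asm versions, presumably to support compilers without C11 atomics; the sketch below only restates the semantics.

  #include <stdatomic.h>
  #include <stdio.h>

  static atomic_flag pending = ATOMIC_FLAG_INIT;

  int main(void) {
    /* First caller wins; later callers see the flag already set, which is
     * how uv_async_send() coalesces wake-ups. */
    if (!atomic_flag_test_and_set(&pending))
      printf("queued a wake-up\n");
    else
      printf("wake-up already pending\n");
    atomic_flag_clear(&pending);   /* cleared again when the wakeup req is processed */
    return 0;
  }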
diff --git a/Utilities/cmlibuv/src/win/core.c b/Utilities/cmlibuv/src/win/core.c
new file mode 100644
index 0000000..e53a0f8
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/core.c
@@ -0,0 +1,748 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#if defined(_MSC_VER) || defined(__MINGW64_VERSION_MAJOR)
+#include <crtdbg.h>
+#endif
+
+#include "uv.h"
+#include "internal.h"
+#include "queue.h"
+#include "handle-inl.h"
+#include "heap-inl.h"
+#include "req-inl.h"
+
+/* uv_once initialization guards */
+static uv_once_t uv_init_guard_ = UV_ONCE_INIT;
+
+
+#if defined(_DEBUG) && (defined(_MSC_VER) || defined(__MINGW64_VERSION_MAJOR))
+/* Our crt debug report handler allows us to temporarily disable asserts
+ * just for the current thread.
+ */
+
+UV_THREAD_LOCAL int uv__crt_assert_enabled = TRUE;
+
+static int uv__crt_dbg_report_handler(int report_type, char *message, int *ret_val) {
+ if (uv__crt_assert_enabled || report_type != _CRT_ASSERT)
+ return FALSE;
+
+ if (ret_val) {
+ /* Set ret_val to 0 to continue with normal execution.
+ * Set ret_val to 1 to trigger a breakpoint.
+ */
+
+ if(IsDebuggerPresent())
+ *ret_val = 1;
+ else
+ *ret_val = 0;
+ }
+
+ /* Don't call _CrtDbgReport. */
+ return TRUE;
+}
+#else
+UV_THREAD_LOCAL int uv__crt_assert_enabled = FALSE;
+#endif
+
+
+#if !defined(__MINGW32__) || __MSVCRT_VERSION__ >= 0x800
+static void uv__crt_invalid_parameter_handler(const wchar_t* expression,
+ const wchar_t* function, const wchar_t * file, unsigned int line,
+ uintptr_t reserved) {
+ /* No-op. */
+}
+#endif
+
+static uv_loop_t** uv__loops;
+static int uv__loops_size;
+static int uv__loops_capacity;
+#define UV__LOOPS_CHUNK_SIZE 8
+static uv_mutex_t uv__loops_lock;
+
+static void uv__loops_init(void) {
+ uv_mutex_init(&uv__loops_lock);
+}
+
+static int uv__loops_add(uv_loop_t* loop) {
+ uv_loop_t** new_loops;
+ int new_capacity, i;
+
+ uv_mutex_lock(&uv__loops_lock);
+
+ if (uv__loops_size == uv__loops_capacity) {
+ new_capacity = uv__loops_capacity + UV__LOOPS_CHUNK_SIZE;
+ new_loops = uv__realloc(uv__loops, sizeof(uv_loop_t*) * new_capacity);
+ if (!new_loops)
+ goto failed_loops_realloc;
+ uv__loops = new_loops;
+ for (i = uv__loops_capacity; i < new_capacity; ++i)
+ uv__loops[i] = NULL;
+ uv__loops_capacity = new_capacity;
+ }
+ uv__loops[uv__loops_size] = loop;
+ ++uv__loops_size;
+
+ uv_mutex_unlock(&uv__loops_lock);
+ return 0;
+
+failed_loops_realloc:
+ uv_mutex_unlock(&uv__loops_lock);
+ return ERROR_OUTOFMEMORY;
+}
+
+static void uv__loops_remove(uv_loop_t* loop) {
+ int loop_index;
+ int smaller_capacity;
+ uv_loop_t** new_loops;
+
+ uv_mutex_lock(&uv__loops_lock);
+
+ for (loop_index = 0; loop_index < uv__loops_size; ++loop_index) {
+ if (uv__loops[loop_index] == loop)
+ break;
+ }
+ /* If loop was not found, ignore */
+ if (loop_index == uv__loops_size)
+ goto loop_removed;
+
+ uv__loops[loop_index] = uv__loops[uv__loops_size - 1];
+ uv__loops[uv__loops_size - 1] = NULL;
+ --uv__loops_size;
+
+ if (uv__loops_size == 0) {
+ uv__loops_capacity = 0;
+ uv__free(uv__loops);
+ uv__loops = NULL;
+ goto loop_removed;
+ }
+
+  /* If we didn't grow too big, skip downsizing */
+ if (uv__loops_capacity < 4 * UV__LOOPS_CHUNK_SIZE)
+ goto loop_removed;
+
+ /* Downsize only if more than half of buffer is free */
+ smaller_capacity = uv__loops_capacity / 2;
+ if (uv__loops_size >= smaller_capacity)
+ goto loop_removed;
+ new_loops = uv__realloc(uv__loops, sizeof(uv_loop_t*) * smaller_capacity);
+ if (!new_loops)
+ goto loop_removed;
+ uv__loops = new_loops;
+ uv__loops_capacity = smaller_capacity;
+
+loop_removed:
+ uv_mutex_unlock(&uv__loops_lock);
+}
+
+void uv__wake_all_loops(void) {
+ int i;
+ uv_loop_t* loop;
+
+ uv_mutex_lock(&uv__loops_lock);
+ for (i = 0; i < uv__loops_size; ++i) {
+ loop = uv__loops[i];
+ assert(loop);
+ if (loop->iocp != INVALID_HANDLE_VALUE)
+ PostQueuedCompletionStatus(loop->iocp, 0, 0, NULL);
+ }
+ uv_mutex_unlock(&uv__loops_lock);
+}
+
+static void uv_init(void) {
+ /* Tell Windows that we will handle critical errors. */
+ SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX |
+ SEM_NOOPENFILEERRORBOX);
+
+ /* Tell the CRT to not exit the application when an invalid parameter is
+ * passed. The main issue is that invalid FDs will trigger this behavior.
+ */
+#if !defined(__MINGW32__) || __MSVCRT_VERSION__ >= 0x800
+ _set_invalid_parameter_handler(uv__crt_invalid_parameter_handler);
+#endif
+
+  /* We also need to set up our debug report handler because some CRT
+   * functions (e.g. _get_osfhandle) raise an assert when called with invalid
+ * FDs even though they return the proper error code in the release build.
+ */
+#if defined(_DEBUG) && (defined(_MSC_VER) || defined(__MINGW64_VERSION_MAJOR))
+ _CrtSetReportHook(uv__crt_dbg_report_handler);
+#endif
+
+ /* Initialize tracking of all uv loops */
+ uv__loops_init();
+
+ /* Fetch winapi function pointers. This must be done first because other
+ * initialization code might need these function pointers to be loaded.
+ */
+ uv_winapi_init();
+
+ /* Initialize winsock */
+ uv_winsock_init();
+
+ /* Initialize FS */
+ uv_fs_init();
+
+ /* Initialize signal stuff */
+ uv_signals_init();
+
+ /* Initialize console */
+ uv_console_init();
+
+ /* Initialize utilities */
+ uv__util_init();
+
+ /* Initialize system wakeup detection */
+ uv__init_detect_system_wakeup();
+}
+
+
+int uv_loop_init(uv_loop_t* loop) {
+ uv__loop_internal_fields_t* lfields;
+ struct heap* timer_heap;
+ int err;
+
+ /* Initialize libuv itself first */
+ uv__once_init();
+
+ /* Create an I/O completion port */
+ loop->iocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 1);
+ if (loop->iocp == NULL)
+ return uv_translate_sys_error(GetLastError());
+
+ lfields = (uv__loop_internal_fields_t*) uv__calloc(1, sizeof(*lfields));
+ if (lfields == NULL)
+ return UV_ENOMEM;
+ loop->internal_fields = lfields;
+
+ err = uv_mutex_init(&lfields->loop_metrics.lock);
+ if (err)
+ goto fail_metrics_mutex_init;
+
+ /* To prevent uninitialized memory access, loop->time must be initialized
+ * to zero before calling uv_update_time for the first time.
+ */
+ loop->time = 0;
+ uv_update_time(loop);
+
+ QUEUE_INIT(&loop->wq);
+ QUEUE_INIT(&loop->handle_queue);
+ loop->active_reqs.count = 0;
+ loop->active_handles = 0;
+
+ loop->pending_reqs_tail = NULL;
+
+ loop->endgame_handles = NULL;
+
+ loop->timer_heap = timer_heap = uv__malloc(sizeof(*timer_heap));
+ if (timer_heap == NULL) {
+ err = UV_ENOMEM;
+ goto fail_timers_alloc;
+ }
+
+ heap_init(timer_heap);
+
+ loop->check_handles = NULL;
+ loop->prepare_handles = NULL;
+ loop->idle_handles = NULL;
+
+ loop->next_prepare_handle = NULL;
+ loop->next_check_handle = NULL;
+ loop->next_idle_handle = NULL;
+
+ memset(&loop->poll_peer_sockets, 0, sizeof loop->poll_peer_sockets);
+
+ loop->active_tcp_streams = 0;
+ loop->active_udp_streams = 0;
+
+ loop->timer_counter = 0;
+ loop->stop_flag = 0;
+
+ err = uv_mutex_init(&loop->wq_mutex);
+ if (err)
+ goto fail_mutex_init;
+
+ err = uv_async_init(loop, &loop->wq_async, uv__work_done);
+ if (err)
+ goto fail_async_init;
+
+ uv__handle_unref(&loop->wq_async);
+ loop->wq_async.flags |= UV_HANDLE_INTERNAL;
+
+ err = uv__loops_add(loop);
+ if (err)
+ goto fail_async_init;
+
+ return 0;
+
+fail_async_init:
+ uv_mutex_destroy(&loop->wq_mutex);
+
+fail_mutex_init:
+ uv__free(timer_heap);
+ loop->timer_heap = NULL;
+
+fail_timers_alloc:
+ uv_mutex_destroy(&lfields->loop_metrics.lock);
+
+fail_metrics_mutex_init:
+ uv__free(lfields);
+ loop->internal_fields = NULL;
+ CloseHandle(loop->iocp);
+ loop->iocp = INVALID_HANDLE_VALUE;
+
+ return err;
+}
+
+
+void uv_update_time(uv_loop_t* loop) {
+ uint64_t new_time = uv__hrtime(1000);
+ assert(new_time >= loop->time);
+ loop->time = new_time;
+}
+
+
+void uv__once_init(void) {
+ uv_once(&uv_init_guard_, uv_init);
+}
+
+
+void uv__loop_close(uv_loop_t* loop) {
+ uv__loop_internal_fields_t* lfields;
+ size_t i;
+
+ uv__loops_remove(loop);
+
+ /* Close the async handle without needing an extra loop iteration.
+ * We might have a pending message, but we're just going to destroy the IOCP
+   * soon, so we can just discard it now without the usual risk of getting
+   * another notification from GetQueuedCompletionStatusEx after calling the
+   * close_cb (which we also skip defining). We'll assert later that the queue
+   * was actually empty and all reqs were handled. */
+ loop->wq_async.async_sent = 0;
+ loop->wq_async.close_cb = NULL;
+ uv__handle_closing(&loop->wq_async);
+ uv__handle_close(&loop->wq_async);
+
+ for (i = 0; i < ARRAY_SIZE(loop->poll_peer_sockets); i++) {
+ SOCKET sock = loop->poll_peer_sockets[i];
+ if (sock != 0 && sock != INVALID_SOCKET)
+ closesocket(sock);
+ }
+
+ uv_mutex_lock(&loop->wq_mutex);
+ assert(QUEUE_EMPTY(&loop->wq) && "thread pool work queue not empty!");
+ assert(!uv__has_active_reqs(loop));
+ uv_mutex_unlock(&loop->wq_mutex);
+ uv_mutex_destroy(&loop->wq_mutex);
+
+ uv__free(loop->timer_heap);
+ loop->timer_heap = NULL;
+
+ lfields = uv__get_internal_fields(loop);
+ uv_mutex_destroy(&lfields->loop_metrics.lock);
+ uv__free(lfields);
+ loop->internal_fields = NULL;
+
+ CloseHandle(loop->iocp);
+}
+
+
+int uv__loop_configure(uv_loop_t* loop, uv_loop_option option, va_list ap) {
+ uv__loop_internal_fields_t* lfields;
+
+ lfields = uv__get_internal_fields(loop);
+ if (option == UV_METRICS_IDLE_TIME) {
+ lfields->flags |= UV_METRICS_IDLE_TIME;
+ return 0;
+ }
+
+ return UV_ENOSYS;
+}
+
+
+int uv_backend_fd(const uv_loop_t* loop) {
+ return -1;
+}
+
+
+int uv_loop_fork(uv_loop_t* loop) {
+ return UV_ENOSYS;
+}
+
+
+int uv_backend_timeout(const uv_loop_t* loop) {
+ if (loop->stop_flag != 0)
+ return 0;
+
+ if (!uv__has_active_handles(loop) && !uv__has_active_reqs(loop))
+ return 0;
+
+ if (loop->pending_reqs_tail)
+ return 0;
+
+ if (loop->endgame_handles)
+ return 0;
+
+ if (loop->idle_handles)
+ return 0;
+
+ return uv__next_timeout(loop);
+}
+
+
+static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) {
+ DWORD bytes;
+ ULONG_PTR key;
+ OVERLAPPED* overlapped;
+ uv_req_t* req;
+ int repeat;
+ uint64_t timeout_time;
+ uint64_t user_timeout;
+ int reset_timeout;
+
+ timeout_time = loop->time + timeout;
+
+ if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
+ reset_timeout = 1;
+ user_timeout = timeout;
+ timeout = 0;
+ } else {
+ reset_timeout = 0;
+ }
+
+ for (repeat = 0; ; repeat++) {
+ /* Only need to set the provider_entry_time if timeout != 0. The function
+ * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
+ */
+ if (timeout != 0)
+ uv__metrics_set_provider_entry_time(loop);
+
+ GetQueuedCompletionStatus(loop->iocp,
+ &bytes,
+ &key,
+ &overlapped,
+ timeout);
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+    /* Placed here because on success the loop breaks whether or not an empty
+     * package was dequeued, and if GetQueuedCompletionStatus returned early
+     * then the timeout is updated and the loop runs again. In either case the
+     * idle time needs to be updated.
+     */
+ uv__metrics_update_idle_time(loop);
+
+ if (overlapped) {
+ /* Package was dequeued */
+ req = uv_overlapped_to_req(overlapped);
+ uv_insert_pending_req(loop, req);
+
+ /* Some time might have passed waiting for I/O,
+ * so update the loop time here.
+ */
+ uv_update_time(loop);
+ } else if (GetLastError() != WAIT_TIMEOUT) {
+ /* Serious error */
+ uv_fatal_error(GetLastError(), "GetQueuedCompletionStatus");
+ } else if (timeout > 0) {
+ /* GetQueuedCompletionStatus can occasionally return a little early.
+ * Make sure that the desired timeout target time is reached.
+ */
+ uv_update_time(loop);
+ if (timeout_time > loop->time) {
+ timeout = (DWORD)(timeout_time - loop->time);
+ /* The first call to GetQueuedCompletionStatus should return very
+ * close to the target time and the second should reach it, but
+ * this is not stated in the documentation. To make sure a busy
+ * loop cannot happen, the timeout is increased exponentially
+ * starting on the third round.
+ */
+ timeout += repeat ? (1 << (repeat - 1)) : 0;
+ continue;
+ }
+ }
+ break;
+ }
+}
+
+
+static void uv__poll(uv_loop_t* loop, DWORD timeout) {
+ BOOL success;
+ uv_req_t* req;
+ OVERLAPPED_ENTRY overlappeds[128];
+ ULONG count;
+ ULONG i;
+ int repeat;
+ uint64_t timeout_time;
+ uint64_t user_timeout;
+ int reset_timeout;
+
+ timeout_time = loop->time + timeout;
+
+ if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
+ reset_timeout = 1;
+ user_timeout = timeout;
+ timeout = 0;
+ } else {
+ reset_timeout = 0;
+ }
+
+ for (repeat = 0; ; repeat++) {
+ /* Only need to set the provider_entry_time if timeout != 0. The function
+ * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
+ */
+ if (timeout != 0)
+ uv__metrics_set_provider_entry_time(loop);
+
+ success = pGetQueuedCompletionStatusEx(loop->iocp,
+ overlappeds,
+ ARRAY_SIZE(overlappeds),
+ &count,
+ timeout,
+ FALSE);
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+    /* Placed here because on success the loop breaks whether or not an empty
+     * package was dequeued, and if GetQueuedCompletionStatusEx returned early
+     * then the timeout is updated and the loop runs again. In either case the
+     * idle time needs to be updated.
+     */
+ uv__metrics_update_idle_time(loop);
+
+ if (success) {
+ for (i = 0; i < count; i++) {
+        /* A package was dequeued, but check that it is not an empty package
+         * meant only to wake us up.
+         */
+ if (overlappeds[i].lpOverlapped) {
+ req = uv_overlapped_to_req(overlappeds[i].lpOverlapped);
+ uv_insert_pending_req(loop, req);
+ }
+ }
+
+ /* Some time might have passed waiting for I/O,
+ * so update the loop time here.
+ */
+ uv_update_time(loop);
+ } else if (GetLastError() != WAIT_TIMEOUT) {
+ /* Serious error */
+ uv_fatal_error(GetLastError(), "GetQueuedCompletionStatusEx");
+ } else if (timeout > 0) {
+ /* GetQueuedCompletionStatus can occasionally return a little early.
+ * Make sure that the desired timeout target time is reached.
+ */
+ uv_update_time(loop);
+ if (timeout_time > loop->time) {
+ timeout = (DWORD)(timeout_time - loop->time);
+ /* The first call to GetQueuedCompletionStatus should return very
+ * close to the target time and the second should reach it, but
+ * this is not stated in the documentation. To make sure a busy
+ * loop cannot happen, the timeout is increased exponentially
+ * starting on the third round.
+ */
+ timeout += repeat ? (1 << (repeat - 1)) : 0;
+ continue;
+ }
+ }
+ break;
+ }
+}
+
+
+static int uv__loop_alive(const uv_loop_t* loop) {
+ return uv__has_active_handles(loop) ||
+ uv__has_active_reqs(loop) ||
+ loop->endgame_handles != NULL;
+}
+
+
+int uv_loop_alive(const uv_loop_t* loop) {
+ return uv__loop_alive(loop);
+}
+
+
+int uv_run(uv_loop_t *loop, uv_run_mode mode) {
+ DWORD timeout;
+ int r;
+ int ran_pending;
+
+ r = uv__loop_alive(loop);
+ if (!r)
+ uv_update_time(loop);
+
+ while (r != 0 && loop->stop_flag == 0) {
+ uv_update_time(loop);
+ uv__run_timers(loop);
+
+ ran_pending = uv_process_reqs(loop);
+ uv_idle_invoke(loop);
+ uv_prepare_invoke(loop);
+
+ timeout = 0;
+ if ((mode == UV_RUN_ONCE && !ran_pending) || mode == UV_RUN_DEFAULT)
+ timeout = uv_backend_timeout(loop);
+
+ if (pGetQueuedCompletionStatusEx)
+ uv__poll(loop, timeout);
+ else
+ uv__poll_wine(loop, timeout);
+
+ /* Run one final update on the provider_idle_time in case uv__poll*
+ * returned because the timeout expired, but no events were received. This
+ * call will be ignored if the provider_entry_time was either never set (if
+ * the timeout == 0) or was already updated b/c an event was received.
+ */
+ uv__metrics_update_idle_time(loop);
+
+ uv_check_invoke(loop);
+ uv_process_endgames(loop);
+
+ if (mode == UV_RUN_ONCE) {
+ /* UV_RUN_ONCE implies forward progress: at least one callback must have
+       * been invoked when it returns. uv__poll() can return without doing
+       * I/O (meaning: no callbacks) when its timeout expires - which means we
+ * have pending timers that satisfy the forward progress constraint.
+ *
+ * UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from
+ * the check.
+ */
+ uv__run_timers(loop);
+ }
+
+ r = uv__loop_alive(loop);
+ if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT)
+ break;
+ }
+
+ /* The if statement lets the compiler compile it to a conditional store.
+ * Avoids dirtying a cache line.
+ */
+ if (loop->stop_flag != 0)
+ loop->stop_flag = 0;
+
+ return r;
+}
+
+
+int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) {
+ uv_os_fd_t fd_out;
+
+ switch (handle->type) {
+ case UV_TCP:
+ fd_out = (uv_os_fd_t)((uv_tcp_t*) handle)->socket;
+ break;
+
+ case UV_NAMED_PIPE:
+ fd_out = ((uv_pipe_t*) handle)->handle;
+ break;
+
+ case UV_TTY:
+ fd_out = ((uv_tty_t*) handle)->handle;
+ break;
+
+ case UV_UDP:
+ fd_out = (uv_os_fd_t)((uv_udp_t*) handle)->socket;
+ break;
+
+ case UV_POLL:
+ fd_out = (uv_os_fd_t)((uv_poll_t*) handle)->socket;
+ break;
+
+ default:
+ return UV_EINVAL;
+ }
+
+ if (uv_is_closing(handle) || fd_out == INVALID_HANDLE_VALUE)
+ return UV_EBADF;
+
+ *fd = fd_out;
+ return 0;
+}
+
+
+int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value) {
+ int r;
+ int len;
+ SOCKET socket;
+
+ if (handle == NULL || value == NULL)
+ return UV_EINVAL;
+
+ if (handle->type == UV_TCP)
+ socket = ((uv_tcp_t*) handle)->socket;
+ else if (handle->type == UV_UDP)
+ socket = ((uv_udp_t*) handle)->socket;
+ else
+ return UV_ENOTSUP;
+
+ len = sizeof(*value);
+
+ if (*value == 0)
+ r = getsockopt(socket, SOL_SOCKET, optname, (char*) value, &len);
+ else
+ r = setsockopt(socket, SOL_SOCKET, optname, (const char*) value, len);
+
+ if (r == SOCKET_ERROR)
+ return uv_translate_sys_error(WSAGetLastError());
+
+ return 0;
+}
+
+int uv_cpumask_size(void) {
+ return (int)(sizeof(DWORD_PTR) * 8);
+}
+
+int uv__getsockpeername(const uv_handle_t* handle,
+ uv__peersockfunc func,
+ struct sockaddr* name,
+ int* namelen,
+ int delayed_error) {
+
+ int result;
+ uv_os_fd_t fd;
+
+ result = uv_fileno(handle, &fd);
+ if (result != 0)
+ return result;
+
+ if (delayed_error)
+ return uv_translate_sys_error(delayed_error);
+
+ result = func((SOCKET) fd, name, namelen);
+ if (result != 0)
+ return uv_translate_sys_error(WSAGetLastError());
+
+ return 0;
+}
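
Illustrative aside (not part of the imported file above): the loop lifecycle that uv_loop_init(), uv_run() and uv__loop_close() serve, expressed through the public API only.

  #include <stdio.h>
  #include <uv.h>

  int main(void) {
    uv_loop_t loop;
    int err;

    err = uv_loop_init(&loop);          /* on Windows: IOCP, wq_async, timer heap */
    if (err != 0) {
      fprintf(stderr, "uv_loop_init: %s\n", uv_strerror(err));
      return 1;
    }

    /* With no active handles or requests the loop is not alive, so this
     * returns immediately with 0. */
    err = uv_run(&loop, UV_RUN_DEFAULT);
    printf("uv_run returned %d, loop alive: %d\n", err, uv_loop_alive(&loop));

    uv_loop_close(&loop);               /* tears down the IOCP and metrics lock */
    return 0;
  }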
diff --git a/Utilities/cmlibuv/src/win/detect-wakeup.c b/Utilities/cmlibuv/src/win/detect-wakeup.c
new file mode 100644
index 0000000..ab19361
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/detect-wakeup.c
@@ -0,0 +1,56 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+#include "winapi.h"
+
+static void uv__register_system_resume_callback(void);
+
+void uv__init_detect_system_wakeup(void) {
+ /* Try registering system power event callback. This is the cleanest
+ * method, but it will only work on Win8 and above.
+ */
+ uv__register_system_resume_callback();
+}
+
+static ULONG CALLBACK uv__system_resume_callback(PVOID Context,
+ ULONG Type,
+ PVOID Setting) {
+ if (Type == PBT_APMRESUMESUSPEND || Type == PBT_APMRESUMEAUTOMATIC)
+ uv__wake_all_loops();
+
+ return 0;
+}
+
+static void uv__register_system_resume_callback(void) {
+ _DEVICE_NOTIFY_SUBSCRIBE_PARAMETERS recipient;
+ _HPOWERNOTIFY registration_handle;
+
+ if (pPowerRegisterSuspendResumeNotification == NULL)
+ return;
+
+ recipient.Callback = uv__system_resume_callback;
+ recipient.Context = NULL;
+ (*pPowerRegisterSuspendResumeNotification)(DEVICE_NOTIFY_CALLBACK,
+ &recipient,
+ &registration_handle);
+}
diff --git a/Utilities/cmlibuv/src/win/dl.c b/Utilities/cmlibuv/src/win/dl.c
new file mode 100644
index 0000000..676be4d
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/dl.c
@@ -0,0 +1,136 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+static int uv__dlerror(uv_lib_t* lib, const char* filename, DWORD errorno);
+
+
+int uv_dlopen(const char* filename, uv_lib_t* lib) {
+ WCHAR filename_w[32768];
+
+ lib->handle = NULL;
+ lib->errmsg = NULL;
+
+ if (!MultiByteToWideChar(CP_UTF8,
+ 0,
+ filename,
+ -1,
+ filename_w,
+ ARRAY_SIZE(filename_w))) {
+ return uv__dlerror(lib, filename, GetLastError());
+ }
+
+ lib->handle = LoadLibraryExW(filename_w, NULL, LOAD_WITH_ALTERED_SEARCH_PATH);
+ if (lib->handle == NULL) {
+ return uv__dlerror(lib, filename, GetLastError());
+ }
+
+ return 0;
+}
+
+
+void uv_dlclose(uv_lib_t* lib) {
+ if (lib->errmsg) {
+ LocalFree((void*)lib->errmsg);
+ lib->errmsg = NULL;
+ }
+
+ if (lib->handle) {
+ /* Ignore errors. No good way to signal them without leaking memory. */
+ FreeLibrary(lib->handle);
+ lib->handle = NULL;
+ }
+}
+
+
+int uv_dlsym(uv_lib_t* lib, const char* name, void** ptr) {
+  /* Cast through integer to suppress pedantic warning about forbidden cast. */
+ *ptr = (void*)(uintptr_t) GetProcAddress(lib->handle, name);
+ return uv__dlerror(lib, "", *ptr ? 0 : GetLastError());
+}
+
+
+const char* uv_dlerror(const uv_lib_t* lib) {
+ return lib->errmsg ? lib->errmsg : "no error";
+}
+
+
+static void uv__format_fallback_error(uv_lib_t* lib, int errorno){
+ static const CHAR fallback_error[] = "error: %1!d!";
+ DWORD_PTR args[1];
+ args[0] = (DWORD_PTR) errorno;
+
+ FormatMessageA(FORMAT_MESSAGE_FROM_STRING |
+ FORMAT_MESSAGE_ARGUMENT_ARRAY |
+ FORMAT_MESSAGE_ALLOCATE_BUFFER,
+ fallback_error, 0, 0,
+ (LPSTR) &lib->errmsg,
+ 0, (va_list*) args);
+}
+
+
+
+static int uv__dlerror(uv_lib_t* lib, const char* filename, DWORD errorno) {
+ DWORD_PTR arg;
+ DWORD res;
+ char* msg;
+
+ if (lib->errmsg) {
+ LocalFree(lib->errmsg);
+ lib->errmsg = NULL;
+ }
+
+ if (errorno == 0)
+ return 0;
+
+ res = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER |
+ FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS, NULL, errorno,
+ MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US),
+ (LPSTR) &lib->errmsg, 0, NULL);
+
+ if (!res && (GetLastError() == ERROR_MUI_FILE_NOT_FOUND ||
+ GetLastError() == ERROR_RESOURCE_TYPE_NOT_FOUND)) {
+ res = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER |
+ FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS, NULL, errorno,
+ 0, (LPSTR) &lib->errmsg, 0, NULL);
+ }
+
+ if (res && errorno == ERROR_BAD_EXE_FORMAT && strstr(lib->errmsg, "%1")) {
+ msg = lib->errmsg;
+ lib->errmsg = NULL;
+ arg = (DWORD_PTR) filename;
+ res = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER |
+ FORMAT_MESSAGE_ARGUMENT_ARRAY |
+ FORMAT_MESSAGE_FROM_STRING,
+ msg,
+ 0, 0, (LPSTR) &lib->errmsg, 0, (va_list*) &arg);
+ LocalFree(msg);
+ }
+
+ if (!res)
+ uv__format_fallback_error(lib, errorno);
+
+ return -1;
+}
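
Illustrative aside (not part of the imported file above): typical use of the uv_dlopen()/uv_dlsym()/uv_dlclose() wrappers. The module name and symbol are hypothetical.

  #include <stdio.h>
  #include <uv.h>

  typedef int (*plugin_init_fn)(void);

  int main(void) {
    uv_lib_t lib;
    plugin_init_fn init;

    if (uv_dlopen("plugin.dll", &lib) != 0) {          /* hypothetical module */
      fprintf(stderr, "dlopen: %s\n", uv_dlerror(&lib));
      return 1;
    }

    if (uv_dlsym(&lib, "plugin_init", (void**) &init) != 0) {
      fprintf(stderr, "dlsym: %s\n", uv_dlerror(&lib));
      uv_dlclose(&lib);
      return 1;
    }

    printf("plugin_init returned %d\n", init());
    uv_dlclose(&lib);                                  /* also frees errmsg */
    return 0;
  }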
diff --git a/Utilities/cmlibuv/src/win/error.c b/Utilities/cmlibuv/src/win/error.c
new file mode 100644
index 0000000..3ec984c
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/error.c
@@ -0,0 +1,173 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include "uv.h"
+#include "internal.h"
+
+
+/*
+ * Display an error message and abort the event loop.
+ */
+void uv_fatal_error(const int errorno, const char* syscall) {
+ char* buf = NULL;
+ const char* errmsg;
+
+ FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS, NULL, errorno,
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL);
+
+ if (buf) {
+ errmsg = buf;
+ } else {
+ errmsg = "Unknown error";
+ }
+
+ /* FormatMessage messages include a newline character already, so don't add
+ * another. */
+ if (syscall) {
+ fprintf(stderr, "%s: (%d) %s", syscall, errorno, errmsg);
+ } else {
+ fprintf(stderr, "(%d) %s", errorno, errmsg);
+ }
+
+ if (buf) {
+ LocalFree(buf);
+ }
+
+ DebugBreak();
+ abort();
+}
+
+
+int uv_translate_sys_error(int sys_errno) {
+ if (sys_errno <= 0) {
+ return sys_errno; /* If < 0 then it's already a libuv error. */
+ }
+
+ switch (sys_errno) {
+ case ERROR_NOACCESS: return UV_EACCES;
+ case WSAEACCES: return UV_EACCES;
+ case ERROR_ELEVATION_REQUIRED: return UV_EACCES;
+ case ERROR_CANT_ACCESS_FILE: return UV_EACCES;
+ case ERROR_ADDRESS_ALREADY_ASSOCIATED: return UV_EADDRINUSE;
+ case WSAEADDRINUSE: return UV_EADDRINUSE;
+ case WSAEADDRNOTAVAIL: return UV_EADDRNOTAVAIL;
+ case WSAEAFNOSUPPORT: return UV_EAFNOSUPPORT;
+ case WSAEWOULDBLOCK: return UV_EAGAIN;
+ case WSAEALREADY: return UV_EALREADY;
+ case ERROR_INVALID_FLAGS: return UV_EBADF;
+ case ERROR_INVALID_HANDLE: return UV_EBADF;
+ case ERROR_LOCK_VIOLATION: return UV_EBUSY;
+ case ERROR_PIPE_BUSY: return UV_EBUSY;
+ case ERROR_SHARING_VIOLATION: return UV_EBUSY;
+ case ERROR_OPERATION_ABORTED: return UV_ECANCELED;
+ case WSAEINTR: return UV_ECANCELED;
+ case ERROR_NO_UNICODE_TRANSLATION: return UV_ECHARSET;
+ case ERROR_CONNECTION_ABORTED: return UV_ECONNABORTED;
+ case WSAECONNABORTED: return UV_ECONNABORTED;
+ case ERROR_CONNECTION_REFUSED: return UV_ECONNREFUSED;
+ case WSAECONNREFUSED: return UV_ECONNREFUSED;
+ case ERROR_NETNAME_DELETED: return UV_ECONNRESET;
+ case WSAECONNRESET: return UV_ECONNRESET;
+ case ERROR_ALREADY_EXISTS: return UV_EEXIST;
+ case ERROR_FILE_EXISTS: return UV_EEXIST;
+ case ERROR_BUFFER_OVERFLOW: return UV_EFAULT;
+ case WSAEFAULT: return UV_EFAULT;
+ case ERROR_HOST_UNREACHABLE: return UV_EHOSTUNREACH;
+ case WSAEHOSTUNREACH: return UV_EHOSTUNREACH;
+ case ERROR_INSUFFICIENT_BUFFER: return UV_EINVAL;
+ case ERROR_INVALID_DATA: return UV_EINVAL;
+ case ERROR_INVALID_PARAMETER: return UV_EINVAL;
+ case ERROR_SYMLINK_NOT_SUPPORTED: return UV_EINVAL;
+ case WSAEINVAL: return UV_EINVAL;
+ case WSAEPFNOSUPPORT: return UV_EINVAL;
+ case WSAESOCKTNOSUPPORT: return UV_EINVAL;
+ case ERROR_BEGINNING_OF_MEDIA: return UV_EIO;
+ case ERROR_BUS_RESET: return UV_EIO;
+ case ERROR_CRC: return UV_EIO;
+ case ERROR_DEVICE_DOOR_OPEN: return UV_EIO;
+ case ERROR_DEVICE_REQUIRES_CLEANING: return UV_EIO;
+ case ERROR_DISK_CORRUPT: return UV_EIO;
+ case ERROR_EOM_OVERFLOW: return UV_EIO;
+ case ERROR_FILEMARK_DETECTED: return UV_EIO;
+ case ERROR_GEN_FAILURE: return UV_EIO;
+ case ERROR_INVALID_BLOCK_LENGTH: return UV_EIO;
+ case ERROR_IO_DEVICE: return UV_EIO;
+ case ERROR_NO_DATA_DETECTED: return UV_EIO;
+ case ERROR_NO_SIGNAL_SENT: return UV_EIO;
+ case ERROR_OPEN_FAILED: return UV_EIO;
+ case ERROR_SETMARK_DETECTED: return UV_EIO;
+ case ERROR_SIGNAL_REFUSED: return UV_EIO;
+ case WSAEISCONN: return UV_EISCONN;
+ case ERROR_CANT_RESOLVE_FILENAME: return UV_ELOOP;
+ case ERROR_TOO_MANY_OPEN_FILES: return UV_EMFILE;
+ case WSAEMFILE: return UV_EMFILE;
+ case WSAEMSGSIZE: return UV_EMSGSIZE;
+ case ERROR_FILENAME_EXCED_RANGE: return UV_ENAMETOOLONG;
+ case ERROR_NETWORK_UNREACHABLE: return UV_ENETUNREACH;
+ case WSAENETUNREACH: return UV_ENETUNREACH;
+ case WSAENOBUFS: return UV_ENOBUFS;
+ case ERROR_BAD_PATHNAME: return UV_ENOENT;
+ case ERROR_DIRECTORY: return UV_ENOENT;
+ case ERROR_ENVVAR_NOT_FOUND: return UV_ENOENT;
+ case ERROR_FILE_NOT_FOUND: return UV_ENOENT;
+ case ERROR_INVALID_NAME: return UV_ENOENT;
+ case ERROR_INVALID_DRIVE: return UV_ENOENT;
+ case ERROR_INVALID_REPARSE_DATA: return UV_ENOENT;
+ case ERROR_MOD_NOT_FOUND: return UV_ENOENT;
+ case ERROR_PATH_NOT_FOUND: return UV_ENOENT;
+ case WSAHOST_NOT_FOUND: return UV_ENOENT;
+ case WSANO_DATA: return UV_ENOENT;
+ case ERROR_NOT_ENOUGH_MEMORY: return UV_ENOMEM;
+ case ERROR_OUTOFMEMORY: return UV_ENOMEM;
+ case ERROR_CANNOT_MAKE: return UV_ENOSPC;
+ case ERROR_DISK_FULL: return UV_ENOSPC;
+ case ERROR_EA_TABLE_FULL: return UV_ENOSPC;
+ case ERROR_END_OF_MEDIA: return UV_ENOSPC;
+ case ERROR_HANDLE_DISK_FULL: return UV_ENOSPC;
+ case ERROR_NOT_CONNECTED: return UV_ENOTCONN;
+ case WSAENOTCONN: return UV_ENOTCONN;
+ case ERROR_DIR_NOT_EMPTY: return UV_ENOTEMPTY;
+ case WSAENOTSOCK: return UV_ENOTSOCK;
+ case ERROR_NOT_SUPPORTED: return UV_ENOTSUP;
+ case ERROR_BROKEN_PIPE: return UV_EOF;
+ case ERROR_ACCESS_DENIED: return UV_EPERM;
+ case ERROR_PRIVILEGE_NOT_HELD: return UV_EPERM;
+ case ERROR_BAD_PIPE: return UV_EPIPE;
+ case ERROR_NO_DATA: return UV_EPIPE;
+ case ERROR_PIPE_NOT_CONNECTED: return UV_EPIPE;
+ case WSAESHUTDOWN: return UV_EPIPE;
+ case WSAEPROTONOSUPPORT: return UV_EPROTONOSUPPORT;
+ case ERROR_WRITE_PROTECT: return UV_EROFS;
+ case ERROR_SEM_TIMEOUT: return UV_ETIMEDOUT;
+ case WSAETIMEDOUT: return UV_ETIMEDOUT;
+ case ERROR_NOT_SAME_DEVICE: return UV_EXDEV;
+ case ERROR_INVALID_FUNCTION: return UV_EISDIR;
+ case ERROR_META_EXPANSION_TOO_LONG: return UV_E2BIG;
+ default: return UV_UNKNOWN;
+ }
+}
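
Illustrative aside (not part of the imported file above): how a caller typically combines uv_translate_sys_error() with the public uv_strerror()/uv_err_name() helpers to report a Win32 failure in libuv terms.

  #include <stdio.h>
  #include <uv.h>

  #ifdef _WIN32
  #include <windows.h>

  static void report_last_error(const char* what) {
    /* e.g. ERROR_FILE_NOT_FOUND maps to UV_ENOENT per the table above */
    int err = uv_translate_sys_error(GetLastError());
    fprintf(stderr, "%s: %s (%s)\n", what, uv_strerror(err), uv_err_name(err));
  }
  #endif

  int main(void) {
  #ifdef _WIN32
    if (GetFileAttributesA("no-such-file") == INVALID_FILE_ATTRIBUTES)
      report_last_error("GetFileAttributesA");
  #endif
    return 0;
  }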
diff --git a/Utilities/cmlibuv/src/win/fs-event.c b/Utilities/cmlibuv/src/win/fs-event.c
new file mode 100644
index 0000000..0126c5e
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/fs-event.c
@@ -0,0 +1,608 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "uv.h"
+#include "internal.h"
+#include "handle-inl.h"
+#include "req-inl.h"
+
+
+const unsigned int uv_directory_watcher_buffer_size = 4096;
+
+
+static void uv_fs_event_queue_readdirchanges(uv_loop_t* loop,
+ uv_fs_event_t* handle) {
+ assert(handle->dir_handle != INVALID_HANDLE_VALUE);
+ assert(!handle->req_pending);
+
+ memset(&(handle->req.u.io.overlapped), 0,
+ sizeof(handle->req.u.io.overlapped));
+ if (!ReadDirectoryChangesW(handle->dir_handle,
+ handle->buffer,
+ uv_directory_watcher_buffer_size,
+ (handle->flags & UV_FS_EVENT_RECURSIVE) ? TRUE : FALSE,
+ FILE_NOTIFY_CHANGE_FILE_NAME |
+ FILE_NOTIFY_CHANGE_DIR_NAME |
+ FILE_NOTIFY_CHANGE_ATTRIBUTES |
+ FILE_NOTIFY_CHANGE_SIZE |
+ FILE_NOTIFY_CHANGE_LAST_WRITE |
+ FILE_NOTIFY_CHANGE_LAST_ACCESS |
+ FILE_NOTIFY_CHANGE_CREATION |
+ FILE_NOTIFY_CHANGE_SECURITY,
+ NULL,
+ &handle->req.u.io.overlapped,
+ NULL)) {
+    /* Make this req pending, reporting an error. */
+ SET_REQ_ERROR(&handle->req, GetLastError());
+ uv_insert_pending_req(loop, (uv_req_t*)&handle->req);
+ }
+
+ handle->req_pending = 1;
+}
+
+static void uv_relative_path(const WCHAR* filename,
+ const WCHAR* dir,
+ WCHAR** relpath) {
+ size_t relpathlen;
+ size_t filenamelen = wcslen(filename);
+ size_t dirlen = wcslen(dir);
+ assert(!_wcsnicmp(filename, dir, dirlen));
+ if (dirlen > 0 && dir[dirlen - 1] == '\\')
+ dirlen--;
+ relpathlen = filenamelen - dirlen - 1;
+ *relpath = uv__malloc((relpathlen + 1) * sizeof(WCHAR));
+ if (!*relpath)
+ uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+ wcsncpy(*relpath, filename + dirlen + 1, relpathlen);
+ (*relpath)[relpathlen] = L'\0';
+}
+
+static int uv_split_path(const WCHAR* filename, WCHAR** dir,
+ WCHAR** file) {
+ size_t len, i;
+ DWORD dir_len;
+
+ if (filename == NULL) {
+ if (dir != NULL)
+ *dir = NULL;
+ *file = NULL;
+ return 0;
+ }
+
+ len = wcslen(filename);
+ i = len;
+ while (i > 0 && filename[--i] != '\\' && filename[i] != '/');
+
+ if (i == 0) {
+ if (dir) {
+ dir_len = GetCurrentDirectoryW(0, NULL);
+ if (dir_len == 0) {
+ return -1;
+ }
+ *dir = (WCHAR*)uv__malloc(dir_len * sizeof(WCHAR));
+ if (!*dir) {
+ uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+ }
+
+ if (!GetCurrentDirectoryW(dir_len, *dir)) {
+ uv__free(*dir);
+ *dir = NULL;
+ return -1;
+ }
+ }
+
+ *file = wcsdup(filename);
+ } else {
+ if (dir) {
+ *dir = (WCHAR*)uv__malloc((i + 2) * sizeof(WCHAR));
+ if (!*dir) {
+ uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+ }
+ wcsncpy(*dir, filename, i + 1);
+ (*dir)[i + 1] = L'\0';
+ }
+
+ *file = (WCHAR*)uv__malloc((len - i) * sizeof(WCHAR));
+ if (!*file) {
+ uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+ }
+ wcsncpy(*file, filename + i + 1, len - i - 1);
+ (*file)[len - i - 1] = L'\0';
+ }
+
+ return 0;
+}
+
+
+int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
+ uv__handle_init(loop, (uv_handle_t*) handle, UV_FS_EVENT);
+ handle->dir_handle = INVALID_HANDLE_VALUE;
+ handle->buffer = NULL;
+ handle->req_pending = 0;
+ handle->filew = NULL;
+ handle->short_filew = NULL;
+ handle->dirw = NULL;
+
+ UV_REQ_INIT(&handle->req, UV_FS_EVENT_REQ);
+ handle->req.data = handle;
+
+ return 0;
+}
+
+
+int uv_fs_event_start(uv_fs_event_t* handle,
+ uv_fs_event_cb cb,
+ const char* path,
+ unsigned int flags) {
+ int name_size, is_path_dir, size;
+ DWORD attr, last_error;
+ WCHAR* dir = NULL, *dir_to_watch, *pathw = NULL;
+ DWORD short_path_buffer_len;
+ WCHAR *short_path_buffer;
+ WCHAR* short_path, *long_path;
+
+ short_path = NULL;
+ if (uv__is_active(handle))
+ return UV_EINVAL;
+
+ handle->cb = cb;
+ handle->path = uv__strdup(path);
+ if (!handle->path) {
+ uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+ }
+
+ uv__handle_start(handle);
+
+ /* Convert name to UTF16. */
+
+ name_size = MultiByteToWideChar(CP_UTF8, 0, path, -1, NULL, 0) *
+ sizeof(WCHAR);
+ pathw = (WCHAR*)uv__malloc(name_size);
+ if (!pathw) {
+ uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+ }
+
+ if (!MultiByteToWideChar(CP_UTF8,
+ 0,
+ path,
+ -1,
+ pathw,
+ name_size / sizeof(WCHAR))) {
+ return uv_translate_sys_error(GetLastError());
+ }
+
+ /* Determine whether path is a file or a directory. */
+ attr = GetFileAttributesW(pathw);
+ if (attr == INVALID_FILE_ATTRIBUTES) {
+ last_error = GetLastError();
+ goto error;
+ }
+
+ is_path_dir = (attr & FILE_ATTRIBUTE_DIRECTORY) ? 1 : 0;
+
+ if (is_path_dir) {
+ /* path is a directory, so that's the directory that we will watch. */
+
+ /* Convert to long path. */
+ size = GetLongPathNameW(pathw, NULL, 0);
+
+ if (size) {
+ long_path = (WCHAR*)uv__malloc(size * sizeof(WCHAR));
+ if (!long_path) {
+ uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+ }
+
+ size = GetLongPathNameW(pathw, long_path, size);
+ if (size) {
+ long_path[size] = '\0';
+ } else {
+ uv__free(long_path);
+ long_path = NULL;
+ }
+
+ if (long_path) {
+ uv__free(pathw);
+ pathw = long_path;
+ }
+ }
+
+ dir_to_watch = pathw;
+ } else {
+    /*
+     * path is a file, so we split it into directory and file parts and
+     * watch the containing directory.
+     */
+
+ /* Convert to short path. */
+ short_path_buffer = NULL;
+ short_path_buffer_len = GetShortPathNameW(pathw, NULL, 0);
+ if (short_path_buffer_len == 0) {
+ goto short_path_done;
+ }
+ short_path_buffer = uv__malloc(short_path_buffer_len * sizeof(WCHAR));
+ if (short_path_buffer == NULL) {
+ goto short_path_done;
+ }
+ if (GetShortPathNameW(pathw,
+ short_path_buffer,
+ short_path_buffer_len) == 0) {
+ uv__free(short_path_buffer);
+ short_path_buffer = NULL;
+ }
+short_path_done:
+ short_path = short_path_buffer;
+
+ if (uv_split_path(pathw, &dir, &handle->filew) != 0) {
+ last_error = GetLastError();
+ goto error;
+ }
+
+ if (uv_split_path(short_path, NULL, &handle->short_filew) != 0) {
+ last_error = GetLastError();
+ goto error;
+ }
+
+ dir_to_watch = dir;
+ uv__free(pathw);
+ pathw = NULL;
+ }
+
+ handle->dir_handle = CreateFileW(dir_to_watch,
+ FILE_LIST_DIRECTORY,
+ FILE_SHARE_READ | FILE_SHARE_DELETE |
+ FILE_SHARE_WRITE,
+ NULL,
+ OPEN_EXISTING,
+ FILE_FLAG_BACKUP_SEMANTICS |
+ FILE_FLAG_OVERLAPPED,
+ NULL);
+
+ if (dir) {
+ uv__free(dir);
+ dir = NULL;
+ }
+
+ if (handle->dir_handle == INVALID_HANDLE_VALUE) {
+ last_error = GetLastError();
+ goto error;
+ }
+
+ if (CreateIoCompletionPort(handle->dir_handle,
+ handle->loop->iocp,
+ (ULONG_PTR)handle,
+ 0) == NULL) {
+ last_error = GetLastError();
+ goto error;
+ }
+
+ if (!handle->buffer) {
+ handle->buffer = (char*)uv__malloc(uv_directory_watcher_buffer_size);
+ }
+ if (!handle->buffer) {
+ uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+ }
+
+ memset(&(handle->req.u.io.overlapped), 0,
+ sizeof(handle->req.u.io.overlapped));
+
+ if (!ReadDirectoryChangesW(handle->dir_handle,
+ handle->buffer,
+ uv_directory_watcher_buffer_size,
+ (flags & UV_FS_EVENT_RECURSIVE) ? TRUE : FALSE,
+ FILE_NOTIFY_CHANGE_FILE_NAME |
+ FILE_NOTIFY_CHANGE_DIR_NAME |
+ FILE_NOTIFY_CHANGE_ATTRIBUTES |
+ FILE_NOTIFY_CHANGE_SIZE |
+ FILE_NOTIFY_CHANGE_LAST_WRITE |
+ FILE_NOTIFY_CHANGE_LAST_ACCESS |
+ FILE_NOTIFY_CHANGE_CREATION |
+ FILE_NOTIFY_CHANGE_SECURITY,
+ NULL,
+ &handle->req.u.io.overlapped,
+ NULL)) {
+ last_error = GetLastError();
+ goto error;
+ }
+
+ assert(is_path_dir ? pathw != NULL : pathw == NULL);
+ handle->dirw = pathw;
+ handle->req_pending = 1;
+ return 0;
+
+error:
+ if (handle->path) {
+ uv__free(handle->path);
+ handle->path = NULL;
+ }
+
+ if (handle->filew) {
+ uv__free(handle->filew);
+ handle->filew = NULL;
+ }
+
+ if (handle->short_filew) {
+ uv__free(handle->short_filew);
+ handle->short_filew = NULL;
+ }
+
+ uv__free(pathw);
+
+ if (handle->dir_handle != INVALID_HANDLE_VALUE) {
+ CloseHandle(handle->dir_handle);
+ handle->dir_handle = INVALID_HANDLE_VALUE;
+ }
+
+ if (handle->buffer) {
+ uv__free(handle->buffer);
+ handle->buffer = NULL;
+ }
+
+ if (uv__is_active(handle))
+ uv__handle_stop(handle);
+
+ uv__free(short_path);
+
+ return uv_translate_sys_error(last_error);
+}
+
+
+int uv_fs_event_stop(uv_fs_event_t* handle) {
+ if (!uv__is_active(handle))
+ return 0;
+
+ if (handle->dir_handle != INVALID_HANDLE_VALUE) {
+ CloseHandle(handle->dir_handle);
+ handle->dir_handle = INVALID_HANDLE_VALUE;
+ }
+
+ uv__handle_stop(handle);
+
+ if (handle->filew) {
+ uv__free(handle->filew);
+ handle->filew = NULL;
+ }
+
+ if (handle->short_filew) {
+ uv__free(handle->short_filew);
+ handle->short_filew = NULL;
+ }
+
+ if (handle->path) {
+ uv__free(handle->path);
+ handle->path = NULL;
+ }
+
+ if (handle->dirw) {
+ uv__free(handle->dirw);
+ handle->dirw = NULL;
+ }
+
+ return 0;
+}
+
+
+static int file_info_cmp(WCHAR* str, WCHAR* file_name, size_t file_name_len) {
+ size_t str_len;
+
+ if (str == NULL)
+ return -1;
+
+ str_len = wcslen(str);
+
+ /*
+ Since we only care about equality, return early if the strings
+ aren't the same length
+ */
+ if (str_len != (file_name_len / sizeof(WCHAR)))
+ return -1;
+
+ return _wcsnicmp(str, file_name, str_len);
+}
+
+
+void uv_process_fs_event_req(uv_loop_t* loop, uv_req_t* req,
+ uv_fs_event_t* handle) {
+ FILE_NOTIFY_INFORMATION* file_info;
+ int err, sizew, size;
+ char* filename = NULL;
+ WCHAR* filenamew = NULL;
+ WCHAR* long_filenamew = NULL;
+ DWORD offset = 0;
+
+ assert(req->type == UV_FS_EVENT_REQ);
+ assert(handle->req_pending);
+ handle->req_pending = 0;
+
+ /* Don't report any callbacks if:
+ * - We're closing, just push the handle onto the endgame queue
+ * - We are not active, just ignore the callback
+ */
+ if (!uv__is_active(handle)) {
+ if (handle->flags & UV_HANDLE_CLOSING) {
+ uv_want_endgame(loop, (uv_handle_t*) handle);
+ }
+ return;
+ }
+
+ file_info = (FILE_NOTIFY_INFORMATION*)(handle->buffer + offset);
+
+ if (REQ_SUCCESS(req)) {
+ if (req->u.io.overlapped.InternalHigh > 0) {
+ do {
+ file_info = (FILE_NOTIFY_INFORMATION*)((char*)file_info + offset);
+ assert(!filename);
+ assert(!filenamew);
+ assert(!long_filenamew);
+
+ /*
+ * Fire the event only if we were asked to watch a directory,
+ * or if the filename filter matches.
+ */
+ if (handle->dirw ||
+ file_info_cmp(handle->filew,
+ file_info->FileName,
+ file_info->FileNameLength) == 0 ||
+ file_info_cmp(handle->short_filew,
+ file_info->FileName,
+ file_info->FileNameLength) == 0) {
+
+ if (handle->dirw) {
+ /*
+ * We attempt to resolve the long form of the file name explicitly.
+ * We only do this for file names that might still exist on disk.
+ * If this fails, we use the name given by ReadDirectoryChangesW.
+ * This may be the long form or the 8.3 short name in some cases.
+ */
+ if (file_info->Action != FILE_ACTION_REMOVED &&
+ file_info->Action != FILE_ACTION_RENAMED_OLD_NAME) {
+ /* Construct a full path to the file. */
+ size = wcslen(handle->dirw) +
+ file_info->FileNameLength / sizeof(WCHAR) + 2;
+
+ filenamew = (WCHAR*)uv__malloc(size * sizeof(WCHAR));
+ if (!filenamew) {
+ uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+ }
+
+ _snwprintf(filenamew, size, L"%s\\%.*s", handle->dirw,
+ file_info->FileNameLength / (DWORD)sizeof(WCHAR),
+ file_info->FileName);
+
+ filenamew[size - 1] = L'\0';
+
+ /* Convert to long name. */
+ size = GetLongPathNameW(filenamew, NULL, 0);
+
+ if (size) {
+ long_filenamew = (WCHAR*)uv__malloc(size * sizeof(WCHAR));
+ if (!long_filenamew) {
+ uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+ }
+
+ size = GetLongPathNameW(filenamew, long_filenamew, size);
+ if (size) {
+ long_filenamew[size] = '\0';
+ } else {
+ uv__free(long_filenamew);
+ long_filenamew = NULL;
+ }
+ }
+
+ uv__free(filenamew);
+
+ if (long_filenamew) {
+ /* Get the file name out of the long path. */
+ uv_relative_path(long_filenamew,
+ handle->dirw,
+ &filenamew);
+ uv__free(long_filenamew);
+ long_filenamew = filenamew;
+ sizew = -1;
+ } else {
+ /* We couldn't get the long filename, use the one reported. */
+ filenamew = file_info->FileName;
+ sizew = file_info->FileNameLength / sizeof(WCHAR);
+ }
+ } else {
+ /*
+ * Removed or renamed events cannot be resolved to the long form.
+ * We therefore use the name given by ReadDirectoryChangesW.
+ * This may be the long form or the 8.3 short name in some cases.
+ */
+ filenamew = file_info->FileName;
+ sizew = file_info->FileNameLength / sizeof(WCHAR);
+ }
+ } else {
+ /* We already have the long name of the file, so just use it. */
+ filenamew = handle->filew;
+ sizew = -1;
+ }
+
+ /* Convert the filename to utf8. */
+ uv__convert_utf16_to_utf8(filenamew, sizew, &filename);
+
+ switch (file_info->Action) {
+ case FILE_ACTION_ADDED:
+ case FILE_ACTION_REMOVED:
+ case FILE_ACTION_RENAMED_OLD_NAME:
+ case FILE_ACTION_RENAMED_NEW_NAME:
+ handle->cb(handle, filename, UV_RENAME, 0);
+ break;
+
+ case FILE_ACTION_MODIFIED:
+ handle->cb(handle, filename, UV_CHANGE, 0);
+ break;
+ }
+
+ uv__free(filename);
+ filename = NULL;
+ uv__free(long_filenamew);
+ long_filenamew = NULL;
+ filenamew = NULL;
+ }
+
+ offset = file_info->NextEntryOffset;
+ } while (offset && !(handle->flags & UV_HANDLE_CLOSING));
+ } else {
+ handle->cb(handle, NULL, UV_CHANGE, 0);
+ }
+ } else {
+ err = GET_REQ_ERROR(req);
+ handle->cb(handle, NULL, 0, uv_translate_sys_error(err));
+ }
+
+ if (!(handle->flags & UV_HANDLE_CLOSING)) {
+ uv_fs_event_queue_readdirchanges(loop, handle);
+ } else {
+ uv_want_endgame(loop, (uv_handle_t*)handle);
+ }
+}
+
+
+void uv_fs_event_close(uv_loop_t* loop, uv_fs_event_t* handle) {
+ uv_fs_event_stop(handle);
+
+ uv__handle_closing(handle);
+
+ if (!handle->req_pending) {
+ uv_want_endgame(loop, (uv_handle_t*)handle);
+ }
+
+}
+
+
+void uv_fs_event_endgame(uv_loop_t* loop, uv_fs_event_t* handle) {
+ if ((handle->flags & UV_HANDLE_CLOSING) && !handle->req_pending) {
+ assert(!(handle->flags & UV_HANDLE_CLOSED));
+
+ if (handle->buffer) {
+ uv__free(handle->buffer);
+ handle->buffer = NULL;
+ }
+
+ uv__handle_close(handle);
+ }
+}
diff --git a/Utilities/cmlibuv/src/win/fs-fd-hash-inl.h b/Utilities/cmlibuv/src/win/fs-fd-hash-inl.h
new file mode 100644
index 0000000..0b532af
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/fs-fd-hash-inl.h
@@ -0,0 +1,200 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_WIN_FS_FD_HASH_INL_H_
+#define UV_WIN_FS_FD_HASH_INL_H_
+
+#include "uv.h"
+#include "internal.h"
+
+/* Files are only inserted in uv__fd_hash when the UV_FS_O_FILEMAP flag is
+ * specified. Thus, when uv__fd_hash_get returns true, the file mapping in the
+ * info structure should be used for read/write operations.
+ *
+ * If the file is empty, the mapping field will be set to
+ * INVALID_HANDLE_VALUE. This is not an issue since the file mapping needs to
+ * be created anyway when the file size changes.
+ *
+ * Since file descriptors are sequential integers, the modulo operator is used
+ * as the hashing function. For each bucket, a singly linked list of entry
+ * arrays is kept to minimize allocations; a statically allocated buffer is
+ * reserved for the first array in each bucket. */
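+
+/* For illustration, with the sizes defined below (256 buckets, groups of 16
+ * entries): an fd of 300 hashes to bucket 300 % 256 == 44, and its entry is
+ * stored at some index 0..15 inside one of that bucket's entry groups. Only
+ * the first group of each bucket comes from the static buffer; further groups
+ * are heap-allocated as the bucket grows. */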
+
+
+#define UV__FD_HASH_SIZE 256
+#define UV__FD_HASH_GROUP_SIZE 16
+
+struct uv__fd_info_s {
+ int flags;
+ BOOLEAN is_directory;
+ HANDLE mapping;
+ LARGE_INTEGER size;
+ LARGE_INTEGER current_pos;
+};
+
+struct uv__fd_hash_entry_s {
+ uv_file fd;
+ struct uv__fd_info_s info;
+};
+
+struct uv__fd_hash_entry_group_s {
+ struct uv__fd_hash_entry_s entries[UV__FD_HASH_GROUP_SIZE];
+ struct uv__fd_hash_entry_group_s* next;
+};
+
+struct uv__fd_hash_bucket_s {
+ size_t size;
+ struct uv__fd_hash_entry_group_s* data;
+};
+
+
+static uv_mutex_t uv__fd_hash_mutex;
+
+static struct uv__fd_hash_entry_group_s
+ uv__fd_hash_entry_initial[UV__FD_HASH_SIZE * UV__FD_HASH_GROUP_SIZE];
+static struct uv__fd_hash_bucket_s uv__fd_hash[UV__FD_HASH_SIZE];
+
+
+INLINE static void uv__fd_hash_init(void) {
+ size_t i;
+ int err;
+
+ err = uv_mutex_init(&uv__fd_hash_mutex);
+ if (err) {
+ uv_fatal_error(err, "uv_mutex_init");
+ }
+
+ for (i = 0; i < ARRAY_SIZE(uv__fd_hash); ++i) {
+ uv__fd_hash[i].size = 0;
+ uv__fd_hash[i].data =
+ uv__fd_hash_entry_initial + i * UV__FD_HASH_GROUP_SIZE;
+ }
+}
+
+#define FIND_COMMON_VARIABLES \
+ unsigned i; \
+ unsigned bucket = fd % ARRAY_SIZE(uv__fd_hash); \
+ struct uv__fd_hash_entry_s* entry_ptr = NULL; \
+ struct uv__fd_hash_entry_group_s* group_ptr; \
+ struct uv__fd_hash_bucket_s* bucket_ptr = &uv__fd_hash[bucket];
+
+#define FIND_IN_GROUP_PTR(group_size) \
+ do { \
+ for (i = 0; i < group_size; ++i) { \
+ if (group_ptr->entries[i].fd == fd) { \
+ entry_ptr = &group_ptr->entries[i]; \
+ break; \
+ } \
+ } \
+ } while (0)
+
+#define FIND_IN_BUCKET_PTR() \
+ do { \
+ size_t first_group_size = bucket_ptr->size % UV__FD_HASH_GROUP_SIZE; \
+ if (bucket_ptr->size != 0 && first_group_size == 0) \
+ first_group_size = UV__FD_HASH_GROUP_SIZE; \
+ group_ptr = bucket_ptr->data; \
+ FIND_IN_GROUP_PTR(first_group_size); \
+ for (group_ptr = group_ptr->next; \
+ group_ptr != NULL && entry_ptr == NULL; \
+ group_ptr = group_ptr->next) \
+ FIND_IN_GROUP_PTR(UV__FD_HASH_GROUP_SIZE); \
+ } while (0)
+
+INLINE static int uv__fd_hash_get(int fd, struct uv__fd_info_s* info) {
+ FIND_COMMON_VARIABLES
+
+ uv_mutex_lock(&uv__fd_hash_mutex);
+
+ FIND_IN_BUCKET_PTR();
+
+ if (entry_ptr != NULL) {
+ *info = entry_ptr->info;
+ }
+
+ uv_mutex_unlock(&uv__fd_hash_mutex);
+ return entry_ptr != NULL;
+}
+
+INLINE static void uv__fd_hash_add(int fd, struct uv__fd_info_s* info) {
+ FIND_COMMON_VARIABLES
+
+ uv_mutex_lock(&uv__fd_hash_mutex);
+
+ FIND_IN_BUCKET_PTR();
+
+ if (entry_ptr == NULL) {
+ i = bucket_ptr->size % UV__FD_HASH_GROUP_SIZE;
+
+ if (bucket_ptr->size != 0 && i == 0) {
+ struct uv__fd_hash_entry_group_s* new_group_ptr =
+ uv__malloc(sizeof(*new_group_ptr));
+ if (new_group_ptr == NULL) {
+ uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+ }
+ new_group_ptr->next = bucket_ptr->data;
+ bucket_ptr->data = new_group_ptr;
+ }
+
+ bucket_ptr->size += 1;
+ entry_ptr = &bucket_ptr->data->entries[i];
+ entry_ptr->fd = fd;
+ }
+
+ entry_ptr->info = *info;
+
+ uv_mutex_unlock(&uv__fd_hash_mutex);
+}
+
+INLINE static int uv__fd_hash_remove(int fd, struct uv__fd_info_s* info) {
+ FIND_COMMON_VARIABLES
+
+ uv_mutex_lock(&uv__fd_hash_mutex);
+
+ FIND_IN_BUCKET_PTR();
+
+ if (entry_ptr != NULL) {
+ *info = entry_ptr->info;
+
+ bucket_ptr->size -= 1;
+
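+    /* Keep the bucket densely packed: the slot being removed is overwritten
+     * with the bucket's last entry (which lives in the head group of the
+     * list), and a heap-allocated head group is freed once it becomes empty.
+     * The statically allocated first group is never freed. */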
+ i = bucket_ptr->size % UV__FD_HASH_GROUP_SIZE;
+ if (entry_ptr != &bucket_ptr->data->entries[i]) {
+ *entry_ptr = bucket_ptr->data->entries[i];
+ }
+
+ if (bucket_ptr->size != 0 &&
+ bucket_ptr->size % UV__FD_HASH_GROUP_SIZE == 0) {
+ struct uv__fd_hash_entry_group_s* old_group_ptr = bucket_ptr->data;
+ bucket_ptr->data = old_group_ptr->next;
+ uv__free(old_group_ptr);
+ }
+ }
+
+ uv_mutex_unlock(&uv__fd_hash_mutex);
+ return entry_ptr != NULL;
+}
+
+#undef FIND_COMMON_VARIABLES
+#undef FIND_IN_GROUP_PTR
+#undef FIND_IN_BUCKET_PTR
+
+#endif /* UV_WIN_FS_FD_HASH_INL_H_ */
diff --git a/Utilities/cmlibuv/src/win/fs.c b/Utilities/cmlibuv/src/win/fs.c
new file mode 100644
index 0000000..8a80174
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/fs.c
@@ -0,0 +1,3428 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <direct.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <io.h>
+#include <limits.h>
+#include <sys/stat.h>
+#include <sys/utime.h>
+#include <stdio.h>
+
+#include "uv.h"
+#include "internal.h"
+#include "req-inl.h"
+#include "handle-inl.h"
+#include "fs-fd-hash-inl.h"
+
+
+#define UV_FS_FREE_PATHS 0x0002
+#define UV_FS_FREE_PTR 0x0008
+#define UV_FS_CLEANEDUP 0x0010
+
+
+#define INIT(subtype) \
+ do { \
+ if (req == NULL) \
+ return UV_EINVAL; \
+ uv_fs_req_init(loop, req, subtype, cb); \
+ } \
+ while (0)
+
+#define POST \
+ do { \
+ if (cb != NULL) { \
+ uv__req_register(loop, req); \
+ uv__work_submit(loop, \
+ &req->work_req, \
+ UV__WORK_FAST_IO, \
+ uv__fs_work, \
+ uv__fs_done); \
+ return 0; \
+ } else { \
+ uv__fs_work(&req->work_req); \
+ return req->result; \
+ } \
+ } \
+ while (0)
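+
+/* A caller-facing wrapper combines INIT, fs__capture_path and POST roughly as
+ * in the following sketch (uv_fs_unlink shown as an example; the actual
+ * wrappers appear further down in this file):
+ *
+ *   int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path,
+ *                    uv_fs_cb cb) {
+ *     int err;
+ *     INIT(UV_FS_UNLINK);
+ *     err = fs__capture_path(req, path, NULL, cb != NULL);
+ *     if (err) {
+ *       SET_REQ_WIN32_ERROR(req, err);
+ *       return req->result;
+ *     }
+ *     POST;
+ *   }
+ *
+ * With a callback, POST queues the request on the threadpool; without one,
+ * uv__fs_work() runs synchronously and the result is returned directly. */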
+
+#define SET_REQ_RESULT(req, result_value) \
+ do { \
+ req->result = (result_value); \
+ assert(req->result != -1); \
+ } while (0)
+
+#define SET_REQ_WIN32_ERROR(req, sys_errno) \
+ do { \
+ req->sys_errno_ = (sys_errno); \
+ req->result = uv_translate_sys_error(req->sys_errno_); \
+ } while (0)
+
+#define SET_REQ_UV_ERROR(req, uv_errno, sys_errno) \
+ do { \
+ req->result = (uv_errno); \
+ req->sys_errno_ = (sys_errno); \
+ } while (0)
+
+#define VERIFY_FD(fd, req) \
+ if (fd == -1) { \
+ req->result = UV_EBADF; \
+ req->sys_errno_ = ERROR_INVALID_HANDLE; \
+ return; \
+ }
+
+#define MILLIONu (1000U * 1000U)
+#define BILLIONu (1000U * 1000U * 1000U)
+
+#define FILETIME_TO_UINT(filetime) \
+ (*((uint64_t*) &(filetime)) - (uint64_t) 116444736 * BILLIONu)
+
+#define FILETIME_TO_TIME_T(filetime) \
+ (FILETIME_TO_UINT(filetime) / (10u * MILLIONu))
+
+#define FILETIME_TO_TIME_NS(filetime, secs) \
+ ((FILETIME_TO_UINT(filetime) - (secs * (uint64_t) 10 * MILLIONu)) * 100U)
+
+#define FILETIME_TO_TIMESPEC(ts, filetime) \
+ do { \
+ (ts).tv_sec = (long) FILETIME_TO_TIME_T(filetime); \
+ (ts).tv_nsec = (long) FILETIME_TO_TIME_NS(filetime, (ts).tv_sec); \
+ } while(0)
+
+#define TIME_T_TO_FILETIME(time, filetime_ptr) \
+ do { \
+ uint64_t bigtime = ((uint64_t) ((time) * (uint64_t) 10 * MILLIONu)) + \
+ (uint64_t) 116444736 * BILLIONu; \
+ (filetime_ptr)->dwLowDateTime = bigtime & 0xFFFFFFFF; \
+ (filetime_ptr)->dwHighDateTime = bigtime >> 32; \
+ } while(0)
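+
+/* The conversions above rely on the offset between the Windows FILETIME epoch
+ * (1601-01-01) and the Unix epoch (1970-01-01): 11644473600 seconds, i.e.
+ * 116444736 * 10^9 intervals of 100 ns. For example, a time_t of 0 maps to a
+ * FILETIME value of 116444736000000000, and converting that FILETIME back
+ * yields tv_sec == 0 and tv_nsec == 0. */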
+
+#define IS_SLASH(c) ((c) == L'\\' || (c) == L'/')
+#define IS_LETTER(c) (((c) >= L'a' && (c) <= L'z') || \
+ ((c) >= L'A' && (c) <= L'Z'))
+
+#define MIN(a,b) (((a) < (b)) ? (a) : (b))
+
+const WCHAR JUNCTION_PREFIX[] = L"\\??\\";
+const WCHAR JUNCTION_PREFIX_LEN = 4;
+
+const WCHAR LONG_PATH_PREFIX[] = L"\\\\?\\";
+const WCHAR LONG_PATH_PREFIX_LEN = 4;
+
+const WCHAR UNC_PATH_PREFIX[] = L"\\\\?\\UNC\\";
+const WCHAR UNC_PATH_PREFIX_LEN = 8;
+
+static int uv__file_symlink_usermode_flag = SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE;
+
+static DWORD uv__allocation_granularity;
+
+
+void uv_fs_init(void) {
+ SYSTEM_INFO system_info;
+
+ GetSystemInfo(&system_info);
+ uv__allocation_granularity = system_info.dwAllocationGranularity;
+
+ uv__fd_hash_init();
+}
+
+
+INLINE static int fs__capture_path(uv_fs_t* req, const char* path,
+ const char* new_path, const int copy_path) {
+ char* buf;
+ char* pos;
+ ssize_t buf_sz = 0, path_len = 0, pathw_len = 0, new_pathw_len = 0;
+
+ /* new_path can only be set if path is also set. */
+ assert(new_path == NULL || path != NULL);
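+
+  /* All captured strings are packed into one uv__malloc()'d buffer that is
+   * released as a whole when the request is cleaned up (UV_FS_FREE_PATHS):
+   *
+   *   [ pathw (UTF-16) ][ new_pathw (UTF-16) ][ optional copy of path (UTF-8) ]
+   *
+   * req->file.pathw and req->fs.info.new_pathw point into this buffer;
+   * req->path does too when copy_path is set, otherwise it aliases the
+   * caller's string. */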
+
+ if (path != NULL) {
+ pathw_len = MultiByteToWideChar(CP_UTF8,
+ 0,
+ path,
+ -1,
+ NULL,
+ 0);
+ if (pathw_len == 0) {
+ return GetLastError();
+ }
+
+ buf_sz += pathw_len * sizeof(WCHAR);
+ }
+
+ if (path != NULL && copy_path) {
+ path_len = 1 + strlen(path);
+ buf_sz += path_len;
+ }
+
+ if (new_path != NULL) {
+ new_pathw_len = MultiByteToWideChar(CP_UTF8,
+ 0,
+ new_path,
+ -1,
+ NULL,
+ 0);
+ if (new_pathw_len == 0) {
+ return GetLastError();
+ }
+
+ buf_sz += new_pathw_len * sizeof(WCHAR);
+ }
+
+
+ if (buf_sz == 0) {
+ req->file.pathw = NULL;
+ req->fs.info.new_pathw = NULL;
+ req->path = NULL;
+ return 0;
+ }
+
+ buf = (char*) uv__malloc(buf_sz);
+ if (buf == NULL) {
+ return ERROR_OUTOFMEMORY;
+ }
+
+ pos = buf;
+
+ if (path != NULL) {
+ DWORD r = MultiByteToWideChar(CP_UTF8,
+ 0,
+ path,
+ -1,
+ (WCHAR*) pos,
+ pathw_len);
+ assert(r == (DWORD) pathw_len);
+ req->file.pathw = (WCHAR*) pos;
+ pos += r * sizeof(WCHAR);
+ } else {
+ req->file.pathw = NULL;
+ }
+
+ if (new_path != NULL) {
+ DWORD r = MultiByteToWideChar(CP_UTF8,
+ 0,
+ new_path,
+ -1,
+ (WCHAR*) pos,
+ new_pathw_len);
+ assert(r == (DWORD) new_pathw_len);
+ req->fs.info.new_pathw = (WCHAR*) pos;
+ pos += r * sizeof(WCHAR);
+ } else {
+ req->fs.info.new_pathw = NULL;
+ }
+
+ req->path = path;
+ if (path != NULL && copy_path) {
+ memcpy(pos, path, path_len);
+ assert(path_len == buf_sz - (pos - buf));
+ req->path = pos;
+ }
+
+ req->flags |= UV_FS_FREE_PATHS;
+
+ return 0;
+}
+
+
+
+INLINE static void uv_fs_req_init(uv_loop_t* loop, uv_fs_t* req,
+ uv_fs_type fs_type, const uv_fs_cb cb) {
+ uv__once_init();
+ UV_REQ_INIT(req, UV_FS);
+ req->loop = loop;
+ req->flags = 0;
+ req->fs_type = fs_type;
+ req->sys_errno_ = 0;
+ req->result = 0;
+ req->ptr = NULL;
+ req->path = NULL;
+ req->cb = cb;
+ memset(&req->fs, 0, sizeof(req->fs));
+}
+
+
+static int fs__wide_to_utf8(WCHAR* w_source_ptr,
+ DWORD w_source_len,
+ char** target_ptr,
+ uint64_t* target_len_ptr) {
+ int r;
+ int target_len;
+ char* target;
+ target_len = WideCharToMultiByte(CP_UTF8,
+ 0,
+ w_source_ptr,
+ w_source_len,
+ NULL,
+ 0,
+ NULL,
+ NULL);
+
+ if (target_len == 0) {
+ return -1;
+ }
+
+ if (target_len_ptr != NULL) {
+ *target_len_ptr = target_len;
+ }
+
+ if (target_ptr == NULL) {
+ return 0;
+ }
+
+ target = uv__malloc(target_len + 1);
+ if (target == NULL) {
+ SetLastError(ERROR_OUTOFMEMORY);
+ return -1;
+ }
+
+ r = WideCharToMultiByte(CP_UTF8,
+ 0,
+ w_source_ptr,
+ w_source_len,
+ target,
+ target_len,
+ NULL,
+ NULL);
+ assert(r == target_len);
+ target[target_len] = '\0';
+ *target_ptr = target;
+ return 0;
+}
+
+
+INLINE static int fs__readlink_handle(HANDLE handle, char** target_ptr,
+ uint64_t* target_len_ptr) {
+ char buffer[MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
+ REPARSE_DATA_BUFFER* reparse_data = (REPARSE_DATA_BUFFER*) buffer;
+ WCHAR* w_target;
+ DWORD w_target_len;
+ DWORD bytes;
+ size_t i;
+ size_t len;
+
+ if (!DeviceIoControl(handle,
+ FSCTL_GET_REPARSE_POINT,
+ NULL,
+ 0,
+ buffer,
+ sizeof buffer,
+ &bytes,
+ NULL)) {
+ return -1;
+ }
+
+ if (reparse_data->ReparseTag == IO_REPARSE_TAG_SYMLINK) {
+ /* Real symlink */
+ w_target = reparse_data->SymbolicLinkReparseBuffer.PathBuffer +
+ (reparse_data->SymbolicLinkReparseBuffer.SubstituteNameOffset /
+ sizeof(WCHAR));
+ w_target_len =
+ reparse_data->SymbolicLinkReparseBuffer.SubstituteNameLength /
+ sizeof(WCHAR);
+
+ /* Real symlinks can contain pretty much everything, but the only thing we
+ * really care about is undoing the implicit conversion to an NT namespaced
+ * path that CreateSymbolicLink will perform on absolute paths. If the path
+ * is win32-namespaced then the user must have explicitly made it so, and
+ * we better just return the unmodified reparse data. */
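+    /* For example, a substitute name of \??\C:\foo is reported as C:\foo, and
+     * \??\UNC\server\share is reported as \\server\share; any other
+     * win32-namespaced target is returned unmodified. */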
+ if (w_target_len >= 4 &&
+ w_target[0] == L'\\' &&
+ w_target[1] == L'?' &&
+ w_target[2] == L'?' &&
+ w_target[3] == L'\\') {
+ /* Starts with \??\ */
+ if (w_target_len >= 6 &&
+ ((w_target[4] >= L'A' && w_target[4] <= L'Z') ||
+ (w_target[4] >= L'a' && w_target[4] <= L'z')) &&
+ w_target[5] == L':' &&
+ (w_target_len == 6 || w_target[6] == L'\\')) {
+ /* \??\<drive>:\ */
+ w_target += 4;
+ w_target_len -= 4;
+
+ } else if (w_target_len >= 8 &&
+ (w_target[4] == L'U' || w_target[4] == L'u') &&
+ (w_target[5] == L'N' || w_target[5] == L'n') &&
+ (w_target[6] == L'C' || w_target[6] == L'c') &&
+ w_target[7] == L'\\') {
+ /* \??\UNC\<server>\<share>\ - make sure the final path looks like
+ * \\<server>\<share>\ */
+ w_target += 6;
+ w_target[0] = L'\\';
+ w_target_len -= 6;
+ }
+ }
+
+ } else if (reparse_data->ReparseTag == IO_REPARSE_TAG_MOUNT_POINT) {
+ /* Junction. */
+ w_target = reparse_data->MountPointReparseBuffer.PathBuffer +
+ (reparse_data->MountPointReparseBuffer.SubstituteNameOffset /
+ sizeof(WCHAR));
+ w_target_len = reparse_data->MountPointReparseBuffer.SubstituteNameLength /
+ sizeof(WCHAR);
+
+ /* Only treat junctions that look like \??\<drive>:\ as symlink. Junctions
+ * can also be used as mount points, like \??\Volume{<guid>}, but that's
+ * confusing for programs since they wouldn't be able to actually
+ * understand such a path when returned by uv_readlink(). UNC paths are
+ * never valid for junctions so we don't care about them. */
+ if (!(w_target_len >= 6 &&
+ w_target[0] == L'\\' &&
+ w_target[1] == L'?' &&
+ w_target[2] == L'?' &&
+ w_target[3] == L'\\' &&
+ ((w_target[4] >= L'A' && w_target[4] <= L'Z') ||
+ (w_target[4] >= L'a' && w_target[4] <= L'z')) &&
+ w_target[5] == L':' &&
+ (w_target_len == 6 || w_target[6] == L'\\'))) {
+ SetLastError(ERROR_SYMLINK_NOT_SUPPORTED);
+ return -1;
+ }
+
+ /* Remove leading \??\ */
+ w_target += 4;
+ w_target_len -= 4;
+
+ } else if (reparse_data->ReparseTag == IO_REPARSE_TAG_APPEXECLINK) {
+ /* String #3 in the list has the target filename. */
+ if (reparse_data->AppExecLinkReparseBuffer.StringCount < 3) {
+ SetLastError(ERROR_SYMLINK_NOT_SUPPORTED);
+ return -1;
+ }
+ w_target = reparse_data->AppExecLinkReparseBuffer.StringList;
+ /* The StringList buffer contains a list of strings separated by "\0", */
+ /* with "\0\0" terminating the list. Move to the 3rd string in the list: */
+ for (i = 0; i < 2; ++i) {
+ len = wcslen(w_target);
+ if (len == 0) {
+ SetLastError(ERROR_SYMLINK_NOT_SUPPORTED);
+ return -1;
+ }
+ w_target += len + 1;
+ }
+ w_target_len = wcslen(w_target);
+ if (w_target_len == 0) {
+ SetLastError(ERROR_SYMLINK_NOT_SUPPORTED);
+ return -1;
+ }
+ /* Make sure it is an absolute path. */
+ if (!(w_target_len >= 3 &&
+ ((w_target[0] >= L'a' && w_target[0] <= L'z') ||
+ (w_target[0] >= L'A' && w_target[0] <= L'Z')) &&
+ w_target[1] == L':' &&
+ w_target[2] == L'\\')) {
+ SetLastError(ERROR_SYMLINK_NOT_SUPPORTED);
+ return -1;
+ }
+
+ } else {
+ /* Reparse tag does not indicate a symlink. */
+ SetLastError(ERROR_SYMLINK_NOT_SUPPORTED);
+ return -1;
+ }
+
+ return fs__wide_to_utf8(w_target, w_target_len, target_ptr, target_len_ptr);
+}
+
+
+void fs__open(uv_fs_t* req) {
+ DWORD access;
+ DWORD share;
+ DWORD disposition;
+ DWORD attributes = 0;
+ HANDLE file;
+ int fd, current_umask;
+ int flags = req->fs.info.file_flags;
+ struct uv__fd_info_s fd_info;
+
+ /* Adjust flags to be compatible with the memory file mapping. Save the
+ * original flags to emulate the correct behavior. */
+ if (flags & UV_FS_O_FILEMAP) {
+ fd_info.flags = flags;
+ fd_info.current_pos.QuadPart = 0;
+
+ if ((flags & (UV_FS_O_RDONLY | UV_FS_O_WRONLY | UV_FS_O_RDWR)) ==
+ UV_FS_O_WRONLY) {
+ /* CreateFileMapping always needs read access */
+ flags = (flags & ~UV_FS_O_WRONLY) | UV_FS_O_RDWR;
+ }
+
+ if (flags & UV_FS_O_APPEND) {
+      /* Clear the append flag and ensure RDWR mode */
+ flags &= ~UV_FS_O_APPEND;
+ flags &= ~(UV_FS_O_RDONLY | UV_FS_O_WRONLY | UV_FS_O_RDWR);
+ flags |= UV_FS_O_RDWR;
+ }
+ }
+
+ /* Obtain the active umask. umask() never fails and returns the previous
+ * umask. */
+ current_umask = umask(0);
+ umask(current_umask);
+
+ /* convert flags and mode to CreateFile parameters */
+ switch (flags & (UV_FS_O_RDONLY | UV_FS_O_WRONLY | UV_FS_O_RDWR)) {
+ case UV_FS_O_RDONLY:
+ access = FILE_GENERIC_READ;
+ break;
+ case UV_FS_O_WRONLY:
+ access = FILE_GENERIC_WRITE;
+ break;
+ case UV_FS_O_RDWR:
+ access = FILE_GENERIC_READ | FILE_GENERIC_WRITE;
+ break;
+ default:
+ goto einval;
+ }
+
+ if (flags & UV_FS_O_APPEND) {
+ access &= ~FILE_WRITE_DATA;
+ access |= FILE_APPEND_DATA;
+ }
+
+ /*
+ * Here is where we deviate significantly from what CRT's _open()
+ * does. We indiscriminately use all the sharing modes, to match
+ * UNIX semantics. In particular, this ensures that the file can
+ * be deleted even whilst it's open, fixing issue
+ * https://github.com/nodejs/node-v0.x-archive/issues/1449.
+ * We still support exclusive sharing mode, since it is necessary
+ * for opening raw block devices, otherwise Windows will prevent
+ * any attempt to write past the master boot record.
+ */
+ if (flags & UV_FS_O_EXLOCK) {
+ share = 0;
+ } else {
+ share = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
+ }
+
+ switch (flags & (UV_FS_O_CREAT | UV_FS_O_EXCL | UV_FS_O_TRUNC)) {
+ case 0:
+ case UV_FS_O_EXCL:
+ disposition = OPEN_EXISTING;
+ break;
+ case UV_FS_O_CREAT:
+ disposition = OPEN_ALWAYS;
+ break;
+ case UV_FS_O_CREAT | UV_FS_O_EXCL:
+ case UV_FS_O_CREAT | UV_FS_O_TRUNC | UV_FS_O_EXCL:
+ disposition = CREATE_NEW;
+ break;
+ case UV_FS_O_TRUNC:
+ case UV_FS_O_TRUNC | UV_FS_O_EXCL:
+ disposition = TRUNCATE_EXISTING;
+ break;
+ case UV_FS_O_CREAT | UV_FS_O_TRUNC:
+ disposition = CREATE_ALWAYS;
+ break;
+ default:
+ goto einval;
+ }
+
+ attributes |= FILE_ATTRIBUTE_NORMAL;
+ if (flags & UV_FS_O_CREAT) {
+ if (!((req->fs.info.mode & ~current_umask) & _S_IWRITE)) {
+ attributes |= FILE_ATTRIBUTE_READONLY;
+ }
+ }
+
+ if (flags & UV_FS_O_TEMPORARY ) {
+ attributes |= FILE_FLAG_DELETE_ON_CLOSE | FILE_ATTRIBUTE_TEMPORARY;
+ access |= DELETE;
+ }
+
+ if (flags & UV_FS_O_SHORT_LIVED) {
+ attributes |= FILE_ATTRIBUTE_TEMPORARY;
+ }
+
+ switch (flags & (UV_FS_O_SEQUENTIAL | UV_FS_O_RANDOM)) {
+ case 0:
+ break;
+ case UV_FS_O_SEQUENTIAL:
+ attributes |= FILE_FLAG_SEQUENTIAL_SCAN;
+ break;
+ case UV_FS_O_RANDOM:
+ attributes |= FILE_FLAG_RANDOM_ACCESS;
+ break;
+ default:
+ goto einval;
+ }
+
+ if (flags & UV_FS_O_DIRECT) {
+ /*
+ * FILE_APPEND_DATA and FILE_FLAG_NO_BUFFERING are mutually exclusive.
+ * Windows returns 87, ERROR_INVALID_PARAMETER if these are combined.
+ *
+ * FILE_APPEND_DATA is included in FILE_GENERIC_WRITE:
+ *
+ * FILE_GENERIC_WRITE = STANDARD_RIGHTS_WRITE |
+ * FILE_WRITE_DATA |
+ * FILE_WRITE_ATTRIBUTES |
+ * FILE_WRITE_EA |
+ * FILE_APPEND_DATA |
+ * SYNCHRONIZE
+ *
+ * Note: Appends are also permitted by FILE_WRITE_DATA.
+ *
+ * In order for direct writes and direct appends to succeed, we therefore
+ * exclude FILE_APPEND_DATA if FILE_WRITE_DATA is specified, and otherwise
+ * fail if the user's sole permission is a direct append, since this
+ * particular combination is invalid.
+ */
+ if (access & FILE_APPEND_DATA) {
+ if (access & FILE_WRITE_DATA) {
+ access &= ~FILE_APPEND_DATA;
+ } else {
+ goto einval;
+ }
+ }
+ attributes |= FILE_FLAG_NO_BUFFERING;
+ }
+
+ switch (flags & (UV_FS_O_DSYNC | UV_FS_O_SYNC)) {
+ case 0:
+ break;
+ case UV_FS_O_DSYNC:
+ case UV_FS_O_SYNC:
+ attributes |= FILE_FLAG_WRITE_THROUGH;
+ break;
+ default:
+ goto einval;
+ }
+
+ /* Setting this flag makes it possible to open a directory. */
+ attributes |= FILE_FLAG_BACKUP_SEMANTICS;
+
+ file = CreateFileW(req->file.pathw,
+ access,
+ share,
+ NULL,
+ disposition,
+ attributes,
+ NULL);
+ if (file == INVALID_HANDLE_VALUE) {
+ DWORD error = GetLastError();
+ if (error == ERROR_FILE_EXISTS && (flags & UV_FS_O_CREAT) &&
+ !(flags & UV_FS_O_EXCL)) {
+ /* Special case: when ERROR_FILE_EXISTS happens and UV_FS_O_CREAT was
+ * specified, it means the path referred to a directory. */
+ SET_REQ_UV_ERROR(req, UV_EISDIR, error);
+ } else {
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ }
+ return;
+ }
+
+ fd = _open_osfhandle((intptr_t) file, flags);
+ if (fd < 0) {
+ /* The only known failure mode for _open_osfhandle() is EMFILE, in which
+ * case GetLastError() will return zero. However we'll try to handle other
+ * errors as well, should they ever occur.
+ */
+ if (errno == EMFILE)
+ SET_REQ_UV_ERROR(req, UV_EMFILE, ERROR_TOO_MANY_OPEN_FILES);
+ else if (GetLastError() != ERROR_SUCCESS)
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ else
+ SET_REQ_WIN32_ERROR(req, (DWORD) UV_UNKNOWN);
+ CloseHandle(file);
+ return;
+ }
+
+ if (flags & UV_FS_O_FILEMAP) {
+ FILE_STANDARD_INFO file_info;
+ if (!GetFileInformationByHandleEx(file,
+ FileStandardInfo,
+ &file_info,
+ sizeof file_info)) {
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ CloseHandle(file);
+ return;
+ }
+ fd_info.is_directory = file_info.Directory;
+
+ if (fd_info.is_directory) {
+ fd_info.size.QuadPart = 0;
+ fd_info.mapping = INVALID_HANDLE_VALUE;
+ } else {
+ if (!GetFileSizeEx(file, &fd_info.size)) {
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ CloseHandle(file);
+ return;
+ }
+
+ if (fd_info.size.QuadPart == 0) {
+ fd_info.mapping = INVALID_HANDLE_VALUE;
+ } else {
+ DWORD flProtect = (fd_info.flags & (UV_FS_O_RDONLY | UV_FS_O_WRONLY |
+ UV_FS_O_RDWR)) == UV_FS_O_RDONLY ? PAGE_READONLY : PAGE_READWRITE;
+ fd_info.mapping = CreateFileMapping(file,
+ NULL,
+ flProtect,
+ fd_info.size.HighPart,
+ fd_info.size.LowPart,
+ NULL);
+ if (fd_info.mapping == NULL) {
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ CloseHandle(file);
+ return;
+ }
+ }
+ }
+
+ uv__fd_hash_add(fd, &fd_info);
+ }
+
+ SET_REQ_RESULT(req, fd);
+ return;
+
+ einval:
+ SET_REQ_UV_ERROR(req, UV_EINVAL, ERROR_INVALID_PARAMETER);
+}
+
+void fs__close(uv_fs_t* req) {
+ int fd = req->file.fd;
+ int result;
+ struct uv__fd_info_s fd_info;
+
+ VERIFY_FD(fd, req);
+
+ if (uv__fd_hash_remove(fd, &fd_info)) {
+ if (fd_info.mapping != INVALID_HANDLE_VALUE) {
+ CloseHandle(fd_info.mapping);
+ }
+ }
+
+ if (fd > 2)
+ result = _close(fd);
+ else
+ result = 0;
+
+  /* _close doesn't set _doserrno on failure, but it always sets errno to
+   * EBADF when it fails.
+   */
+ if (result == -1) {
+ assert(errno == EBADF);
+ SET_REQ_UV_ERROR(req, UV_EBADF, ERROR_INVALID_HANDLE);
+ } else {
+ SET_REQ_RESULT(req, 0);
+ }
+}
+
+
+LONG fs__filemap_ex_filter(LONG excode, PEXCEPTION_POINTERS pep,
+ int* perror) {
+ if (excode != (LONG)EXCEPTION_IN_PAGE_ERROR) {
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+ assert(perror != NULL);
+ if (pep != NULL && pep->ExceptionRecord != NULL &&
+ pep->ExceptionRecord->NumberParameters >= 3) {
+ NTSTATUS status = (NTSTATUS)pep->ExceptionRecord->ExceptionInformation[3];
+ *perror = pRtlNtStatusToDosError(status);
+ if (*perror != ERROR_SUCCESS) {
+ return EXCEPTION_EXECUTE_HANDLER;
+ }
+ }
+ *perror = UV_UNKNOWN;
+ return EXCEPTION_EXECUTE_HANDLER;
+}
+
+
+void fs__read_filemap(uv_fs_t* req, struct uv__fd_info_s* fd_info) {
+ int fd = req->file.fd; /* VERIFY_FD done in fs__read */
+ int rw_flags = fd_info->flags &
+ (UV_FS_O_RDONLY | UV_FS_O_WRONLY | UV_FS_O_RDWR);
+ size_t read_size, done_read;
+ unsigned int index;
+ LARGE_INTEGER pos, end_pos;
+ size_t view_offset;
+ LARGE_INTEGER view_base;
+ void* view;
+
+ if (rw_flags == UV_FS_O_WRONLY) {
+ SET_REQ_WIN32_ERROR(req, ERROR_ACCESS_DENIED);
+ return;
+ }
+ if (fd_info->is_directory) {
+ SET_REQ_WIN32_ERROR(req, ERROR_INVALID_FUNCTION);
+ return;
+ }
+
+ if (req->fs.info.offset == -1) {
+ pos = fd_info->current_pos;
+ } else {
+ pos.QuadPart = req->fs.info.offset;
+ }
+
+  /* Make sure we won't read past EOF. */
+ if (pos.QuadPart >= fd_info->size.QuadPart) {
+ SET_REQ_RESULT(req, 0);
+ return;
+ }
+
+ read_size = 0;
+ for (index = 0; index < req->fs.info.nbufs; ++index) {
+ read_size += req->fs.info.bufs[index].len;
+ }
+ read_size = (size_t) MIN((LONGLONG) read_size,
+ fd_info->size.QuadPart - pos.QuadPart);
+ if (read_size == 0) {
+ SET_REQ_RESULT(req, 0);
+ return;
+ }
+
+ end_pos.QuadPart = pos.QuadPart + read_size;
+
+ view_offset = pos.QuadPart % uv__allocation_granularity;
+ view_base.QuadPart = pos.QuadPart - view_offset;
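+  /* MapViewOfFile requires the mapped file offset to be a multiple of the
+   * system allocation granularity (commonly 64 KiB), so the view starts at
+   * the aligned base just below pos and the data is addressed through
+   * view_offset. For example, with a 64 KiB granularity a pos of 100000 gives
+   * a view base of 65536 and a view_offset of 34464. */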
+ view = MapViewOfFile(fd_info->mapping,
+ FILE_MAP_READ,
+ view_base.HighPart,
+ view_base.LowPart,
+ view_offset + read_size);
+ if (view == NULL) {
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ return;
+ }
+
+ done_read = 0;
+ for (index = 0;
+ index < req->fs.info.nbufs && done_read < read_size;
+ ++index) {
+ size_t this_read_size = MIN(req->fs.info.bufs[index].len,
+ read_size - done_read);
+#ifdef _MSC_VER
+ int err = 0;
+ __try {
+#endif
+ memcpy(req->fs.info.bufs[index].base,
+ (char*)view + view_offset + done_read,
+ this_read_size);
+#ifdef _MSC_VER
+ }
+ __except (fs__filemap_ex_filter(GetExceptionCode(),
+ GetExceptionInformation(), &err)) {
+ SET_REQ_WIN32_ERROR(req, err);
+ UnmapViewOfFile(view);
+ return;
+ }
+#endif
+ done_read += this_read_size;
+ }
+ assert(done_read == read_size);
+
+ if (!UnmapViewOfFile(view)) {
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ return;
+ }
+
+ if (req->fs.info.offset == -1) {
+ fd_info->current_pos = end_pos;
+ uv__fd_hash_add(fd, fd_info);
+ }
+
+ SET_REQ_RESULT(req, read_size);
+ return;
+}
+
+void fs__read(uv_fs_t* req) {
+ int fd = req->file.fd;
+ int64_t offset = req->fs.info.offset;
+ HANDLE handle;
+ OVERLAPPED overlapped, *overlapped_ptr;
+ LARGE_INTEGER offset_;
+ DWORD bytes;
+ DWORD error;
+ int result;
+ unsigned int index;
+ LARGE_INTEGER original_position;
+ LARGE_INTEGER zero_offset;
+ int restore_position;
+ struct uv__fd_info_s fd_info;
+
+ VERIFY_FD(fd, req);
+
+ if (uv__fd_hash_get(fd, &fd_info)) {
+ fs__read_filemap(req, &fd_info);
+ return;
+ }
+
+ zero_offset.QuadPart = 0;
+ restore_position = 0;
+ handle = uv__get_osfhandle(fd);
+
+ if (handle == INVALID_HANDLE_VALUE) {
+ SET_REQ_WIN32_ERROR(req, ERROR_INVALID_HANDLE);
+ return;
+ }
+
+ if (offset != -1) {
+ memset(&overlapped, 0, sizeof overlapped);
+ overlapped_ptr = &overlapped;
+ if (SetFilePointerEx(handle, zero_offset, &original_position,
+ FILE_CURRENT)) {
+ restore_position = 1;
+ }
+ } else {
+ overlapped_ptr = NULL;
+ }
+
+ index = 0;
+ bytes = 0;
+ do {
+ DWORD incremental_bytes;
+
+ if (offset != -1) {
+ offset_.QuadPart = offset + bytes;
+ overlapped.Offset = offset_.LowPart;
+ overlapped.OffsetHigh = offset_.HighPart;
+ }
+
+ result = ReadFile(handle,
+ req->fs.info.bufs[index].base,
+ req->fs.info.bufs[index].len,
+ &incremental_bytes,
+ overlapped_ptr);
+ bytes += incremental_bytes;
+ ++index;
+ } while (result && index < req->fs.info.nbufs);
+
+ if (restore_position)
+ SetFilePointerEx(handle, original_position, NULL, FILE_BEGIN);
+
+ if (result || bytes > 0) {
+ SET_REQ_RESULT(req, bytes);
+ } else {
+ error = GetLastError();
+ if (error == ERROR_HANDLE_EOF) {
+ SET_REQ_RESULT(req, bytes);
+ } else {
+ SET_REQ_WIN32_ERROR(req, error);
+ }
+ }
+}
+
+
+void fs__write_filemap(uv_fs_t* req, HANDLE file,
+ struct uv__fd_info_s* fd_info) {
+ int fd = req->file.fd; /* VERIFY_FD done in fs__write */
+ int force_append = fd_info->flags & UV_FS_O_APPEND;
+ int rw_flags = fd_info->flags &
+ (UV_FS_O_RDONLY | UV_FS_O_WRONLY | UV_FS_O_RDWR);
+ size_t write_size, done_write;
+ unsigned int index;
+ LARGE_INTEGER pos, end_pos;
+ size_t view_offset;
+ LARGE_INTEGER view_base;
+ void* view;
+ FILETIME ft;
+
+ if (rw_flags == UV_FS_O_RDONLY) {
+ SET_REQ_WIN32_ERROR(req, ERROR_ACCESS_DENIED);
+ return;
+ }
+ if (fd_info->is_directory) {
+ SET_REQ_WIN32_ERROR(req, ERROR_INVALID_FUNCTION);
+ return;
+ }
+
+ write_size = 0;
+ for (index = 0; index < req->fs.info.nbufs; ++index) {
+ write_size += req->fs.info.bufs[index].len;
+ }
+
+ if (write_size == 0) {
+ SET_REQ_RESULT(req, 0);
+ return;
+ }
+
+ if (force_append) {
+ pos = fd_info->size;
+ } else if (req->fs.info.offset == -1) {
+ pos = fd_info->current_pos;
+ } else {
+ pos.QuadPart = req->fs.info.offset;
+ }
+
+ end_pos.QuadPart = pos.QuadPart + write_size;
+
+ /* Recreate the mapping to enlarge the file if needed */
+ if (end_pos.QuadPart > fd_info->size.QuadPart) {
+ if (fd_info->mapping != INVALID_HANDLE_VALUE) {
+ CloseHandle(fd_info->mapping);
+ }
+
+ fd_info->mapping = CreateFileMapping(file,
+ NULL,
+ PAGE_READWRITE,
+ end_pos.HighPart,
+ end_pos.LowPart,
+ NULL);
+ if (fd_info->mapping == NULL) {
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ CloseHandle(file);
+ fd_info->mapping = INVALID_HANDLE_VALUE;
+ fd_info->size.QuadPart = 0;
+ fd_info->current_pos.QuadPart = 0;
+ uv__fd_hash_add(fd, fd_info);
+ return;
+ }
+
+ fd_info->size = end_pos;
+ uv__fd_hash_add(fd, fd_info);
+ }
+
+ view_offset = pos.QuadPart % uv__allocation_granularity;
+ view_base.QuadPart = pos.QuadPart - view_offset;
+ view = MapViewOfFile(fd_info->mapping,
+ FILE_MAP_WRITE,
+ view_base.HighPart,
+ view_base.LowPart,
+ view_offset + write_size);
+ if (view == NULL) {
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ return;
+ }
+
+ done_write = 0;
+ for (index = 0; index < req->fs.info.nbufs; ++index) {
+#ifdef _MSC_VER
+ int err = 0;
+ __try {
+#endif
+ memcpy((char*)view + view_offset + done_write,
+ req->fs.info.bufs[index].base,
+ req->fs.info.bufs[index].len);
+#ifdef _MSC_VER
+ }
+ __except (fs__filemap_ex_filter(GetExceptionCode(),
+ GetExceptionInformation(), &err)) {
+ SET_REQ_WIN32_ERROR(req, err);
+ UnmapViewOfFile(view);
+ return;
+ }
+#endif
+ done_write += req->fs.info.bufs[index].len;
+ }
+ assert(done_write == write_size);
+
+ if (!FlushViewOfFile(view, 0)) {
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ UnmapViewOfFile(view);
+ return;
+ }
+ if (!UnmapViewOfFile(view)) {
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ return;
+ }
+
+ if (req->fs.info.offset == -1) {
+ fd_info->current_pos = end_pos;
+ uv__fd_hash_add(fd, fd_info);
+ }
+
+ GetSystemTimeAsFileTime(&ft);
+ SetFileTime(file, NULL, NULL, &ft);
+
+ SET_REQ_RESULT(req, done_write);
+}
+
+void fs__write(uv_fs_t* req) {
+ int fd = req->file.fd;
+ int64_t offset = req->fs.info.offset;
+ HANDLE handle;
+ OVERLAPPED overlapped, *overlapped_ptr;
+ LARGE_INTEGER offset_;
+ DWORD bytes;
+ int result;
+ unsigned int index;
+ LARGE_INTEGER original_position;
+ LARGE_INTEGER zero_offset;
+ int restore_position;
+ struct uv__fd_info_s fd_info;
+
+ VERIFY_FD(fd, req);
+
+ zero_offset.QuadPart = 0;
+ restore_position = 0;
+ handle = uv__get_osfhandle(fd);
+ if (handle == INVALID_HANDLE_VALUE) {
+ SET_REQ_WIN32_ERROR(req, ERROR_INVALID_HANDLE);
+ return;
+ }
+
+ if (uv__fd_hash_get(fd, &fd_info)) {
+ fs__write_filemap(req, handle, &fd_info);
+ return;
+ }
+
+ if (offset != -1) {
+ memset(&overlapped, 0, sizeof overlapped);
+ overlapped_ptr = &overlapped;
+ if (SetFilePointerEx(handle, zero_offset, &original_position,
+ FILE_CURRENT)) {
+ restore_position = 1;
+ }
+ } else {
+ overlapped_ptr = NULL;
+ }
+
+ index = 0;
+ bytes = 0;
+ do {
+ DWORD incremental_bytes;
+
+ if (offset != -1) {
+ offset_.QuadPart = offset + bytes;
+ overlapped.Offset = offset_.LowPart;
+ overlapped.OffsetHigh = offset_.HighPart;
+ }
+
+ result = WriteFile(handle,
+ req->fs.info.bufs[index].base,
+ req->fs.info.bufs[index].len,
+ &incremental_bytes,
+ overlapped_ptr);
+ bytes += incremental_bytes;
+ ++index;
+ } while (result && index < req->fs.info.nbufs);
+
+ if (restore_position)
+ SetFilePointerEx(handle, original_position, NULL, FILE_BEGIN);
+
+ if (result || bytes > 0) {
+ SET_REQ_RESULT(req, bytes);
+ } else {
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ }
+}
+
+
+void fs__rmdir(uv_fs_t* req) {
+ int result = _wrmdir(req->file.pathw);
+ if (result == -1)
+ SET_REQ_WIN32_ERROR(req, _doserrno);
+ else
+ SET_REQ_RESULT(req, 0);
+}
+
+
+void fs__unlink(uv_fs_t* req) {
+ const WCHAR* pathw = req->file.pathw;
+ HANDLE handle;
+ BY_HANDLE_FILE_INFORMATION info;
+ FILE_DISPOSITION_INFORMATION disposition;
+ IO_STATUS_BLOCK iosb;
+ NTSTATUS status;
+
+ handle = CreateFileW(pathw,
+ FILE_READ_ATTRIBUTES | FILE_WRITE_ATTRIBUTES | DELETE,
+ FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
+ NULL,
+ OPEN_EXISTING,
+ FILE_FLAG_OPEN_REPARSE_POINT | FILE_FLAG_BACKUP_SEMANTICS,
+ NULL);
+
+ if (handle == INVALID_HANDLE_VALUE) {
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ return;
+ }
+
+ if (!GetFileInformationByHandle(handle, &info)) {
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ CloseHandle(handle);
+ return;
+ }
+
+ if (info.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) {
+    /* Do not allow deletion of directories unless the directory is actually a
+     * symlink. When the path refers to a non-symlink directory, report EPERM
+     * as mandated by POSIX.1. */
+
+ /* Check if it is a reparse point. If it's not, it's a normal directory. */
+ if (!(info.dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT)) {
+ SET_REQ_WIN32_ERROR(req, ERROR_ACCESS_DENIED);
+ CloseHandle(handle);
+ return;
+ }
+
+ /* Read the reparse point and check if it is a valid symlink. If not, don't
+ * unlink. */
+ if (fs__readlink_handle(handle, NULL, NULL) < 0) {
+ DWORD error = GetLastError();
+ if (error == ERROR_SYMLINK_NOT_SUPPORTED)
+ error = ERROR_ACCESS_DENIED;
+ SET_REQ_WIN32_ERROR(req, error);
+ CloseHandle(handle);
+ return;
+ }
+ }
+
+ if (info.dwFileAttributes & FILE_ATTRIBUTE_READONLY) {
+ /* Remove read-only attribute */
+ FILE_BASIC_INFORMATION basic = { 0 };
+
+ basic.FileAttributes = (info.dwFileAttributes & ~FILE_ATTRIBUTE_READONLY) |
+ FILE_ATTRIBUTE_ARCHIVE;
+
+ status = pNtSetInformationFile(handle,
+ &iosb,
+ &basic,
+ sizeof basic,
+ FileBasicInformation);
+ if (!NT_SUCCESS(status)) {
+ SET_REQ_WIN32_ERROR(req, pRtlNtStatusToDosError(status));
+ CloseHandle(handle);
+ return;
+ }
+ }
+
+ /* Try to set the delete flag. */
+ disposition.DeleteFile = TRUE;
+ status = pNtSetInformationFile(handle,
+ &iosb,
+ &disposition,
+ sizeof disposition,
+ FileDispositionInformation);
+ if (NT_SUCCESS(status)) {
+ SET_REQ_SUCCESS(req);
+ } else {
+ SET_REQ_WIN32_ERROR(req, pRtlNtStatusToDosError(status));
+ }
+
+ CloseHandle(handle);
+}
+
+
+void fs__mkdir(uv_fs_t* req) {
+ /* TODO: use req->mode. */
+ if (CreateDirectoryW(req->file.pathw, NULL)) {
+ SET_REQ_RESULT(req, 0);
+ } else {
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ if (req->sys_errno_ == ERROR_INVALID_NAME)
+ req->result = UV_EINVAL;
+ }
+}
+
+typedef int (*uv__fs_mktemp_func)(uv_fs_t* req);
+
+/* OpenBSD original: lib/libc/stdio/mktemp.c */
+void fs__mktemp(uv_fs_t* req, uv__fs_mktemp_func func) {
+ static const WCHAR *tempchars =
+ L"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
+ static const size_t num_chars = 62;
+ static const size_t num_x = 6;
+ WCHAR *cp, *ep;
+ unsigned int tries, i;
+ size_t len;
+ uint64_t v;
+ char* path;
+
+ path = req->path;
+ len = wcslen(req->file.pathw);
+ ep = req->file.pathw + len;
+ if (len < num_x || wcsncmp(ep - num_x, L"XXXXXX", num_x)) {
+ SET_REQ_UV_ERROR(req, UV_EINVAL, ERROR_INVALID_PARAMETER);
+ goto clobber;
+ }
+
+ tries = TMP_MAX;
+ do {
+ if (uv__random_rtlgenrandom((void *)&v, sizeof(v)) < 0) {
+ SET_REQ_UV_ERROR(req, UV_EIO, ERROR_IO_DEVICE);
+ goto clobber;
+ }
+
+ cp = ep - num_x;
+ for (i = 0; i < num_x; i++) {
+ *cp++ = tempchars[v % num_chars];
+ v /= num_chars;
+ }
+
+ if (func(req)) {
+ if (req->result >= 0) {
+ len = strlen(path);
+ wcstombs(path + len - num_x, ep - num_x, num_x);
+ }
+ return;
+ }
+ } while (--tries);
+
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+
+clobber:
+ path[0] = '\0';
+}
+
+
+static int fs__mkdtemp_func(uv_fs_t* req) {
+ DWORD error;
+ if (CreateDirectoryW(req->file.pathw, NULL)) {
+ SET_REQ_RESULT(req, 0);
+ return 1;
+ }
+ error = GetLastError();
+ if (error != ERROR_ALREADY_EXISTS) {
+ SET_REQ_WIN32_ERROR(req, error);
+ return 1;
+ }
+
+ return 0;
+}
+
+
+void fs__mkdtemp(uv_fs_t* req) {
+ fs__mktemp(req, fs__mkdtemp_func);
+}
+
+
+static int fs__mkstemp_func(uv_fs_t* req) {
+ HANDLE file;
+ int fd;
+
+ file = CreateFileW(req->file.pathw,
+ GENERIC_READ | GENERIC_WRITE,
+ FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
+ NULL,
+ CREATE_NEW,
+ FILE_ATTRIBUTE_NORMAL,
+ NULL);
+
+ if (file == INVALID_HANDLE_VALUE) {
+ DWORD error;
+ error = GetLastError();
+
+    /* If the file exists, the main fs__mktemp() function will retry. If it's
+     * another error, we want to stop. */
+ if (error != ERROR_FILE_EXISTS) {
+ SET_REQ_WIN32_ERROR(req, error);
+ return 1;
+ }
+
+ return 0;
+ }
+
+ fd = _open_osfhandle((intptr_t) file, 0);
+ if (fd < 0) {
+ /* The only known failure mode for _open_osfhandle() is EMFILE, in which
+ * case GetLastError() will return zero. However we'll try to handle other
+ * errors as well, should they ever occur.
+ */
+ if (errno == EMFILE)
+ SET_REQ_UV_ERROR(req, UV_EMFILE, ERROR_TOO_MANY_OPEN_FILES);
+ else if (GetLastError() != ERROR_SUCCESS)
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ else
+ SET_REQ_WIN32_ERROR(req, UV_UNKNOWN);
+ CloseHandle(file);
+ return 1;
+ }
+
+ SET_REQ_RESULT(req, fd);
+
+ return 1;
+}
+
+
+void fs__mkstemp(uv_fs_t* req) {
+ fs__mktemp(req, fs__mkstemp_func);
+}
+
+
+void fs__scandir(uv_fs_t* req) {
+ static const size_t dirents_initial_size = 32;
+
+ HANDLE dir_handle = INVALID_HANDLE_VALUE;
+
+ uv__dirent_t** dirents = NULL;
+ size_t dirents_size = 0;
+ size_t dirents_used = 0;
+
+ IO_STATUS_BLOCK iosb;
+ NTSTATUS status;
+
+ /* Buffer to hold directory entries returned by NtQueryDirectoryFile.
+ * It's important that this buffer can hold at least one entry, regardless
+ * of the length of the file names present in the enumerated directory.
+ * A file name is at most 256 WCHARs long.
+ * According to MSDN, the buffer must be aligned at an 8-byte boundary.
+ */
+#if _MSC_VER
+ __declspec(align(8)) char buffer[8192];
+#else
+ __attribute__ ((aligned (8))) char buffer[8192];
+#endif
+
+ STATIC_ASSERT(sizeof buffer >=
+ sizeof(FILE_DIRECTORY_INFORMATION) + 256 * sizeof(WCHAR));
+
+ /* Open the directory. */
+ dir_handle =
+ CreateFileW(req->file.pathw,
+ FILE_LIST_DIRECTORY | SYNCHRONIZE,
+ FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
+ NULL,
+ OPEN_EXISTING,
+ FILE_FLAG_BACKUP_SEMANTICS,
+ NULL);
+ if (dir_handle == INVALID_HANDLE_VALUE)
+ goto win32_error;
+
+ /* Read the first chunk. */
+ status = pNtQueryDirectoryFile(dir_handle,
+ NULL,
+ NULL,
+ NULL,
+ &iosb,
+ &buffer,
+ sizeof buffer,
+ FileDirectoryInformation,
+ FALSE,
+ NULL,
+ TRUE);
+
+ /* If the handle is not a directory, we'll get STATUS_INVALID_PARAMETER.
+ * This should be reported back as UV_ENOTDIR.
+ */
+ if (status == (NTSTATUS)STATUS_INVALID_PARAMETER)
+ goto not_a_directory_error;
+
+ while (NT_SUCCESS(status)) {
+ char* position = buffer;
+ size_t next_entry_offset = 0;
+
+ do {
+ FILE_DIRECTORY_INFORMATION* info;
+ uv__dirent_t* dirent;
+
+ size_t wchar_len;
+ size_t utf8_len;
+
+ /* Obtain a pointer to the current directory entry. */
+ position += next_entry_offset;
+ info = (FILE_DIRECTORY_INFORMATION*) position;
+
+ /* Fetch the offset to the next directory entry. */
+ next_entry_offset = info->NextEntryOffset;
+
+ /* Compute the length of the filename in WCHARs. */
+ wchar_len = info->FileNameLength / sizeof info->FileName[0];
+
+ /* Skip over '.' and '..' entries. It has been reported that
+ * the SharePoint driver includes the terminating zero byte in
+ * the filename length. Strip those first.
+ */
+ while (wchar_len > 0 && info->FileName[wchar_len - 1] == L'\0')
+ wchar_len -= 1;
+
+ if (wchar_len == 0)
+ continue;
+ if (wchar_len == 1 && info->FileName[0] == L'.')
+ continue;
+ if (wchar_len == 2 && info->FileName[0] == L'.' &&
+ info->FileName[1] == L'.')
+ continue;
+
+ /* Compute the space required to store the filename as UTF-8. */
+ utf8_len = WideCharToMultiByte(
+ CP_UTF8, 0, &info->FileName[0], wchar_len, NULL, 0, NULL, NULL);
+ if (utf8_len == 0)
+ goto win32_error;
+
+ /* Resize the dirent array if needed. */
+ if (dirents_used >= dirents_size) {
+ size_t new_dirents_size =
+ dirents_size == 0 ? dirents_initial_size : dirents_size << 1;
+ uv__dirent_t** new_dirents =
+ uv__realloc(dirents, new_dirents_size * sizeof *dirents);
+
+ if (new_dirents == NULL)
+ goto out_of_memory_error;
+
+ dirents_size = new_dirents_size;
+ dirents = new_dirents;
+ }
+
+ /* Allocate space for the uv dirent structure. The dirent structure
+ * includes room for the first character of the filename, but `utf8_len`
+ * doesn't count the NULL terminator at this point.
+ */
+ dirent = uv__malloc(sizeof *dirent + utf8_len);
+ if (dirent == NULL)
+ goto out_of_memory_error;
+
+ dirents[dirents_used++] = dirent;
+
+ /* Convert file name to UTF-8. */
+ if (WideCharToMultiByte(CP_UTF8,
+ 0,
+ &info->FileName[0],
+ wchar_len,
+ &dirent->d_name[0],
+ utf8_len,
+ NULL,
+ NULL) == 0)
+ goto win32_error;
+
+ /* Add a null terminator to the filename. */
+ dirent->d_name[utf8_len] = '\0';
+
+ /* Fill out the type field. */
+ if (info->FileAttributes & FILE_ATTRIBUTE_DEVICE)
+ dirent->d_type = UV__DT_CHAR;
+ else if (info->FileAttributes & FILE_ATTRIBUTE_REPARSE_POINT)
+ dirent->d_type = UV__DT_LINK;
+ else if (info->FileAttributes & FILE_ATTRIBUTE_DIRECTORY)
+ dirent->d_type = UV__DT_DIR;
+ else
+ dirent->d_type = UV__DT_FILE;
+ } while (next_entry_offset != 0);
+
+ /* Read the next chunk. */
+ status = pNtQueryDirectoryFile(dir_handle,
+ NULL,
+ NULL,
+ NULL,
+ &iosb,
+ &buffer,
+ sizeof buffer,
+ FileDirectoryInformation,
+ FALSE,
+ NULL,
+ FALSE);
+
+ /* After the first pNtQueryDirectoryFile call, the function may return
+ * STATUS_SUCCESS even if the buffer was too small to hold at least one
+ * directory entry.
+ */
+ if (status == STATUS_SUCCESS && iosb.Information == 0)
+ status = STATUS_BUFFER_OVERFLOW;
+ }
+
+ if (status != STATUS_NO_MORE_FILES)
+ goto nt_error;
+
+ CloseHandle(dir_handle);
+
+ /* Store the result in the request object. */
+ req->ptr = dirents;
+ if (dirents != NULL)
+ req->flags |= UV_FS_FREE_PTR;
+
+ SET_REQ_RESULT(req, dirents_used);
+
+  /* `nbufs` will be used as an index by uv_fs_scandir_next. */
+ req->fs.info.nbufs = 0;
+
+ return;
+
+nt_error:
+ SET_REQ_WIN32_ERROR(req, pRtlNtStatusToDosError(status));
+ goto cleanup;
+
+win32_error:
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ goto cleanup;
+
+not_a_directory_error:
+ SET_REQ_UV_ERROR(req, UV_ENOTDIR, ERROR_DIRECTORY);
+ goto cleanup;
+
+out_of_memory_error:
+ SET_REQ_UV_ERROR(req, UV_ENOMEM, ERROR_OUTOFMEMORY);
+ goto cleanup;
+
+cleanup:
+ if (dir_handle != INVALID_HANDLE_VALUE)
+ CloseHandle(dir_handle);
+ while (dirents_used > 0)
+ uv__free(dirents[--dirents_used]);
+ if (dirents != NULL)
+ uv__free(dirents);
+}
+
+void fs__opendir(uv_fs_t* req) {
+ WCHAR* pathw;
+ size_t len;
+ const WCHAR* fmt;
+ WCHAR* find_path;
+ uv_dir_t* dir;
+
+ pathw = req->file.pathw;
+ dir = NULL;
+ find_path = NULL;
+
+ /* Figure out whether path is a file or a directory. */
+ if (!(GetFileAttributesW(pathw) & FILE_ATTRIBUTE_DIRECTORY)) {
+ SET_REQ_UV_ERROR(req, UV_ENOTDIR, ERROR_DIRECTORY);
+ goto error;
+ }
+
+ dir = uv__malloc(sizeof(*dir));
+ if (dir == NULL) {
+ SET_REQ_UV_ERROR(req, UV_ENOMEM, ERROR_OUTOFMEMORY);
+ goto error;
+ }
+
+ len = wcslen(pathw);
+
+ if (len == 0)
+ fmt = L"./*";
+ else if (IS_SLASH(pathw[len - 1]))
+ fmt = L"%s*";
+ else
+ fmt = L"%s\\*";
+
+ find_path = uv__malloc(sizeof(WCHAR) * (len + 4));
+ if (find_path == NULL) {
+ SET_REQ_UV_ERROR(req, UV_ENOMEM, ERROR_OUTOFMEMORY);
+ goto error;
+ }
+
+ _snwprintf(find_path, len + 3, fmt, pathw);
+ dir->dir_handle = FindFirstFileW(find_path, &dir->find_data);
+ uv__free(find_path);
+ find_path = NULL;
+ if (dir->dir_handle == INVALID_HANDLE_VALUE &&
+ GetLastError() != ERROR_FILE_NOT_FOUND) {
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ goto error;
+ }
+
+ dir->need_find_call = FALSE;
+ req->ptr = dir;
+ SET_REQ_RESULT(req, 0);
+ return;
+
+error:
+ uv__free(dir);
+ uv__free(find_path);
+ req->ptr = NULL;
+}
+
+void fs__readdir(uv_fs_t* req) {
+ uv_dir_t* dir;
+ uv_dirent_t* dirents;
+ uv__dirent_t dent;
+ unsigned int dirent_idx;
+ PWIN32_FIND_DATAW find_data;
+ unsigned int i;
+ int r;
+
+ req->flags |= UV_FS_FREE_PTR;
+ dir = req->ptr;
+ dirents = dir->dirents;
+ memset(dirents, 0, dir->nentries * sizeof(*dir->dirents));
+ find_data = &dir->find_data;
+ dirent_idx = 0;
+
+ while (dirent_idx < dir->nentries) {
+ if (dir->need_find_call && FindNextFileW(dir->dir_handle, find_data) == 0) {
+ if (GetLastError() == ERROR_NO_MORE_FILES)
+ break;
+ goto error;
+ }
+
+ /* Skip "." and ".." entries. */
+ if (find_data->cFileName[0] == L'.' &&
+ (find_data->cFileName[1] == L'\0' ||
+ (find_data->cFileName[1] == L'.' &&
+ find_data->cFileName[2] == L'\0'))) {
+ dir->need_find_call = TRUE;
+ continue;
+ }
+
+ r = uv__convert_utf16_to_utf8((const WCHAR*) &find_data->cFileName,
+ -1,
+ (char**) &dirents[dirent_idx].name);
+ if (r != 0)
+ goto error;
+
+ /* Copy file type. */
+ if ((find_data->dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0)
+ dent.d_type = UV__DT_DIR;
+ else if ((find_data->dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT) != 0)
+ dent.d_type = UV__DT_LINK;
+ else if ((find_data->dwFileAttributes & FILE_ATTRIBUTE_DEVICE) != 0)
+ dent.d_type = UV__DT_CHAR;
+ else
+ dent.d_type = UV__DT_FILE;
+
+ dirents[dirent_idx].type = uv__fs_get_dirent_type(&dent);
+ dir->need_find_call = TRUE;
+ ++dirent_idx;
+ }
+
+ SET_REQ_RESULT(req, dirent_idx);
+ return;
+
+error:
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ for (i = 0; i < dirent_idx; ++i) {
+ uv__free((char*) dirents[i].name);
+ dirents[i].name = NULL;
+ }
+}
+
+void fs__closedir(uv_fs_t* req) {
+ uv_dir_t* dir;
+
+ dir = req->ptr;
+ FindClose(dir->dir_handle);
+ uv__free(req->ptr);
+ SET_REQ_RESULT(req, 0);
+}
+
+INLINE static int fs__stat_handle(HANDLE handle, uv_stat_t* statbuf,
+ int do_lstat) {
+ FILE_ALL_INFORMATION file_info;
+ FILE_FS_VOLUME_INFORMATION volume_info;
+ NTSTATUS nt_status;
+ IO_STATUS_BLOCK io_status;
+
+ nt_status = pNtQueryInformationFile(handle,
+ &io_status,
+ &file_info,
+ sizeof file_info,
+ FileAllInformation);
+
+ /* Buffer overflow (a warning status code) is expected here. */
+ if (NT_ERROR(nt_status)) {
+ SetLastError(pRtlNtStatusToDosError(nt_status));
+ return -1;
+ }
+
+ nt_status = pNtQueryVolumeInformationFile(handle,
+ &io_status,
+ &volume_info,
+ sizeof volume_info,
+ FileFsVolumeInformation);
+
+ /* Buffer overflow (a warning status code) is expected here. */
+ if (io_status.Status == STATUS_NOT_IMPLEMENTED) {
+ statbuf->st_dev = 0;
+ } else if (NT_ERROR(nt_status)) {
+ SetLastError(pRtlNtStatusToDosError(nt_status));
+ return -1;
+ } else {
+ statbuf->st_dev = volume_info.VolumeSerialNumber;
+ }
+
+ /* Todo: st_mode should probably always be 0666 for everyone. We might also
+ * want to report 0777 if the file is a .exe or a directory.
+ *
+ * Currently it's based on whether the 'readonly' attribute is set, which
+ * makes little sense because the semantics are so different: the 'read-only'
+ * flag is just a way for a user to protect against accidental deletion, and
+ * serves no security purpose. Windows uses ACLs for that.
+ *
+ * Also people now use uv_fs_chmod() to take away the writable bit for good
+ * reasons. Windows however just makes the file read-only, which makes it
+ * impossible to delete the file afterwards, since read-only files can't be
+ * deleted.
+ *
+   * In other words, the current behavior is inconsistent and we should think
+   * of something that makes slightly more sense.
+ *
+ * And uv_fs_chmod should probably just fail on windows or be a total no-op.
+ * There's nothing sensible it can do anyway.
+ */
+ statbuf->st_mode = 0;
+
+ /*
+ * On Windows, FILE_ATTRIBUTE_REPARSE_POINT is a general purpose mechanism
+ * by which filesystem drivers can intercept and alter file system requests.
+ *
+ * The only reparse points we care about are symlinks and mount points, both
+ * of which are treated as POSIX symlinks. Further, we only care when
+ * invoked via lstat, which seeks information about the link instead of its
+ * target. Otherwise, reparse points must be treated as regular files.
+ */
+ if (do_lstat &&
+ (file_info.BasicInformation.FileAttributes & FILE_ATTRIBUTE_REPARSE_POINT)) {
+ /*
+ * If reading the link fails, the reparse point is not a symlink and needs
+ * to be treated as a regular file. The higher level lstat function will
+ * detect this failure and retry without do_lstat if appropriate.
+ */
+ if (fs__readlink_handle(handle, NULL, &statbuf->st_size) != 0)
+ return -1;
+ statbuf->st_mode |= S_IFLNK;
+ }
+
+ if (statbuf->st_mode == 0) {
+ if (file_info.BasicInformation.FileAttributes & FILE_ATTRIBUTE_DIRECTORY) {
+ statbuf->st_mode |= _S_IFDIR;
+ statbuf->st_size = 0;
+ } else {
+ statbuf->st_mode |= _S_IFREG;
+ statbuf->st_size = file_info.StandardInformation.EndOfFile.QuadPart;
+ }
+ }
+
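+  /* _S_IREAD is 0400 and _S_IWRITE is 0200; shifting the owner bits right by
+   * 3 and 6 replicates them into the group and other positions, so a writable
+   * file gets permission bits 0666 and a read-only one gets 0444. */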
+ if (file_info.BasicInformation.FileAttributes & FILE_ATTRIBUTE_READONLY)
+ statbuf->st_mode |= _S_IREAD | (_S_IREAD >> 3) | (_S_IREAD >> 6);
+ else
+ statbuf->st_mode |= (_S_IREAD | _S_IWRITE) | ((_S_IREAD | _S_IWRITE) >> 3) |
+ ((_S_IREAD | _S_IWRITE) >> 6);
+
+ FILETIME_TO_TIMESPEC(statbuf->st_atim, file_info.BasicInformation.LastAccessTime);
+ FILETIME_TO_TIMESPEC(statbuf->st_ctim, file_info.BasicInformation.ChangeTime);
+ FILETIME_TO_TIMESPEC(statbuf->st_mtim, file_info.BasicInformation.LastWriteTime);
+ FILETIME_TO_TIMESPEC(statbuf->st_birthtim, file_info.BasicInformation.CreationTime);
+
+ statbuf->st_ino = file_info.InternalInformation.IndexNumber.QuadPart;
+
+ /* st_blocks contains the on-disk allocation size in 512-byte units. */
+ statbuf->st_blocks =
+ (uint64_t) file_info.StandardInformation.AllocationSize.QuadPart >> 9;
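+  /* The shift by 9 divides by 512: e.g. an AllocationSize of 8192 bytes
+   * yields st_blocks == 16. */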
+
+ statbuf->st_nlink = file_info.StandardInformation.NumberOfLinks;
+
+ /* The st_blksize is supposed to be the 'optimal' number of bytes for reading
+ * and writing to the disk. That is, for any definition of 'optimal' - it's
+ * supposed to at least avoid read-update-write behavior when writing to the
+ * disk.
+ *
+ * However nobody knows this and even fewer people actually use this value,
+ * and in order to fill it out we'd have to make another syscall to query the
+ * volume for FILE_FS_SECTOR_SIZE_INFORMATION.
+ *
+ * Therefore we'll just report a sensible value that's quite commonly okay
+ * on modern hardware.
+ *
+ * 4096 is the minimum required to be compatible with newer Advanced Format
+ * drives (which have 4096 bytes per physical sector), and to be backwards
+ * compatible with older drives (which have 512 bytes per physical sector).
+ */
+ statbuf->st_blksize = 4096;
+
+ /* Todo: set st_flags to something meaningful. Also provide a wrapper for
+ * chattr(2).
+ */
+ statbuf->st_flags = 0;
+
+ /* Windows has nothing sensible to say about these values, so they'll just
+ * remain empty.
+ */
+ statbuf->st_gid = 0;
+ statbuf->st_uid = 0;
+ statbuf->st_rdev = 0;
+ statbuf->st_gen = 0;
+
+ return 0;
+}
+
+
+INLINE static void fs__stat_prepare_path(WCHAR* pathw) {
+ size_t len = wcslen(pathw);
+
+ /* TODO: ignore namespaced paths. */
+ if (len > 1 && pathw[len - 2] != L':' &&
+ (pathw[len - 1] == L'\\' || pathw[len - 1] == L'/')) {
+ pathw[len - 1] = '\0';
+ }
+}
+
+
+INLINE static DWORD fs__stat_impl_from_path(WCHAR* path,
+ int do_lstat,
+ uv_stat_t* statbuf) {
+ HANDLE handle;
+ DWORD flags;
+ DWORD ret;
+
+ flags = FILE_FLAG_BACKUP_SEMANTICS;
+ if (do_lstat)
+ flags |= FILE_FLAG_OPEN_REPARSE_POINT;
+
+ handle = CreateFileW(path,
+ FILE_READ_ATTRIBUTES,
+ FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
+ NULL,
+ OPEN_EXISTING,
+ flags,
+ NULL);
+
+ if (handle == INVALID_HANDLE_VALUE)
+ ret = GetLastError();
+ else if (fs__stat_handle(handle, statbuf, do_lstat) != 0)
+ ret = GetLastError();
+ else
+ ret = 0;
+
+ CloseHandle(handle);
+ return ret;
+}
+
+
+INLINE static void fs__stat_impl(uv_fs_t* req, int do_lstat) {
+ DWORD error;
+
+ error = fs__stat_impl_from_path(req->file.pathw, do_lstat, &req->statbuf);
+ if (error != 0) {
+ if (do_lstat &&
+ (error == ERROR_SYMLINK_NOT_SUPPORTED ||
+ error == ERROR_NOT_A_REPARSE_POINT)) {
+ /* We opened a reparse point but it was not a symlink. Try again. */
+ fs__stat_impl(req, 0);
+ } else {
+ /* Stat failed. */
+ SET_REQ_WIN32_ERROR(req, error);
+ }
+
+ return;
+ }
+
+ req->ptr = &req->statbuf;
+ SET_REQ_RESULT(req, 0);
+}
+
+
+static void fs__stat(uv_fs_t* req) {
+ fs__stat_prepare_path(req->file.pathw);
+ fs__stat_impl(req, 0);
+}
+
+
+static void fs__lstat(uv_fs_t* req) {
+ fs__stat_prepare_path(req->file.pathw);
+ fs__stat_impl(req, 1);
+}
+
+
+static void fs__fstat(uv_fs_t* req) {
+ int fd = req->file.fd;
+ HANDLE handle;
+
+ VERIFY_FD(fd, req);
+
+ handle = uv__get_osfhandle(fd);
+
+ if (handle == INVALID_HANDLE_VALUE) {
+ SET_REQ_WIN32_ERROR(req, ERROR_INVALID_HANDLE);
+ return;
+ }
+
+ if (fs__stat_handle(handle, &req->statbuf, 0) != 0) {
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ return;
+ }
+
+ req->ptr = &req->statbuf;
+ SET_REQ_RESULT(req, 0);
+}
+
+
+static void fs__rename(uv_fs_t* req) {
+ if (!MoveFileExW(req->file.pathw, req->fs.info.new_pathw, MOVEFILE_REPLACE_EXISTING)) {
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ return;
+ }
+
+ SET_REQ_RESULT(req, 0);
+}
+
+
+INLINE static void fs__sync_impl(uv_fs_t* req) {
+ int fd = req->file.fd;
+ int result;
+
+ VERIFY_FD(fd, req);
+
+ result = FlushFileBuffers(uv__get_osfhandle(fd)) ? 0 : -1;
+ if (result == -1) {
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ } else {
+ SET_REQ_RESULT(req, result);
+ }
+}
+
+
+static void fs__fsync(uv_fs_t* req) {
+ fs__sync_impl(req);
+}
+
+
+static void fs__fdatasync(uv_fs_t* req) {
+ fs__sync_impl(req);
+}
+
+
+static void fs__ftruncate(uv_fs_t* req) {
+ int fd = req->file.fd;
+ HANDLE handle;
+ struct uv__fd_info_s fd_info = { 0 };
+ NTSTATUS status;
+ IO_STATUS_BLOCK io_status;
+ FILE_END_OF_FILE_INFORMATION eof_info;
+
+ VERIFY_FD(fd, req);
+
+ handle = uv__get_osfhandle(fd);
+
+ if (uv__fd_hash_get(fd, &fd_info)) {
+ if (fd_info.is_directory) {
+ SET_REQ_WIN32_ERROR(req, ERROR_ACCESS_DENIED);
+ return;
+ }
+
+ if (fd_info.mapping != INVALID_HANDLE_VALUE) {
+ CloseHandle(fd_info.mapping);
+ }
+ }
+
+ eof_info.EndOfFile.QuadPart = req->fs.info.offset;
+
+ status = pNtSetInformationFile(handle,
+ &io_status,
+ &eof_info,
+ sizeof eof_info,
+ FileEndOfFileInformation);
+
+ if (NT_SUCCESS(status)) {
+ SET_REQ_RESULT(req, 0);
+ } else {
+ SET_REQ_WIN32_ERROR(req, pRtlNtStatusToDosError(status));
+
+ if (fd_info.flags) {
+ CloseHandle(handle);
+ fd_info.mapping = INVALID_HANDLE_VALUE;
+ fd_info.size.QuadPart = 0;
+ fd_info.current_pos.QuadPart = 0;
+ uv__fd_hash_add(fd, &fd_info);
+ return;
+ }
+ }
+
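+  /* For fds tracked in the fd hash (the UV_FS_O_FILEMAP open path), recreate
+   * the file mapping so it matches the new end of file; a zero-length file
+   * cannot be mapped. */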
+ if (fd_info.flags) {
+ fd_info.size = eof_info.EndOfFile;
+
+ if (fd_info.size.QuadPart == 0) {
+ fd_info.mapping = INVALID_HANDLE_VALUE;
+ } else {
+ DWORD flProtect = (fd_info.flags & (UV_FS_O_RDONLY | UV_FS_O_WRONLY |
+ UV_FS_O_RDWR)) == UV_FS_O_RDONLY ? PAGE_READONLY : PAGE_READWRITE;
+ fd_info.mapping = CreateFileMapping(handle,
+ NULL,
+ flProtect,
+ fd_info.size.HighPart,
+ fd_info.size.LowPart,
+ NULL);
+ if (fd_info.mapping == NULL) {
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ CloseHandle(handle);
+ fd_info.mapping = INVALID_HANDLE_VALUE;
+ fd_info.size.QuadPart = 0;
+ fd_info.current_pos.QuadPart = 0;
+ uv__fd_hash_add(fd, &fd_info);
+ return;
+ }
+ }
+
+ uv__fd_hash_add(fd, &fd_info);
+ }
+}
+
+
+static void fs__copyfile(uv_fs_t* req) {
+ int flags;
+ int overwrite;
+ uv_stat_t statbuf;
+ uv_stat_t new_statbuf;
+
+ flags = req->fs.info.file_flags;
+
+ if (flags & UV_FS_COPYFILE_FICLONE_FORCE) {
+ SET_REQ_UV_ERROR(req, UV_ENOSYS, ERROR_NOT_SUPPORTED);
+ return;
+ }
+
+ overwrite = flags & UV_FS_COPYFILE_EXCL;
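+  /* CopyFileW's third argument is bFailIfExists, so passing the EXCL bit
+   * makes the copy fail when the destination already exists. */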
+
+ if (CopyFileW(req->file.pathw, req->fs.info.new_pathw, overwrite) != 0) {
+ SET_REQ_RESULT(req, 0);
+ return;
+ }
+
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ if (req->result != UV_EBUSY)
+ return;
+
+ /* if error UV_EBUSY check if src and dst file are the same */
+ if (fs__stat_impl_from_path(req->file.pathw, 0, &statbuf) != 0 ||
+ fs__stat_impl_from_path(req->fs.info.new_pathw, 0, &new_statbuf) != 0) {
+ return;
+ }
+
+ if (statbuf.st_dev == new_statbuf.st_dev &&
+ statbuf.st_ino == new_statbuf.st_ino) {
+ SET_REQ_RESULT(req, 0);
+ }
+}
+
+
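+/* Emulated sendfile: copies from fd_in to fd_out with a read/write loop,
+ * using a temporary buffer of at most 64 KiB. */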
+static void fs__sendfile(uv_fs_t* req) {
+ int fd_in = req->file.fd, fd_out = req->fs.info.fd_out;
+ size_t length = req->fs.info.bufsml[0].len;
+ int64_t offset = req->fs.info.offset;
+ const size_t max_buf_size = 65536;
+ size_t buf_size = length < max_buf_size ? length : max_buf_size;
+ int n, result = 0;
+ int64_t result_offset = 0;
+ char* buf = (char*) uv__malloc(buf_size);
+ if (!buf) {
+ uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+ }
+
+ if (offset != -1) {
+ result_offset = _lseeki64(fd_in, offset, SEEK_SET);
+ }
+
+ if (result_offset == -1) {
+ result = -1;
+ } else {
+ while (length > 0) {
+ n = _read(fd_in, buf, length < buf_size ? length : buf_size);
+ if (n == 0) {
+ break;
+ } else if (n == -1) {
+ result = -1;
+ break;
+ }
+
+ length -= n;
+
+ n = _write(fd_out, buf, n);
+ if (n == -1) {
+ result = -1;
+ break;
+ }
+
+ result += n;
+ }
+ }
+
+ uv__free(buf);
+
+ SET_REQ_RESULT(req, result);
+}
+
+
+static void fs__access(uv_fs_t* req) {
+ DWORD attr = GetFileAttributesW(req->file.pathw);
+
+ if (attr == INVALID_FILE_ATTRIBUTES) {
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ return;
+ }
+
+ /*
+ * Access is possible if
+ * - write access wasn't requested,
+ * - or the file isn't read-only,
+ * - or it's a directory.
+ * (Directories cannot be read-only on Windows.)
+ */
+ if (!(req->fs.info.mode & W_OK) ||
+ !(attr & FILE_ATTRIBUTE_READONLY) ||
+ (attr & FILE_ATTRIBUTE_DIRECTORY)) {
+ SET_REQ_RESULT(req, 0);
+ } else {
+ SET_REQ_WIN32_ERROR(req, UV_EPERM);
+ }
+
+}
+
+
+static void fs__chmod(uv_fs_t* req) {
+ int result = _wchmod(req->file.pathw, req->fs.info.mode);
+ if (result == -1)
+ SET_REQ_WIN32_ERROR(req, _doserrno);
+ else
+ SET_REQ_RESULT(req, 0);
+}
+
+
+static void fs__fchmod(uv_fs_t* req) {
+ int fd = req->file.fd;
+ int clear_archive_flag;
+ HANDLE handle;
+ NTSTATUS nt_status;
+ IO_STATUS_BLOCK io_status;
+ FILE_BASIC_INFORMATION file_info;
+
+ VERIFY_FD(fd, req);
+
+ handle = ReOpenFile(uv__get_osfhandle(fd), FILE_WRITE_ATTRIBUTES, 0, 0);
+ if (handle == INVALID_HANDLE_VALUE) {
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ return;
+ }
+
+ nt_status = pNtQueryInformationFile(handle,
+ &io_status,
+ &file_info,
+ sizeof file_info,
+ FileBasicInformation);
+
+ if (!NT_SUCCESS(nt_status)) {
+ SET_REQ_WIN32_ERROR(req, pRtlNtStatusToDosError(nt_status));
+ goto fchmod_cleanup;
+ }
+
+ /* Test if the Archive attribute is cleared */
+ if ((file_info.FileAttributes & FILE_ATTRIBUTE_ARCHIVE) == 0) {
+ /* Set Archive flag, otherwise setting or clearing the read-only
+ flag will not work */
+ file_info.FileAttributes |= FILE_ATTRIBUTE_ARCHIVE;
+ nt_status = pNtSetInformationFile(handle,
+ &io_status,
+ &file_info,
+ sizeof file_info,
+ FileBasicInformation);
+ if (!NT_SUCCESS(nt_status)) {
+ SET_REQ_WIN32_ERROR(req, pRtlNtStatusToDosError(nt_status));
+ goto fchmod_cleanup;
+ }
+    /* Remember to clear the flag later on */
+ clear_archive_flag = 1;
+ } else {
+ clear_archive_flag = 0;
+ }
+
+ if (req->fs.info.mode & _S_IWRITE) {
+ file_info.FileAttributes &= ~FILE_ATTRIBUTE_READONLY;
+ } else {
+ file_info.FileAttributes |= FILE_ATTRIBUTE_READONLY;
+ }
+
+ nt_status = pNtSetInformationFile(handle,
+ &io_status,
+ &file_info,
+ sizeof file_info,
+ FileBasicInformation);
+
+ if (!NT_SUCCESS(nt_status)) {
+ SET_REQ_WIN32_ERROR(req, pRtlNtStatusToDosError(nt_status));
+ goto fchmod_cleanup;
+ }
+
+ if (clear_archive_flag) {
+ file_info.FileAttributes &= ~FILE_ATTRIBUTE_ARCHIVE;
+ if (file_info.FileAttributes == 0) {
+ file_info.FileAttributes = FILE_ATTRIBUTE_NORMAL;
+ }
+ nt_status = pNtSetInformationFile(handle,
+ &io_status,
+ &file_info,
+ sizeof file_info,
+ FileBasicInformation);
+ if (!NT_SUCCESS(nt_status)) {
+ SET_REQ_WIN32_ERROR(req, pRtlNtStatusToDosError(nt_status));
+ goto fchmod_cleanup;
+ }
+ }
+
+ SET_REQ_SUCCESS(req);
+fchmod_cleanup:
+ CloseHandle(handle);
+}
+
+
+INLINE static int fs__utime_handle(HANDLE handle, double atime, double mtime) {
+ FILETIME filetime_a, filetime_m;
+
+ TIME_T_TO_FILETIME(atime, &filetime_a);
+ TIME_T_TO_FILETIME(mtime, &filetime_m);
+
+ if (!SetFileTime(handle, NULL, &filetime_a, &filetime_m)) {
+ return -1;
+ }
+
+ return 0;
+}
+
+INLINE static DWORD fs__utime_impl_from_path(WCHAR* path,
+ double atime,
+ double mtime,
+ int do_lutime) {
+ HANDLE handle;
+ DWORD flags;
+ DWORD ret;
+
+ flags = FILE_FLAG_BACKUP_SEMANTICS;
+ if (do_lutime) {
+ flags |= FILE_FLAG_OPEN_REPARSE_POINT;
+ }
+
+ handle = CreateFileW(path,
+ FILE_WRITE_ATTRIBUTES,
+ FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
+ NULL,
+ OPEN_EXISTING,
+ flags,
+ NULL);
+
+ if (handle == INVALID_HANDLE_VALUE) {
+ ret = GetLastError();
+ } else if (fs__utime_handle(handle, atime, mtime) != 0) {
+ ret = GetLastError();
+ } else {
+ ret = 0;
+ }
+
+ CloseHandle(handle);
+ return ret;
+}
+
+INLINE static void fs__utime_impl(uv_fs_t* req, int do_lutime) {
+ DWORD error;
+
+ error = fs__utime_impl_from_path(req->file.pathw,
+ req->fs.time.atime,
+ req->fs.time.mtime,
+ do_lutime);
+
+ if (error != 0) {
+ if (do_lutime &&
+ (error == ERROR_SYMLINK_NOT_SUPPORTED ||
+ error == ERROR_NOT_A_REPARSE_POINT)) {
+ /* Opened file is a reparse point but not a symlink. Try again. */
+ fs__utime_impl(req, 0);
+ } else {
+ /* utime failed. */
+ SET_REQ_WIN32_ERROR(req, error);
+ }
+
+ return;
+ }
+
+ SET_REQ_RESULT(req, 0);
+}
+
+static void fs__utime(uv_fs_t* req) {
+ fs__utime_impl(req, /* do_lutime */ 0);
+}
+
+
+static void fs__futime(uv_fs_t* req) {
+ int fd = req->file.fd;
+ HANDLE handle;
+ VERIFY_FD(fd, req);
+
+ handle = uv__get_osfhandle(fd);
+
+ if (handle == INVALID_HANDLE_VALUE) {
+ SET_REQ_WIN32_ERROR(req, ERROR_INVALID_HANDLE);
+ return;
+ }
+
+ if (fs__utime_handle(handle, req->fs.time.atime, req->fs.time.mtime) != 0) {
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ return;
+ }
+
+ SET_REQ_RESULT(req, 0);
+}
+
+static void fs__lutime(uv_fs_t* req) {
+ fs__utime_impl(req, /* do_lutime */ 1);
+}
+
+
+static void fs__link(uv_fs_t* req) {
+ DWORD r = CreateHardLinkW(req->fs.info.new_pathw, req->file.pathw, NULL);
+ if (r == 0)
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ else
+ SET_REQ_RESULT(req, 0);
+}
+
+
+static void fs__create_junction(uv_fs_t* req, const WCHAR* path,
+ const WCHAR* new_path) {
+ HANDLE handle = INVALID_HANDLE_VALUE;
+ REPARSE_DATA_BUFFER *buffer = NULL;
+ int created = 0;
+ int target_len;
+ int is_absolute, is_long_path;
+ int needed_buf_size, used_buf_size, used_data_size, path_buf_len;
+ int start, len, i;
+ int add_slash;
+ DWORD bytes;
+ WCHAR* path_buf;
+
+ target_len = wcslen(path);
+ is_long_path = wcsncmp(path, LONG_PATH_PREFIX, LONG_PATH_PREFIX_LEN) == 0;
+
+ if (is_long_path) {
+ is_absolute = 1;
+ } else {
+ is_absolute = target_len >= 3 && IS_LETTER(path[0]) &&
+ path[1] == L':' && IS_SLASH(path[2]);
+ }
+
+ if (!is_absolute) {
+ /* Not supporting relative paths */
+ SET_REQ_UV_ERROR(req, UV_EINVAL, ERROR_NOT_SUPPORTED);
+ return;
+ }
+
+ /* Do a pessimistic calculation of the required buffer size */
+ needed_buf_size =
+ FIELD_OFFSET(REPARSE_DATA_BUFFER, MountPointReparseBuffer.PathBuffer) +
+ JUNCTION_PREFIX_LEN * sizeof(WCHAR) +
+ 2 * (target_len + 2) * sizeof(WCHAR);
+
+ /* Allocate the buffer */
+ buffer = (REPARSE_DATA_BUFFER*)uv__malloc(needed_buf_size);
+ if (!buffer) {
+ uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+ }
+
+ /* Grab a pointer to the part of the buffer where filenames go */
+ path_buf = (WCHAR*)&(buffer->MountPointReparseBuffer.PathBuffer);
+ path_buf_len = 0;
+
+ /* Copy the substitute (internal) target path */
+ start = path_buf_len;
+
+ wcsncpy((WCHAR*)&path_buf[path_buf_len], JUNCTION_PREFIX,
+ JUNCTION_PREFIX_LEN);
+ path_buf_len += JUNCTION_PREFIX_LEN;
+
+ add_slash = 0;
+ for (i = is_long_path ? LONG_PATH_PREFIX_LEN : 0; path[i] != L'\0'; i++) {
+ if (IS_SLASH(path[i])) {
+ add_slash = 1;
+ continue;
+ }
+
+ if (add_slash) {
+ path_buf[path_buf_len++] = L'\\';
+ add_slash = 0;
+ }
+
+ path_buf[path_buf_len++] = path[i];
+ }
+ path_buf[path_buf_len++] = L'\\';
+ len = path_buf_len - start;
+
+ /* Set the info about the substitute name */
+ buffer->MountPointReparseBuffer.SubstituteNameOffset = start * sizeof(WCHAR);
+ buffer->MountPointReparseBuffer.SubstituteNameLength = len * sizeof(WCHAR);
+
+ /* Insert null terminator */
+ path_buf[path_buf_len++] = L'\0';
+
+ /* Copy the print name of the target path */
+ start = path_buf_len;
+ add_slash = 0;
+ for (i = is_long_path ? LONG_PATH_PREFIX_LEN : 0; path[i] != L'\0'; i++) {
+ if (IS_SLASH(path[i])) {
+ add_slash = 1;
+ continue;
+ }
+
+ if (add_slash) {
+ path_buf[path_buf_len++] = L'\\';
+ add_slash = 0;
+ }
+
+ path_buf[path_buf_len++] = path[i];
+ }
+ len = path_buf_len - start;
+ if (len == 2) {
+ path_buf[path_buf_len++] = L'\\';
+ len++;
+ }
+
+ /* Set the info about the print name */
+ buffer->MountPointReparseBuffer.PrintNameOffset = start * sizeof(WCHAR);
+ buffer->MountPointReparseBuffer.PrintNameLength = len * sizeof(WCHAR);
+
+ /* Insert another null terminator */
+ path_buf[path_buf_len++] = L'\0';
+
+ /* Calculate how much buffer space was actually used */
+ used_buf_size = FIELD_OFFSET(REPARSE_DATA_BUFFER, MountPointReparseBuffer.PathBuffer) +
+ path_buf_len * sizeof(WCHAR);
+ used_data_size = used_buf_size -
+ FIELD_OFFSET(REPARSE_DATA_BUFFER, MountPointReparseBuffer);
+
+ /* Put general info in the data buffer */
+ buffer->ReparseTag = IO_REPARSE_TAG_MOUNT_POINT;
+ buffer->ReparseDataLength = used_data_size;
+ buffer->Reserved = 0;
+
+ /* Create a new directory */
+ if (!CreateDirectoryW(new_path, NULL)) {
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ goto error;
+ }
+ created = 1;
+
+ /* Open the directory */
+ handle = CreateFileW(new_path,
+ GENERIC_WRITE,
+ 0,
+ NULL,
+ OPEN_EXISTING,
+ FILE_FLAG_BACKUP_SEMANTICS |
+ FILE_FLAG_OPEN_REPARSE_POINT,
+ NULL);
+ if (handle == INVALID_HANDLE_VALUE) {
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ goto error;
+ }
+
+ /* Create the actual reparse point */
+ if (!DeviceIoControl(handle,
+ FSCTL_SET_REPARSE_POINT,
+ buffer,
+ used_buf_size,
+ NULL,
+ 0,
+ &bytes,
+ NULL)) {
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ goto error;
+ }
+
+ /* Clean up */
+ CloseHandle(handle);
+ uv__free(buffer);
+
+ SET_REQ_RESULT(req, 0);
+ return;
+
+error:
+ uv__free(buffer);
+
+ if (handle != INVALID_HANDLE_VALUE) {
+ CloseHandle(handle);
+ }
+
+ if (created) {
+ RemoveDirectoryW(new_path);
+ }
+}
+
+
+static void fs__symlink(uv_fs_t* req) {
+ WCHAR* pathw;
+ WCHAR* new_pathw;
+ int flags;
+ int err;
+
+ pathw = req->file.pathw;
+ new_pathw = req->fs.info.new_pathw;
+
+ if (req->fs.info.file_flags & UV_FS_SYMLINK_JUNCTION) {
+ fs__create_junction(req, pathw, new_pathw);
+ return;
+ }
+
+ if (req->fs.info.file_flags & UV_FS_SYMLINK_DIR)
+ flags = SYMBOLIC_LINK_FLAG_DIRECTORY | uv__file_symlink_usermode_flag;
+ else
+ flags = uv__file_symlink_usermode_flag;
+
+ if (CreateSymbolicLinkW(new_pathw, pathw, flags)) {
+ SET_REQ_RESULT(req, 0);
+ return;
+ }
+
+ /* Something went wrong. We will test if it is because of user-mode
+ * symlinks.
+ */
+ err = GetLastError();
+ if (err == ERROR_INVALID_PARAMETER &&
+ flags & SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE) {
+ /* This system does not support user-mode symlinks. We will clear the
+ * unsupported flag and retry.
+ */
+ uv__file_symlink_usermode_flag = 0;
+ fs__symlink(req);
+ } else {
+ SET_REQ_WIN32_ERROR(req, err);
+ }
+}
+
+
+static void fs__readlink(uv_fs_t* req) {
+ HANDLE handle;
+
+ handle = CreateFileW(req->file.pathw,
+ 0,
+ 0,
+ NULL,
+ OPEN_EXISTING,
+ FILE_FLAG_OPEN_REPARSE_POINT | FILE_FLAG_BACKUP_SEMANTICS,
+ NULL);
+
+ if (handle == INVALID_HANDLE_VALUE) {
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ return;
+ }
+
+ if (fs__readlink_handle(handle, (char**) &req->ptr, NULL) != 0) {
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ CloseHandle(handle);
+ return;
+ }
+
+ req->flags |= UV_FS_FREE_PTR;
+ SET_REQ_RESULT(req, 0);
+
+ CloseHandle(handle);
+}
+
+
+static ssize_t fs__realpath_handle(HANDLE handle, char** realpath_ptr) {
+ int r;
+ DWORD w_realpath_len;
+ WCHAR* w_realpath_ptr = NULL;
+ WCHAR* w_realpath_buf;
+
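+  /* First query the required buffer length by passing a NULL buffer, then
+   * allocate and call again. */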
+ w_realpath_len = GetFinalPathNameByHandleW(handle, NULL, 0, VOLUME_NAME_DOS);
+ if (w_realpath_len == 0) {
+ return -1;
+ }
+
+ w_realpath_buf = uv__malloc((w_realpath_len + 1) * sizeof(WCHAR));
+ if (w_realpath_buf == NULL) {
+ SetLastError(ERROR_OUTOFMEMORY);
+ return -1;
+ }
+ w_realpath_ptr = w_realpath_buf;
+
+ if (GetFinalPathNameByHandleW(
+ handle, w_realpath_ptr, w_realpath_len, VOLUME_NAME_DOS) == 0) {
+ uv__free(w_realpath_buf);
+ SetLastError(ERROR_INVALID_HANDLE);
+ return -1;
+ }
+
+  /* Strip the "\\?\" (or "\\?\UNC\") prefix that GetFinalPathNameByHandleW
+   * returns, yielding a plain DOS path. */
+ if (wcsncmp(w_realpath_ptr,
+ UNC_PATH_PREFIX,
+ UNC_PATH_PREFIX_LEN) == 0) {
+ w_realpath_ptr += 6;
+ *w_realpath_ptr = L'\\';
+ w_realpath_len -= 6;
+ } else if (wcsncmp(w_realpath_ptr,
+ LONG_PATH_PREFIX,
+ LONG_PATH_PREFIX_LEN) == 0) {
+ w_realpath_ptr += 4;
+ w_realpath_len -= 4;
+ } else {
+ uv__free(w_realpath_buf);
+ SetLastError(ERROR_INVALID_HANDLE);
+ return -1;
+ }
+
+ r = fs__wide_to_utf8(w_realpath_ptr, w_realpath_len, realpath_ptr, NULL);
+ uv__free(w_realpath_buf);
+ return r;
+}
+
+static void fs__realpath(uv_fs_t* req) {
+ HANDLE handle;
+
+ handle = CreateFileW(req->file.pathw,
+ 0,
+ 0,
+ NULL,
+ OPEN_EXISTING,
+ FILE_ATTRIBUTE_NORMAL | FILE_FLAG_BACKUP_SEMANTICS,
+ NULL);
+ if (handle == INVALID_HANDLE_VALUE) {
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ return;
+ }
+
+ if (fs__realpath_handle(handle, (char**) &req->ptr) == -1) {
+ CloseHandle(handle);
+ SET_REQ_WIN32_ERROR(req, GetLastError());
+ return;
+ }
+
+ CloseHandle(handle);
+ req->flags |= UV_FS_FREE_PTR;
+ SET_REQ_RESULT(req, 0);
+}
+
+
+static void fs__chown(uv_fs_t* req) {
+ SET_REQ_RESULT(req, 0);
+}
+
+
+static void fs__fchown(uv_fs_t* req) {
+ SET_REQ_RESULT(req, 0);
+}
+
+
+static void fs__lchown(uv_fs_t* req) {
+ SET_REQ_RESULT(req, 0);
+}
+
+
+static void fs__statfs(uv_fs_t* req) {
+ uv_statfs_t* stat_fs;
+ DWORD sectors_per_cluster;
+ DWORD bytes_per_sector;
+ DWORD free_clusters;
+ DWORD total_clusters;
+ WCHAR* pathw;
+
+ pathw = req->file.pathw;
+retry_get_disk_free_space:
+ if (0 == GetDiskFreeSpaceW(pathw,
+ &sectors_per_cluster,
+ &bytes_per_sector,
+ &free_clusters,
+ &total_clusters)) {
+ DWORD err;
+ WCHAR* fpart;
+ size_t len;
+ DWORD ret;
+ BOOL is_second;
+
+ err = GetLastError();
+ is_second = pathw != req->file.pathw;
+ if (err != ERROR_DIRECTORY || is_second) {
+ if (is_second)
+ uv__free(pathw);
+
+ SET_REQ_WIN32_ERROR(req, err);
+ return;
+ }
+
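+    /* GetDiskFreeSpaceW rejected the path with ERROR_DIRECTORY (it likely
+     * names a file), so retry with the containing directory: get the full
+     * path and truncate it at the filename component. */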
+ len = MAX_PATH + 1;
+ pathw = uv__malloc(len * sizeof(*pathw));
+ if (pathw == NULL) {
+ SET_REQ_UV_ERROR(req, UV_ENOMEM, ERROR_OUTOFMEMORY);
+ return;
+ }
+retry_get_full_path_name:
+ ret = GetFullPathNameW(req->file.pathw,
+ len,
+ pathw,
+ &fpart);
+ if (ret == 0) {
+ uv__free(pathw);
+ SET_REQ_WIN32_ERROR(req, err);
+ return;
+ } else if (ret > len) {
+ len = ret;
+ pathw = uv__reallocf(pathw, len * sizeof(*pathw));
+ if (pathw == NULL) {
+ SET_REQ_UV_ERROR(req, UV_ENOMEM, ERROR_OUTOFMEMORY);
+ return;
+ }
+ goto retry_get_full_path_name;
+ }
+ if (fpart != 0)
+ *fpart = L'\0';
+
+ goto retry_get_disk_free_space;
+ }
+ if (pathw != req->file.pathw) {
+ uv__free(pathw);
+ }
+
+ stat_fs = uv__malloc(sizeof(*stat_fs));
+ if (stat_fs == NULL) {
+ SET_REQ_UV_ERROR(req, UV_ENOMEM, ERROR_OUTOFMEMORY);
+ return;
+ }
+
+ stat_fs->f_type = 0;
+ stat_fs->f_bsize = bytes_per_sector * sectors_per_cluster;
+ stat_fs->f_blocks = total_clusters;
+ stat_fs->f_bfree = free_clusters;
+ stat_fs->f_bavail = free_clusters;
+ stat_fs->f_files = 0;
+ stat_fs->f_ffree = 0;
+ req->ptr = stat_fs;
+ req->flags |= UV_FS_FREE_PTR;
+ SET_REQ_RESULT(req, 0);
+}
+
+
+static void uv__fs_work(struct uv__work* w) {
+ uv_fs_t* req;
+
+ req = container_of(w, uv_fs_t, work_req);
+ assert(req->type == UV_FS);
+
+#define XX(uc, lc) case UV_FS_##uc: fs__##lc(req); break;
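+  /* XX(STAT, stat) expands to "case UV_FS_STAT: fs__stat(req); break;". */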
+ switch (req->fs_type) {
+ XX(OPEN, open)
+ XX(CLOSE, close)
+ XX(READ, read)
+ XX(WRITE, write)
+ XX(COPYFILE, copyfile)
+ XX(SENDFILE, sendfile)
+ XX(STAT, stat)
+ XX(LSTAT, lstat)
+ XX(FSTAT, fstat)
+ XX(FTRUNCATE, ftruncate)
+ XX(UTIME, utime)
+ XX(FUTIME, futime)
+ XX(LUTIME, lutime)
+ XX(ACCESS, access)
+ XX(CHMOD, chmod)
+ XX(FCHMOD, fchmod)
+ XX(FSYNC, fsync)
+ XX(FDATASYNC, fdatasync)
+ XX(UNLINK, unlink)
+ XX(RMDIR, rmdir)
+ XX(MKDIR, mkdir)
+ XX(MKDTEMP, mkdtemp)
+ XX(MKSTEMP, mkstemp)
+ XX(RENAME, rename)
+ XX(SCANDIR, scandir)
+ XX(READDIR, readdir)
+ XX(OPENDIR, opendir)
+ XX(CLOSEDIR, closedir)
+ XX(LINK, link)
+ XX(SYMLINK, symlink)
+ XX(READLINK, readlink)
+ XX(REALPATH, realpath)
+ XX(CHOWN, chown)
+ XX(FCHOWN, fchown)
+ XX(LCHOWN, lchown)
+ XX(STATFS, statfs)
+ default:
+ assert(!"bad uv_fs_type");
+ }
+}
+
+
+static void uv__fs_done(struct uv__work* w, int status) {
+ uv_fs_t* req;
+
+ req = container_of(w, uv_fs_t, work_req);
+ uv__req_unregister(req->loop, req);
+
+ if (status == UV_ECANCELED) {
+ assert(req->result == 0);
+ SET_REQ_UV_ERROR(req, UV_ECANCELED, 0);
+ }
+
+ req->cb(req);
+}
+
+
+void uv_fs_req_cleanup(uv_fs_t* req) {
+ if (req == NULL)
+ return;
+
+ if (req->flags & UV_FS_CLEANEDUP)
+ return;
+
+ if (req->flags & UV_FS_FREE_PATHS)
+ uv__free(req->file.pathw);
+
+ if (req->flags & UV_FS_FREE_PTR) {
+ if (req->fs_type == UV_FS_SCANDIR && req->ptr != NULL)
+ uv__fs_scandir_cleanup(req);
+ else if (req->fs_type == UV_FS_READDIR)
+ uv__fs_readdir_cleanup(req);
+ else
+ uv__free(req->ptr);
+ }
+
+ if (req->fs.info.bufs != req->fs.info.bufsml)
+ uv__free(req->fs.info.bufs);
+
+ req->path = NULL;
+ req->file.pathw = NULL;
+ req->fs.info.new_pathw = NULL;
+ req->fs.info.bufs = NULL;
+ req->ptr = NULL;
+
+ req->flags |= UV_FS_CLEANEDUP;
+}
+
+
+int uv_fs_open(uv_loop_t* loop, uv_fs_t* req, const char* path, int flags,
+ int mode, uv_fs_cb cb) {
+ int err;
+
+ INIT(UV_FS_OPEN);
+ err = fs__capture_path(req, path, NULL, cb != NULL);
+ if (err) {
+ SET_REQ_WIN32_ERROR(req, err);
+ return req->result;
+ }
+
+ req->fs.info.file_flags = flags;
+ req->fs.info.mode = mode;
+ POST;
+}
+
+
+int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file fd, uv_fs_cb cb) {
+ INIT(UV_FS_CLOSE);
+ req->file.fd = fd;
+ POST;
+}
+
+
+int uv_fs_read(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_file fd,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ int64_t offset,
+ uv_fs_cb cb) {
+ INIT(UV_FS_READ);
+
+ if (bufs == NULL || nbufs == 0) {
+ SET_REQ_UV_ERROR(req, UV_EINVAL, ERROR_INVALID_PARAMETER);
+ return UV_EINVAL;
+ }
+
+ req->file.fd = fd;
+
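+  /* Use the request's small built-in buffer array when it is large enough;
+   * otherwise allocate a separate array for the uv_buf_t copies. */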
+ req->fs.info.nbufs = nbufs;
+ req->fs.info.bufs = req->fs.info.bufsml;
+ if (nbufs > ARRAY_SIZE(req->fs.info.bufsml))
+ req->fs.info.bufs = uv__malloc(nbufs * sizeof(*bufs));
+
+ if (req->fs.info.bufs == NULL) {
+ SET_REQ_UV_ERROR(req, UV_ENOMEM, ERROR_OUTOFMEMORY);
+ return UV_ENOMEM;
+ }
+
+ memcpy(req->fs.info.bufs, bufs, nbufs * sizeof(*bufs));
+
+ req->fs.info.offset = offset;
+ POST;
+}
+
+
+int uv_fs_write(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_file fd,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ int64_t offset,
+ uv_fs_cb cb) {
+ INIT(UV_FS_WRITE);
+
+ if (bufs == NULL || nbufs == 0) {
+ SET_REQ_UV_ERROR(req, UV_EINVAL, ERROR_INVALID_PARAMETER);
+ return UV_EINVAL;
+ }
+
+ req->file.fd = fd;
+
+ req->fs.info.nbufs = nbufs;
+ req->fs.info.bufs = req->fs.info.bufsml;
+ if (nbufs > ARRAY_SIZE(req->fs.info.bufsml))
+ req->fs.info.bufs = uv__malloc(nbufs * sizeof(*bufs));
+
+ if (req->fs.info.bufs == NULL) {
+ SET_REQ_UV_ERROR(req, UV_ENOMEM, ERROR_OUTOFMEMORY);
+ return UV_ENOMEM;
+ }
+
+ memcpy(req->fs.info.bufs, bufs, nbufs * sizeof(*bufs));
+
+ req->fs.info.offset = offset;
+ POST;
+}
+
+
+int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path,
+ uv_fs_cb cb) {
+ int err;
+
+ INIT(UV_FS_UNLINK);
+ err = fs__capture_path(req, path, NULL, cb != NULL);
+ if (err) {
+ SET_REQ_WIN32_ERROR(req, err);
+ return req->result;
+ }
+
+ POST;
+}
+
+
+int uv_fs_mkdir(uv_loop_t* loop, uv_fs_t* req, const char* path, int mode,
+ uv_fs_cb cb) {
+ int err;
+
+ INIT(UV_FS_MKDIR);
+ err = fs__capture_path(req, path, NULL, cb != NULL);
+ if (err) {
+ SET_REQ_WIN32_ERROR(req, err);
+ return req->result;
+ }
+
+ req->fs.info.mode = mode;
+ POST;
+}
+
+
+int uv_fs_mkdtemp(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* tpl,
+ uv_fs_cb cb) {
+ int err;
+
+ INIT(UV_FS_MKDTEMP);
+ err = fs__capture_path(req, tpl, NULL, TRUE);
+ if (err) {
+ SET_REQ_WIN32_ERROR(req, err);
+ return req->result;
+ }
+
+ POST;
+}
+
+
+int uv_fs_mkstemp(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* tpl,
+ uv_fs_cb cb) {
+ int err;
+
+ INIT(UV_FS_MKSTEMP);
+ err = fs__capture_path(req, tpl, NULL, TRUE);
+ if (err) {
+ SET_REQ_WIN32_ERROR(req, err);
+ return req->result;
+ }
+
+ POST;
+}
+
+
+int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
+ int err;
+
+ INIT(UV_FS_RMDIR);
+ err = fs__capture_path(req, path, NULL, cb != NULL);
+ if (err) {
+ SET_REQ_WIN32_ERROR(req, err);
+ return req->result;
+ }
+
+ POST;
+}
+
+
+int uv_fs_scandir(uv_loop_t* loop, uv_fs_t* req, const char* path, int flags,
+ uv_fs_cb cb) {
+ int err;
+
+ INIT(UV_FS_SCANDIR);
+ err = fs__capture_path(req, path, NULL, cb != NULL);
+ if (err) {
+ SET_REQ_WIN32_ERROR(req, err);
+ return req->result;
+ }
+
+ req->fs.info.file_flags = flags;
+ POST;
+}
+
+int uv_fs_opendir(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ uv_fs_cb cb) {
+ int err;
+
+ INIT(UV_FS_OPENDIR);
+ err = fs__capture_path(req, path, NULL, cb != NULL);
+ if (err) {
+ SET_REQ_WIN32_ERROR(req, err);
+ return req->result;
+ }
+ POST;
+}
+
+int uv_fs_readdir(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_dir_t* dir,
+ uv_fs_cb cb) {
+ INIT(UV_FS_READDIR);
+
+ if (dir == NULL ||
+ dir->dirents == NULL ||
+ dir->dir_handle == INVALID_HANDLE_VALUE) {
+ SET_REQ_UV_ERROR(req, UV_EINVAL, ERROR_INVALID_PARAMETER);
+ return UV_EINVAL;
+ }
+
+ req->ptr = dir;
+ POST;
+}
+
+int uv_fs_closedir(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_dir_t* dir,
+ uv_fs_cb cb) {
+ INIT(UV_FS_CLOSEDIR);
+ if (dir == NULL) {
+ SET_REQ_UV_ERROR(req, UV_EINVAL, ERROR_INVALID_PARAMETER);
+ return UV_EINVAL;
+ }
+ req->ptr = dir;
+ POST;
+}
+
+int uv_fs_link(uv_loop_t* loop, uv_fs_t* req, const char* path,
+ const char* new_path, uv_fs_cb cb) {
+ int err;
+
+ INIT(UV_FS_LINK);
+ err = fs__capture_path(req, path, new_path, cb != NULL);
+ if (err) {
+ SET_REQ_WIN32_ERROR(req, err);
+ return req->result;
+ }
+
+ POST;
+}
+
+
+int uv_fs_symlink(uv_loop_t* loop, uv_fs_t* req, const char* path,
+ const char* new_path, int flags, uv_fs_cb cb) {
+ int err;
+
+ INIT(UV_FS_SYMLINK);
+ err = fs__capture_path(req, path, new_path, cb != NULL);
+ if (err) {
+ SET_REQ_WIN32_ERROR(req, err);
+ return req->result;
+ }
+
+ req->fs.info.file_flags = flags;
+ POST;
+}
+
+
+int uv_fs_readlink(uv_loop_t* loop, uv_fs_t* req, const char* path,
+ uv_fs_cb cb) {
+ int err;
+
+ INIT(UV_FS_READLINK);
+ err = fs__capture_path(req, path, NULL, cb != NULL);
+ if (err) {
+ SET_REQ_WIN32_ERROR(req, err);
+ return req->result;
+ }
+
+ POST;
+}
+
+
+int uv_fs_realpath(uv_loop_t* loop, uv_fs_t* req, const char* path,
+ uv_fs_cb cb) {
+ int err;
+
+ INIT(UV_FS_REALPATH);
+
+ if (!path) {
+ SET_REQ_UV_ERROR(req, UV_EINVAL, ERROR_INVALID_PARAMETER);
+ return UV_EINVAL;
+ }
+
+ err = fs__capture_path(req, path, NULL, cb != NULL);
+ if (err) {
+ SET_REQ_WIN32_ERROR(req, err);
+ return req->result;
+ }
+
+ POST;
+}
+
+
+int uv_fs_chown(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_uid_t uid,
+ uv_gid_t gid, uv_fs_cb cb) {
+ int err;
+
+ INIT(UV_FS_CHOWN);
+ err = fs__capture_path(req, path, NULL, cb != NULL);
+ if (err) {
+ SET_REQ_WIN32_ERROR(req, err);
+ return req->result;
+ }
+
+ POST;
+}
+
+
+int uv_fs_fchown(uv_loop_t* loop, uv_fs_t* req, uv_file fd, uv_uid_t uid,
+ uv_gid_t gid, uv_fs_cb cb) {
+ INIT(UV_FS_FCHOWN);
+ POST;
+}
+
+
+int uv_fs_lchown(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_uid_t uid,
+ uv_gid_t gid, uv_fs_cb cb) {
+ int err;
+
+ INIT(UV_FS_LCHOWN);
+ err = fs__capture_path(req, path, NULL, cb != NULL);
+ if (err) {
+ SET_REQ_WIN32_ERROR(req, err);
+ return req->result;
+ }
+
+ POST;
+}
+
+
+int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
+ int err;
+
+ INIT(UV_FS_STAT);
+ err = fs__capture_path(req, path, NULL, cb != NULL);
+ if (err) {
+ SET_REQ_WIN32_ERROR(req, err);
+ return req->result;
+ }
+
+ POST;
+}
+
+
+int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
+ int err;
+
+ INIT(UV_FS_LSTAT);
+ err = fs__capture_path(req, path, NULL, cb != NULL);
+ if (err) {
+ SET_REQ_WIN32_ERROR(req, err);
+ return req->result;
+ }
+
+ POST;
+}
+
+
+int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file fd, uv_fs_cb cb) {
+ INIT(UV_FS_FSTAT);
+ req->file.fd = fd;
+ POST;
+}
+
+
+int uv_fs_rename(uv_loop_t* loop, uv_fs_t* req, const char* path,
+ const char* new_path, uv_fs_cb cb) {
+ int err;
+
+ INIT(UV_FS_RENAME);
+ err = fs__capture_path(req, path, new_path, cb != NULL);
+ if (err) {
+ SET_REQ_WIN32_ERROR(req, err);
+ return req->result;
+ }
+
+ POST;
+}
+
+
+int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file fd, uv_fs_cb cb) {
+ INIT(UV_FS_FSYNC);
+ req->file.fd = fd;
+ POST;
+}
+
+
+int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file fd, uv_fs_cb cb) {
+ INIT(UV_FS_FDATASYNC);
+ req->file.fd = fd;
+ POST;
+}
+
+
+int uv_fs_ftruncate(uv_loop_t* loop, uv_fs_t* req, uv_file fd,
+ int64_t offset, uv_fs_cb cb) {
+ INIT(UV_FS_FTRUNCATE);
+ req->file.fd = fd;
+ req->fs.info.offset = offset;
+ POST;
+}
+
+
+int uv_fs_copyfile(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ const char* new_path,
+ int flags,
+ uv_fs_cb cb) {
+ int err;
+
+ INIT(UV_FS_COPYFILE);
+
+ if (flags & ~(UV_FS_COPYFILE_EXCL |
+ UV_FS_COPYFILE_FICLONE |
+ UV_FS_COPYFILE_FICLONE_FORCE)) {
+ SET_REQ_UV_ERROR(req, UV_EINVAL, ERROR_INVALID_PARAMETER);
+ return UV_EINVAL;
+ }
+
+ err = fs__capture_path(req, path, new_path, cb != NULL);
+ if (err) {
+ SET_REQ_WIN32_ERROR(req, err);
+ return req->result;
+ }
+
+ req->fs.info.file_flags = flags;
+ POST;
+}
+
+
+int uv_fs_sendfile(uv_loop_t* loop, uv_fs_t* req, uv_file fd_out,
+ uv_file fd_in, int64_t in_offset, size_t length, uv_fs_cb cb) {
+ INIT(UV_FS_SENDFILE);
+ req->file.fd = fd_in;
+ req->fs.info.fd_out = fd_out;
+ req->fs.info.offset = in_offset;
+ req->fs.info.bufsml[0].len = length;
+ POST;
+}
+
+
+int uv_fs_access(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ int flags,
+ uv_fs_cb cb) {
+ int err;
+
+ INIT(UV_FS_ACCESS);
+ err = fs__capture_path(req, path, NULL, cb != NULL);
+ if (err) {
+ SET_REQ_WIN32_ERROR(req, err);
+ return req->result;
+ }
+
+ req->fs.info.mode = flags;
+ POST;
+}
+
+
+int uv_fs_chmod(uv_loop_t* loop, uv_fs_t* req, const char* path, int mode,
+ uv_fs_cb cb) {
+ int err;
+
+ INIT(UV_FS_CHMOD);
+ err = fs__capture_path(req, path, NULL, cb != NULL);
+ if (err) {
+ SET_REQ_WIN32_ERROR(req, err);
+ return req->result;
+ }
+
+ req->fs.info.mode = mode;
+ POST;
+}
+
+
+int uv_fs_fchmod(uv_loop_t* loop, uv_fs_t* req, uv_file fd, int mode,
+ uv_fs_cb cb) {
+ INIT(UV_FS_FCHMOD);
+ req->file.fd = fd;
+ req->fs.info.mode = mode;
+ POST;
+}
+
+
+int uv_fs_utime(uv_loop_t* loop, uv_fs_t* req, const char* path, double atime,
+ double mtime, uv_fs_cb cb) {
+ int err;
+
+ INIT(UV_FS_UTIME);
+ err = fs__capture_path(req, path, NULL, cb != NULL);
+ if (err) {
+ SET_REQ_WIN32_ERROR(req, err);
+ return req->result;
+ }
+
+ req->fs.time.atime = atime;
+ req->fs.time.mtime = mtime;
+ POST;
+}
+
+
+int uv_fs_futime(uv_loop_t* loop, uv_fs_t* req, uv_file fd, double atime,
+ double mtime, uv_fs_cb cb) {
+ INIT(UV_FS_FUTIME);
+ req->file.fd = fd;
+ req->fs.time.atime = atime;
+ req->fs.time.mtime = mtime;
+ POST;
+}
+
+int uv_fs_lutime(uv_loop_t* loop, uv_fs_t* req, const char* path, double atime,
+ double mtime, uv_fs_cb cb) {
+ int err;
+
+ INIT(UV_FS_LUTIME);
+ err = fs__capture_path(req, path, NULL, cb != NULL);
+ if (err) {
+ SET_REQ_WIN32_ERROR(req, err);
+ return req->result;
+ }
+
+ req->fs.time.atime = atime;
+ req->fs.time.mtime = mtime;
+ POST;
+}
+
+
+int uv_fs_statfs(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ uv_fs_cb cb) {
+ int err;
+
+ INIT(UV_FS_STATFS);
+ err = fs__capture_path(req, path, NULL, cb != NULL);
+ if (err) {
+ SET_REQ_WIN32_ERROR(req, err);
+ return req->result;
+ }
+
+ POST;
+}
+
+int uv_fs_get_system_error(const uv_fs_t* req) {
+ return req->sys_errno_;
+}
diff --git a/Utilities/cmlibuv/src/win/getaddrinfo.c b/Utilities/cmlibuv/src/win/getaddrinfo.c
new file mode 100644
index 0000000..dfab860
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/getaddrinfo.c
@@ -0,0 +1,463 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+
+#include "uv.h"
+#include "internal.h"
+#include "req-inl.h"
+#include "idna.h"
+
+/* EAI_* constants. */
+#include <winsock2.h>
+
+/* Needed for ConvertInterfaceIndexToLuid and ConvertInterfaceLuidToNameA */
+#include <iphlpapi.h>
+
+int uv__getaddrinfo_translate_error(int sys_err) {
+ switch (sys_err) {
+ case 0: return 0;
+ case WSATRY_AGAIN: return UV_EAI_AGAIN;
+ case WSAEINVAL: return UV_EAI_BADFLAGS;
+ case WSANO_RECOVERY: return UV_EAI_FAIL;
+ case WSAEAFNOSUPPORT: return UV_EAI_FAMILY;
+ case WSA_NOT_ENOUGH_MEMORY: return UV_EAI_MEMORY;
+ case WSAHOST_NOT_FOUND: return UV_EAI_NONAME;
+ case WSATYPE_NOT_FOUND: return UV_EAI_SERVICE;
+ case WSAESOCKTNOSUPPORT: return UV_EAI_SOCKTYPE;
+ default: return uv_translate_sys_error(sys_err);
+ }
+}
+
+
+/*
+ * MinGW is missing this
+ */
+#if !defined(_MSC_VER) && !defined(__MINGW64_VERSION_MAJOR)
+ typedef struct addrinfoW {
+ int ai_flags;
+ int ai_family;
+ int ai_socktype;
+ int ai_protocol;
+ size_t ai_addrlen;
+ WCHAR* ai_canonname;
+ struct sockaddr* ai_addr;
+ struct addrinfoW* ai_next;
+ } ADDRINFOW, *PADDRINFOW;
+
+ DECLSPEC_IMPORT int WSAAPI GetAddrInfoW(const WCHAR* node,
+ const WCHAR* service,
+ const ADDRINFOW* hints,
+ PADDRINFOW* result);
+
+ DECLSPEC_IMPORT void WSAAPI FreeAddrInfoW(PADDRINFOW pAddrInfo);
+#endif
+
+
+/* Adjust a size value to be a multiple of 4. Used to keep pointers aligned.
+ * Do we need different versions of this for different architectures? */
+#define ALIGNED_SIZE(X) ((((X) + 3) >> 2) << 2)
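+/* For example, ALIGNED_SIZE(5) == 8 and ALIGNED_SIZE(8) == 8. */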
+
+#ifndef NDIS_IF_MAX_STRING_SIZE
+#define NDIS_IF_MAX_STRING_SIZE IF_MAX_STRING_SIZE
+#endif
+
+static void uv__getaddrinfo_work(struct uv__work* w) {
+ uv_getaddrinfo_t* req;
+ struct addrinfoW* hints;
+ int err;
+
+ req = container_of(w, uv_getaddrinfo_t, work_req);
+ hints = req->addrinfow;
+ req->addrinfow = NULL;
+ err = GetAddrInfoW(req->node, req->service, hints, &req->addrinfow);
+ req->retcode = uv__getaddrinfo_translate_error(err);
+}
+
+
+/*
+ * Called from uv_run when complete. Calls the user-specified callback, then
+ * frees the returned addrinfo.
+ * Returned addrinfo strings are converted from UTF-16 to UTF-8.
+ *
+ * To minimize allocations we calculate the total size required and copy all
+ * structs and referenced strings into one block.
+ * Each size calculation is adjusted to avoid unaligned pointers.
+ */
+static void uv__getaddrinfo_done(struct uv__work* w, int status) {
+ uv_getaddrinfo_t* req;
+ int addrinfo_len = 0;
+ int name_len = 0;
+ size_t addrinfo_struct_len = ALIGNED_SIZE(sizeof(struct addrinfo));
+ struct addrinfoW* addrinfow_ptr;
+ struct addrinfo* addrinfo_ptr;
+ char* alloc_ptr = NULL;
+ char* cur_ptr = NULL;
+
+ req = container_of(w, uv_getaddrinfo_t, work_req);
+
+ /* release input parameter memory */
+ uv__free(req->alloc);
+ req->alloc = NULL;
+
+ if (status == UV_ECANCELED) {
+ assert(req->retcode == 0);
+ req->retcode = UV_EAI_CANCELED;
+ goto complete;
+ }
+
+ if (req->retcode == 0) {
+ /* Convert addrinfoW to addrinfo. First calculate required length. */
+ addrinfow_ptr = req->addrinfow;
+ while (addrinfow_ptr != NULL) {
+ addrinfo_len += addrinfo_struct_len +
+ ALIGNED_SIZE(addrinfow_ptr->ai_addrlen);
+ if (addrinfow_ptr->ai_canonname != NULL) {
+ name_len = WideCharToMultiByte(CP_UTF8,
+ 0,
+ addrinfow_ptr->ai_canonname,
+ -1,
+ NULL,
+ 0,
+ NULL,
+ NULL);
+ if (name_len == 0) {
+ req->retcode = uv_translate_sys_error(GetLastError());
+ goto complete;
+ }
+ addrinfo_len += ALIGNED_SIZE(name_len);
+ }
+ addrinfow_ptr = addrinfow_ptr->ai_next;
+ }
+
+ /* allocate memory for addrinfo results */
+ alloc_ptr = (char*)uv__malloc(addrinfo_len);
+
+ /* do conversions */
+ if (alloc_ptr != NULL) {
+ cur_ptr = alloc_ptr;
+ addrinfow_ptr = req->addrinfow;
+
+ while (addrinfow_ptr != NULL) {
+ /* copy addrinfo struct data */
+ assert(cur_ptr + addrinfo_struct_len <= alloc_ptr + addrinfo_len);
+ addrinfo_ptr = (struct addrinfo*)cur_ptr;
+ addrinfo_ptr->ai_family = addrinfow_ptr->ai_family;
+ addrinfo_ptr->ai_socktype = addrinfow_ptr->ai_socktype;
+ addrinfo_ptr->ai_protocol = addrinfow_ptr->ai_protocol;
+ addrinfo_ptr->ai_flags = addrinfow_ptr->ai_flags;
+ addrinfo_ptr->ai_addrlen = addrinfow_ptr->ai_addrlen;
+ addrinfo_ptr->ai_canonname = NULL;
+ addrinfo_ptr->ai_addr = NULL;
+ addrinfo_ptr->ai_next = NULL;
+
+ cur_ptr += addrinfo_struct_len;
+
+ /* copy sockaddr */
+ if (addrinfo_ptr->ai_addrlen > 0) {
+ assert(cur_ptr + addrinfo_ptr->ai_addrlen <=
+ alloc_ptr + addrinfo_len);
+ memcpy(cur_ptr, addrinfow_ptr->ai_addr, addrinfo_ptr->ai_addrlen);
+ addrinfo_ptr->ai_addr = (struct sockaddr*)cur_ptr;
+ cur_ptr += ALIGNED_SIZE(addrinfo_ptr->ai_addrlen);
+ }
+
+ /* convert canonical name to UTF-8 */
+ if (addrinfow_ptr->ai_canonname != NULL) {
+ name_len = WideCharToMultiByte(CP_UTF8,
+ 0,
+ addrinfow_ptr->ai_canonname,
+ -1,
+ NULL,
+ 0,
+ NULL,
+ NULL);
+ assert(name_len > 0);
+ assert(cur_ptr + name_len <= alloc_ptr + addrinfo_len);
+ name_len = WideCharToMultiByte(CP_UTF8,
+ 0,
+ addrinfow_ptr->ai_canonname,
+ -1,
+ cur_ptr,
+ name_len,
+ NULL,
+ NULL);
+ assert(name_len > 0);
+ addrinfo_ptr->ai_canonname = cur_ptr;
+ cur_ptr += ALIGNED_SIZE(name_len);
+ }
+ assert(cur_ptr <= alloc_ptr + addrinfo_len);
+
+ /* set next ptr */
+ addrinfow_ptr = addrinfow_ptr->ai_next;
+ if (addrinfow_ptr != NULL) {
+ addrinfo_ptr->ai_next = (struct addrinfo*)cur_ptr;
+ }
+ }
+ req->addrinfo = (struct addrinfo*)alloc_ptr;
+ } else {
+ req->retcode = UV_EAI_MEMORY;
+ }
+ }
+
+ /* return memory to system */
+ if (req->addrinfow != NULL) {
+ FreeAddrInfoW(req->addrinfow);
+ req->addrinfow = NULL;
+ }
+
+complete:
+ uv__req_unregister(req->loop, req);
+
+ /* finally do callback with converted result */
+ if (req->getaddrinfo_cb)
+ req->getaddrinfo_cb(req, req->retcode, req->addrinfo);
+}
+
+
+void uv_freeaddrinfo(struct addrinfo* ai) {
+ char* alloc_ptr = (char*)ai;
+
+ /* release copied result memory */
+ uv__free(alloc_ptr);
+}
+
+
+/*
+ * Entry point for getaddrinfo.
+ * We convert the UTF-8 strings to UTF-16 and save the UTF-16 string pointers
+ * in the req. We also copy the hints so that the caller does not need to keep
+ * the memory alive until the callback runs.
+ * Returns 0 if a callback will be made.
+ * Returns an error code if validation fails.
+ *
+ * To minimize allocations we calculate the total size required and copy all
+ * structs and referenced strings into one block.
+ * Each size calculation is adjusted to avoid unaligned pointers.
+ */
+int uv_getaddrinfo(uv_loop_t* loop,
+ uv_getaddrinfo_t* req,
+ uv_getaddrinfo_cb getaddrinfo_cb,
+ const char* node,
+ const char* service,
+ const struct addrinfo* hints) {
+ char hostname_ascii[256];
+ int nodesize = 0;
+ int servicesize = 0;
+ int hintssize = 0;
+ char* alloc_ptr = NULL;
+ int err;
+ long rc;
+
+ if (req == NULL || (node == NULL && service == NULL)) {
+ return UV_EINVAL;
+ }
+
+ UV_REQ_INIT(req, UV_GETADDRINFO);
+ req->getaddrinfo_cb = getaddrinfo_cb;
+ req->addrinfo = NULL;
+ req->loop = loop;
+ req->retcode = 0;
+
+ /* calculate required memory size for all input values */
+ if (node != NULL) {
+ rc = uv__idna_toascii(node,
+ node + strlen(node),
+ hostname_ascii,
+ hostname_ascii + sizeof(hostname_ascii));
+ if (rc < 0)
+ return rc;
+ nodesize = ALIGNED_SIZE(MultiByteToWideChar(CP_UTF8, 0, hostname_ascii,
+ -1, NULL, 0) * sizeof(WCHAR));
+ if (nodesize == 0) {
+ err = GetLastError();
+ goto error;
+ }
+ node = hostname_ascii;
+ }
+
+ if (service != NULL) {
+ servicesize = ALIGNED_SIZE(MultiByteToWideChar(CP_UTF8,
+ 0,
+ service,
+ -1,
+ NULL,
+ 0) *
+ sizeof(WCHAR));
+ if (servicesize == 0) {
+ err = GetLastError();
+ goto error;
+ }
+ }
+ if (hints != NULL) {
+ hintssize = ALIGNED_SIZE(sizeof(struct addrinfoW));
+ }
+
+ /* allocate memory for inputs, and partition it as needed */
+ alloc_ptr = (char*)uv__malloc(nodesize + servicesize + hintssize);
+ if (!alloc_ptr) {
+ err = WSAENOBUFS;
+ goto error;
+ }
+
+ /* save alloc_ptr now so we can free if error */
+ req->alloc = (void*)alloc_ptr;
+
+ /* Convert node string to UTF16 into allocated memory and save pointer in the
+ * request. */
+ if (node != NULL) {
+ req->node = (WCHAR*)alloc_ptr;
+ if (MultiByteToWideChar(CP_UTF8,
+ 0,
+ node,
+ -1,
+ (WCHAR*) alloc_ptr,
+ nodesize / sizeof(WCHAR)) == 0) {
+ err = GetLastError();
+ goto error;
+ }
+ alloc_ptr += nodesize;
+ } else {
+ req->node = NULL;
+ }
+
+ /* Convert service string to UTF16 into allocated memory and save pointer in
+ * the req. */
+ if (service != NULL) {
+ req->service = (WCHAR*)alloc_ptr;
+ if (MultiByteToWideChar(CP_UTF8,
+ 0,
+ service,
+ -1,
+ (WCHAR*) alloc_ptr,
+ servicesize / sizeof(WCHAR)) == 0) {
+ err = GetLastError();
+ goto error;
+ }
+ alloc_ptr += servicesize;
+ } else {
+ req->service = NULL;
+ }
+
+ /* copy hints to allocated memory and save pointer in req */
+ if (hints != NULL) {
+ req->addrinfow = (struct addrinfoW*)alloc_ptr;
+ req->addrinfow->ai_family = hints->ai_family;
+ req->addrinfow->ai_socktype = hints->ai_socktype;
+ req->addrinfow->ai_protocol = hints->ai_protocol;
+ req->addrinfow->ai_flags = hints->ai_flags;
+ req->addrinfow->ai_addrlen = 0;
+ req->addrinfow->ai_canonname = NULL;
+ req->addrinfow->ai_addr = NULL;
+ req->addrinfow->ai_next = NULL;
+ } else {
+ req->addrinfow = NULL;
+ }
+
+ uv__req_register(loop, req);
+
+ if (getaddrinfo_cb) {
+ uv__work_submit(loop,
+ &req->work_req,
+ UV__WORK_SLOW_IO,
+ uv__getaddrinfo_work,
+ uv__getaddrinfo_done);
+ return 0;
+ } else {
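+    /* No callback was supplied: run the lookup synchronously on this thread. */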
+ uv__getaddrinfo_work(&req->work_req);
+ uv__getaddrinfo_done(&req->work_req, 0);
+ return req->retcode;
+ }
+
+error:
+ if (req != NULL) {
+ uv__free(req->alloc);
+ req->alloc = NULL;
+ }
+ return uv_translate_sys_error(err);
+}
+
+int uv_if_indextoname(unsigned int ifindex, char* buffer, size_t* size) {
+ NET_LUID luid;
+ wchar_t wname[NDIS_IF_MAX_STRING_SIZE + 1]; /* Add one for the NUL. */
+ DWORD bufsize;
+ int r;
+
+ if (buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+ r = ConvertInterfaceIndexToLuid(ifindex, &luid);
+
+ if (r != 0)
+ return uv_translate_sys_error(r);
+
+ r = ConvertInterfaceLuidToNameW(&luid, wname, ARRAY_SIZE(wname));
+
+ if (r != 0)
+ return uv_translate_sys_error(r);
+
+ /* Check how much space we need */
+ bufsize = WideCharToMultiByte(CP_UTF8, 0, wname, -1, NULL, 0, NULL, NULL);
+
+ if (bufsize == 0) {
+ return uv_translate_sys_error(GetLastError());
+ } else if (bufsize > *size) {
+ *size = bufsize;
+ return UV_ENOBUFS;
+ }
+
+ /* Convert to UTF-8 */
+ bufsize = WideCharToMultiByte(CP_UTF8,
+ 0,
+ wname,
+ -1,
+ buffer,
+ *size,
+ NULL,
+ NULL);
+
+ if (bufsize == 0)
+ return uv_translate_sys_error(GetLastError());
+
+ *size = bufsize - 1;
+ return 0;
+}
+
+int uv_if_indextoiid(unsigned int ifindex, char* buffer, size_t* size) {
+ int r;
+
+ if (buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+ r = snprintf(buffer, *size, "%d", ifindex);
+
+ if (r < 0)
+ return uv_translate_sys_error(r);
+
+ if (r >= (int) *size) {
+ *size = r + 1;
+ return UV_ENOBUFS;
+ }
+
+ *size = r;
+ return 0;
+}
diff --git a/Utilities/cmlibuv/src/win/getnameinfo.c b/Utilities/cmlibuv/src/win/getnameinfo.c
new file mode 100644
index 0000000..b377338
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/getnameinfo.c
@@ -0,0 +1,157 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+*
+* Permission is hereby granted, free of charge, to any person obtaining a copy
+* of this software and associated documentation files (the "Software"), to
+* deal in the Software without restriction, including without limitation the
+* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+* sell copies of the Software, and to permit persons to whom the Software is
+* furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+* IN THE SOFTWARE.
+*/
+
+#include <assert.h>
+#include <stdio.h>
+
+#include "uv.h"
+#include "internal.h"
+#include "req-inl.h"
+
+#ifndef GetNameInfo
+int WSAAPI GetNameInfoW(
+ const SOCKADDR *pSockaddr,
+ socklen_t SockaddrLength,
+ PWCHAR pNodeBuffer,
+ DWORD NodeBufferSize,
+ PWCHAR pServiceBuffer,
+ DWORD ServiceBufferSize,
+ INT Flags
+);
+#endif
+
+static void uv__getnameinfo_work(struct uv__work* w) {
+ uv_getnameinfo_t* req;
+ WCHAR host[NI_MAXHOST];
+ WCHAR service[NI_MAXSERV];
+ int ret;
+
+ req = container_of(w, uv_getnameinfo_t, work_req);
+ if (GetNameInfoW((struct sockaddr*)&req->storage,
+ sizeof(req->storage),
+ host,
+ ARRAY_SIZE(host),
+ service,
+ ARRAY_SIZE(service),
+ req->flags)) {
+ ret = WSAGetLastError();
+ req->retcode = uv__getaddrinfo_translate_error(ret);
+ return;
+ }
+
+ ret = WideCharToMultiByte(CP_UTF8,
+ 0,
+ host,
+ -1,
+ req->host,
+ sizeof(req->host),
+ NULL,
+ NULL);
+ if (ret == 0) {
+ req->retcode = uv_translate_sys_error(GetLastError());
+ return;
+ }
+
+ ret = WideCharToMultiByte(CP_UTF8,
+ 0,
+ service,
+ -1,
+ req->service,
+ sizeof(req->service),
+ NULL,
+ NULL);
+ if (ret == 0) {
+ req->retcode = uv_translate_sys_error(GetLastError());
+ }
+}
+
+
+/*
+* Called from uv_run when complete.
+*/
+static void uv__getnameinfo_done(struct uv__work* w, int status) {
+ uv_getnameinfo_t* req;
+ char* host;
+ char* service;
+
+ req = container_of(w, uv_getnameinfo_t, work_req);
+ uv__req_unregister(req->loop, req);
+ host = service = NULL;
+
+ if (status == UV_ECANCELED) {
+ assert(req->retcode == 0);
+ req->retcode = UV_EAI_CANCELED;
+ } else if (req->retcode == 0) {
+ host = req->host;
+ service = req->service;
+ }
+
+ if (req->getnameinfo_cb)
+ req->getnameinfo_cb(req, req->retcode, host, service);
+}
+
+
+/*
+* Entry point for getnameinfo.
+* Returns 0 if a callback will be made.
+* Returns an error code if validation fails.
+*/
+int uv_getnameinfo(uv_loop_t* loop,
+ uv_getnameinfo_t* req,
+ uv_getnameinfo_cb getnameinfo_cb,
+ const struct sockaddr* addr,
+ int flags) {
+ if (req == NULL || addr == NULL)
+ return UV_EINVAL;
+
+ if (addr->sa_family == AF_INET) {
+ memcpy(&req->storage,
+ addr,
+ sizeof(struct sockaddr_in));
+ } else if (addr->sa_family == AF_INET6) {
+ memcpy(&req->storage,
+ addr,
+ sizeof(struct sockaddr_in6));
+ } else {
+ return UV_EINVAL;
+ }
+
+ UV_REQ_INIT(req, UV_GETNAMEINFO);
+ uv__req_register(loop, req);
+
+ req->getnameinfo_cb = getnameinfo_cb;
+ req->flags = flags;
+ req->loop = loop;
+ req->retcode = 0;
+
+ if (getnameinfo_cb) {
+ uv__work_submit(loop,
+ &req->work_req,
+ UV__WORK_SLOW_IO,
+ uv__getnameinfo_work,
+ uv__getnameinfo_done);
+ return 0;
+ } else {
+ uv__getnameinfo_work(&req->work_req);
+ uv__getnameinfo_done(&req->work_req, 0);
+ return req->retcode;
+ }
+}
diff --git a/Utilities/cmlibuv/src/win/handle-inl.h b/Utilities/cmlibuv/src/win/handle-inl.h
new file mode 100644
index 0000000..82c657d
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/handle-inl.h
@@ -0,0 +1,180 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_WIN_HANDLE_INL_H_
+#define UV_WIN_HANDLE_INL_H_
+
+#include <assert.h>
+#include <io.h>
+
+#include "uv.h"
+#include "internal.h"
+
+
+#define DECREASE_ACTIVE_COUNT(loop, handle) \
+ do { \
+ if (--(handle)->activecnt == 0 && \
+ !((handle)->flags & UV_HANDLE_CLOSING)) { \
+ uv__handle_stop((handle)); \
+ } \
+ assert((handle)->activecnt >= 0); \
+ } while (0)
+
+
+#define INCREASE_ACTIVE_COUNT(loop, handle) \
+ do { \
+ if ((handle)->activecnt++ == 0) { \
+ uv__handle_start((handle)); \
+ } \
+ assert((handle)->activecnt > 0); \
+ } while (0)
+
+
+#define DECREASE_PENDING_REQ_COUNT(handle) \
+ do { \
+ assert(handle->reqs_pending > 0); \
+ handle->reqs_pending--; \
+ \
+ if (handle->flags & UV_HANDLE_CLOSING && \
+ handle->reqs_pending == 0) { \
+ uv_want_endgame(loop, (uv_handle_t*)handle); \
+ } \
+ } while (0)
+
+
+#define uv__handle_closing(handle) \
+ do { \
+ assert(!((handle)->flags & UV_HANDLE_CLOSING)); \
+ \
+ if (!(((handle)->flags & UV_HANDLE_ACTIVE) && \
+ ((handle)->flags & UV_HANDLE_REF))) \
+ uv__active_handle_add((uv_handle_t*) (handle)); \
+ \
+ (handle)->flags |= UV_HANDLE_CLOSING; \
+ (handle)->flags &= ~UV_HANDLE_ACTIVE; \
+ } while (0)
+
+
+#define uv__handle_close(handle) \
+ do { \
+ QUEUE_REMOVE(&(handle)->handle_queue); \
+ uv__active_handle_rm((uv_handle_t*) (handle)); \
+ \
+ (handle)->flags |= UV_HANDLE_CLOSED; \
+ \
+ if ((handle)->close_cb) \
+ (handle)->close_cb((uv_handle_t*) (handle)); \
+ } while (0)
+
+
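+/* Queue a handle on the loop's endgame list so that uv_process_endgames(),
+ * called from the event loop, runs its type-specific close logic. */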
+INLINE static void uv_want_endgame(uv_loop_t* loop, uv_handle_t* handle) {
+ if (!(handle->flags & UV_HANDLE_ENDGAME_QUEUED)) {
+ handle->flags |= UV_HANDLE_ENDGAME_QUEUED;
+
+ handle->endgame_next = loop->endgame_handles;
+ loop->endgame_handles = handle;
+ }
+}
+
+
+INLINE static void uv_process_endgames(uv_loop_t* loop) {
+ uv_handle_t* handle;
+
+ while (loop->endgame_handles) {
+ handle = loop->endgame_handles;
+ loop->endgame_handles = handle->endgame_next;
+
+ handle->flags &= ~UV_HANDLE_ENDGAME_QUEUED;
+
+ switch (handle->type) {
+ case UV_TCP:
+ uv_tcp_endgame(loop, (uv_tcp_t*) handle);
+ break;
+
+ case UV_NAMED_PIPE:
+ uv_pipe_endgame(loop, (uv_pipe_t*) handle);
+ break;
+
+ case UV_TTY:
+ uv_tty_endgame(loop, (uv_tty_t*) handle);
+ break;
+
+ case UV_UDP:
+ uv_udp_endgame(loop, (uv_udp_t*) handle);
+ break;
+
+ case UV_POLL:
+ uv_poll_endgame(loop, (uv_poll_t*) handle);
+ break;
+
+ case UV_TIMER:
+ uv__timer_close((uv_timer_t*) handle);
+ uv__handle_close(handle);
+ break;
+
+ case UV_PREPARE:
+ case UV_CHECK:
+ case UV_IDLE:
+ uv_loop_watcher_endgame(loop, handle);
+ break;
+
+ case UV_ASYNC:
+ uv_async_endgame(loop, (uv_async_t*) handle);
+ break;
+
+ case UV_SIGNAL:
+ uv_signal_endgame(loop, (uv_signal_t*) handle);
+ break;
+
+ case UV_PROCESS:
+ uv_process_endgame(loop, (uv_process_t*) handle);
+ break;
+
+ case UV_FS_EVENT:
+ uv_fs_event_endgame(loop, (uv_fs_event_t*) handle);
+ break;
+
+ case UV_FS_POLL:
+ uv__fs_poll_endgame(loop, (uv_fs_poll_t*) handle);
+ break;
+
+ default:
+ assert(0);
+ break;
+ }
+ }
+}
+
+INLINE static HANDLE uv__get_osfhandle(int fd)
+{
+ /* _get_osfhandle() raises an assert in debug builds if the FD is invalid.
+ * But it also correctly checks the FD and returns INVALID_HANDLE_VALUE for
+ * invalid FDs in release builds (or if you let the assert continue). So this
+ * wrapper function disables asserts when calling _get_osfhandle. */
+
+ HANDLE handle;
+ UV_BEGIN_DISABLE_CRT_ASSERT();
+ handle = (HANDLE) _get_osfhandle(fd);
+ UV_END_DISABLE_CRT_ASSERT();
+ return handle;
+}
+
+#endif /* UV_WIN_HANDLE_INL_H_ */
diff --git a/Utilities/cmlibuv/src/win/handle.c b/Utilities/cmlibuv/src/win/handle.c
new file mode 100644
index 0000000..61e4df6
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/handle.c
@@ -0,0 +1,162 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <io.h>
+#include <stdlib.h>
+
+#include "uv.h"
+#include "internal.h"
+#include "handle-inl.h"
+
+
+uv_handle_type uv_guess_handle(uv_file file) {
+ HANDLE handle;
+ DWORD mode;
+
+ if (file < 0) {
+ return UV_UNKNOWN_HANDLE;
+ }
+
+ handle = uv__get_osfhandle(file);
+
+ switch (GetFileType(handle)) {
+ case FILE_TYPE_CHAR:
+ if (GetConsoleMode(handle, &mode)) {
+ return UV_TTY;
+ } else {
+ return UV_FILE;
+ }
+
+ case FILE_TYPE_PIPE:
+ return UV_NAMED_PIPE;
+
+ case FILE_TYPE_DISK:
+ return UV_FILE;
+
+ default:
+ return UV_UNKNOWN_HANDLE;
+ }
+}
+
+
+int uv_is_active(const uv_handle_t* handle) {
+ return (handle->flags & UV_HANDLE_ACTIVE) &&
+ !(handle->flags & UV_HANDLE_CLOSING);
+}
+
+
+void uv_close(uv_handle_t* handle, uv_close_cb cb) {
+ uv_loop_t* loop = handle->loop;
+
+ if (handle->flags & UV_HANDLE_CLOSING) {
+ assert(0);
+ return;
+ }
+
+ handle->close_cb = cb;
+
+ /* Handle-specific close actions */
+ switch (handle->type) {
+ case UV_TCP:
+ uv_tcp_close(loop, (uv_tcp_t*)handle);
+ return;
+
+ case UV_NAMED_PIPE:
+ uv_pipe_close(loop, (uv_pipe_t*) handle);
+ return;
+
+ case UV_TTY:
+ uv_tty_close((uv_tty_t*) handle);
+ return;
+
+ case UV_UDP:
+ uv_udp_close(loop, (uv_udp_t*) handle);
+ return;
+
+ case UV_POLL:
+ uv_poll_close(loop, (uv_poll_t*) handle);
+ return;
+
+ case UV_TIMER:
+ uv_timer_stop((uv_timer_t*)handle);
+ uv__handle_closing(handle);
+ uv_want_endgame(loop, handle);
+ return;
+
+ case UV_PREPARE:
+ uv_prepare_stop((uv_prepare_t*)handle);
+ uv__handle_closing(handle);
+ uv_want_endgame(loop, handle);
+ return;
+
+ case UV_CHECK:
+ uv_check_stop((uv_check_t*)handle);
+ uv__handle_closing(handle);
+ uv_want_endgame(loop, handle);
+ return;
+
+ case UV_IDLE:
+ uv_idle_stop((uv_idle_t*)handle);
+ uv__handle_closing(handle);
+ uv_want_endgame(loop, handle);
+ return;
+
+ case UV_ASYNC:
+ uv_async_close(loop, (uv_async_t*) handle);
+ return;
+
+ case UV_SIGNAL:
+ uv_signal_close(loop, (uv_signal_t*) handle);
+ return;
+
+ case UV_PROCESS:
+ uv_process_close(loop, (uv_process_t*) handle);
+ return;
+
+ case UV_FS_EVENT:
+ uv_fs_event_close(loop, (uv_fs_event_t*) handle);
+ return;
+
+ case UV_FS_POLL:
+ uv__fs_poll_close((uv_fs_poll_t*) handle);
+ uv__handle_closing(handle);
+ return;
+
+ default:
+ /* Not supported */
+ abort();
+ }
+}
+
+
+int uv_is_closing(const uv_handle_t* handle) {
+ return !!(handle->flags & (UV_HANDLE_CLOSING | UV_HANDLE_CLOSED));
+}
+
+
+uv_os_fd_t uv_get_osfhandle(int fd) {
+ return uv__get_osfhandle(fd);
+}
+
+int uv_open_osfhandle(uv_os_fd_t os_fd) {
+ return _open_osfhandle((intptr_t) os_fd, 0);
+}
diff --git a/Utilities/cmlibuv/src/win/internal.h b/Utilities/cmlibuv/src/win/internal.h
new file mode 100644
index 0000000..3f56777
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/internal.h
@@ -0,0 +1,348 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_WIN_INTERNAL_H_
+#define UV_WIN_INTERNAL_H_
+
+#if defined(_MSC_VER)
+# pragma warning(push,1)
+#endif
+
+#include "uv.h"
+#include "../uv-common.h"
+
+#include "uv/tree.h"
+#include "winapi.h"
+#include "winsock.h"
+
+#ifdef _MSC_VER
+# define INLINE __inline
+# define UV_THREAD_LOCAL __declspec( thread )
+#else
+# define INLINE inline
+# define UV_THREAD_LOCAL __thread
+#endif
+
+
+#ifdef _DEBUG
+
+extern UV_THREAD_LOCAL int uv__crt_assert_enabled;
+
+#define UV_BEGIN_DISABLE_CRT_ASSERT() \
+ { \
+ int uv__saved_crt_assert_enabled = uv__crt_assert_enabled; \
+ uv__crt_assert_enabled = FALSE;
+
+
+#define UV_END_DISABLE_CRT_ASSERT() \
+ uv__crt_assert_enabled = uv__saved_crt_assert_enabled; \
+ }
+
+#else
+#define UV_BEGIN_DISABLE_CRT_ASSERT()
+#define UV_END_DISABLE_CRT_ASSERT()
+#endif
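+
+/* Illustrative sketch (an assumption, not upstream code): the macro pair above
+ * is presumably used to bracket CRT calls that could otherwise raise an
+ * assertion dialog in debug builds, e.g.
+ *
+ *   UV_BEGIN_DISABLE_CRT_ASSERT()
+ *   handle = (HANDLE) _get_osfhandle(fd);
+ *   UV_END_DISABLE_CRT_ASSERT()
+ */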
+
+/*
+ * TCP
+ */
+
+typedef enum {
+ UV__IPC_SOCKET_XFER_NONE = 0,
+ UV__IPC_SOCKET_XFER_TCP_CONNECTION,
+ UV__IPC_SOCKET_XFER_TCP_SERVER
+} uv__ipc_socket_xfer_type_t;
+
+typedef struct {
+ WSAPROTOCOL_INFOW socket_info;
+ uint32_t delayed_error;
+} uv__ipc_socket_xfer_info_t;
+
+int uv_tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb);
+int uv_tcp_accept(uv_tcp_t* server, uv_tcp_t* client);
+int uv_tcp_read_start(uv_tcp_t* handle, uv_alloc_cb alloc_cb,
+ uv_read_cb read_cb);
+int uv_tcp_write(uv_loop_t* loop, uv_write_t* req, uv_tcp_t* handle,
+ const uv_buf_t bufs[], unsigned int nbufs, uv_write_cb cb);
+int uv__tcp_try_write(uv_tcp_t* handle, const uv_buf_t bufs[],
+ unsigned int nbufs);
+
+void uv_process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle, uv_req_t* req);
+void uv_process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle,
+ uv_write_t* req);
+void uv_process_tcp_accept_req(uv_loop_t* loop, uv_tcp_t* handle,
+ uv_req_t* req);
+void uv_process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle,
+ uv_connect_t* req);
+
+void uv_tcp_close(uv_loop_t* loop, uv_tcp_t* tcp);
+void uv_tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle);
+
+int uv__tcp_xfer_export(uv_tcp_t* handle,
+ int pid,
+ uv__ipc_socket_xfer_type_t* xfer_type,
+ uv__ipc_socket_xfer_info_t* xfer_info);
+int uv__tcp_xfer_import(uv_tcp_t* tcp,
+ uv__ipc_socket_xfer_type_t xfer_type,
+ uv__ipc_socket_xfer_info_t* xfer_info);
+
+
+/*
+ * UDP
+ */
+void uv_process_udp_recv_req(uv_loop_t* loop, uv_udp_t* handle, uv_req_t* req);
+void uv_process_udp_send_req(uv_loop_t* loop, uv_udp_t* handle,
+ uv_udp_send_t* req);
+
+void uv_udp_close(uv_loop_t* loop, uv_udp_t* handle);
+void uv_udp_endgame(uv_loop_t* loop, uv_udp_t* handle);
+
+
+/*
+ * Pipes
+ */
+int uv_stdio_pipe_server(uv_loop_t* loop, uv_pipe_t* handle, DWORD access,
+ char* name, size_t nameSize);
+
+int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb);
+int uv_pipe_accept(uv_pipe_t* server, uv_stream_t* client);
+int uv_pipe_read_start(uv_pipe_t* handle, uv_alloc_cb alloc_cb,
+ uv_read_cb read_cb);
+void uv__pipe_read_stop(uv_pipe_t* handle);
+int uv__pipe_write(uv_loop_t* loop,
+ uv_write_t* req,
+ uv_pipe_t* handle,
+ const uv_buf_t bufs[],
+ size_t nbufs,
+ uv_stream_t* send_handle,
+ uv_write_cb cb);
+
+void uv_process_pipe_read_req(uv_loop_t* loop, uv_pipe_t* handle,
+ uv_req_t* req);
+void uv_process_pipe_write_req(uv_loop_t* loop, uv_pipe_t* handle,
+ uv_write_t* req);
+void uv_process_pipe_accept_req(uv_loop_t* loop, uv_pipe_t* handle,
+ uv_req_t* raw_req);
+void uv_process_pipe_connect_req(uv_loop_t* loop, uv_pipe_t* handle,
+ uv_connect_t* req);
+void uv_process_pipe_shutdown_req(uv_loop_t* loop, uv_pipe_t* handle,
+ uv_shutdown_t* req);
+
+void uv_pipe_close(uv_loop_t* loop, uv_pipe_t* handle);
+void uv_pipe_cleanup(uv_loop_t* loop, uv_pipe_t* handle);
+void uv_pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle);
+
+
+/*
+ * TTY
+ */
+void uv_console_init(void);
+
+int uv_tty_read_start(uv_tty_t* handle, uv_alloc_cb alloc_cb,
+ uv_read_cb read_cb);
+int uv_tty_read_stop(uv_tty_t* handle);
+int uv_tty_write(uv_loop_t* loop, uv_write_t* req, uv_tty_t* handle,
+ const uv_buf_t bufs[], unsigned int nbufs, uv_write_cb cb);
+int uv__tty_try_write(uv_tty_t* handle, const uv_buf_t bufs[],
+ unsigned int nbufs);
+void uv_tty_close(uv_tty_t* handle);
+
+void uv_process_tty_read_req(uv_loop_t* loop, uv_tty_t* handle,
+ uv_req_t* req);
+void uv_process_tty_write_req(uv_loop_t* loop, uv_tty_t* handle,
+ uv_write_t* req);
+/*
+ * uv_process_tty_accept_req() is a stub to keep DELEGATE_STREAM_REQ working
+ * TODO: find a way to remove it
+ */
+void uv_process_tty_accept_req(uv_loop_t* loop, uv_tty_t* handle,
+ uv_req_t* raw_req);
+/*
+ * uv_process_tty_connect_req() is a stub to keep DELEGATE_STREAM_REQ working
+ * TODO: find a way to remove it
+ */
+void uv_process_tty_connect_req(uv_loop_t* loop, uv_tty_t* handle,
+ uv_connect_t* req);
+
+void uv_tty_endgame(uv_loop_t* loop, uv_tty_t* handle);
+
+
+/*
+ * Poll watchers
+ */
+void uv_process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
+ uv_req_t* req);
+
+int uv_poll_close(uv_loop_t* loop, uv_poll_t* handle);
+void uv_poll_endgame(uv_loop_t* loop, uv_poll_t* handle);
+
+
+/*
+ * Loop watchers
+ */
+void uv_loop_watcher_endgame(uv_loop_t* loop, uv_handle_t* handle);
+
+void uv_prepare_invoke(uv_loop_t* loop);
+void uv_check_invoke(uv_loop_t* loop);
+void uv_idle_invoke(uv_loop_t* loop);
+
+void uv__once_init(void);
+
+
+/*
+ * Async watcher
+ */
+void uv_async_close(uv_loop_t* loop, uv_async_t* handle);
+void uv_async_endgame(uv_loop_t* loop, uv_async_t* handle);
+
+void uv_process_async_wakeup_req(uv_loop_t* loop, uv_async_t* handle,
+ uv_req_t* req);
+
+
+/*
+ * Signal watcher
+ */
+void uv_signals_init(void);
+int uv__signal_dispatch(int signum);
+
+void uv_signal_close(uv_loop_t* loop, uv_signal_t* handle);
+void uv_signal_endgame(uv_loop_t* loop, uv_signal_t* handle);
+
+void uv_process_signal_req(uv_loop_t* loop, uv_signal_t* handle,
+ uv_req_t* req);
+
+
+/*
+ * Spawn
+ */
+void uv_process_proc_exit(uv_loop_t* loop, uv_process_t* handle);
+void uv_process_close(uv_loop_t* loop, uv_process_t* handle);
+void uv_process_endgame(uv_loop_t* loop, uv_process_t* handle);
+
+
+/*
+ * Error
+ */
+int uv_translate_sys_error(int sys_errno);
+
+
+/*
+ * FS
+ */
+void uv_fs_init(void);
+
+
+/*
+ * FS Event
+ */
+void uv_process_fs_event_req(uv_loop_t* loop, uv_req_t* req,
+ uv_fs_event_t* handle);
+void uv_fs_event_close(uv_loop_t* loop, uv_fs_event_t* handle);
+void uv_fs_event_endgame(uv_loop_t* loop, uv_fs_event_t* handle);
+
+
+/*
+ * Stat poller.
+ */
+void uv__fs_poll_endgame(uv_loop_t* loop, uv_fs_poll_t* handle);
+
+
+/*
+ * Utilities.
+ */
+void uv__util_init(void);
+
+uint64_t uv__hrtime(unsigned int scale);
+__declspec(noreturn) void uv_fatal_error(const int errorno, const char* syscall);
+int uv__getpwuid_r(uv_passwd_t* pwd);
+int uv__convert_utf16_to_utf8(const WCHAR* utf16, int utf16len, char** utf8);
+int uv__convert_utf8_to_utf16(const char* utf8, int utf8len, WCHAR** utf16);
+
+typedef int (WINAPI *uv__peersockfunc)(SOCKET, struct sockaddr*, int*);
+
+int uv__getsockpeername(const uv_handle_t* handle,
+ uv__peersockfunc func,
+ struct sockaddr* name,
+ int* namelen,
+ int delayed_error);
+
+int uv__random_rtlgenrandom(void* buf, size_t buflen);
+
+
+/*
+ * Process stdio handles.
+ */
+int uv__stdio_create(uv_loop_t* loop,
+ const uv_process_options_t* options,
+ BYTE** buffer_ptr);
+void uv__stdio_destroy(BYTE* buffer);
+void uv__stdio_noinherit(BYTE* buffer);
+int uv__stdio_verify(BYTE* buffer, WORD size);
+WORD uv__stdio_size(BYTE* buffer);
+HANDLE uv__stdio_handle(BYTE* buffer, int fd);
+
+
+/*
+ * Winapi and ntapi utility functions
+ */
+void uv_winapi_init(void);
+
+
+/*
+ * Winsock utility functions
+ */
+void uv_winsock_init(void);
+
+int uv_ntstatus_to_winsock_error(NTSTATUS status);
+
+BOOL uv_get_acceptex_function(SOCKET socket, LPFN_ACCEPTEX* target);
+BOOL uv_get_connectex_function(SOCKET socket, LPFN_CONNECTEX* target);
+
+int WSAAPI uv_wsarecv_workaround(SOCKET socket, WSABUF* buffers,
+ DWORD buffer_count, DWORD* bytes, DWORD* flags, WSAOVERLAPPED *overlapped,
+ LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine);
+int WSAAPI uv_wsarecvfrom_workaround(SOCKET socket, WSABUF* buffers,
+ DWORD buffer_count, DWORD* bytes, DWORD* flags, struct sockaddr* addr,
+ int* addr_len, WSAOVERLAPPED *overlapped,
+ LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine);
+
+int WSAAPI uv_msafd_poll(SOCKET socket, AFD_POLL_INFO* info_in,
+ AFD_POLL_INFO* info_out, OVERLAPPED* overlapped);
+
+/* Whether there are any non-IFS LSPs stacked on TCP */
+extern int uv_tcp_non_ifs_lsp_ipv4;
+extern int uv_tcp_non_ifs_lsp_ipv6;
+
+/* IP address used to bind to any port at any interface */
+extern struct sockaddr_in uv_addr_ip4_any_;
+extern struct sockaddr_in6 uv_addr_ip6_any_;
+
+/*
+ * Wake all loops with a fake message
+ */
+void uv__wake_all_loops(void);
+
+/*
+ * Init system wake-up detection
+ */
+void uv__init_detect_system_wakeup(void);
+
+#endif /* UV_WIN_INTERNAL_H_ */
diff --git a/Utilities/cmlibuv/src/win/loop-watcher.c b/Utilities/cmlibuv/src/win/loop-watcher.c
new file mode 100644
index 0000000..ad7fbea
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/loop-watcher.c
@@ -0,0 +1,122 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+
+#include "uv.h"
+#include "internal.h"
+#include "handle-inl.h"
+
+
+void uv_loop_watcher_endgame(uv_loop_t* loop, uv_handle_t* handle) {
+ if (handle->flags & UV_HANDLE_CLOSING) {
+ assert(!(handle->flags & UV_HANDLE_CLOSED));
+ handle->flags |= UV_HANDLE_CLOSED;
+ uv__handle_close(handle);
+ }
+}
+
+
+#define UV_LOOP_WATCHER_DEFINE(name, NAME) \
+ int uv_##name##_init(uv_loop_t* loop, uv_##name##_t* handle) { \
+ uv__handle_init(loop, (uv_handle_t*) handle, UV_##NAME); \
+ \
+ return 0; \
+ } \
+ \
+ \
+ int uv_##name##_start(uv_##name##_t* handle, uv_##name##_cb cb) { \
+ uv_loop_t* loop = handle->loop; \
+ uv_##name##_t* old_head; \
+ \
+ assert(handle->type == UV_##NAME); \
+ \
+ if (uv__is_active(handle)) \
+ return 0; \
+ \
+ if (cb == NULL) \
+ return UV_EINVAL; \
+ \
+ old_head = loop->name##_handles; \
+ \
+ handle->name##_next = old_head; \
+ handle->name##_prev = NULL; \
+ \
+ if (old_head) { \
+ old_head->name##_prev = handle; \
+ } \
+ \
+ loop->name##_handles = handle; \
+ \
+ handle->name##_cb = cb; \
+ uv__handle_start(handle); \
+ \
+ return 0; \
+ } \
+ \
+ \
+ int uv_##name##_stop(uv_##name##_t* handle) { \
+ uv_loop_t* loop = handle->loop; \
+ \
+ assert(handle->type == UV_##NAME); \
+ \
+ if (!uv__is_active(handle)) \
+ return 0; \
+ \
+ /* Update loop head if needed */ \
+ if (loop->name##_handles == handle) { \
+ loop->name##_handles = handle->name##_next; \
+ } \
+ \
+ /* Update the iterator-next pointer if needed */ \
+ if (loop->next_##name##_handle == handle) { \
+ loop->next_##name##_handle = handle->name##_next; \
+ } \
+ \
+ if (handle->name##_prev) { \
+ handle->name##_prev->name##_next = handle->name##_next; \
+ } \
+ if (handle->name##_next) { \
+ handle->name##_next->name##_prev = handle->name##_prev; \
+ } \
+ \
+ uv__handle_stop(handle); \
+ \
+ return 0; \
+ } \
+ \
+ \
+ void uv_##name##_invoke(uv_loop_t* loop) { \
+ uv_##name##_t* handle; \
+ \
+ (loop)->next_##name##_handle = (loop)->name##_handles; \
+ \
+ while ((loop)->next_##name##_handle != NULL) { \
+ handle = (loop)->next_##name##_handle; \
+ (loop)->next_##name##_handle = handle->name##_next; \
+ \
+ handle->name##_cb(handle); \
+ } \
+ }
+
+UV_LOOP_WATCHER_DEFINE(prepare, PREPARE)
+UV_LOOP_WATCHER_DEFINE(check, CHECK)
+UV_LOOP_WATCHER_DEFINE(idle, IDLE)
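+
+/* Illustrative note (not part of the upstream file): each expansion above
+ * generates the init/start/stop functions and the internal invoke helper for
+ * one watcher type; for "prepare" that is uv_prepare_init(),
+ * uv_prepare_start(), uv_prepare_stop() and uv_prepare_invoke().  A
+ * hypothetical caller drives them through the public libuv API:
+ *
+ *   uv_prepare_t p;
+ *   uv_prepare_init(loop, &p);
+ *   uv_prepare_start(&p, on_prepare);   <- on_prepare runs before each poll
+ */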
diff --git a/Utilities/cmlibuv/src/win/pipe.c b/Utilities/cmlibuv/src/win/pipe.c
new file mode 100644
index 0000000..f81245e
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/pipe.c
@@ -0,0 +1,2393 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <io.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "handle-inl.h"
+#include "internal.h"
+#include "req-inl.h"
+#include "stream-inl.h"
+#include "uv-common.h"
+#include "uv.h"
+
+#include <aclapi.h>
+#include <accctrl.h>
+
+/* A zero-size buffer for use by uv_pipe_read */
+static char uv_zero_[] = "";
+
+/* Null uv_buf_t */
+static const uv_buf_t uv_null_buf_ = { 0, NULL };
+
+/* The timeout that the pipe will wait for the remote end to write data when
+ * the local end wants to shut it down. */
+static const int64_t eof_timeout = 50; /* ms */
+
+static const int default_pending_pipe_instances = 4;
+
+/* Pipe prefix */
+static char pipe_prefix[] = "\\\\?\\pipe";
+static const int pipe_prefix_len = sizeof(pipe_prefix) - 1;
+
+/* IPC incoming xfer queue item. */
+typedef struct {
+ uv__ipc_socket_xfer_type_t xfer_type;
+ uv__ipc_socket_xfer_info_t xfer_info;
+ QUEUE member;
+} uv__ipc_xfer_queue_item_t;
+
+/* IPC frame header flags. */
+/* clang-format off */
+enum {
+ UV__IPC_FRAME_HAS_DATA = 0x01,
+ UV__IPC_FRAME_HAS_SOCKET_XFER = 0x02,
+ UV__IPC_FRAME_XFER_IS_TCP_CONNECTION = 0x04,
+ /* These are combinations of the flags above. */
+ UV__IPC_FRAME_XFER_FLAGS = 0x06,
+ UV__IPC_FRAME_VALID_FLAGS = 0x07
+};
+/* clang-format on */
+
+/* IPC frame header. */
+typedef struct {
+ uint32_t flags;
+ uint32_t reserved1; /* Ignored. */
+ uint32_t data_length; /* Must be zero if there is no data. */
+ uint32_t reserved2; /* Must be zero. */
+} uv__ipc_frame_header_t;
+
+/* To implement the IPC protocol correctly, these structures must have exactly
+ * the right size. */
+STATIC_ASSERT(sizeof(uv__ipc_frame_header_t) == 16);
+STATIC_ASSERT(sizeof(uv__ipc_socket_xfer_info_t) == 632);
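+
+/* Illustrative examples, derived from the flag definitions above (not part of
+ * the upstream file): a frame carrying only user data has flags == 0x01 and a
+ * nonzero data_length; a frame transferring a listening TCP server socket
+ * without data has flags == 0x02; a frame transferring a connected TCP socket
+ * together with data has flags == 0x01 | 0x02 | 0x04 == 0x07. */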
+
+/* Coalesced write request. */
+typedef struct {
+ uv_write_t req; /* Internal heap-allocated write request. */
+ uv_write_t* user_req; /* Pointer to user-specified uv_write_t. */
+} uv__coalesced_write_t;
+
+
+static void eof_timer_init(uv_pipe_t* pipe);
+static void eof_timer_start(uv_pipe_t* pipe);
+static void eof_timer_stop(uv_pipe_t* pipe);
+static void eof_timer_cb(uv_timer_t* timer);
+static void eof_timer_destroy(uv_pipe_t* pipe);
+static void eof_timer_close_cb(uv_handle_t* handle);
+
+
+static void uv_unique_pipe_name(char* ptr, char* name, size_t size) {
+ snprintf(name, size, "\\\\?\\pipe\\uv\\%p-%lu", ptr, GetCurrentProcessId());
+}
+
+
+int uv_pipe_init(uv_loop_t* loop, uv_pipe_t* handle, int ipc) {
+ uv_stream_init(loop, (uv_stream_t*)handle, UV_NAMED_PIPE);
+
+ handle->reqs_pending = 0;
+ handle->handle = INVALID_HANDLE_VALUE;
+ handle->name = NULL;
+ handle->pipe.conn.ipc_remote_pid = 0;
+ handle->pipe.conn.ipc_data_frame.payload_remaining = 0;
+ QUEUE_INIT(&handle->pipe.conn.ipc_xfer_queue);
+ handle->pipe.conn.ipc_xfer_queue_length = 0;
+ handle->ipc = ipc;
+ handle->pipe.conn.non_overlapped_writes_tail = NULL;
+
+ return 0;
+}
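+
+
+/* Illustrative sketch (public libuv API, hypothetical names; not part of the
+ * upstream file): a minimal pipe server built on this code looks roughly like
+ *
+ *   uv_pipe_t server;
+ *   uv_pipe_init(loop, &server, 0);
+ *   uv_pipe_bind(&server, "\\\\.\\pipe\\example");
+ *   uv_listen((uv_stream_t*) &server, 128, on_connection);
+ *
+ * where uv_listen() ends up in uv_pipe_listen() below, and connections are
+ * picked up with uv_accept() (which calls uv_pipe_accept() below) from the
+ * on_connection callback. */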
+
+
+static void uv_pipe_connection_init(uv_pipe_t* handle) {
+ uv_connection_init((uv_stream_t*) handle);
+ handle->read_req.data = handle;
+ handle->pipe.conn.eof_timer = NULL;
+ assert(!(handle->flags & UV_HANDLE_PIPESERVER));
+ if (handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE) {
+ handle->pipe.conn.readfile_thread_handle = NULL;
+ InitializeCriticalSection(&handle->pipe.conn.readfile_thread_lock);
+ }
+}
+
+
+static HANDLE open_named_pipe(const WCHAR* name, DWORD* duplex_flags) {
+ HANDLE pipeHandle;
+
+ /*
+ * Assume that we have a duplex pipe first, so attempt to
+ * connect with GENERIC_READ | GENERIC_WRITE.
+ */
+ pipeHandle = CreateFileW(name,
+ GENERIC_READ | GENERIC_WRITE,
+ 0,
+ NULL,
+ OPEN_EXISTING,
+ FILE_FLAG_OVERLAPPED,
+ NULL);
+ if (pipeHandle != INVALID_HANDLE_VALUE) {
+ *duplex_flags = UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
+ return pipeHandle;
+ }
+
+ /*
+ * If the pipe is not duplex CreateFileW fails with
+ * ERROR_ACCESS_DENIED. In that case try to connect
+ * as read-only or write-only.
+ */
+ if (GetLastError() == ERROR_ACCESS_DENIED) {
+ pipeHandle = CreateFileW(name,
+ GENERIC_READ | FILE_WRITE_ATTRIBUTES,
+ 0,
+ NULL,
+ OPEN_EXISTING,
+ FILE_FLAG_OVERLAPPED,
+ NULL);
+
+ if (pipeHandle != INVALID_HANDLE_VALUE) {
+ *duplex_flags = UV_HANDLE_READABLE;
+ return pipeHandle;
+ }
+ }
+
+ if (GetLastError() == ERROR_ACCESS_DENIED) {
+ pipeHandle = CreateFileW(name,
+ GENERIC_WRITE | FILE_READ_ATTRIBUTES,
+ 0,
+ NULL,
+ OPEN_EXISTING,
+ FILE_FLAG_OVERLAPPED,
+ NULL);
+
+ if (pipeHandle != INVALID_HANDLE_VALUE) {
+ *duplex_flags = UV_HANDLE_WRITABLE;
+ return pipeHandle;
+ }
+ }
+
+ return INVALID_HANDLE_VALUE;
+}
+
+
+static void close_pipe(uv_pipe_t* pipe) {
+ assert(pipe->u.fd == -1 || pipe->u.fd > 2);
+ if (pipe->u.fd == -1)
+ CloseHandle(pipe->handle);
+ else
+ close(pipe->u.fd);
+
+ pipe->u.fd = -1;
+ pipe->handle = INVALID_HANDLE_VALUE;
+}
+
+
+int uv_stdio_pipe_server(uv_loop_t* loop, uv_pipe_t* handle, DWORD access,
+ char* name, size_t nameSize) {
+ HANDLE pipeHandle;
+ int err;
+ char* ptr = (char*)handle;
+
+ for (;;) {
+ uv_unique_pipe_name(ptr, name, nameSize);
+
+ pipeHandle = CreateNamedPipeA(name,
+ access | FILE_FLAG_OVERLAPPED | FILE_FLAG_FIRST_PIPE_INSTANCE | WRITE_DAC,
+ PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT, 1, 65536, 65536, 0,
+ NULL);
+
+ if (pipeHandle != INVALID_HANDLE_VALUE) {
+ /* No name collisions. We're done. */
+ break;
+ }
+
+ err = GetLastError();
+ if (err != ERROR_PIPE_BUSY && err != ERROR_ACCESS_DENIED) {
+ goto error;
+ }
+
+ /* Pipe name collision. Increment the pointer and try again. */
+ ptr++;
+ }
+
+ if (CreateIoCompletionPort(pipeHandle,
+ loop->iocp,
+ (ULONG_PTR)handle,
+ 0) == NULL) {
+ err = GetLastError();
+ goto error;
+ }
+
+ uv_pipe_connection_init(handle);
+ handle->handle = pipeHandle;
+
+ return 0;
+
+ error:
+ if (pipeHandle != INVALID_HANDLE_VALUE)
+ CloseHandle(pipeHandle);
+
+ return err;
+}
+
+
+static int uv_set_pipe_handle(uv_loop_t* loop,
+ uv_pipe_t* handle,
+ HANDLE pipeHandle,
+ int fd,
+ DWORD duplex_flags) {
+ NTSTATUS nt_status;
+ IO_STATUS_BLOCK io_status;
+ FILE_MODE_INFORMATION mode_info;
+ DWORD mode = PIPE_READMODE_BYTE | PIPE_WAIT;
+ DWORD current_mode = 0;
+ DWORD err = 0;
+
+ if (handle->flags & UV_HANDLE_PIPESERVER)
+ return UV_EINVAL;
+ if (handle->handle != INVALID_HANDLE_VALUE)
+ return UV_EBUSY;
+
+ if (!SetNamedPipeHandleState(pipeHandle, &mode, NULL, NULL)) {
+ err = GetLastError();
+ if (err == ERROR_ACCESS_DENIED) {
+ /*
+ * SetNamedPipeHandleState can fail if the handle doesn't have either
+ * GENERIC_WRITE or FILE_WRITE_ATTRIBUTES.
+ * But if the handle already has the desired wait and blocking modes
+ * we can continue.
+ */
+ if (!GetNamedPipeHandleState(pipeHandle, &current_mode, NULL, NULL,
+ NULL, NULL, 0)) {
+ return -1;
+ } else if (current_mode & PIPE_NOWAIT) {
+ SetLastError(ERROR_ACCESS_DENIED);
+ return -1;
+ }
+ } else {
+ /* If this returns ERROR_INVALID_PARAMETER we probably opened
+ * something that is not a pipe. */
+ if (err == ERROR_INVALID_PARAMETER) {
+ SetLastError(WSAENOTSOCK);
+ }
+ return -1;
+ }
+ }
+
+ /* Check if the pipe was created with FILE_FLAG_OVERLAPPED. */
+ nt_status = pNtQueryInformationFile(pipeHandle,
+ &io_status,
+ &mode_info,
+ sizeof(mode_info),
+ FileModeInformation);
+ if (nt_status != STATUS_SUCCESS) {
+ return -1;
+ }
+
+ if (mode_info.Mode & FILE_SYNCHRONOUS_IO_ALERT ||
+ mode_info.Mode & FILE_SYNCHRONOUS_IO_NONALERT) {
+ /* Non-overlapped pipe. */
+ handle->flags |= UV_HANDLE_NON_OVERLAPPED_PIPE;
+ } else {
+ /* Overlapped pipe. Try to associate with IOCP. */
+ if (CreateIoCompletionPort(pipeHandle,
+ loop->iocp,
+ (ULONG_PTR) handle,
+ 0) == NULL) {
+ handle->flags |= UV_HANDLE_EMULATE_IOCP;
+ }
+ }
+
+ handle->handle = pipeHandle;
+ handle->u.fd = fd;
+ handle->flags |= duplex_flags;
+
+ return 0;
+}
+
+
+static int pipe_alloc_accept(uv_loop_t* loop, uv_pipe_t* handle,
+ uv_pipe_accept_t* req, BOOL firstInstance) {
+ assert(req->pipeHandle == INVALID_HANDLE_VALUE);
+
+ req->pipeHandle =
+ CreateNamedPipeW(handle->name,
+ PIPE_ACCESS_DUPLEX | FILE_FLAG_OVERLAPPED | WRITE_DAC |
+ (firstInstance ? FILE_FLAG_FIRST_PIPE_INSTANCE : 0),
+ PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT,
+ PIPE_UNLIMITED_INSTANCES, 65536, 65536, 0, NULL);
+
+ if (req->pipeHandle == INVALID_HANDLE_VALUE) {
+ return 0;
+ }
+
+ /* Associate it with IOCP so we can get events. */
+ if (CreateIoCompletionPort(req->pipeHandle,
+ loop->iocp,
+ (ULONG_PTR) handle,
+ 0) == NULL) {
+ uv_fatal_error(GetLastError(), "CreateIoCompletionPort");
+ }
+
+ /* Stash a handle in the server object for use from places such as
+ * getsockname and chmod. As we transfer ownership of these to client
+ * objects, we'll allocate new ones here. */
+ handle->handle = req->pipeHandle;
+
+ return 1;
+}
+
+
+static DWORD WINAPI pipe_shutdown_thread_proc(void* parameter) {
+ uv_loop_t* loop;
+ uv_pipe_t* handle;
+ uv_shutdown_t* req;
+
+ req = (uv_shutdown_t*) parameter;
+ assert(req);
+ handle = (uv_pipe_t*) req->handle;
+ assert(handle);
+ loop = handle->loop;
+ assert(loop);
+
+ FlushFileBuffers(handle->handle);
+
+ /* Post completed */
+ POST_COMPLETION_FOR_REQ(loop, req);
+
+ return 0;
+}
+
+
+void uv_pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle) {
+ int err;
+ DWORD result;
+ uv_shutdown_t* req;
+ NTSTATUS nt_status;
+ IO_STATUS_BLOCK io_status;
+ FILE_PIPE_LOCAL_INFORMATION pipe_info;
+ uv__ipc_xfer_queue_item_t* xfer_queue_item;
+
+ if ((handle->flags & UV_HANDLE_CONNECTION) &&
+ handle->stream.conn.shutdown_req != NULL &&
+ handle->stream.conn.write_reqs_pending == 0) {
+ req = handle->stream.conn.shutdown_req;
+
+ /* Clear the shutdown_req field so we don't go here again. */
+ handle->stream.conn.shutdown_req = NULL;
+
+ if (handle->flags & UV_HANDLE_CLOSING) {
+ UNREGISTER_HANDLE_REQ(loop, handle, req);
+
+ /* Already closing. Cancel the shutdown. */
+ if (req->cb) {
+ req->cb(req, UV_ECANCELED);
+ }
+
+ DECREASE_PENDING_REQ_COUNT(handle);
+ return;
+ }
+
+ /* Try to avoid flushing the pipe buffer in the thread pool. */
+ nt_status = pNtQueryInformationFile(handle->handle,
+ &io_status,
+ &pipe_info,
+ sizeof pipe_info,
+ FilePipeLocalInformation);
+
+ if (nt_status != STATUS_SUCCESS) {
+ /* Failure */
+ UNREGISTER_HANDLE_REQ(loop, handle, req);
+
+ handle->flags |= UV_HANDLE_WRITABLE; /* Questionable */
+ if (req->cb) {
+ err = pRtlNtStatusToDosError(nt_status);
+ req->cb(req, uv_translate_sys_error(err));
+ }
+
+ DECREASE_PENDING_REQ_COUNT(handle);
+ return;
+ }
+
+ if (pipe_info.OutboundQuota == pipe_info.WriteQuotaAvailable) {
+ /* Short-circuit, no need to call FlushFileBuffers. */
+ uv_insert_pending_req(loop, (uv_req_t*) req);
+ return;
+ }
+
+ /* Run FlushFileBuffers in the thread pool. */
+ result = QueueUserWorkItem(pipe_shutdown_thread_proc,
+ req,
+ WT_EXECUTELONGFUNCTION);
+ if (result) {
+ return;
+
+ } else {
+ /* Failure. */
+ UNREGISTER_HANDLE_REQ(loop, handle, req);
+
+ handle->flags |= UV_HANDLE_WRITABLE; /* Questionable */
+ if (req->cb) {
+ err = GetLastError();
+ req->cb(req, uv_translate_sys_error(err));
+ }
+
+ DECREASE_PENDING_REQ_COUNT(handle);
+ return;
+ }
+ }
+
+ if (handle->flags & UV_HANDLE_CLOSING &&
+ handle->reqs_pending == 0) {
+ assert(!(handle->flags & UV_HANDLE_CLOSED));
+
+ if (handle->flags & UV_HANDLE_CONNECTION) {
+ /* Free pending sockets */
+ while (!QUEUE_EMPTY(&handle->pipe.conn.ipc_xfer_queue)) {
+ QUEUE* q;
+ SOCKET socket;
+
+ q = QUEUE_HEAD(&handle->pipe.conn.ipc_xfer_queue);
+ QUEUE_REMOVE(q);
+ xfer_queue_item = QUEUE_DATA(q, uv__ipc_xfer_queue_item_t, member);
+
+ /* Materialize socket and close it */
+ socket = WSASocketW(FROM_PROTOCOL_INFO,
+ FROM_PROTOCOL_INFO,
+ FROM_PROTOCOL_INFO,
+ &xfer_queue_item->xfer_info.socket_info,
+ 0,
+ WSA_FLAG_OVERLAPPED);
+ uv__free(xfer_queue_item);
+
+ if (socket != INVALID_SOCKET)
+ closesocket(socket);
+ }
+ handle->pipe.conn.ipc_xfer_queue_length = 0;
+
+ if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+ if (handle->read_req.wait_handle != INVALID_HANDLE_VALUE) {
+ UnregisterWait(handle->read_req.wait_handle);
+ handle->read_req.wait_handle = INVALID_HANDLE_VALUE;
+ }
+ if (handle->read_req.event_handle != NULL) {
+ CloseHandle(handle->read_req.event_handle);
+ handle->read_req.event_handle = NULL;
+ }
+ }
+
+ if (handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE)
+ DeleteCriticalSection(&handle->pipe.conn.readfile_thread_lock);
+ }
+
+ if (handle->flags & UV_HANDLE_PIPESERVER) {
+ assert(handle->pipe.serv.accept_reqs);
+ uv__free(handle->pipe.serv.accept_reqs);
+ handle->pipe.serv.accept_reqs = NULL;
+ }
+
+ uv__handle_close(handle);
+ }
+}
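+
+/* Illustrative note (not part of the upstream file): as the checks above show,
+ * uv_pipe_endgame() first resolves any outstanding shutdown request and only
+ * finishes closing the handle once it is flagged UV_HANDLE_CLOSING and all of
+ * its pending requests have completed (reqs_pending == 0). */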
+
+
+void uv_pipe_pending_instances(uv_pipe_t* handle, int count) {
+ if (handle->flags & UV_HANDLE_BOUND)
+ return;
+ handle->pipe.serv.pending_instances = count;
+ handle->flags |= UV_HANDLE_PIPESERVER;
+}
+
+
+/* Creates a pipe server. */
+int uv_pipe_bind(uv_pipe_t* handle, const char* name) {
+ uv_loop_t* loop = handle->loop;
+ int i, err, nameSize;
+ uv_pipe_accept_t* req;
+
+ if (handle->flags & UV_HANDLE_BOUND) {
+ return UV_EINVAL;
+ }
+
+ if (!name) {
+ return UV_EINVAL;
+ }
+
+ if (!(handle->flags & UV_HANDLE_PIPESERVER)) {
+ handle->pipe.serv.pending_instances = default_pending_pipe_instances;
+ }
+
+ handle->pipe.serv.accept_reqs = (uv_pipe_accept_t*)
+ uv__malloc(sizeof(uv_pipe_accept_t) * handle->pipe.serv.pending_instances);
+ if (!handle->pipe.serv.accept_reqs) {
+ uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+ }
+
+ for (i = 0; i < handle->pipe.serv.pending_instances; i++) {
+ req = &handle->pipe.serv.accept_reqs[i];
+ UV_REQ_INIT(req, UV_ACCEPT);
+ req->data = handle;
+ req->pipeHandle = INVALID_HANDLE_VALUE;
+ req->next_pending = NULL;
+ }
+
+ /* Convert name to UTF16. */
+ nameSize = MultiByteToWideChar(CP_UTF8, 0, name, -1, NULL, 0) * sizeof(WCHAR);
+ handle->name = uv__malloc(nameSize);
+ if (!handle->name) {
+ uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+ }
+
+ if (!MultiByteToWideChar(CP_UTF8,
+ 0,
+ name,
+ -1,
+ handle->name,
+ nameSize / sizeof(WCHAR))) {
+ err = GetLastError();
+ goto error;
+ }
+
+ /*
+ * Attempt to create the first pipe with FILE_FLAG_FIRST_PIPE_INSTANCE.
+ * If this fails then there's already a pipe server for the given pipe name.
+ */
+ if (!pipe_alloc_accept(loop,
+ handle,
+ &handle->pipe.serv.accept_reqs[0],
+ TRUE)) {
+ err = GetLastError();
+ if (err == ERROR_ACCESS_DENIED) {
+ err = WSAEADDRINUSE; /* Translates to UV_EADDRINUSE. */
+ } else if (err == ERROR_PATH_NOT_FOUND || err == ERROR_INVALID_NAME) {
+ err = WSAEACCES; /* Translates to UV_EACCES. */
+ }
+ goto error;
+ }
+
+ handle->pipe.serv.pending_accepts = NULL;
+ handle->flags |= UV_HANDLE_PIPESERVER;
+ handle->flags |= UV_HANDLE_BOUND;
+
+ return 0;
+
+error:
+ if (handle->name) {
+ uv__free(handle->name);
+ handle->name = NULL;
+ }
+
+ return uv_translate_sys_error(err);
+}
+
+
+static DWORD WINAPI pipe_connect_thread_proc(void* parameter) {
+ uv_loop_t* loop;
+ uv_pipe_t* handle;
+ uv_connect_t* req;
+ HANDLE pipeHandle = INVALID_HANDLE_VALUE;
+ DWORD duplex_flags;
+
+ req = (uv_connect_t*) parameter;
+ assert(req);
+ handle = (uv_pipe_t*) req->handle;
+ assert(handle);
+ loop = handle->loop;
+ assert(loop);
+
+ /* We're here because CreateFile on a pipe returned ERROR_PIPE_BUSY. We wait
+ * for the pipe to become available with WaitNamedPipe. */
+ while (WaitNamedPipeW(handle->name, 30000)) {
+ /* The pipe is now available, try to connect. */
+ pipeHandle = open_named_pipe(handle->name, &duplex_flags);
+ if (pipeHandle != INVALID_HANDLE_VALUE)
+ break;
+
+ SwitchToThread();
+ }
+
+ if (pipeHandle != INVALID_HANDLE_VALUE &&
+ !uv_set_pipe_handle(loop, handle, pipeHandle, -1, duplex_flags)) {
+ SET_REQ_SUCCESS(req);
+ } else {
+ SET_REQ_ERROR(req, GetLastError());
+ }
+
+ /* Post completed */
+ POST_COMPLETION_FOR_REQ(loop, req);
+
+ return 0;
+}
+
+
+void uv_pipe_connect(uv_connect_t* req, uv_pipe_t* handle,
+ const char* name, uv_connect_cb cb) {
+ uv_loop_t* loop = handle->loop;
+ int err, nameSize;
+ HANDLE pipeHandle = INVALID_HANDLE_VALUE;
+ DWORD duplex_flags;
+
+ UV_REQ_INIT(req, UV_CONNECT);
+ req->handle = (uv_stream_t*) handle;
+ req->cb = cb;
+
+ /* Convert name to UTF16. */
+ nameSize = MultiByteToWideChar(CP_UTF8, 0, name, -1, NULL, 0) * sizeof(WCHAR);
+ handle->name = uv__malloc(nameSize);
+ if (!handle->name) {
+ uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+ }
+
+ if (!MultiByteToWideChar(CP_UTF8,
+ 0,
+ name,
+ -1,
+ handle->name,
+ nameSize / sizeof(WCHAR))) {
+ err = GetLastError();
+ goto error;
+ }
+
+ pipeHandle = open_named_pipe(handle->name, &duplex_flags);
+ if (pipeHandle == INVALID_HANDLE_VALUE) {
+ if (GetLastError() == ERROR_PIPE_BUSY) {
+ /* Wait for the server to make a pipe instance available. */
+ if (!QueueUserWorkItem(&pipe_connect_thread_proc,
+ req,
+ WT_EXECUTELONGFUNCTION)) {
+ err = GetLastError();
+ goto error;
+ }
+
+ REGISTER_HANDLE_REQ(loop, handle, req);
+ handle->reqs_pending++;
+
+ return;
+ }
+
+ err = GetLastError();
+ goto error;
+ }
+
+ assert(pipeHandle != INVALID_HANDLE_VALUE);
+
+ if (uv_set_pipe_handle(loop,
+ (uv_pipe_t*) req->handle,
+ pipeHandle,
+ -1,
+ duplex_flags)) {
+ err = GetLastError();
+ goto error;
+ }
+
+ SET_REQ_SUCCESS(req);
+ uv_insert_pending_req(loop, (uv_req_t*) req);
+ handle->reqs_pending++;
+ REGISTER_HANDLE_REQ(loop, handle, req);
+ return;
+
+error:
+ if (handle->name) {
+ uv__free(handle->name);
+ handle->name = NULL;
+ }
+
+ if (pipeHandle != INVALID_HANDLE_VALUE) {
+ CloseHandle(pipeHandle);
+ }
+
+ /* Make this req pending reporting an error. */
+ SET_REQ_ERROR(req, err);
+ uv_insert_pending_req(loop, (uv_req_t*) req);
+ handle->reqs_pending++;
+ REGISTER_HANDLE_REQ(loop, handle, req);
+ return;
+}
+
+
+void uv__pipe_interrupt_read(uv_pipe_t* handle) {
+ BOOL r;
+
+ if (!(handle->flags & UV_HANDLE_READ_PENDING))
+ return; /* No pending reads. */
+ if (handle->flags & UV_HANDLE_CANCELLATION_PENDING)
+ return; /* Already cancelled. */
+ if (handle->handle == INVALID_HANDLE_VALUE)
+ return; /* Pipe handle closed. */
+
+ if (!(handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE)) {
+ /* Cancel asynchronous read. */
+ r = CancelIoEx(handle->handle, &handle->read_req.u.io.overlapped);
+ assert(r || GetLastError() == ERROR_NOT_FOUND);
+
+ } else {
+ /* Cancel synchronous read (which is happening in the thread pool). */
+ HANDLE thread;
+ volatile HANDLE* thread_ptr = &handle->pipe.conn.readfile_thread_handle;
+
+ EnterCriticalSection(&handle->pipe.conn.readfile_thread_lock);
+
+ thread = *thread_ptr;
+ if (thread == NULL) {
+ /* The thread pool thread has not yet reached the point of blocking; we
+ * can pre-empt it by setting thread_handle to INVALID_HANDLE_VALUE. */
+ *thread_ptr = INVALID_HANDLE_VALUE;
+
+ } else {
+ /* Spin until the thread has acknowledged (by setting the thread to
+ * INVALID_HANDLE_VALUE) that it is past the point of blocking. */
+ while (thread != INVALID_HANDLE_VALUE) {
+ r = CancelSynchronousIo(thread);
+ assert(r || GetLastError() == ERROR_NOT_FOUND);
+ SwitchToThread(); /* Yield thread. */
+ thread = *thread_ptr;
+ }
+ }
+
+ LeaveCriticalSection(&handle->pipe.conn.readfile_thread_lock);
+ }
+
+ /* Set flag to indicate that read has been cancelled. */
+ handle->flags |= UV_HANDLE_CANCELLATION_PENDING;
+}
+
+
+void uv__pipe_read_stop(uv_pipe_t* handle) {
+ handle->flags &= ~UV_HANDLE_READING;
+ DECREASE_ACTIVE_COUNT(handle->loop, handle);
+
+ uv__pipe_interrupt_read(handle);
+}
+
+
+/* Cleans up uv_pipe_t (server or connection) and all resources associated with
+ * it. */
+void uv_pipe_cleanup(uv_loop_t* loop, uv_pipe_t* handle) {
+ int i;
+ HANDLE pipeHandle;
+
+ uv__pipe_interrupt_read(handle);
+
+ if (handle->name) {
+ uv__free(handle->name);
+ handle->name = NULL;
+ }
+
+ if (handle->flags & UV_HANDLE_PIPESERVER) {
+ for (i = 0; i < handle->pipe.serv.pending_instances; i++) {
+ pipeHandle = handle->pipe.serv.accept_reqs[i].pipeHandle;
+ if (pipeHandle != INVALID_HANDLE_VALUE) {
+ CloseHandle(pipeHandle);
+ handle->pipe.serv.accept_reqs[i].pipeHandle = INVALID_HANDLE_VALUE;
+ }
+ }
+ handle->handle = INVALID_HANDLE_VALUE;
+ }
+
+ if (handle->flags & UV_HANDLE_CONNECTION) {
+ handle->flags &= ~UV_HANDLE_WRITABLE;
+ eof_timer_destroy(handle);
+ }
+
+ if ((handle->flags & UV_HANDLE_CONNECTION)
+ && handle->handle != INVALID_HANDLE_VALUE)
+ close_pipe(handle);
+}
+
+
+void uv_pipe_close(uv_loop_t* loop, uv_pipe_t* handle) {
+ if (handle->flags & UV_HANDLE_READING) {
+ handle->flags &= ~UV_HANDLE_READING;
+ DECREASE_ACTIVE_COUNT(loop, handle);
+ }
+
+ if (handle->flags & UV_HANDLE_LISTENING) {
+ handle->flags &= ~UV_HANDLE_LISTENING;
+ DECREASE_ACTIVE_COUNT(loop, handle);
+ }
+
+ uv_pipe_cleanup(loop, handle);
+
+ if (handle->reqs_pending == 0) {
+ uv_want_endgame(loop, (uv_handle_t*) handle);
+ }
+
+ handle->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
+ uv__handle_closing(handle);
+}
+
+
+static void uv_pipe_queue_accept(uv_loop_t* loop, uv_pipe_t* handle,
+ uv_pipe_accept_t* req, BOOL firstInstance) {
+ assert(handle->flags & UV_HANDLE_LISTENING);
+
+ if (!firstInstance && !pipe_alloc_accept(loop, handle, req, FALSE)) {
+ SET_REQ_ERROR(req, GetLastError());
+ uv_insert_pending_req(loop, (uv_req_t*) req);
+ handle->reqs_pending++;
+ return;
+ }
+
+ assert(req->pipeHandle != INVALID_HANDLE_VALUE);
+
+ /* Prepare the overlapped structure. */
+ memset(&(req->u.io.overlapped), 0, sizeof(req->u.io.overlapped));
+
+ if (!ConnectNamedPipe(req->pipeHandle, &req->u.io.overlapped) &&
+ GetLastError() != ERROR_IO_PENDING) {
+ if (GetLastError() == ERROR_PIPE_CONNECTED) {
+ SET_REQ_SUCCESS(req);
+ } else {
+ CloseHandle(req->pipeHandle);
+ req->pipeHandle = INVALID_HANDLE_VALUE;
+ /* Make this req pending reporting an error. */
+ SET_REQ_ERROR(req, GetLastError());
+ }
+ uv_insert_pending_req(loop, (uv_req_t*) req);
+ handle->reqs_pending++;
+ return;
+ }
+
+ /* Wait for completion via IOCP */
+ handle->reqs_pending++;
+}
+
+
+int uv_pipe_accept(uv_pipe_t* server, uv_stream_t* client) {
+ uv_loop_t* loop = server->loop;
+ uv_pipe_t* pipe_client;
+ uv_pipe_accept_t* req;
+ QUEUE* q;
+ uv__ipc_xfer_queue_item_t* item;
+ int err;
+
+ if (server->ipc) {
+ if (QUEUE_EMPTY(&server->pipe.conn.ipc_xfer_queue)) {
+ /* No valid pending sockets. */
+ return WSAEWOULDBLOCK;
+ }
+
+ q = QUEUE_HEAD(&server->pipe.conn.ipc_xfer_queue);
+ QUEUE_REMOVE(q);
+ server->pipe.conn.ipc_xfer_queue_length--;
+ item = QUEUE_DATA(q, uv__ipc_xfer_queue_item_t, member);
+
+ err = uv__tcp_xfer_import(
+ (uv_tcp_t*) client, item->xfer_type, &item->xfer_info);
+ if (err != 0)
+ return err;
+
+ uv__free(item);
+
+ } else {
+ pipe_client = (uv_pipe_t*) client;
+
+ /* Find a connection instance that has been connected, but not yet
+ * accepted. */
+ req = server->pipe.serv.pending_accepts;
+
+ if (!req) {
+ /* No valid connections found, so we error out. */
+ return WSAEWOULDBLOCK;
+ }
+
+ /* Initialize the client handle and copy the pipeHandle to the client */
+ uv_pipe_connection_init(pipe_client);
+ pipe_client->handle = req->pipeHandle;
+ pipe_client->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
+
+ /* Prepare the req to pick up a new connection */
+ server->pipe.serv.pending_accepts = req->next_pending;
+ req->next_pending = NULL;
+ req->pipeHandle = INVALID_HANDLE_VALUE;
+
+ server->handle = INVALID_HANDLE_VALUE;
+ if (!(server->flags & UV_HANDLE_CLOSING)) {
+ uv_pipe_queue_accept(loop, server, req, FALSE);
+ }
+ }
+
+ return 0;
+}
+
+
+/* Starts listening for connections for the given pipe. */
+int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb) {
+ uv_loop_t* loop = handle->loop;
+ int i;
+
+ if (handle->flags & UV_HANDLE_LISTENING) {
+ handle->stream.serv.connection_cb = cb;
+ }
+
+ if (!(handle->flags & UV_HANDLE_BOUND)) {
+ return WSAEINVAL;
+ }
+
+ if (handle->flags & UV_HANDLE_READING) {
+ return WSAEISCONN;
+ }
+
+ if (!(handle->flags & UV_HANDLE_PIPESERVER)) {
+ return ERROR_NOT_SUPPORTED;
+ }
+
+ if (handle->ipc) {
+ return WSAEINVAL;
+ }
+
+ handle->flags |= UV_HANDLE_LISTENING;
+ INCREASE_ACTIVE_COUNT(loop, handle);
+ handle->stream.serv.connection_cb = cb;
+
+ /* First pipe handle should have already been created in uv_pipe_bind */
+ assert(handle->pipe.serv.accept_reqs[0].pipeHandle != INVALID_HANDLE_VALUE);
+
+ for (i = 0; i < handle->pipe.serv.pending_instances; i++) {
+ uv_pipe_queue_accept(loop, handle, &handle->pipe.serv.accept_reqs[i], i == 0);
+ }
+
+ return 0;
+}
+
+
+static DWORD WINAPI uv_pipe_zero_readfile_thread_proc(void* arg) {
+ uv_read_t* req = (uv_read_t*) arg;
+ uv_pipe_t* handle = (uv_pipe_t*) req->data;
+ uv_loop_t* loop = handle->loop;
+ volatile HANDLE* thread_ptr = &handle->pipe.conn.readfile_thread_handle;
+ CRITICAL_SECTION* lock = &handle->pipe.conn.readfile_thread_lock;
+ HANDLE thread;
+ DWORD bytes;
+ DWORD err;
+
+ assert(req->type == UV_READ);
+ assert(handle->type == UV_NAMED_PIPE);
+
+ err = 0;
+
+ /* Create a handle to the current thread. */
+ if (!DuplicateHandle(GetCurrentProcess(),
+ GetCurrentThread(),
+ GetCurrentProcess(),
+ &thread,
+ 0,
+ FALSE,
+ DUPLICATE_SAME_ACCESS)) {
+ err = GetLastError();
+ goto out1;
+ }
+
+ /* The lock needs to be held when the thread handle is modified. */
+ EnterCriticalSection(lock);
+ if (*thread_ptr == INVALID_HANDLE_VALUE) {
+ /* uv__pipe_interrupt_read() cancelled reading before we got here. */
+ err = ERROR_OPERATION_ABORTED;
+ } else {
+ /* Let main thread know which worker thread is doing the blocking read. */
+ assert(*thread_ptr == NULL);
+ *thread_ptr = thread;
+ }
+ LeaveCriticalSection(lock);
+
+ if (err)
+ goto out2;
+
+ /* Block the thread until data is available on the pipe, or the read is
+ * cancelled. */
+ if (!ReadFile(handle->handle, &uv_zero_, 0, &bytes, NULL))
+ err = GetLastError();
+
+ /* Let the main thread know the worker is past the point of blocking. */
+ assert(thread == *thread_ptr);
+ *thread_ptr = INVALID_HANDLE_VALUE;
+
+ /* Briefly acquire the mutex. Since the main thread holds the lock while it
+ * is spinning trying to cancel this thread's I/O, we will block here until
+ * it stops doing that. */
+ EnterCriticalSection(lock);
+ LeaveCriticalSection(lock);
+
+out2:
+ /* Close the handle to the current thread. */
+ CloseHandle(thread);
+
+out1:
+ /* Set request status and post a completion record to the IOCP. */
+ if (err)
+ SET_REQ_ERROR(req, err);
+ else
+ SET_REQ_SUCCESS(req);
+ POST_COMPLETION_FOR_REQ(loop, req);
+
+ return 0;
+}
+
+
+static DWORD WINAPI uv_pipe_writefile_thread_proc(void* parameter) {
+ int result;
+ DWORD bytes;
+ uv_write_t* req = (uv_write_t*) parameter;
+ uv_pipe_t* handle = (uv_pipe_t*) req->handle;
+ uv_loop_t* loop = handle->loop;
+
+ assert(req != NULL);
+ assert(req->type == UV_WRITE);
+ assert(handle->type == UV_NAMED_PIPE);
+ assert(req->write_buffer.base);
+
+ result = WriteFile(handle->handle,
+ req->write_buffer.base,
+ req->write_buffer.len,
+ &bytes,
+ NULL);
+
+ if (!result) {
+ SET_REQ_ERROR(req, GetLastError());
+ }
+
+ POST_COMPLETION_FOR_REQ(loop, req);
+ return 0;
+}
+
+
+static void CALLBACK post_completion_read_wait(void* context, BOOLEAN timed_out) {
+ uv_read_t* req;
+ uv_tcp_t* handle;
+
+ req = (uv_read_t*) context;
+ assert(req != NULL);
+ handle = (uv_tcp_t*)req->data;
+ assert(handle != NULL);
+ assert(!timed_out);
+
+ if (!PostQueuedCompletionStatus(handle->loop->iocp,
+ req->u.io.overlapped.InternalHigh,
+ 0,
+ &req->u.io.overlapped)) {
+ uv_fatal_error(GetLastError(), "PostQueuedCompletionStatus");
+ }
+}
+
+
+static void CALLBACK post_completion_write_wait(void* context, BOOLEAN timed_out) {
+ uv_write_t* req;
+ uv_tcp_t* handle;
+
+ req = (uv_write_t*) context;
+ assert(req != NULL);
+ handle = (uv_tcp_t*)req->handle;
+ assert(handle != NULL);
+ assert(!timed_out);
+
+ if (!PostQueuedCompletionStatus(handle->loop->iocp,
+ req->u.io.overlapped.InternalHigh,
+ 0,
+ &req->u.io.overlapped)) {
+ uv_fatal_error(GetLastError(), "PostQueuedCompletionStatus");
+ }
+}
+
+
+static void uv_pipe_queue_read(uv_loop_t* loop, uv_pipe_t* handle) {
+ uv_read_t* req;
+ int result;
+
+ assert(handle->flags & UV_HANDLE_READING);
+ assert(!(handle->flags & UV_HANDLE_READ_PENDING));
+
+ assert(handle->handle != INVALID_HANDLE_VALUE);
+
+ req = &handle->read_req;
+
+ if (handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE) {
+ handle->pipe.conn.readfile_thread_handle = NULL; /* Reset cancellation. */
+ if (!QueueUserWorkItem(&uv_pipe_zero_readfile_thread_proc,
+ req,
+ WT_EXECUTELONGFUNCTION)) {
+ /* Make this req pending reporting an error. */
+ SET_REQ_ERROR(req, GetLastError());
+ goto error;
+ }
+ } else {
+ memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));
+ if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+ assert(req->event_handle != NULL);
+ req->u.io.overlapped.hEvent = (HANDLE) ((uintptr_t) req->event_handle | 1);
+ }
+
+ /* Do 0-read */
+ result = ReadFile(handle->handle,
+ &uv_zero_,
+ 0,
+ NULL,
+ &req->u.io.overlapped);
+
+ if (!result && GetLastError() != ERROR_IO_PENDING) {
+ /* Make this req pending reporting an error. */
+ SET_REQ_ERROR(req, GetLastError());
+ goto error;
+ }
+
+ if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+ if (req->wait_handle == INVALID_HANDLE_VALUE) {
+ if (!RegisterWaitForSingleObject(&req->wait_handle,
+ req->event_handle, post_completion_read_wait, (void*) req,
+ INFINITE, WT_EXECUTEINWAITTHREAD)) {
+ SET_REQ_ERROR(req, GetLastError());
+ goto error;
+ }
+ }
+ }
+ }
+
+ /* Start the eof timer if there is one */
+ eof_timer_start(handle);
+ handle->flags |= UV_HANDLE_READ_PENDING;
+ handle->reqs_pending++;
+ return;
+
+error:
+ uv_insert_pending_req(loop, (uv_req_t*)req);
+ handle->flags |= UV_HANDLE_READ_PENDING;
+ handle->reqs_pending++;
+}
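+
+/* Illustrative note (not part of the upstream file): the zero-byte ReadFile()
+ * issued above only signals that data has become readable; once its
+ * completion is processed, the actual payload is pulled synchronously by
+ * uv__pipe_read_data() further down, using a buffer obtained from the user's
+ * alloc_cb. */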
+
+
+int uv_pipe_read_start(uv_pipe_t* handle,
+ uv_alloc_cb alloc_cb,
+ uv_read_cb read_cb) {
+ uv_loop_t* loop = handle->loop;
+
+ handle->flags |= UV_HANDLE_READING;
+ INCREASE_ACTIVE_COUNT(loop, handle);
+ handle->read_cb = read_cb;
+ handle->alloc_cb = alloc_cb;
+
+ /* If reading was stopped and then started again, there could still be a read
+ * request pending. */
+ if (!(handle->flags & UV_HANDLE_READ_PENDING)) {
+ if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
+ handle->read_req.event_handle == NULL) {
+ handle->read_req.event_handle = CreateEvent(NULL, 0, 0, NULL);
+ if (handle->read_req.event_handle == NULL) {
+ uv_fatal_error(GetLastError(), "CreateEvent");
+ }
+ }
+ uv_pipe_queue_read(loop, handle);
+ }
+
+ return 0;
+}
+
+
+static void uv_insert_non_overlapped_write_req(uv_pipe_t* handle,
+ uv_write_t* req) {
+ req->next_req = NULL;
+ if (handle->pipe.conn.non_overlapped_writes_tail) {
+ req->next_req =
+ handle->pipe.conn.non_overlapped_writes_tail->next_req;
+ handle->pipe.conn.non_overlapped_writes_tail->next_req = (uv_req_t*)req;
+ handle->pipe.conn.non_overlapped_writes_tail = req;
+ } else {
+ req->next_req = (uv_req_t*)req;
+ handle->pipe.conn.non_overlapped_writes_tail = req;
+ }
+}
+
+
+static uv_write_t* uv_remove_non_overlapped_write_req(uv_pipe_t* handle) {
+ uv_write_t* req;
+
+ if (handle->pipe.conn.non_overlapped_writes_tail) {
+ req = (uv_write_t*)handle->pipe.conn.non_overlapped_writes_tail->next_req;
+
+ if (req == handle->pipe.conn.non_overlapped_writes_tail) {
+ handle->pipe.conn.non_overlapped_writes_tail = NULL;
+ } else {
+ handle->pipe.conn.non_overlapped_writes_tail->next_req =
+ req->next_req;
+ }
+
+ return req;
+ } else {
+ /* queue empty */
+ return NULL;
+ }
+}
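+
+/* Descriptive note (not part of the upstream file): the two helpers above keep
+ * pending non-overlapped writes in a circular singly-linked list in which
+ * non_overlapped_writes_tail->next_req is the head, so inserting at the tail
+ * and removing from the head are both O(1) with a single stored pointer. */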
+
+
+static void uv_queue_non_overlapped_write(uv_pipe_t* handle) {
+ uv_write_t* req = uv_remove_non_overlapped_write_req(handle);
+ if (req) {
+ if (!QueueUserWorkItem(&uv_pipe_writefile_thread_proc,
+ req,
+ WT_EXECUTELONGFUNCTION)) {
+ uv_fatal_error(GetLastError(), "QueueUserWorkItem");
+ }
+ }
+}
+
+
+static int uv__build_coalesced_write_req(uv_write_t* user_req,
+ const uv_buf_t bufs[],
+ size_t nbufs,
+ uv_write_t** req_out,
+ uv_buf_t* write_buf_out) {
+ /* Pack into a single heap-allocated buffer:
+ * (a) a uv_write_t structure where libuv stores the actual state.
+ * (b) a pointer to the original uv_write_t.
+ * (c) data from all `bufs` entries.
+ */
+ char* heap_buffer;
+ size_t heap_buffer_length, heap_buffer_offset;
+ uv__coalesced_write_t* coalesced_write_req; /* (a) + (b) */
+ char* data_start; /* (c) */
+ size_t data_length;
+ unsigned int i;
+
+ /* Compute the combined size of all buffers from `bufs`. */
+ data_length = 0;
+ for (i = 0; i < nbufs; i++)
+ data_length += bufs[i].len;
+
+ /* The total combined size of data buffers should not exceed UINT32_MAX,
+ * because WriteFile() won't accept buffers larger than that. */
+ if (data_length > UINT32_MAX)
+ return WSAENOBUFS; /* Maps to UV_ENOBUFS. */
+
+ /* Compute heap buffer size. */
+ heap_buffer_length = sizeof *coalesced_write_req + /* (a) + (b) */
+ data_length; /* (c) */
+
+ /* Allocate buffer. */
+ heap_buffer = uv__malloc(heap_buffer_length);
+ if (heap_buffer == NULL)
+ return ERROR_NOT_ENOUGH_MEMORY; /* Maps to UV_ENOMEM. */
+
+ /* Copy uv_write_t information to the buffer. */
+ coalesced_write_req = (uv__coalesced_write_t*) heap_buffer;
+ coalesced_write_req->req = *user_req; /* copy (a) */
+ coalesced_write_req->req.coalesced = 1;
+ coalesced_write_req->user_req = user_req; /* copy (b) */
+ heap_buffer_offset = sizeof *coalesced_write_req; /* offset (a) + (b) */
+
+ /* Copy data buffers to the heap buffer. */
+ data_start = &heap_buffer[heap_buffer_offset];
+ for (i = 0; i < nbufs; i++) {
+ memcpy(&heap_buffer[heap_buffer_offset],
+ bufs[i].base,
+ bufs[i].len); /* copy (c) */
+ heap_buffer_offset += bufs[i].len; /* offset (c) */
+ }
+ assert(heap_buffer_offset == heap_buffer_length);
+
+ /* Set out arguments and return. */
+ *req_out = &coalesced_write_req->req;
+ *write_buf_out = uv_buf_init(data_start, (unsigned int) data_length);
+ return 0;
+}
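+
+/* Illustrative layout, restating the comment above (not part of the upstream
+ * file): for a write of two buffers "ab" and "cd" the single heap allocation
+ * is
+ *
+ *   [ uv__coalesced_write_t: copy of *user_req | user_req pointer ][ a b c d ]
+ *
+ * and the uv_buf_t returned through write_buf_out points at the four data
+ * bytes at the end of the allocation. */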
+
+
+static int uv__pipe_write_data(uv_loop_t* loop,
+ uv_write_t* req,
+ uv_pipe_t* handle,
+ const uv_buf_t bufs[],
+ size_t nbufs,
+ uv_write_cb cb,
+ int copy_always) {
+ int err;
+ int result;
+ uv_buf_t write_buf;
+
+ assert(handle->handle != INVALID_HANDLE_VALUE);
+
+ UV_REQ_INIT(req, UV_WRITE);
+ req->handle = (uv_stream_t*) handle;
+ req->send_handle = NULL;
+ req->cb = cb;
+ /* Private fields. */
+ req->coalesced = 0;
+ req->event_handle = NULL;
+ req->wait_handle = INVALID_HANDLE_VALUE;
+
+ /* Prepare the overlapped structure. */
+ memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));
+ if (handle->flags & (UV_HANDLE_EMULATE_IOCP | UV_HANDLE_BLOCKING_WRITES)) {
+ req->event_handle = CreateEvent(NULL, 0, 0, NULL);
+ if (req->event_handle == NULL) {
+ uv_fatal_error(GetLastError(), "CreateEvent");
+ }
+ req->u.io.overlapped.hEvent = (HANDLE) ((uintptr_t) req->event_handle | 1);
+ }
+ req->write_buffer = uv_null_buf_;
+
+ if (nbufs == 0) {
+ /* Write empty buffer. */
+ write_buf = uv_null_buf_;
+ } else if (nbufs == 1 && !copy_always) {
+ /* Write directly from bufs[0]. */
+ write_buf = bufs[0];
+ } else {
+ /* Coalesce all `bufs` into one big buffer. This also creates a new
+ * write-request structure that replaces the old one. */
+ err = uv__build_coalesced_write_req(req, bufs, nbufs, &req, &write_buf);
+ if (err != 0)
+ return err;
+ }
+
+ if ((handle->flags &
+ (UV_HANDLE_BLOCKING_WRITES | UV_HANDLE_NON_OVERLAPPED_PIPE)) ==
+ (UV_HANDLE_BLOCKING_WRITES | UV_HANDLE_NON_OVERLAPPED_PIPE)) {
+ DWORD bytes;
+ result =
+ WriteFile(handle->handle, write_buf.base, write_buf.len, &bytes, NULL);
+
+ if (!result) {
+ err = GetLastError();
+ return err;
+ } else {
+ /* Request completed immediately. */
+ req->u.io.queued_bytes = 0;
+ }
+
+ REGISTER_HANDLE_REQ(loop, handle, req);
+ handle->reqs_pending++;
+ handle->stream.conn.write_reqs_pending++;
+ POST_COMPLETION_FOR_REQ(loop, req);
+ return 0;
+ } else if (handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE) {
+ req->write_buffer = write_buf;
+ uv_insert_non_overlapped_write_req(handle, req);
+ if (handle->stream.conn.write_reqs_pending == 0) {
+ uv_queue_non_overlapped_write(handle);
+ }
+
+ /* Request queued; the write is performed later in the thread pool. */
+ req->u.io.queued_bytes = write_buf.len;
+ handle->write_queue_size += req->u.io.queued_bytes;
+ } else if (handle->flags & UV_HANDLE_BLOCKING_WRITES) {
+ /* Using overlapped IO, but wait for completion before returning */
+ result = WriteFile(handle->handle,
+ write_buf.base,
+ write_buf.len,
+ NULL,
+ &req->u.io.overlapped);
+
+ if (!result && GetLastError() != ERROR_IO_PENDING) {
+ err = GetLastError();
+ CloseHandle(req->event_handle);
+ req->event_handle = NULL;
+ return err;
+ }
+
+ if (result) {
+ /* Request completed immediately. */
+ req->u.io.queued_bytes = 0;
+ } else {
+ /* Request queued by the kernel. */
+ req->u.io.queued_bytes = write_buf.len;
+ handle->write_queue_size += req->u.io.queued_bytes;
+ if (WaitForSingleObject(req->event_handle, INFINITE) !=
+ WAIT_OBJECT_0) {
+ err = GetLastError();
+ CloseHandle(req->event_handle);
+ req->event_handle = NULL;
+ return err;
+ }
+ }
+ CloseHandle(req->event_handle);
+ req->event_handle = NULL;
+
+ REGISTER_HANDLE_REQ(loop, handle, req);
+ handle->reqs_pending++;
+ handle->stream.conn.write_reqs_pending++;
+ return 0;
+ } else {
+ result = WriteFile(handle->handle,
+ write_buf.base,
+ write_buf.len,
+ NULL,
+ &req->u.io.overlapped);
+
+ if (!result && GetLastError() != ERROR_IO_PENDING) {
+ return GetLastError();
+ }
+
+ if (result) {
+ /* Request completed immediately. */
+ req->u.io.queued_bytes = 0;
+ } else {
+ /* Request queued by the kernel. */
+ req->u.io.queued_bytes = write_buf.len;
+ handle->write_queue_size += req->u.io.queued_bytes;
+ }
+
+ if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+ if (!RegisterWaitForSingleObject(&req->wait_handle,
+ req->event_handle, post_completion_write_wait, (void*) req,
+ INFINITE, WT_EXECUTEINWAITTHREAD)) {
+ return GetLastError();
+ }
+ }
+ }
+
+ REGISTER_HANDLE_REQ(loop, handle, req);
+ handle->reqs_pending++;
+ handle->stream.conn.write_reqs_pending++;
+
+ return 0;
+}
+
+
+static DWORD uv__pipe_get_ipc_remote_pid(uv_pipe_t* handle) {
+ DWORD* pid = &handle->pipe.conn.ipc_remote_pid;
+
+ /* If both ends of the IPC pipe are owned by the same process,
+ * the remote end's pid may not yet be set. If so, set it here.
+ * TODO: this is weird; it'd probably be better to use a handshake. */
+ if (*pid == 0)
+ *pid = GetCurrentProcessId();
+
+ return *pid;
+}
+
+
+int uv__pipe_write_ipc(uv_loop_t* loop,
+ uv_write_t* req,
+ uv_pipe_t* handle,
+ const uv_buf_t data_bufs[],
+ size_t data_buf_count,
+ uv_stream_t* send_handle,
+ uv_write_cb cb) {
+ uv_buf_t stack_bufs[6];
+ uv_buf_t* bufs;
+ size_t buf_count, buf_index;
+ uv__ipc_frame_header_t frame_header;
+ uv__ipc_socket_xfer_type_t xfer_type = UV__IPC_SOCKET_XFER_NONE;
+ uv__ipc_socket_xfer_info_t xfer_info;
+ uint64_t data_length;
+ size_t i;
+ int err;
+
+ /* Compute the combined size of data buffers. */
+ data_length = 0;
+ for (i = 0; i < data_buf_count; i++)
+ data_length += data_bufs[i].len;
+ if (data_length > UINT32_MAX)
+ return WSAENOBUFS; /* Maps to UV_ENOBUFS. */
+
+ /* Prepare the frame's socket xfer payload. */
+ if (send_handle != NULL) {
+ uv_tcp_t* send_tcp_handle = (uv_tcp_t*) send_handle;
+
+ /* Verify that `send_handle` is indeed a tcp handle. */
+ if (send_tcp_handle->type != UV_TCP)
+ return ERROR_NOT_SUPPORTED;
+
+ /* Export the tcp handle. */
+ err = uv__tcp_xfer_export(send_tcp_handle,
+ uv__pipe_get_ipc_remote_pid(handle),
+ &xfer_type,
+ &xfer_info);
+ if (err != 0)
+ return err;
+ }
+
+ /* Compute the number of uv_buf_t's required. */
+ buf_count = 1 + data_buf_count; /* Frame header and data buffers. */
+ if (send_handle != NULL)
+ buf_count += 1; /* One extra for the socket xfer information. */
+
+ /* Use the on-stack buffer array if it is big enough; otherwise allocate
+ * space for it on the heap. */
+ if (buf_count < ARRAY_SIZE(stack_bufs)) {
+ /* Use on-stack buffer array. */
+ bufs = stack_bufs;
+ } else {
+ /* Use heap-allocated buffer array. */
+ bufs = uv__calloc(buf_count, sizeof(uv_buf_t));
+ if (bufs == NULL)
+ return ERROR_NOT_ENOUGH_MEMORY; /* Maps to UV_ENOMEM. */
+ }
+ buf_index = 0;
+
+ /* Initialize frame header and add it to the buffers list. */
+ memset(&frame_header, 0, sizeof frame_header);
+ bufs[buf_index++] = uv_buf_init((char*) &frame_header, sizeof frame_header);
+
+ if (send_handle != NULL) {
+ /* Add frame header flags. */
+ switch (xfer_type) {
+ case UV__IPC_SOCKET_XFER_TCP_CONNECTION:
+ frame_header.flags |= UV__IPC_FRAME_HAS_SOCKET_XFER |
+ UV__IPC_FRAME_XFER_IS_TCP_CONNECTION;
+ break;
+ case UV__IPC_SOCKET_XFER_TCP_SERVER:
+ frame_header.flags |= UV__IPC_FRAME_HAS_SOCKET_XFER;
+ break;
+ default:
+ assert(0); /* Unreachable. */
+ }
+ /* Add xfer info buffer. */
+ bufs[buf_index++] = uv_buf_init((char*) &xfer_info, sizeof xfer_info);
+ }
+
+ if (data_length > 0) {
+ /* Update frame header. */
+ frame_header.flags |= UV__IPC_FRAME_HAS_DATA;
+ frame_header.data_length = (uint32_t) data_length;
+ /* Add data buffers to buffers list. */
+ for (i = 0; i < data_buf_count; i++)
+ bufs[buf_index++] = data_bufs[i];
+ }
+
+ /* Write buffers. We set the `copy_always` flag, so it is not a problem that
+ * some of the written data lives on the stack. */
+ err = uv__pipe_write_data(loop, req, handle, bufs, buf_count, cb, 1);
+
+ /* If we had to heap-allocate the bufs array, free it now. */
+ if (bufs != stack_bufs) {
+ uv__free(bufs);
+ }
+
+ return err;
+}
+
+
+int uv__pipe_write(uv_loop_t* loop,
+ uv_write_t* req,
+ uv_pipe_t* handle,
+ const uv_buf_t bufs[],
+ size_t nbufs,
+ uv_stream_t* send_handle,
+ uv_write_cb cb) {
+ if (handle->ipc) {
+ /* IPC pipe write: use framing protocol. */
+ return uv__pipe_write_ipc(loop, req, handle, bufs, nbufs, send_handle, cb);
+ } else {
+ /* Non-IPC pipe write: put data on the wire directly. */
+ assert(send_handle == NULL);
+ return uv__pipe_write_data(loop, req, handle, bufs, nbufs, cb, 0);
+ }
+}
+
+
+static void uv_pipe_read_eof(uv_loop_t* loop, uv_pipe_t* handle,
+ uv_buf_t buf) {
+ /* If there is an eof timer running, we don't need it any more, so discard
+ * it. */
+ eof_timer_destroy(handle);
+
+ handle->flags &= ~UV_HANDLE_READABLE;
+ uv_read_stop((uv_stream_t*) handle);
+
+ handle->read_cb((uv_stream_t*) handle, UV_EOF, &buf);
+}
+
+
+static void uv_pipe_read_error(uv_loop_t* loop, uv_pipe_t* handle, int error,
+ uv_buf_t buf) {
+ /* If there is an eof timer running, we don't need it any more, so discard
+ * it. */
+ eof_timer_destroy(handle);
+
+ uv_read_stop((uv_stream_t*) handle);
+
+ handle->read_cb((uv_stream_t*)handle, uv_translate_sys_error(error), &buf);
+}
+
+
+static void uv_pipe_read_error_or_eof(uv_loop_t* loop, uv_pipe_t* handle,
+ int error, uv_buf_t buf) {
+ if (error == ERROR_BROKEN_PIPE) {
+ uv_pipe_read_eof(loop, handle, buf);
+ } else {
+ uv_pipe_read_error(loop, handle, error, buf);
+ }
+}
+
+
+static void uv__pipe_queue_ipc_xfer_info(
+ uv_pipe_t* handle,
+ uv__ipc_socket_xfer_type_t xfer_type,
+ uv__ipc_socket_xfer_info_t* xfer_info) {
+ uv__ipc_xfer_queue_item_t* item;
+
+ item = (uv__ipc_xfer_queue_item_t*) uv__malloc(sizeof(*item));
+ if (item == NULL)
+ uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+
+ item->xfer_type = xfer_type;
+ item->xfer_info = *xfer_info;
+
+ QUEUE_INSERT_TAIL(&handle->pipe.conn.ipc_xfer_queue, &item->member);
+ handle->pipe.conn.ipc_xfer_queue_length++;
+}
+
+
+/* Read an exact number of bytes from a pipe. If an error or end-of-file is
+ * encountered before the requested number of bytes are read, an error is
+ * returned. */
+static int uv__pipe_read_exactly(HANDLE h, void* buffer, DWORD count) {
+ DWORD bytes_read, bytes_read_now;
+
+ bytes_read = 0;
+ while (bytes_read < count) {
+ if (!ReadFile(h,
+ (char*) buffer + bytes_read,
+ count - bytes_read,
+ &bytes_read_now,
+ NULL)) {
+ return GetLastError();
+ }
+
+ bytes_read += bytes_read_now;
+ }
+
+ assert(bytes_read == count);
+ return 0;
+}
+
+
+static DWORD uv__pipe_read_data(uv_loop_t* loop,
+ uv_pipe_t* handle,
+ DWORD suggested_bytes,
+ DWORD max_bytes) {
+ DWORD bytes_read;
+ uv_buf_t buf;
+
+ /* Ask the user for a buffer to read data into. */
+ buf = uv_buf_init(NULL, 0);
+ handle->alloc_cb((uv_handle_t*) handle, suggested_bytes, &buf);
+ if (buf.base == NULL || buf.len == 0) {
+ handle->read_cb((uv_stream_t*) handle, UV_ENOBUFS, &buf);
+ return 0; /* Break out of read loop. */
+ }
+
+ /* Ensure we read at most the smaller of:
+ * (a) the length of the user-allocated buffer.
+ * (b) the maximum data length as specified by the `max_bytes` argument.
+ */
+ if (max_bytes > buf.len)
+ max_bytes = buf.len;
+
+ /* Read into the user buffer. */
+ if (!ReadFile(handle->handle, buf.base, max_bytes, &bytes_read, NULL)) {
+ uv_pipe_read_error_or_eof(loop, handle, GetLastError(), buf);
+ return 0; /* Break out of read loop. */
+ }
+
+ /* Call the read callback. */
+ handle->read_cb((uv_stream_t*) handle, bytes_read, &buf);
+
+ return bytes_read;
+}
+
+
+static DWORD uv__pipe_read_ipc(uv_loop_t* loop, uv_pipe_t* handle) {
+ uint32_t* data_remaining = &handle->pipe.conn.ipc_data_frame.payload_remaining;
+ int err;
+
+ if (*data_remaining > 0) {
+ /* Read frame data payload. */
+ DWORD bytes_read =
+ uv__pipe_read_data(loop, handle, *data_remaining, *data_remaining);
+ *data_remaining -= bytes_read;
+ return bytes_read;
+
+ } else {
+ /* Start of a new IPC frame. */
+ uv__ipc_frame_header_t frame_header;
+ uint32_t xfer_flags;
+ uv__ipc_socket_xfer_type_t xfer_type;
+ uv__ipc_socket_xfer_info_t xfer_info;
+
+ /* Read the IPC frame header. */
+ err = uv__pipe_read_exactly(
+ handle->handle, &frame_header, sizeof frame_header);
+ if (err)
+ goto error;
+
+ /* Validate that flags are valid. */
+ if ((frame_header.flags & ~UV__IPC_FRAME_VALID_FLAGS) != 0)
+ goto invalid;
+ /* Validate that reserved2 is zero. */
+ if (frame_header.reserved2 != 0)
+ goto invalid;
+
+ /* Parse xfer flags. */
+ xfer_flags = frame_header.flags & UV__IPC_FRAME_XFER_FLAGS;
+ if (xfer_flags & UV__IPC_FRAME_HAS_SOCKET_XFER) {
+ /* Socket coming -- determine the type. */
+ xfer_type = xfer_flags & UV__IPC_FRAME_XFER_IS_TCP_CONNECTION
+ ? UV__IPC_SOCKET_XFER_TCP_CONNECTION
+ : UV__IPC_SOCKET_XFER_TCP_SERVER;
+ } else if (xfer_flags == 0) {
+ /* No socket. */
+ xfer_type = UV__IPC_SOCKET_XFER_NONE;
+ } else {
+ /* Invalid flags. */
+ goto invalid;
+ }
+
+ /* Parse data frame information. */
+ if (frame_header.flags & UV__IPC_FRAME_HAS_DATA) {
+ *data_remaining = frame_header.data_length;
+ } else if (frame_header.data_length != 0) {
+ /* Data length greater than zero but data flag not set -- invalid. */
+ goto invalid;
+ }
+
+ /* If no socket xfer info follows, return here. Data will be read in a
+ * subsequent invocation of uv__pipe_read_ipc(). */
+ if (xfer_type == UV__IPC_SOCKET_XFER_NONE)
+ return sizeof frame_header; /* Number of bytes read. */
+
+ /* Read transferred socket information. */
+ err = uv__pipe_read_exactly(handle->handle, &xfer_info, sizeof xfer_info);
+ if (err)
+ goto error;
+
+ /* Store the pending socket info. */
+ uv__pipe_queue_ipc_xfer_info(handle, xfer_type, &xfer_info);
+
+ /* Return number of bytes read. */
+ return sizeof frame_header + sizeof xfer_info;
+ }
+
+invalid:
+ /* Invalid frame. */
+ err = WSAECONNABORTED; /* Maps to UV_ECONNABORTED. */
+
+error:
+ uv_pipe_read_error_or_eof(loop, handle, err, uv_null_buf_);
+ return 0; /* Break out of read loop. */
+}
+
+
+void uv_process_pipe_read_req(uv_loop_t* loop,
+ uv_pipe_t* handle,
+ uv_req_t* req) {
+ assert(handle->type == UV_NAMED_PIPE);
+
+ handle->flags &= ~(UV_HANDLE_READ_PENDING | UV_HANDLE_CANCELLATION_PENDING);
+ DECREASE_PENDING_REQ_COUNT(handle);
+ eof_timer_stop(handle);
+
+ /* At this point, we're done with bookkeeping. If the user has stopped
+ * reading the pipe in the meantime, there is nothing left to do, since there
+ * is no callback that we can call. */
+ if (!(handle->flags & UV_HANDLE_READING))
+ return;
+
+ if (!REQ_SUCCESS(req)) {
+ /* An error occurred doing the zero-read. */
+ DWORD err = GET_REQ_ERROR(req);
+
+ /* If the read was cancelled by uv__pipe_interrupt_read(), the request may
+ * indicate an ERROR_OPERATION_ABORTED error. This error isn't relevant to
+ * the user; we'll start a new zero-read at the end of this function. */
+ if (err != ERROR_OPERATION_ABORTED)
+ uv_pipe_read_error_or_eof(loop, handle, err, uv_null_buf_);
+
+ } else {
+ /* The zero-read completed without error, indicating there is data
+ * available in the kernel buffer. */
+ DWORD avail;
+
+ /* Get the number of bytes available. */
+ avail = 0;
+ if (!PeekNamedPipe(handle->handle, NULL, 0, NULL, &avail, NULL))
+ uv_pipe_read_error_or_eof(loop, handle, GetLastError(), uv_null_buf_);
+
+ /* Read until we've either read all the bytes available, or the 'reading'
+ * flag is cleared. */
+ while (avail > 0 && handle->flags & UV_HANDLE_READING) {
+ /* Depending on the type of pipe, read either IPC frames or raw data. */
+ DWORD bytes_read =
+ handle->ipc ? uv__pipe_read_ipc(loop, handle)
+ : uv__pipe_read_data(loop, handle, avail, (DWORD) -1);
+
+ /* If no bytes were read, treat this as an indication that an error
+ * occurred, and break out of the read loop. */
+ if (bytes_read == 0)
+ break;
+
+ /* It is possible that more bytes were read than we thought were
+ * available. To prevent `avail` from underflowing, break out of the loop
+ * if this is the case. */
+ if (bytes_read > avail)
+ break;
+
+ /* Recompute the number of bytes available. */
+ avail -= bytes_read;
+ }
+ }
+
+ /* Start another zero-read request if necessary. */
+ if ((handle->flags & UV_HANDLE_READING) &&
+ !(handle->flags & UV_HANDLE_READ_PENDING)) {
+ uv_pipe_queue_read(loop, handle);
+ }
+}
+
+
+void uv_process_pipe_write_req(uv_loop_t* loop, uv_pipe_t* handle,
+ uv_write_t* req) {
+ int err;
+
+ assert(handle->type == UV_NAMED_PIPE);
+
+ assert(handle->write_queue_size >= req->u.io.queued_bytes);
+ handle->write_queue_size -= req->u.io.queued_bytes;
+
+ UNREGISTER_HANDLE_REQ(loop, handle, req);
+
+ if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+ if (req->wait_handle != INVALID_HANDLE_VALUE) {
+ UnregisterWait(req->wait_handle);
+ req->wait_handle = INVALID_HANDLE_VALUE;
+ }
+ if (req->event_handle) {
+ CloseHandle(req->event_handle);
+ req->event_handle = NULL;
+ }
+ }
+
+ err = GET_REQ_ERROR(req);
+
+ /* If this was a coalesced write, extract a pointer to the user-provided
+ * uv_write_t structure so we can pass the expected pointer to the callback,
+ * then free the heap-allocated write req. */
+ if (req->coalesced) {
+ uv__coalesced_write_t* coalesced_write =
+ container_of(req, uv__coalesced_write_t, req);
+ req = coalesced_write->user_req;
+ uv__free(coalesced_write);
+ }
+ if (req->cb) {
+ req->cb(req, uv_translate_sys_error(err));
+ }
+
+ handle->stream.conn.write_reqs_pending--;
+
+ if (handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE &&
+ handle->pipe.conn.non_overlapped_writes_tail) {
+ assert(handle->stream.conn.write_reqs_pending > 0);
+ uv_queue_non_overlapped_write(handle);
+ }
+
+ if (handle->stream.conn.shutdown_req != NULL &&
+ handle->stream.conn.write_reqs_pending == 0) {
+ uv_want_endgame(loop, (uv_handle_t*)handle);
+ }
+
+ DECREASE_PENDING_REQ_COUNT(handle);
+}
+
+
+void uv_process_pipe_accept_req(uv_loop_t* loop, uv_pipe_t* handle,
+ uv_req_t* raw_req) {
+ uv_pipe_accept_t* req = (uv_pipe_accept_t*) raw_req;
+
+ assert(handle->type == UV_NAMED_PIPE);
+
+ if (handle->flags & UV_HANDLE_CLOSING) {
+ /* The req->pipeHandle should be freed already in uv_pipe_cleanup(). */
+ assert(req->pipeHandle == INVALID_HANDLE_VALUE);
+ DECREASE_PENDING_REQ_COUNT(handle);
+ return;
+ }
+
+ if (REQ_SUCCESS(req)) {
+ assert(req->pipeHandle != INVALID_HANDLE_VALUE);
+ req->next_pending = handle->pipe.serv.pending_accepts;
+ handle->pipe.serv.pending_accepts = req;
+
+ if (handle->stream.serv.connection_cb) {
+ handle->stream.serv.connection_cb((uv_stream_t*)handle, 0);
+ }
+ } else {
+ if (req->pipeHandle != INVALID_HANDLE_VALUE) {
+ CloseHandle(req->pipeHandle);
+ req->pipeHandle = INVALID_HANDLE_VALUE;
+ }
+ if (!(handle->flags & UV_HANDLE_CLOSING)) {
+ uv_pipe_queue_accept(loop, handle, req, FALSE);
+ }
+ }
+
+ DECREASE_PENDING_REQ_COUNT(handle);
+}
+
+
+void uv_process_pipe_connect_req(uv_loop_t* loop, uv_pipe_t* handle,
+ uv_connect_t* req) {
+ int err;
+
+ assert(handle->type == UV_NAMED_PIPE);
+
+ UNREGISTER_HANDLE_REQ(loop, handle, req);
+
+ if (req->cb) {
+ err = 0;
+ if (REQ_SUCCESS(req)) {
+ uv_pipe_connection_init(handle);
+ } else {
+ err = GET_REQ_ERROR(req);
+ }
+ req->cb(req, uv_translate_sys_error(err));
+ }
+
+ DECREASE_PENDING_REQ_COUNT(handle);
+}
+
+
+void uv_process_pipe_shutdown_req(uv_loop_t* loop, uv_pipe_t* handle,
+ uv_shutdown_t* req) {
+ assert(handle->type == UV_NAMED_PIPE);
+
+ UNREGISTER_HANDLE_REQ(loop, handle, req);
+
+ if (handle->flags & UV_HANDLE_READABLE) {
+ /* Initialize and optionally start the eof timer. Only do this if the pipe
+ * is readable and we haven't seen EOF come in ourselves. */
+ eof_timer_init(handle);
+
+ /* If a read is pending, start the timer right now. Otherwise
+ * uv_pipe_queue_read will start it. */
+ if (handle->flags & UV_HANDLE_READ_PENDING) {
+ eof_timer_start(handle);
+ }
+
+ } else {
+ /* This pipe is not readable. We can just close it to let the other end
+ * know that we're done writing. */
+ close_pipe(handle);
+ }
+
+ if (req->cb) {
+ req->cb(req, 0);
+ }
+
+ DECREASE_PENDING_REQ_COUNT(handle);
+}
+
+
+static void eof_timer_init(uv_pipe_t* pipe) {
+ int r;
+
+ assert(pipe->pipe.conn.eof_timer == NULL);
+ assert(pipe->flags & UV_HANDLE_CONNECTION);
+
+ pipe->pipe.conn.eof_timer = (uv_timer_t*) uv__malloc(sizeof *pipe->pipe.conn.eof_timer);
+
+ r = uv_timer_init(pipe->loop, pipe->pipe.conn.eof_timer);
+ assert(r == 0); /* timers can't fail */
+ pipe->pipe.conn.eof_timer->data = pipe;
+ uv_unref((uv_handle_t*) pipe->pipe.conn.eof_timer);
+}
+
+
+static void eof_timer_start(uv_pipe_t* pipe) {
+ assert(pipe->flags & UV_HANDLE_CONNECTION);
+
+ if (pipe->pipe.conn.eof_timer != NULL) {
+ uv_timer_start(pipe->pipe.conn.eof_timer, eof_timer_cb, eof_timeout, 0);
+ }
+}
+
+
+static void eof_timer_stop(uv_pipe_t* pipe) {
+ assert(pipe->flags & UV_HANDLE_CONNECTION);
+
+ if (pipe->pipe.conn.eof_timer != NULL) {
+ uv_timer_stop(pipe->pipe.conn.eof_timer);
+ }
+}
+
+
+static void eof_timer_cb(uv_timer_t* timer) {
+ uv_pipe_t* pipe = (uv_pipe_t*) timer->data;
+ uv_loop_t* loop = timer->loop;
+
+ assert(pipe->type == UV_NAMED_PIPE);
+
+ /* This should always be true, since we start the timer only in
+ * uv_pipe_queue_read after successfully calling ReadFile, or in
+ * uv_process_pipe_shutdown_req if a read is pending, and we always
+ * immediately stop the timer in uv_process_pipe_read_req. */
+ assert(pipe->flags & UV_HANDLE_READ_PENDING);
+
+ /* If there are many packets coming off the iocp then the timer callback may
+ * be called before the read request has come off the queue. Therefore we
+ * check here whether the read request has completed but will be processed
+ * later. */
+ if ((pipe->flags & UV_HANDLE_READ_PENDING) &&
+ HasOverlappedIoCompleted(&pipe->read_req.u.io.overlapped)) {
+ return;
+ }
+
+ /* Force both ends off the pipe. */
+ close_pipe(pipe);
+
+ /* Stop reading, so the pending read that is going to fail will not be
+ * reported to the user. */
+ uv_read_stop((uv_stream_t*) pipe);
+
+ /* Report the eof and update flags. This will get reported even if the user
+ * stopped reading in the meantime. TODO: is that okay? */
+ uv_pipe_read_eof(loop, pipe, uv_null_buf_);
+}
+
+
+static void eof_timer_destroy(uv_pipe_t* pipe) {
+ assert(pipe->flags & UV_HANDLE_CONNECTION);
+
+ if (pipe->pipe.conn.eof_timer) {
+ uv_close((uv_handle_t*) pipe->pipe.conn.eof_timer, eof_timer_close_cb);
+ pipe->pipe.conn.eof_timer = NULL;
+ }
+}
+
+
+static void eof_timer_close_cb(uv_handle_t* handle) {
+ assert(handle->type == UV_TIMER);
+ uv__free(handle);
+}
+
+
+int uv_pipe_open(uv_pipe_t* pipe, uv_file file) {
+ HANDLE os_handle = uv__get_osfhandle(file);
+ NTSTATUS nt_status;
+ IO_STATUS_BLOCK io_status;
+ FILE_ACCESS_INFORMATION access;
+ DWORD duplex_flags = 0;
+
+ if (os_handle == INVALID_HANDLE_VALUE)
+ return UV_EBADF;
+
+ uv__once_init();
+ /* In order to avoid closing a stdio file descriptor 0-2, duplicate the
+ * underlying OS handle and forget about the original fd.
+ * We could also opt to use the original OS handle and just never close it,
+ * but then there would be no reliable way to cancel pending read operations
+ * upon close.
+ */
+ if (file <= 2) {
+ if (!DuplicateHandle(INVALID_HANDLE_VALUE,
+ os_handle,
+ INVALID_HANDLE_VALUE,
+ &os_handle,
+ 0,
+ FALSE,
+ DUPLICATE_SAME_ACCESS))
+ return uv_translate_sys_error(GetLastError());
+ file = -1;
+ }
+
+ /* Determine what kind of permissions we have on this handle.
+ * Cygwin opens the pipe in message mode, but we can support it;
+ * just query the access flags and set the stream flags accordingly.
+ */
+ nt_status = pNtQueryInformationFile(os_handle,
+ &io_status,
+ &access,
+ sizeof(access),
+ FileAccessInformation);
+ if (nt_status != STATUS_SUCCESS)
+ return UV_EINVAL;
+
+ if (pipe->ipc) {
+ if (!(access.AccessFlags & FILE_WRITE_DATA) ||
+ !(access.AccessFlags & FILE_READ_DATA)) {
+ return UV_EINVAL;
+ }
+ }
+
+ if (access.AccessFlags & FILE_WRITE_DATA)
+ duplex_flags |= UV_HANDLE_WRITABLE;
+ if (access.AccessFlags & FILE_READ_DATA)
+ duplex_flags |= UV_HANDLE_READABLE;
+
+ if (os_handle == INVALID_HANDLE_VALUE ||
+ uv_set_pipe_handle(pipe->loop,
+ pipe,
+ os_handle,
+ file,
+ duplex_flags) == -1) {
+ return UV_EINVAL;
+ }
+
+ uv_pipe_connection_init(pipe);
+
+ if (pipe->ipc) {
+ assert(!(pipe->flags & UV_HANDLE_NON_OVERLAPPED_PIPE));
+ pipe->pipe.conn.ipc_remote_pid = uv_os_getppid();
+ assert(pipe->pipe.conn.ipc_remote_pid != (DWORD)(uv_pid_t) -1);
+ }
+ return 0;
+}
+
+
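+/* A minimal caller-side sketch (illustrative only, assuming the parent process
+ * passed an inheritable IPC pipe on file descriptor 3 and that `loop`,
+ * `alloc_cb` and `read_cb` are provided by the caller; the fd <= 2 duplication
+ * above only applies to the standard streams):
+ *
+ *   uv_pipe_t chan;
+ *   uv_pipe_init(loop, &chan, 1);
+ *   if (uv_pipe_open(&chan, 3) == 0)
+ *     uv_read_start((uv_stream_t*) &chan, alloc_cb, read_cb);
+ */
+
+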
+static int uv__pipe_getname(const uv_pipe_t* handle, char* buffer, size_t* size) {
+ NTSTATUS nt_status;
+ IO_STATUS_BLOCK io_status;
+ FILE_NAME_INFORMATION tmp_name_info;
+ FILE_NAME_INFORMATION* name_info;
+ WCHAR* name_buf;
+ unsigned int addrlen;
+ unsigned int name_size;
+ unsigned int name_len;
+ int err;
+
+ uv__once_init();
+ name_info = NULL;
+
+ if (handle->handle == INVALID_HANDLE_VALUE) {
+ *size = 0;
+ return UV_EINVAL;
+ }
+
+ /* NtQueryInformationFile will block if another thread is performing a
+ * blocking operation on the queried handle. If the pipe handle is
+ * synchronous, there may be a worker thread currently calling ReadFile() on
+ * the pipe handle, which could cause a deadlock. To avoid this, interrupt
+ * the read. */
+ if (handle->flags & UV_HANDLE_CONNECTION &&
+ handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE) {
+ uv__pipe_interrupt_read((uv_pipe_t*) handle); /* cast away const warning */
+ }
+
+ nt_status = pNtQueryInformationFile(handle->handle,
+ &io_status,
+ &tmp_name_info,
+ sizeof tmp_name_info,
+ FileNameInformation);
+ if (nt_status == STATUS_BUFFER_OVERFLOW) {
+ name_size = sizeof(*name_info) + tmp_name_info.FileNameLength;
+ name_info = uv__malloc(name_size);
+ if (!name_info) {
+ *size = 0;
+ err = UV_ENOMEM;
+ goto cleanup;
+ }
+
+ nt_status = pNtQueryInformationFile(handle->handle,
+ &io_status,
+ name_info,
+ name_size,
+ FileNameInformation);
+ }
+
+ if (nt_status != STATUS_SUCCESS) {
+ *size = 0;
+ err = uv_translate_sys_error(pRtlNtStatusToDosError(nt_status));
+ goto error;
+ }
+
+ if (!name_info) {
+ /* the struct on stack was used */
+ name_buf = tmp_name_info.FileName;
+ name_len = tmp_name_info.FileNameLength;
+ } else {
+ name_buf = name_info->FileName;
+ name_len = name_info->FileNameLength;
+ }
+
+ if (name_len == 0) {
+ *size = 0;
+ err = 0;
+ goto error;
+ }
+
+ name_len /= sizeof(WCHAR);
+
+ /* check how much space we need */
+ addrlen = WideCharToMultiByte(CP_UTF8,
+ 0,
+ name_buf,
+ name_len,
+ NULL,
+ 0,
+ NULL,
+ NULL);
+ if (!addrlen) {
+ *size = 0;
+ err = uv_translate_sys_error(GetLastError());
+ goto error;
+ } else if (pipe_prefix_len + addrlen >= *size) {
+ /* "\\\\.\\pipe" + name */
+ *size = pipe_prefix_len + addrlen + 1;
+ err = UV_ENOBUFS;
+ goto error;
+ }
+
+ memcpy(buffer, pipe_prefix, pipe_prefix_len);
+ addrlen = WideCharToMultiByte(CP_UTF8,
+ 0,
+ name_buf,
+ name_len,
+ buffer+pipe_prefix_len,
+ *size-pipe_prefix_len,
+ NULL,
+ NULL);
+ if (!addrlen) {
+ *size = 0;
+ err = uv_translate_sys_error(GetLastError());
+ goto error;
+ }
+
+ addrlen += pipe_prefix_len;
+ *size = addrlen;
+ buffer[addrlen] = '\0';
+
+ err = 0;
+
+error:
+ uv__free(name_info);
+
+cleanup:
+ return err;
+}
+
+
+int uv_pipe_pending_count(uv_pipe_t* handle) {
+ if (!handle->ipc)
+ return 0;
+ return handle->pipe.conn.ipc_xfer_queue_length;
+}
+
+
+int uv_pipe_getsockname(const uv_pipe_t* handle, char* buffer, size_t* size) {
+ if (handle->flags & UV_HANDLE_BOUND)
+ return uv__pipe_getname(handle, buffer, size);
+
+ if (handle->flags & UV_HANDLE_CONNECTION ||
+ handle->handle != INVALID_HANDLE_VALUE) {
+ *size = 0;
+ return 0;
+ }
+
+ return UV_EBADF;
+}
+
+
+int uv_pipe_getpeername(const uv_pipe_t* handle, char* buffer, size_t* size) {
+ /* emulate unix behaviour */
+ if (handle->flags & UV_HANDLE_BOUND)
+ return UV_ENOTCONN;
+
+ if (handle->handle != INVALID_HANDLE_VALUE)
+ return uv__pipe_getname(handle, buffer, size);
+
+ return UV_EBADF;
+}
+
+
+uv_handle_type uv_pipe_pending_type(uv_pipe_t* handle) {
+ if (!handle->ipc)
+ return UV_UNKNOWN_HANDLE;
+ if (handle->pipe.conn.ipc_xfer_queue_length == 0)
+ return UV_UNKNOWN_HANDLE;
+ else
+ return UV_TCP;
+}
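+
+/* A minimal sketch of the receive side (illustrative only): handles queued by
+ * uv__pipe_queue_ipc_xfer_info() are picked up from a read callback using the
+ * public API, e.g.
+ *
+ *   static void on_ipc_read(uv_stream_t* s, ssize_t nread, const uv_buf_t* b) {
+ *     uv_pipe_t* pipe = (uv_pipe_t*) s;
+ *     while (uv_pipe_pending_count(pipe) > 0 &&
+ *            uv_pipe_pending_type(pipe) == UV_TCP) {
+ *       uv_tcp_t* tcp = malloc(sizeof(*tcp));
+ *       uv_tcp_init(s->loop, tcp);
+ *       uv_accept(s, (uv_stream_t*) tcp);
+ *     }
+ *   }
+ *
+ * Error handling and the data payload in `b` are omitted for brevity. */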
+
+int uv_pipe_chmod(uv_pipe_t* handle, int mode) {
+ SID_IDENTIFIER_AUTHORITY sid_world = { SECURITY_WORLD_SID_AUTHORITY };
+ PACL old_dacl, new_dacl;
+ PSECURITY_DESCRIPTOR sd;
+ EXPLICIT_ACCESS ea;
+ PSID everyone;
+ int error;
+
+ if (handle == NULL || handle->handle == INVALID_HANDLE_VALUE)
+ return UV_EBADF;
+
+ if (mode != UV_READABLE &&
+ mode != UV_WRITABLE &&
+ mode != (UV_WRITABLE | UV_READABLE))
+ return UV_EINVAL;
+
+ if (!AllocateAndInitializeSid(&sid_world,
+ 1,
+ SECURITY_WORLD_RID,
+ 0, 0, 0, 0, 0, 0, 0,
+ &everyone)) {
+ error = GetLastError();
+ goto done;
+ }
+
+ if (GetSecurityInfo(handle->handle,
+ SE_KERNEL_OBJECT,
+ DACL_SECURITY_INFORMATION,
+ NULL,
+ NULL,
+ &old_dacl,
+ NULL,
+ &sd)) {
+ error = GetLastError();
+ goto clean_sid;
+ }
+
+ memset(&ea, 0, sizeof(EXPLICIT_ACCESS));
+ if (mode & UV_READABLE)
+ ea.grfAccessPermissions |= GENERIC_READ | FILE_WRITE_ATTRIBUTES;
+ if (mode & UV_WRITABLE)
+ ea.grfAccessPermissions |= GENERIC_WRITE | FILE_READ_ATTRIBUTES;
+ ea.grfAccessPermissions |= SYNCHRONIZE;
+ ea.grfAccessMode = SET_ACCESS;
+ ea.grfInheritance = NO_INHERITANCE;
+ ea.Trustee.TrusteeForm = TRUSTEE_IS_SID;
+ ea.Trustee.TrusteeType = TRUSTEE_IS_WELL_KNOWN_GROUP;
+ ea.Trustee.ptstrName = (LPTSTR)everyone;
+
+ if (SetEntriesInAcl(1, &ea, old_dacl, &new_dacl)) {
+ error = GetLastError();
+ goto clean_sd;
+ }
+
+ if (SetSecurityInfo(handle->handle,
+ SE_KERNEL_OBJECT,
+ DACL_SECURITY_INFORMATION,
+ NULL,
+ NULL,
+ new_dacl,
+ NULL)) {
+ error = GetLastError();
+ goto clean_dacl;
+ }
+
+ error = 0;
+
+clean_dacl:
+ LocalFree((HLOCAL) new_dacl);
+clean_sd:
+ LocalFree((HLOCAL) sd);
+clean_sid:
+ FreeSid(everyone);
+done:
+ return uv_translate_sys_error(error);
+}
diff --git a/Utilities/cmlibuv/src/win/poll.c b/Utilities/cmlibuv/src/win/poll.c
new file mode 100644
index 0000000..8785859
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/poll.c
@@ -0,0 +1,584 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <io.h>
+
+#include "uv.h"
+#include "internal.h"
+#include "handle-inl.h"
+#include "req-inl.h"
+
+
+static const GUID uv_msafd_provider_ids[UV_MSAFD_PROVIDER_COUNT] = {
+ {0xe70f1aa0, 0xab8b, 0x11cf,
+ {0x8c, 0xa3, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92}},
+ {0xf9eab0c0, 0x26d4, 0x11d0,
+ {0xbb, 0xbf, 0x00, 0xaa, 0x00, 0x6c, 0x34, 0xe4}},
+ {0x9fc48064, 0x7298, 0x43e4,
+ {0xb7, 0xbd, 0x18, 0x1f, 0x20, 0x89, 0x79, 0x2a}}
+};
+
+typedef struct uv_single_fd_set_s {
+ unsigned int fd_count;
+ SOCKET fd_array[1];
+} uv_single_fd_set_t;
+
+
+static OVERLAPPED overlapped_dummy_;
+static uv_once_t overlapped_dummy_init_guard_ = UV_ONCE_INIT;
+
+static AFD_POLL_INFO afd_poll_info_dummy_;
+
+
+static void uv__init_overlapped_dummy(void) {
+ HANDLE event;
+
+ event = CreateEvent(NULL, TRUE, TRUE, NULL);
+ if (event == NULL)
+ uv_fatal_error(GetLastError(), "CreateEvent");
+
+ memset(&overlapped_dummy_, 0, sizeof overlapped_dummy_);
+ overlapped_dummy_.hEvent = (HANDLE) ((uintptr_t) event | 1);
+}
+
+
+static OVERLAPPED* uv__get_overlapped_dummy(void) {
+ uv_once(&overlapped_dummy_init_guard_, uv__init_overlapped_dummy);
+ return &overlapped_dummy_;
+}
+
+
+static AFD_POLL_INFO* uv__get_afd_poll_info_dummy(void) {
+ return &afd_poll_info_dummy_;
+}
+
+
+static void uv__fast_poll_submit_poll_req(uv_loop_t* loop, uv_poll_t* handle) {
+ uv_req_t* req;
+ AFD_POLL_INFO* afd_poll_info;
+ int result;
+
+ /* Find a yet unsubmitted req to submit. */
+ if (handle->submitted_events_1 == 0) {
+ req = &handle->poll_req_1;
+ afd_poll_info = &handle->afd_poll_info_1;
+ handle->submitted_events_1 = handle->events;
+ handle->mask_events_1 = 0;
+ handle->mask_events_2 = handle->events;
+ } else if (handle->submitted_events_2 == 0) {
+ req = &handle->poll_req_2;
+ afd_poll_info = &handle->afd_poll_info_2;
+ handle->submitted_events_2 = handle->events;
+ handle->mask_events_1 = handle->events;
+ handle->mask_events_2 = 0;
+ } else {
+ /* Just wait until there's an unsubmitted req. This will happen almost
+ * immediately as one of the 2 outstanding requests is about to return.
+ * When this happens, uv__fast_poll_process_poll_req will be called, and
+ * the pending events, if needed, will be processed in a subsequent
+ * request. */
+ return;
+ }
+
+ /* Setting Exclusive to TRUE makes the other poll request return, if there is
+ * one. */
+ afd_poll_info->Exclusive = TRUE;
+ afd_poll_info->NumberOfHandles = 1;
+ afd_poll_info->Timeout.QuadPart = INT64_MAX;
+ afd_poll_info->Handles[0].Handle = (HANDLE) handle->socket;
+ afd_poll_info->Handles[0].Status = 0;
+ afd_poll_info->Handles[0].Events = 0;
+
+ if (handle->events & UV_READABLE) {
+ afd_poll_info->Handles[0].Events |= AFD_POLL_RECEIVE |
+ AFD_POLL_DISCONNECT | AFD_POLL_ACCEPT | AFD_POLL_ABORT;
+ } else {
+ if (handle->events & UV_DISCONNECT) {
+ afd_poll_info->Handles[0].Events |= AFD_POLL_DISCONNECT;
+ }
+ }
+ if (handle->events & UV_WRITABLE) {
+ afd_poll_info->Handles[0].Events |= AFD_POLL_SEND | AFD_POLL_CONNECT_FAIL;
+ }
+
+ memset(&req->u.io.overlapped, 0, sizeof req->u.io.overlapped);
+
+ result = uv_msafd_poll((SOCKET) handle->peer_socket,
+ afd_poll_info,
+ afd_poll_info,
+ &req->u.io.overlapped);
+ if (result != 0 && WSAGetLastError() != WSA_IO_PENDING) {
+ /* Queue this req, reporting an error. */
+ SET_REQ_ERROR(req, WSAGetLastError());
+ uv_insert_pending_req(loop, req);
+ }
+}
+
+
+static void uv__fast_poll_process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
+ uv_req_t* req) {
+ unsigned char mask_events;
+ AFD_POLL_INFO* afd_poll_info;
+
+ if (req == &handle->poll_req_1) {
+ afd_poll_info = &handle->afd_poll_info_1;
+ handle->submitted_events_1 = 0;
+ mask_events = handle->mask_events_1;
+ } else if (req == &handle->poll_req_2) {
+ afd_poll_info = &handle->afd_poll_info_2;
+ handle->submitted_events_2 = 0;
+ mask_events = handle->mask_events_2;
+ } else {
+ assert(0);
+ return;
+ }
+
+ /* Report an error unless the select was just interrupted. */
+ if (!REQ_SUCCESS(req)) {
+ DWORD error = GET_REQ_SOCK_ERROR(req);
+ if (error != WSAEINTR && handle->events != 0) {
+ handle->events = 0; /* Stop the watcher */
+ handle->poll_cb(handle, uv_translate_sys_error(error), 0);
+ }
+
+ } else if (afd_poll_info->NumberOfHandles >= 1) {
+ unsigned char events = 0;
+
+ if ((afd_poll_info->Handles[0].Events & (AFD_POLL_RECEIVE |
+ AFD_POLL_DISCONNECT | AFD_POLL_ACCEPT | AFD_POLL_ABORT)) != 0) {
+ events |= UV_READABLE;
+ if ((afd_poll_info->Handles[0].Events & AFD_POLL_DISCONNECT) != 0) {
+ events |= UV_DISCONNECT;
+ }
+ }
+ if ((afd_poll_info->Handles[0].Events & (AFD_POLL_SEND |
+ AFD_POLL_CONNECT_FAIL)) != 0) {
+ events |= UV_WRITABLE;
+ }
+
+ events &= handle->events & ~mask_events;
+
+ if (afd_poll_info->Handles[0].Events & AFD_POLL_LOCAL_CLOSE) {
+ /* Stop polling. */
+ handle->events = 0;
+ if (uv__is_active(handle))
+ uv__handle_stop(handle);
+ }
+
+ if (events != 0) {
+ handle->poll_cb(handle, 0, events);
+ }
+ }
+
+ if ((handle->events & ~(handle->submitted_events_1 |
+ handle->submitted_events_2)) != 0) {
+ uv__fast_poll_submit_poll_req(loop, handle);
+ } else if ((handle->flags & UV_HANDLE_CLOSING) &&
+ handle->submitted_events_1 == 0 &&
+ handle->submitted_events_2 == 0) {
+ uv_want_endgame(loop, (uv_handle_t*) handle);
+ }
+}
+
+
+static SOCKET uv__fast_poll_create_peer_socket(HANDLE iocp,
+ WSAPROTOCOL_INFOW* protocol_info) {
+ SOCKET sock = 0;
+
+ sock = WSASocketW(protocol_info->iAddressFamily,
+ protocol_info->iSocketType,
+ protocol_info->iProtocol,
+ protocol_info,
+ 0,
+ WSA_FLAG_OVERLAPPED);
+ if (sock == INVALID_SOCKET) {
+ return INVALID_SOCKET;
+ }
+
+ if (!SetHandleInformation((HANDLE) sock, HANDLE_FLAG_INHERIT, 0)) {
+ goto error;
+ };
+
+ if (CreateIoCompletionPort((HANDLE) sock,
+ iocp,
+ (ULONG_PTR) sock,
+ 0) == NULL) {
+ goto error;
+ }
+
+ return sock;
+
+ error:
+ closesocket(sock);
+ return INVALID_SOCKET;
+}
+
+
+static SOCKET uv__fast_poll_get_peer_socket(uv_loop_t* loop,
+ WSAPROTOCOL_INFOW* protocol_info) {
+ int index, i;
+ SOCKET peer_socket;
+
+ index = -1;
+ for (i = 0; (size_t) i < ARRAY_SIZE(uv_msafd_provider_ids); i++) {
+ if (memcmp((void*) &protocol_info->ProviderId,
+ (void*) &uv_msafd_provider_ids[i],
+ sizeof protocol_info->ProviderId) == 0) {
+ index = i;
+ }
+ }
+
+ /* Check if the protocol uses an msafd socket. */
+ if (index < 0) {
+ return INVALID_SOCKET;
+ }
+
+ /* If we haven't tried to create a peer socket yet, try to make one now. Don't
+ * try again if peer socket creation failed earlier for the same protocol. */
+ peer_socket = loop->poll_peer_sockets[index];
+ if (peer_socket == 0) {
+ peer_socket = uv__fast_poll_create_peer_socket(loop->iocp, protocol_info);
+ loop->poll_peer_sockets[index] = peer_socket;
+ }
+
+ return peer_socket;
+}
+
+
+static DWORD WINAPI uv__slow_poll_thread_proc(void* arg) {
+ uv_req_t* req = (uv_req_t*) arg;
+ uv_poll_t* handle = (uv_poll_t*) req->data;
+ unsigned char reported_events;
+ int r;
+ uv_single_fd_set_t rfds, wfds, efds;
+ struct timeval timeout;
+
+ assert(handle->type == UV_POLL);
+ assert(req->type == UV_POLL_REQ);
+
+ if (handle->events & UV_READABLE) {
+ rfds.fd_count = 1;
+ rfds.fd_array[0] = handle->socket;
+ } else {
+ rfds.fd_count = 0;
+ }
+
+ if (handle->events & UV_WRITABLE) {
+ wfds.fd_count = 1;
+ wfds.fd_array[0] = handle->socket;
+ efds.fd_count = 1;
+ efds.fd_array[0] = handle->socket;
+ } else {
+ wfds.fd_count = 0;
+ efds.fd_count = 0;
+ }
+
+ /* Make the select() time out after 3 minutes. If select() hangs because the
+ * user closed the socket, we will at least not hang indefinitely. */
+ timeout.tv_sec = 3 * 60;
+ timeout.tv_usec = 0;
+
+ r = select(1, (fd_set*) &rfds, (fd_set*) &wfds, (fd_set*) &efds, &timeout);
+ if (r == SOCKET_ERROR) {
+ /* Queue this req, reporting an error. */
+ SET_REQ_ERROR(&handle->poll_req_1, WSAGetLastError());
+ POST_COMPLETION_FOR_REQ(handle->loop, req);
+ return 0;
+ }
+
+ reported_events = 0;
+
+ if (r > 0) {
+ if (rfds.fd_count > 0) {
+ assert(rfds.fd_count == 1);
+ assert(rfds.fd_array[0] == handle->socket);
+ reported_events |= UV_READABLE;
+ }
+
+ if (wfds.fd_count > 0) {
+ assert(wfds.fd_count == 1);
+ assert(wfds.fd_array[0] == handle->socket);
+ reported_events |= UV_WRITABLE;
+ } else if (efds.fd_count > 0) {
+ assert(efds.fd_count == 1);
+ assert(efds.fd_array[0] == handle->socket);
+ reported_events |= UV_WRITABLE;
+ }
+ }
+
+ SET_REQ_SUCCESS(req);
+ req->u.io.overlapped.InternalHigh = (DWORD) reported_events;
+ POST_COMPLETION_FOR_REQ(handle->loop, req);
+
+ return 0;
+}
+
+
+static void uv__slow_poll_submit_poll_req(uv_loop_t* loop, uv_poll_t* handle) {
+ uv_req_t* req;
+
+ /* Find a yet unsubmitted req to submit. */
+ if (handle->submitted_events_1 == 0) {
+ req = &handle->poll_req_1;
+ handle->submitted_events_1 = handle->events;
+ handle->mask_events_1 = 0;
+ handle->mask_events_2 = handle->events;
+ } else if (handle->submitted_events_2 == 0) {
+ req = &handle->poll_req_2;
+ handle->submitted_events_2 = handle->events;
+ handle->mask_events_1 = handle->events;
+ handle->mask_events_2 = 0;
+ } else {
+ assert(0);
+ return;
+ }
+
+ if (!QueueUserWorkItem(uv__slow_poll_thread_proc,
+ (void*) req,
+ WT_EXECUTELONGFUNCTION)) {
+ /* Make this req pending, reporting an error. */
+ SET_REQ_ERROR(req, GetLastError());
+ uv_insert_pending_req(loop, req);
+ }
+}
+
+
+static void uv__slow_poll_process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
+ uv_req_t* req) {
+ unsigned char mask_events;
+ int err;
+
+ if (req == &handle->poll_req_1) {
+ handle->submitted_events_1 = 0;
+ mask_events = handle->mask_events_1;
+ } else if (req == &handle->poll_req_2) {
+ handle->submitted_events_2 = 0;
+ mask_events = handle->mask_events_2;
+ } else {
+ assert(0);
+ return;
+ }
+
+ if (!REQ_SUCCESS(req)) {
+ /* Error. */
+ if (handle->events != 0) {
+ err = GET_REQ_ERROR(req);
+ handle->events = 0; /* Stop the watcher */
+ handle->poll_cb(handle, uv_translate_sys_error(err), 0);
+ }
+ } else {
+ /* Got some events. */
+ int events = req->u.io.overlapped.InternalHigh & handle->events & ~mask_events;
+ if (events != 0) {
+ handle->poll_cb(handle, 0, events);
+ }
+ }
+
+ if ((handle->events & ~(handle->submitted_events_1 |
+ handle->submitted_events_2)) != 0) {
+ uv__slow_poll_submit_poll_req(loop, handle);
+ } else if ((handle->flags & UV_HANDLE_CLOSING) &&
+ handle->submitted_events_1 == 0 &&
+ handle->submitted_events_2 == 0) {
+ uv_want_endgame(loop, (uv_handle_t*) handle);
+ }
+}
+
+
+int uv_poll_init(uv_loop_t* loop, uv_poll_t* handle, int fd) {
+ return uv_poll_init_socket(loop, handle, (SOCKET) uv__get_osfhandle(fd));
+}
+
+
+int uv_poll_init_socket(uv_loop_t* loop, uv_poll_t* handle,
+ uv_os_sock_t socket) {
+ WSAPROTOCOL_INFOW protocol_info;
+ int len;
+ SOCKET peer_socket, base_socket;
+ DWORD bytes;
+ DWORD yes = 1;
+
+ /* Set the socket to nonblocking mode */
+ if (ioctlsocket(socket, FIONBIO, &yes) == SOCKET_ERROR)
+ return uv_translate_sys_error(WSAGetLastError());
+
+/* Try to obtain a base handle for the socket. This increases the chances that
+ * we find an AFD handle and are able to use the fast poll mechanism. This will
+ * always fail on Windows XP/2k3, since they don't support the SIO_BASE_HANDLE
+ * ioctl. */
+#ifndef NDEBUG
+ base_socket = INVALID_SOCKET;
+#endif
+
+ if (WSAIoctl(socket,
+ SIO_BASE_HANDLE,
+ NULL,
+ 0,
+ &base_socket,
+ sizeof base_socket,
+ &bytes,
+ NULL,
+ NULL) == 0) {
+ assert(base_socket != 0 && base_socket != INVALID_SOCKET);
+ socket = base_socket;
+ }
+
+ uv__handle_init(loop, (uv_handle_t*) handle, UV_POLL);
+ handle->socket = socket;
+ handle->events = 0;
+
+ /* Obtain protocol information about the socket. */
+ len = sizeof protocol_info;
+ if (getsockopt(socket,
+ SOL_SOCKET,
+ SO_PROTOCOL_INFOW,
+ (char*) &protocol_info,
+ &len) != 0) {
+ return uv_translate_sys_error(WSAGetLastError());
+ }
+
+ /* Get the peer socket that is needed to enable fast poll. If the returned
+ * value is INVALID_SOCKET, the protocol is not implemented by MSAFD and we'll
+ * have to use slow mode. */
+ peer_socket = uv__fast_poll_get_peer_socket(loop, &protocol_info);
+
+ if (peer_socket != INVALID_SOCKET) {
+ /* Initialize fast poll specific fields. */
+ handle->peer_socket = peer_socket;
+ } else {
+ /* Initialize slow poll specific fields. */
+ handle->flags |= UV_HANDLE_POLL_SLOW;
+ }
+
+ /* Initialize 2 poll reqs. */
+ handle->submitted_events_1 = 0;
+ UV_REQ_INIT(&handle->poll_req_1, UV_POLL_REQ);
+ handle->poll_req_1.data = handle;
+
+ handle->submitted_events_2 = 0;
+ UV_REQ_INIT(&handle->poll_req_2, UV_POLL_REQ);
+ handle->poll_req_2.data = handle;
+
+ return 0;
+}
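+
+/* A minimal caller-side sketch (illustrative only, assuming an already
+ * connected SOCKET `sock` and an initialized `loop`); uv_poll_start() routes
+ * to either the fast (AFD) or the slow (select) implementation, depending on
+ * what uv_poll_init_socket() detected above:
+ *
+ *   static void on_poll(uv_poll_t* h, int status, int events) {
+ *     if (status < 0)
+ *       return;
+ *     if (events & UV_READABLE)
+ *       handle_readable(h);
+ *   }
+ *
+ *   uv_poll_t watcher;
+ *   uv_poll_init_socket(loop, &watcher, sock);
+ *   uv_poll_start(&watcher, UV_READABLE | UV_WRITABLE, on_poll);
+ *
+ * Here `handle_readable` is a caller-provided function. */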
+
+
+static int uv__poll_set(uv_poll_t* handle, int events, uv_poll_cb cb) {
+ int submitted_events;
+
+ assert(handle->type == UV_POLL);
+ assert(!(handle->flags & UV_HANDLE_CLOSING));
+ assert((events & ~(UV_READABLE | UV_WRITABLE | UV_DISCONNECT)) == 0);
+
+ handle->events = events;
+ handle->poll_cb = cb;
+
+ if (handle->events == 0) {
+ uv__handle_stop(handle);
+ return 0;
+ }
+
+ uv__handle_start(handle);
+ submitted_events = handle->submitted_events_1 | handle->submitted_events_2;
+
+ if (handle->events & ~submitted_events) {
+ if (handle->flags & UV_HANDLE_POLL_SLOW) {
+ uv__slow_poll_submit_poll_req(handle->loop, handle);
+ } else {
+ uv__fast_poll_submit_poll_req(handle->loop, handle);
+ }
+ }
+
+ return 0;
+}
+
+
+int uv_poll_start(uv_poll_t* handle, int events, uv_poll_cb cb) {
+ return uv__poll_set(handle, events, cb);
+}
+
+
+int uv_poll_stop(uv_poll_t* handle) {
+ return uv__poll_set(handle, 0, handle->poll_cb);
+}
+
+
+void uv_process_poll_req(uv_loop_t* loop, uv_poll_t* handle, uv_req_t* req) {
+ if (!(handle->flags & UV_HANDLE_POLL_SLOW)) {
+ uv__fast_poll_process_poll_req(loop, handle, req);
+ } else {
+ uv__slow_poll_process_poll_req(loop, handle, req);
+ }
+}
+
+
+int uv_poll_close(uv_loop_t* loop, uv_poll_t* handle) {
+ AFD_POLL_INFO afd_poll_info;
+ DWORD error;
+ int result;
+
+ handle->events = 0;
+ uv__handle_closing(handle);
+
+ if (handle->submitted_events_1 == 0 &&
+ handle->submitted_events_2 == 0) {
+ uv_want_endgame(loop, (uv_handle_t*) handle);
+ return 0;
+ }
+
+ if (handle->flags & UV_HANDLE_POLL_SLOW)
+ return 0;
+
+ /* Cancel outstanding poll requests by executing another, unique poll
+ * request that forces the outstanding ones to return. */
+ afd_poll_info.Exclusive = TRUE;
+ afd_poll_info.NumberOfHandles = 1;
+ afd_poll_info.Timeout.QuadPart = INT64_MAX;
+ afd_poll_info.Handles[0].Handle = (HANDLE) handle->socket;
+ afd_poll_info.Handles[0].Status = 0;
+ afd_poll_info.Handles[0].Events = AFD_POLL_ALL;
+
+ result = uv_msafd_poll(handle->socket,
+ &afd_poll_info,
+ uv__get_afd_poll_info_dummy(),
+ uv__get_overlapped_dummy());
+
+ if (result == SOCKET_ERROR) {
+ error = WSAGetLastError();
+ if (error != WSA_IO_PENDING)
+ return uv_translate_sys_error(error);
+ }
+
+ return 0;
+}
+
+
+void uv_poll_endgame(uv_loop_t* loop, uv_poll_t* handle) {
+ assert(handle->flags & UV_HANDLE_CLOSING);
+ assert(!(handle->flags & UV_HANDLE_CLOSED));
+
+ assert(handle->submitted_events_1 == 0);
+ assert(handle->submitted_events_2 == 0);
+
+ uv__handle_close(handle);
+}
diff --git a/Utilities/cmlibuv/src/win/process-stdio.c b/Utilities/cmlibuv/src/win/process-stdio.c
new file mode 100644
index 0000000..355d618
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/process-stdio.c
@@ -0,0 +1,512 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <io.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "uv.h"
+#include "internal.h"
+#include "handle-inl.h"
+
+
+/*
+ * The `child_stdio_buffer` buffer has the following layout:
+ * int number_of_fds
+ * unsigned char crt_flags[number_of_fds]
+ * HANDLE os_handle[number_of_fds]
+ */
+#define CHILD_STDIO_SIZE(count) \
+ (sizeof(int) + \
+ sizeof(unsigned char) * (count) + \
+ sizeof(uintptr_t) * (count))
+
+#define CHILD_STDIO_COUNT(buffer) \
+ *((unsigned int*) (buffer))
+
+#define CHILD_STDIO_CRT_FLAGS(buffer, fd) \
+ *((unsigned char*) (buffer) + sizeof(int) + fd)
+
+#define CHILD_STDIO_HANDLE(buffer, fd) \
+ *((HANDLE*) ((unsigned char*) (buffer) + \
+ sizeof(int) + \
+ sizeof(unsigned char) * \
+ CHILD_STDIO_COUNT((buffer)) + \
+ sizeof(HANDLE) * (fd)))
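+
+/* A worked example of the layout above (illustrative only, assuming
+ * count == 3 and a 64-bit build where sizeof(HANDLE) == 8):
+ *
+ *   offset 0   int            number_of_fds  (3)
+ *   offset 4   unsigned char  crt_flags[3]
+ *   offset 7   HANDLE         os_handle[3]
+ *
+ * so CHILD_STDIO_SIZE(3) == 4 + 3 + 3 * 8 == 31 bytes, and
+ * CHILD_STDIO_HANDLE(buffer, 1) reads the HANDLE stored at offset
+ * 4 + 3 + 8 == 15. */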
+
+
+/* CRT file descriptor mode flags */
+#define FOPEN 0x01
+#define FEOFLAG 0x02
+#define FCRLF 0x04
+#define FPIPE 0x08
+#define FNOINHERIT 0x10
+#define FAPPEND 0x20
+#define FDEV 0x40
+#define FTEXT 0x80
+
+
+/*
+ * Clear the HANDLE_FLAG_INHERIT flag from all HANDLEs that were inherited
+ * from the parent process. Don't check for errors - the stdio handles may not
+ * be valid, or may be closed already. There is no guarantee that this function
+ * does a perfect job.
+ */
+void uv_disable_stdio_inheritance(void) {
+ HANDLE handle;
+ STARTUPINFOW si;
+
+ /* Make the windows stdio handles non-inheritable. */
+ handle = GetStdHandle(STD_INPUT_HANDLE);
+ if (handle != NULL && handle != INVALID_HANDLE_VALUE)
+ SetHandleInformation(handle, HANDLE_FLAG_INHERIT, 0);
+
+ handle = GetStdHandle(STD_OUTPUT_HANDLE);
+ if (handle != NULL && handle != INVALID_HANDLE_VALUE)
+ SetHandleInformation(handle, HANDLE_FLAG_INHERIT, 0);
+
+ handle = GetStdHandle(STD_ERROR_HANDLE);
+ if (handle != NULL && handle != INVALID_HANDLE_VALUE)
+ SetHandleInformation(handle, HANDLE_FLAG_INHERIT, 0);
+
+ /* Make inherited CRT FDs non-inheritable. */
+ GetStartupInfoW(&si);
+ if (uv__stdio_verify(si.lpReserved2, si.cbReserved2))
+ uv__stdio_noinherit(si.lpReserved2);
+}
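+
+/* Callers typically invoke this once, as early as possible after program
+ * start (illustrative only):
+ *
+ *   int main(int argc, char** argv) {
+ *     uv_disable_stdio_inheritance();
+ *     ...
+ *   }
+ */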
+
+
+static int uv__create_stdio_pipe_pair(uv_loop_t* loop,
+ uv_pipe_t* server_pipe, HANDLE* child_pipe_ptr, unsigned int flags) {
+ char pipe_name[64];
+ SECURITY_ATTRIBUTES sa;
+ DWORD server_access = 0;
+ DWORD client_access = 0;
+ HANDLE child_pipe = INVALID_HANDLE_VALUE;
+ int err;
+ int overlap;
+
+ if (flags & UV_READABLE_PIPE) {
+ /* The server needs inbound access too, otherwise CreateNamedPipe() won't
+ * give us the FILE_READ_ATTRIBUTES permission. We need that to probe the
+ * state of the write buffer when we're trying to shut down the pipe. */
+ server_access |= PIPE_ACCESS_OUTBOUND | PIPE_ACCESS_INBOUND;
+ client_access |= GENERIC_READ | FILE_WRITE_ATTRIBUTES;
+ }
+ if (flags & UV_WRITABLE_PIPE) {
+ server_access |= PIPE_ACCESS_INBOUND;
+ client_access |= GENERIC_WRITE | FILE_READ_ATTRIBUTES;
+ }
+
+ /* Create server pipe handle. */
+ err = uv_stdio_pipe_server(loop,
+ server_pipe,
+ server_access,
+ pipe_name,
+ sizeof(pipe_name));
+ if (err)
+ goto error;
+
+ /* Create child pipe handle. */
+ sa.nLength = sizeof sa;
+ sa.lpSecurityDescriptor = NULL;
+ sa.bInheritHandle = TRUE;
+
+ overlap = server_pipe->ipc || (flags & UV_OVERLAPPED_PIPE);
+ child_pipe = CreateFileA(pipe_name,
+ client_access,
+ 0,
+ &sa,
+ OPEN_EXISTING,
+ overlap ? FILE_FLAG_OVERLAPPED : 0,
+ NULL);
+ if (child_pipe == INVALID_HANDLE_VALUE) {
+ err = GetLastError();
+ goto error;
+ }
+
+#ifndef NDEBUG
+ /* Validate that the pipe was opened in the right mode. */
+ {
+ DWORD mode;
+ BOOL r = GetNamedPipeHandleState(child_pipe,
+ &mode,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ 0);
+ assert(r == TRUE);
+ assert(mode == (PIPE_READMODE_BYTE | PIPE_WAIT));
+ }
+#endif
+
+ /* Do a blocking ConnectNamedPipe. This should not block because we have both
+ * ends of the pipe created. */
+ if (!ConnectNamedPipe(server_pipe->handle, NULL)) {
+ if (GetLastError() != ERROR_PIPE_CONNECTED) {
+ err = GetLastError();
+ goto error;
+ }
+ }
+
+ /* The server end is now readable and/or writable. */
+ if (flags & UV_READABLE_PIPE)
+ server_pipe->flags |= UV_HANDLE_WRITABLE;
+ if (flags & UV_WRITABLE_PIPE)
+ server_pipe->flags |= UV_HANDLE_READABLE;
+
+ *child_pipe_ptr = child_pipe;
+ return 0;
+
+ error:
+ if (server_pipe->handle != INVALID_HANDLE_VALUE) {
+ uv_pipe_cleanup(loop, server_pipe);
+ }
+
+ if (child_pipe != INVALID_HANDLE_VALUE) {
+ CloseHandle(child_pipe);
+ }
+
+ return err;
+}
+
+
+static int uv__duplicate_handle(uv_loop_t* loop, HANDLE handle, HANDLE* dup) {
+ HANDLE current_process;
+
+
+ /* _get_osfhandle will sometimes return -2 in case of an error. This seems to
+ * happen when fd <= 2 and the process' corresponding stdio handle is set to
+ * NULL. Unfortunately DuplicateHandle will happily duplicate (HANDLE) -2, so
+ * this situation goes unnoticed until someone tries to use the duplicate.
+ * Therefore we filter out known-invalid handles here. */
+ if (handle == INVALID_HANDLE_VALUE ||
+ handle == NULL ||
+ handle == (HANDLE) -2) {
+ *dup = INVALID_HANDLE_VALUE;
+ return ERROR_INVALID_HANDLE;
+ }
+
+ current_process = GetCurrentProcess();
+
+ if (!DuplicateHandle(current_process,
+ handle,
+ current_process,
+ dup,
+ 0,
+ TRUE,
+ DUPLICATE_SAME_ACCESS)) {
+ *dup = INVALID_HANDLE_VALUE;
+ return GetLastError();
+ }
+
+ return 0;
+}
+
+
+static int uv__duplicate_fd(uv_loop_t* loop, int fd, HANDLE* dup) {
+ HANDLE handle;
+
+ if (fd == -1) {
+ *dup = INVALID_HANDLE_VALUE;
+ return ERROR_INVALID_HANDLE;
+ }
+
+ handle = uv__get_osfhandle(fd);
+ return uv__duplicate_handle(loop, handle, dup);
+}
+
+
+int uv__create_nul_handle(HANDLE* handle_ptr,
+ DWORD access) {
+ HANDLE handle;
+ SECURITY_ATTRIBUTES sa;
+
+ sa.nLength = sizeof sa;
+ sa.lpSecurityDescriptor = NULL;
+ sa.bInheritHandle = TRUE;
+
+ handle = CreateFileW(L"NUL",
+ access,
+ FILE_SHARE_READ | FILE_SHARE_WRITE,
+ &sa,
+ OPEN_EXISTING,
+ 0,
+ NULL);
+ if (handle == INVALID_HANDLE_VALUE) {
+ return GetLastError();
+ }
+
+ *handle_ptr = handle;
+ return 0;
+}
+
+
+int uv__stdio_create(uv_loop_t* loop,
+ const uv_process_options_t* options,
+ BYTE** buffer_ptr) {
+ BYTE* buffer;
+ int count, i;
+ int err;
+
+ count = options->stdio_count;
+
+ if (count < 0 || count > 255) {
+ /* Only support FDs 0-255 */
+ return ERROR_NOT_SUPPORTED;
+ } else if (count < 3) {
+ /* There should always be at least 3 stdio handles. */
+ count = 3;
+ }
+
+ /* Allocate the child stdio buffer */
+ buffer = (BYTE*) uv__malloc(CHILD_STDIO_SIZE(count));
+ if (buffer == NULL) {
+ return ERROR_OUTOFMEMORY;
+ }
+
+ /* Prepopulate the buffer with INVALID_HANDLE_VALUE handles so we can clean
+ * up on failure. */
+ CHILD_STDIO_COUNT(buffer) = count;
+ for (i = 0; i < count; i++) {
+ CHILD_STDIO_CRT_FLAGS(buffer, i) = 0;
+ CHILD_STDIO_HANDLE(buffer, i) = INVALID_HANDLE_VALUE;
+ }
+
+ for (i = 0; i < count; i++) {
+ uv_stdio_container_t fdopt;
+ if (i < options->stdio_count) {
+ fdopt = options->stdio[i];
+ } else {
+ fdopt.flags = UV_IGNORE;
+ }
+
+ switch (fdopt.flags & (UV_IGNORE | UV_CREATE_PIPE | UV_INHERIT_FD |
+ UV_INHERIT_STREAM)) {
+ case UV_IGNORE:
+ /* Starting a process with no stdin/stdout/stderr can confuse it. So no
+ * matter what the user specified, we make sure the first three FDs are
+ * always open in their typical modes, e.g. stdin should be readable and
+ * stdout/stderr should be writable. For FDs > 2, don't do anything - all
+ * handles in the stdio buffer are initialized with INVALID_HANDLE_VALUE,
+ * which should be okay. */
+ if (i <= 2) {
+ DWORD access = (i == 0) ? FILE_GENERIC_READ :
+ FILE_GENERIC_WRITE | FILE_READ_ATTRIBUTES;
+
+ err = uv__create_nul_handle(&CHILD_STDIO_HANDLE(buffer, i),
+ access);
+ if (err)
+ goto error;
+
+ CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN | FDEV;
+ }
+ break;
+
+ case UV_CREATE_PIPE: {
+ /* Create a pair of two connected pipe ends; one end is turned into an
+ * uv_pipe_t for use by the parent. The other one is given to the
+ * child. */
+ uv_pipe_t* parent_pipe = (uv_pipe_t*) fdopt.data.stream;
+ HANDLE child_pipe = INVALID_HANDLE_VALUE;
+
+ /* Create a new, connected pipe pair. stdio[i].stream should point to
+ * an initialized, but not yet connected, pipe handle. */
+ assert(fdopt.data.stream->type == UV_NAMED_PIPE);
+ assert(!(fdopt.data.stream->flags & UV_HANDLE_CONNECTION));
+ assert(!(fdopt.data.stream->flags & UV_HANDLE_PIPESERVER));
+
+ err = uv__create_stdio_pipe_pair(loop,
+ parent_pipe,
+ &child_pipe,
+ fdopt.flags);
+ if (err)
+ goto error;
+
+ CHILD_STDIO_HANDLE(buffer, i) = child_pipe;
+ CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN | FPIPE;
+ break;
+ }
+
+ case UV_INHERIT_FD: {
+ /* Inherit a raw FD. */
+ HANDLE child_handle;
+
+ /* Make an inheritable duplicate of the handle. */
+ err = uv__duplicate_fd(loop, fdopt.data.fd, &child_handle);
+ if (err) {
+ /* If fdopt.data.fd is not valid and fd <= 2, then ignore the
+ * error. */
+ if (fdopt.data.fd <= 2 && err == ERROR_INVALID_HANDLE) {
+ CHILD_STDIO_CRT_FLAGS(buffer, i) = 0;
+ CHILD_STDIO_HANDLE(buffer, i) = INVALID_HANDLE_VALUE;
+ break;
+ }
+ goto error;
+ }
+
+ /* Figure out what the type is. */
+ switch (GetFileType(child_handle)) {
+ case FILE_TYPE_DISK:
+ CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN;
+ break;
+
+ case FILE_TYPE_PIPE:
+ CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN | FPIPE;
+ break;
+
+ case FILE_TYPE_CHAR:
+ case FILE_TYPE_REMOTE:
+ CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN | FDEV;
+ break;
+
+ case FILE_TYPE_UNKNOWN:
+ if (GetLastError() != 0) {
+ err = GetLastError();
+ CloseHandle(child_handle);
+ goto error;
+ }
+ CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN | FDEV;
+ break;
+
+ default:
+ assert(0);
+ return -1;
+ }
+
+ CHILD_STDIO_HANDLE(buffer, i) = child_handle;
+ break;
+ }
+
+ case UV_INHERIT_STREAM: {
+ /* Use an existing stream as the stdio handle for the child. */
+ HANDLE stream_handle, child_handle;
+ unsigned char crt_flags;
+ uv_stream_t* stream = fdopt.data.stream;
+
+ /* Leech the handle out of the stream. */
+ if (stream->type == UV_TTY) {
+ stream_handle = ((uv_tty_t*) stream)->handle;
+ crt_flags = FOPEN | FDEV;
+ } else if (stream->type == UV_NAMED_PIPE &&
+ stream->flags & UV_HANDLE_CONNECTION) {
+ stream_handle = ((uv_pipe_t*) stream)->handle;
+ crt_flags = FOPEN | FPIPE;
+ } else {
+ stream_handle = INVALID_HANDLE_VALUE;
+ crt_flags = 0;
+ }
+
+ if (stream_handle == NULL ||
+ stream_handle == INVALID_HANDLE_VALUE) {
+ /* The handle is already closed, or not yet created, or the stream
+ * type is not supported. */
+ err = ERROR_NOT_SUPPORTED;
+ goto error;
+ }
+
+ /* Make an inheritable copy of the handle. */
+ err = uv__duplicate_handle(loop, stream_handle, &child_handle);
+ if (err)
+ goto error;
+
+ CHILD_STDIO_HANDLE(buffer, i) = child_handle;
+ CHILD_STDIO_CRT_FLAGS(buffer, i) = crt_flags;
+ break;
+ }
+
+ default:
+ assert(0);
+ return -1;
+ }
+ }
+
+ *buffer_ptr = buffer;
+ return 0;
+
+ error:
+ uv__stdio_destroy(buffer);
+ return err;
+}
+
+
+void uv__stdio_destroy(BYTE* buffer) {
+ int i, count;
+
+ count = CHILD_STDIO_COUNT(buffer);
+ for (i = 0; i < count; i++) {
+ HANDLE handle = CHILD_STDIO_HANDLE(buffer, i);
+ if (handle != INVALID_HANDLE_VALUE) {
+ CloseHandle(handle);
+ }
+ }
+
+ uv__free(buffer);
+}
+
+
+void uv__stdio_noinherit(BYTE* buffer) {
+ int i, count;
+
+ count = CHILD_STDIO_COUNT(buffer);
+ for (i = 0; i < count; i++) {
+ HANDLE handle = CHILD_STDIO_HANDLE(buffer, i);
+ if (handle != INVALID_HANDLE_VALUE) {
+ SetHandleInformation(handle, HANDLE_FLAG_INHERIT, 0);
+ }
+ }
+}
+
+
+int uv__stdio_verify(BYTE* buffer, WORD size) {
+ unsigned int count;
+
+ /* Check the buffer pointer. */
+ if (buffer == NULL)
+ return 0;
+
+ /* Verify that the buffer is at least big enough to hold the count. */
+ if (size < CHILD_STDIO_SIZE(0))
+ return 0;
+
+ /* Verify if the count is within range. */
+ count = CHILD_STDIO_COUNT(buffer);
+ if (count > 256)
+ return 0;
+
+ /* Verify that the buffer size is big enough to hold info for N FDs. */
+ if (size < CHILD_STDIO_SIZE(count))
+ return 0;
+
+ return 1;
+}
+
+
+WORD uv__stdio_size(BYTE* buffer) {
+ return (WORD) CHILD_STDIO_SIZE(CHILD_STDIO_COUNT((buffer)));
+}
+
+
+HANDLE uv__stdio_handle(BYTE* buffer, int fd) {
+ return CHILD_STDIO_HANDLE(buffer, fd);
+}
diff --git a/Utilities/cmlibuv/src/win/process.c b/Utilities/cmlibuv/src/win/process.c
new file mode 100644
index 0000000..04718db
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/process.c
@@ -0,0 +1,1337 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <io.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <limits.h>
+#include <wchar.h>
+#include <malloc.h> /* alloca */
+
+#include "uv.h"
+#include "internal.h"
+#include "handle-inl.h"
+#include "req-inl.h"
+
+
+#define SIGKILL 9
+
+
+typedef struct env_var {
+ const WCHAR* const wide;
+ const WCHAR* const wide_eq;
+ const size_t len; /* including null or '=' */
+} env_var_t;
+
+#define E_V(str) { L##str, L##str L"=", sizeof(str) }
+
+static const env_var_t required_vars[] = { /* keep me sorted */
+ E_V("HOMEDRIVE"),
+ E_V("HOMEPATH"),
+ E_V("LOGONSERVER"),
+ E_V("PATH"),
+ E_V("SYSTEMDRIVE"),
+ E_V("SYSTEMROOT"),
+ E_V("TEMP"),
+ E_V("USERDOMAIN"),
+ E_V("USERNAME"),
+ E_V("USERPROFILE"),
+ E_V("WINDIR"),
+};
+
+
+static HANDLE uv_global_job_handle_;
+static uv_once_t uv_global_job_handle_init_guard_ = UV_ONCE_INIT;
+
+
+static void uv__init_global_job_handle(void) {
+ /* Create a job object and set it up to kill all contained processes when
+ * it's closed. Since this handle is made non-inheritable and we're not
+ * giving it to anyone, we're the only process holding a reference to it.
+ * That means that if this process exits it is closed and all the processes
+ * it contains are killed. All processes created with uv_spawn that are not
+ * spawned with the UV_PROCESS_DETACHED flag are assigned to this job.
+ *
+ * We're setting the JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK flag so only the
+ * processes that we explicitly add are affected, and *their* subprocesses
+ * are not. This ensures that our child processes are not limited in their
+ * ability to use job control on Windows versions that don't deal with
+ * nested jobs (prior to Windows 8 / Server 2012). It also lets our child
+ * processes create detached processes without explicitly breaking away
+ * from job control (which uv_spawn doesn't, either).
+ */
+ SECURITY_ATTRIBUTES attr;
+ JOBOBJECT_EXTENDED_LIMIT_INFORMATION info;
+
+ memset(&attr, 0, sizeof attr);
+ attr.bInheritHandle = FALSE;
+
+ memset(&info, 0, sizeof info);
+ info.BasicLimitInformation.LimitFlags =
+ JOB_OBJECT_LIMIT_BREAKAWAY_OK |
+ JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK |
+ JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION |
+ JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE;
+
+ uv_global_job_handle_ = CreateJobObjectW(&attr, NULL);
+ if (uv_global_job_handle_ == NULL)
+ uv_fatal_error(GetLastError(), "CreateJobObjectW");
+
+ if (!SetInformationJobObject(uv_global_job_handle_,
+ JobObjectExtendedLimitInformation,
+ &info,
+ sizeof info))
+ uv_fatal_error(GetLastError(), "SetInformationJobObject");
+}
+
+
+static int uv_utf8_to_utf16_alloc(const char* s, WCHAR** ws_ptr) {
+ int ws_len, r;
+ WCHAR* ws;
+
+ ws_len = MultiByteToWideChar(CP_UTF8,
+ 0,
+ s,
+ -1,
+ NULL,
+ 0);
+ if (ws_len <= 0) {
+ return GetLastError();
+ }
+
+ ws = (WCHAR*) uv__malloc(ws_len * sizeof(WCHAR));
+ if (ws == NULL) {
+ return ERROR_OUTOFMEMORY;
+ }
+
+ r = MultiByteToWideChar(CP_UTF8,
+ 0,
+ s,
+ -1,
+ ws,
+ ws_len);
+ assert(r == ws_len);
+
+ *ws_ptr = ws;
+ return 0;
+}
+
+
+static void uv_process_init(uv_loop_t* loop, uv_process_t* handle) {
+ uv__handle_init(loop, (uv_handle_t*) handle, UV_PROCESS);
+ handle->exit_cb = NULL;
+ handle->pid = 0;
+ handle->exit_signal = 0;
+ handle->wait_handle = INVALID_HANDLE_VALUE;
+ handle->process_handle = INVALID_HANDLE_VALUE;
+ handle->child_stdio_buffer = NULL;
+ handle->exit_cb_pending = 0;
+
+ UV_REQ_INIT(&handle->exit_req, UV_PROCESS_EXIT);
+ handle->exit_req.data = handle;
+}
+
+
+/*
+ * Path search functions
+ */
+
+/*
+ * Helper function for search_path
+ */
+static WCHAR* search_path_join_test(const WCHAR* dir,
+ size_t dir_len,
+ const WCHAR* name,
+ size_t name_len,
+ const WCHAR* ext,
+ size_t ext_len,
+ const WCHAR* cwd,
+ size_t cwd_len) {
+ WCHAR *result, *result_pos;
+ DWORD attrs;
+ if (dir_len > 2 && dir[0] == L'\\' && dir[1] == L'\\') {
+ /* It's a UNC path so ignore cwd */
+ cwd_len = 0;
+ } else if (dir_len >= 1 && (dir[0] == L'/' || dir[0] == L'\\')) {
+ /* It's a full path without drive letter, use cwd's drive letter only */
+ cwd_len = 2;
+ } else if (dir_len >= 2 && dir[1] == L':' &&
+ (dir_len < 3 || (dir[2] != L'/' && dir[2] != L'\\'))) {
+ /* It's a relative path with a drive letter (e.g. D:../some/file)
+ * Replace drive letter in dir by full cwd if it points to the same drive,
+ * otherwise use the dir only.
+ */
+ if (cwd_len < 2 || _wcsnicmp(cwd, dir, 2) != 0) {
+ cwd_len = 0;
+ } else {
+ dir += 2;
+ dir_len -= 2;
+ }
+ } else if (dir_len > 2 && dir[1] == L':') {
+ /* It's an absolute path with drive letter
+ * Don't use the cwd at all
+ */
+ cwd_len = 0;
+ }
+
+ /* Allocate buffer for output */
+ result = result_pos = (WCHAR*)uv__malloc(sizeof(WCHAR) *
+ (cwd_len + 1 + dir_len + 1 + name_len + 1 + ext_len + 1));
+
+ /* Copy cwd */
+ wcsncpy(result_pos, cwd, cwd_len);
+ result_pos += cwd_len;
+
+ /* Add a path separator if cwd didn't end with one */
+ if (cwd_len && wcsrchr(L"\\/:", result_pos[-1]) == NULL) {
+ result_pos[0] = L'\\';
+ result_pos++;
+ }
+
+ /* Copy dir */
+ wcsncpy(result_pos, dir, dir_len);
+ result_pos += dir_len;
+
+ /* Add a separator if the dir didn't end with one */
+ if (dir_len && wcsrchr(L"\\/:", result_pos[-1]) == NULL) {
+ result_pos[0] = L'\\';
+ result_pos++;
+ }
+
+ /* Copy filename */
+ wcsncpy(result_pos, name, name_len);
+ result_pos += name_len;
+
+ if (ext_len) {
+ /* Add a dot if the filename didn't end with one */
+ if (name_len && result_pos[-1] != '.') {
+ result_pos[0] = L'.';
+ result_pos++;
+ }
+
+ /* Copy extension */
+ wcsncpy(result_pos, ext, ext_len);
+ result_pos += ext_len;
+ }
+
+ /* Null terminator */
+ result_pos[0] = L'\0';
+
+ attrs = GetFileAttributesW(result);
+
+ if (attrs != INVALID_FILE_ATTRIBUTES &&
+ !(attrs & FILE_ATTRIBUTE_DIRECTORY)) {
+ return result;
+ }
+
+ uv__free(result);
+ return NULL;
+}
+
+
+/*
+ * Helper function for search_path
+ */
+static WCHAR* path_search_walk_ext(const WCHAR *dir,
+ size_t dir_len,
+ const WCHAR *name,
+ size_t name_len,
+ WCHAR *cwd,
+ size_t cwd_len,
+ int name_has_ext) {
+ WCHAR* result;
+
+ /* If the name itself has a nonempty extension, try this extension first */
+ if (name_has_ext) {
+ result = search_path_join_test(dir, dir_len,
+ name, name_len,
+ L"", 0,
+ cwd, cwd_len);
+ if (result != NULL) {
+ return result;
+ }
+ }
+
+ /* Try .com extension */
+ result = search_path_join_test(dir, dir_len,
+ name, name_len,
+ L"com", 3,
+ cwd, cwd_len);
+ if (result != NULL) {
+ return result;
+ }
+
+ /* Try .exe extension */
+ result = search_path_join_test(dir, dir_len,
+ name, name_len,
+ L"exe", 3,
+ cwd, cwd_len);
+ if (result != NULL) {
+ return result;
+ }
+
+ return NULL;
+}
+
+
+/*
+ * search_path searches the system path for an executable filename -
+ * the windows API doesn't provide this as a standalone function nor as an
+ * option to CreateProcess.
+ *
+ * It tries to return an absolute filename.
+ *
+ * Furthermore, it tries to follow the semantics of cmd.exe, with the
+ * exception that the PATHEXT environment variable isn't used. Since
+ * CreateProcess can start only .com and .exe files, only those extensions are
+ * tried. This matches the behavior of msvcrt's spawn functions.
+ *
+ * - Do not search the path if the filename already contains a path (either
+ * relative or absolute).
+ *
+ * - If there's really only a filename, check the current directory for file,
+ * then search all path directories.
+ *
+ * - If filename specified has *any* extension, search for the file with the
+ * specified extension first.
+ *
+ * - If the literal filename is not found in a directory, try *appending*
+ * (not replacing) .com first and then .exe.
+ *
+ * - The path variable may contain relative paths; relative paths are relative
+ * to the cwd.
+ *
+ * - Directories in path may or may not end with a trailing backslash.
+ *
+ * - CMD does not trim leading/trailing whitespace from path/pathext entries
+ * nor from the environment variables as a whole.
+ *
+ * - When cmd.exe cannot read a directory, it will just skip it and go on
+ * searching. However, unlike posix-y systems, it will happily try to run a
+ * file that is not readable/executable; if the spawn fails it will not
+ * continue searching.
+ *
+ * UNC path support: we are dealing with UNC paths in both the path and the
+ * filename. This is a deviation from what cmd.exe does (it does not let you
+ * start a program by specifying a UNC path on the command line) but this is
+ * really a pointless restriction.
+ *
+ */
+static WCHAR* search_path(const WCHAR *file,
+ WCHAR *cwd,
+ const WCHAR *path) {
+ int file_has_dir;
+ WCHAR* result = NULL;
+ WCHAR *file_name_start;
+ WCHAR *dot;
+ const WCHAR *dir_start, *dir_end, *dir_path;
+ size_t dir_len;
+ int name_has_ext;
+
+ size_t file_len = wcslen(file);
+ size_t cwd_len = wcslen(cwd);
+
+ /* If the caller supplies an empty filename,
+ * we're not gonna return c:\windows\.exe -- GFY!
+ */
+ if (file_len == 0
+ || (file_len == 1 && file[0] == L'.')) {
+ return NULL;
+ }
+
+ /* Find the start of the filename so we can split the directory from the
+ * name. */
+ for (file_name_start = (WCHAR*)file + file_len;
+ file_name_start > file
+ && file_name_start[-1] != L'\\'
+ && file_name_start[-1] != L'/'
+ && file_name_start[-1] != L':';
+ file_name_start--);
+
+ file_has_dir = file_name_start != file;
+
+ /* Check if the filename includes an extension */
+ dot = wcschr(file_name_start, L'.');
+ name_has_ext = (dot != NULL && dot[1] != L'\0');
+
+ if (file_has_dir) {
+ /* The file has a path inside, don't use path */
+ result = path_search_walk_ext(
+ file, file_name_start - file,
+ file_name_start, file_len - (file_name_start - file),
+ cwd, cwd_len,
+ name_has_ext);
+
+ } else {
+ dir_end = path;
+
+ /* The file is really only a name; look in cwd first, then scan path */
+ result = path_search_walk_ext(L"", 0,
+ file, file_len,
+ cwd, cwd_len,
+ name_has_ext);
+
+ while (result == NULL) {
+ if (*dir_end == L'\0') {
+ break;
+ }
+
+ /* Skip the separator that dir_end now points to */
+ if (dir_end != path || *path == L';') {
+ dir_end++;
+ }
+
+ /* Next slice starts just after where the previous one ended */
+ dir_start = dir_end;
+
+ /* If path is quoted, find quote end */
+ if (*dir_start == L'"' || *dir_start == L'\'') {
+ dir_end = wcschr(dir_start + 1, *dir_start);
+ if (dir_end == NULL) {
+ dir_end = wcschr(dir_start, L'\0');
+ }
+ }
+ /* Slice until the next ; or \0 is found */
+ dir_end = wcschr(dir_end, L';');
+ if (dir_end == NULL) {
+ dir_end = wcschr(dir_start, L'\0');
+ }
+
+ /* If the slice is zero-length, don't bother */
+ if (dir_end - dir_start == 0) {
+ continue;
+ }
+
+ dir_path = dir_start;
+ dir_len = dir_end - dir_start;
+
+ /* Adjust if the path is quoted. */
+ if (dir_path[0] == '"' || dir_path[0] == '\'') {
+ ++dir_path;
+ --dir_len;
+ }
+
+ if (dir_path[dir_len - 1] == '"' || dir_path[dir_len - 1] == '\'') {
+ --dir_len;
+ }
+
+ result = path_search_walk_ext(dir_path, dir_len,
+ file, file_len,
+ cwd, cwd_len,
+ name_has_ext);
+ }
+ }
+
+ return result;
+}
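+
+/* For illustration (hypothetical files and paths), with cwd = L"C:\\work" and
+ * path = L"C:\\tools;D:\\bin" the rules above play out as follows:
+ *
+ *   search_path(L"cl", cwd, path)
+ *     tries C:\work\cl.com, C:\work\cl.exe, then C:\tools\cl.com, ... and
+ *     returns the first match; the bare name "cl" itself is never tried
+ *     because it has no extension.
+ *
+ *   search_path(L"foo.bat", cwd, path)
+ *     tries foo.bat literally first (it has an extension), then foo.bat.com
+ *     and foo.bat.exe in each directory.
+ *
+ *   search_path(L"..\\bin\\app", cwd, path)
+ *     contains a directory component, so it is resolved against cwd only and
+ *     the path argument is ignored.
+ */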
+
+
+/*
+ * Quotes command line arguments
+ * Returns a pointer to the end (next char to be written) of the buffer
+ */
+WCHAR* quote_cmd_arg(const WCHAR *source, WCHAR *target) {
+ size_t len = wcslen(source);
+ size_t i;
+ int quote_hit;
+ WCHAR* start;
+
+ if (len == 0) {
+ /* Need double quotation for empty argument */
+ *(target++) = L'"';
+ *(target++) = L'"';
+ return target;
+ }
+
+ if (NULL == wcspbrk(source, L" \t\"")) {
+ /* No quotation needed */
+ wcsncpy(target, source, len);
+ target += len;
+ return target;
+ }
+
+ if (NULL == wcspbrk(source, L"\"\\")) {
+ /*
+ * No embedded double quotes or backslashes, so I can just wrap
+ * quote marks around the whole thing.
+ */
+ *(target++) = L'"';
+ wcsncpy(target, source, len);
+ target += len;
+ *(target++) = L'"';
+ return target;
+ }
+
+ /*
+ * Expected input/output:
+ * input : hello"world
+ * output: "hello\"world"
+ * input : hello""world
+ * output: "hello\"\"world"
+ * input : hello\world
+ * output: hello\world
+ * input : hello\\world
+ * output: hello\\world
+ * input : hello\"world
+ * output: "hello\\\"world"
+ * input : hello\\"world
+ * output: "hello\\\\\"world"
+ * input : hello world\
+ * output: "hello world\\"
+ */
+
+ *(target++) = L'"';
+ start = target;
+ quote_hit = 1;
+
+ for (i = len; i > 0; --i) {
+ *(target++) = source[i - 1];
+
+ if (quote_hit && source[i - 1] == L'\\') {
+ *(target++) = L'\\';
+ } else if(source[i - 1] == L'"') {
+ quote_hit = 1;
+ *(target++) = L'\\';
+ } else {
+ quote_hit = 0;
+ }
+ }
+ target[0] = L'\0';
+ wcsrev(start);
+ *(target++) = L'"';
+ return target;
+}
+
+
+int make_program_args(char** args, int verbatim_arguments, WCHAR** dst_ptr) {
+ char** arg;
+ WCHAR* dst = NULL;
+ WCHAR* temp_buffer = NULL;
+ size_t dst_len = 0;
+ size_t temp_buffer_len = 0;
+ WCHAR* pos;
+ int arg_count = 0;
+ int err = 0;
+
+ /* Count the required size. */
+ for (arg = args; *arg; arg++) {
+ DWORD arg_len;
+
+ arg_len = MultiByteToWideChar(CP_UTF8,
+ 0,
+ *arg,
+ -1,
+ NULL,
+ 0);
+ if (arg_len == 0) {
+ return GetLastError();
+ }
+
+ dst_len += arg_len;
+
+ if (arg_len > temp_buffer_len)
+ temp_buffer_len = arg_len;
+
+ arg_count++;
+ }
+
+ /* Adjust for potential quotes. Also assume the worst-case scenario that
+ * every character needs escaping, so we need twice as much space. */
+ dst_len = dst_len * 2 + arg_count * 2;
+
+ /* Allocate buffer for the final command line. */
+ dst = (WCHAR*) uv__malloc(dst_len * sizeof(WCHAR));
+ if (dst == NULL) {
+ err = ERROR_OUTOFMEMORY;
+ goto error;
+ }
+
+ /* Allocate temporary working buffer. */
+ temp_buffer = (WCHAR*) uv__malloc(temp_buffer_len * sizeof(WCHAR));
+ if (temp_buffer == NULL) {
+ err = ERROR_OUTOFMEMORY;
+ goto error;
+ }
+
+ pos = dst;
+ for (arg = args; *arg; arg++) {
+ DWORD arg_len;
+
+ /* Convert argument to wide char. */
+ arg_len = MultiByteToWideChar(CP_UTF8,
+ 0,
+ *arg,
+ -1,
+ temp_buffer,
+ (int) (dst + dst_len - pos));
+ if (arg_len == 0) {
+ err = GetLastError();
+ goto error;
+ }
+
+ if (verbatim_arguments) {
+ /* Copy verbatim. */
+ wcscpy(pos, temp_buffer);
+ pos += arg_len - 1;
+ } else {
+ /* Quote/escape, if needed. */
+ pos = quote_cmd_arg(temp_buffer, pos);
+ }
+
+ *pos++ = *(arg + 1) ? L' ' : L'\0';
+ }
+
+ uv__free(temp_buffer);
+
+ *dst_ptr = dst;
+ return 0;
+
+error:
+ uv__free(dst);
+ uv__free(temp_buffer);
+ return err;
+}
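+
+/* For illustration (hypothetical argv), with verbatim_arguments == 0 an input
+ * of { "foo.exe", "hello world", "say \"hi\"", NULL } is converted to the
+ * single UTF-16 command line
+ *
+ *   L"foo.exe \"hello world\" \"say \\\"hi\\\"\""
+ *
+ * following the quoting rules of quote_cmd_arg() above. When the caller
+ * passes UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS, the arguments are only
+ * converted to UTF-16 and joined with spaces, with no quoting or escaping.
+ */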
+
+
+int env_strncmp(const wchar_t* a, int na, const wchar_t* b) {
+ wchar_t* a_eq;
+ wchar_t* b_eq;
+ wchar_t* A;
+ wchar_t* B;
+ int nb;
+ int r;
+
+ if (na < 0) {
+ a_eq = wcschr(a, L'=');
+ assert(a_eq);
+ na = (int)(long)(a_eq - a);
+ } else {
+ na--;
+ }
+ b_eq = wcschr(b, L'=');
+ assert(b_eq);
+ nb = b_eq - b;
+
+ A = alloca((na+1) * sizeof(wchar_t));
+ B = alloca((nb+1) * sizeof(wchar_t));
+
+ r = LCMapStringW(LOCALE_INVARIANT, LCMAP_UPPERCASE, a, na, A, na);
+ assert(r==na);
+ A[na] = L'\0';
+ r = LCMapStringW(LOCALE_INVARIANT, LCMAP_UPPERCASE, b, nb, B, nb);
+ assert(r==nb);
+ B[nb] = L'\0';
+
+ while (1) {
+ wchar_t AA = *A++;
+ wchar_t BB = *B++;
+ if (AA < BB) {
+ return -1;
+ } else if (AA > BB) {
+ return 1;
+ } else if (!AA && !BB) {
+ return 0;
+ }
+ }
+}
+
+
+static int qsort_wcscmp(const void *a, const void *b) {
+ wchar_t* astr = *(wchar_t* const*)a;
+ wchar_t* bstr = *(wchar_t* const*)b;
+ return env_strncmp(astr, -1, bstr);
+}
+
+
+/*
+ * The way Windows takes environment variables is different from what C does;
+ * Windows wants a contiguous block of null-terminated strings, terminated
+ * with an additional null.
+ *
+ * Windows has a few "essential" environment variables. Winsock will fail
+ * to initialize if SYSTEMROOT is not defined; some APIs make reference to
+ * TEMP. SYSTEMDRIVE is probably also important. We therefore ensure that
+ * these get defined if the input environment block does not contain any
+ * values for them.
+ *
+ * Also add variables known to Cygwin to be required for correct
+ * subprocess operation in many cases:
+ * https://github.com/Alexpux/Cygwin/blob/b266b04fbbd3a595f02ea149e4306d3ab9b1fe3d/winsup/cygwin/environ.cc#L955
+ *
+ */
+int make_program_env(char* env_block[], WCHAR** dst_ptr) {
+ WCHAR* dst;
+ WCHAR* ptr;
+ char** env;
+ size_t env_len = 0;
+ int len;
+ size_t i;
+ DWORD var_size;
+ size_t env_block_count = 1; /* 1 for null-terminator */
+ WCHAR* dst_copy;
+ WCHAR** ptr_copy;
+ WCHAR** env_copy;
+ DWORD required_vars_value_len[ARRAY_SIZE(required_vars)];
+
+ /* first pass: determine size in UTF-16 */
+ for (env = env_block; *env; env++) {
+ int len;
+ if (strchr(*env, '=')) {
+ len = MultiByteToWideChar(CP_UTF8,
+ 0,
+ *env,
+ -1,
+ NULL,
+ 0);
+ if (len <= 0) {
+ return GetLastError();
+ }
+ env_len += len;
+ env_block_count++;
+ }
+ }
+
+ /* second pass: copy to UTF-16 environment block */
+ dst_copy = (WCHAR*)uv__malloc(env_len * sizeof(WCHAR));
+ if (dst_copy == NULL && env_len > 0) {
+ return ERROR_OUTOFMEMORY;
+ }
+ env_copy = alloca(env_block_count * sizeof(WCHAR*));
+
+ ptr = dst_copy;
+ ptr_copy = env_copy;
+ for (env = env_block; *env; env++) {
+ if (strchr(*env, '=')) {
+ len = MultiByteToWideChar(CP_UTF8,
+ 0,
+ *env,
+ -1,
+ ptr,
+ (int) (env_len - (ptr - dst_copy)));
+ if (len <= 0) {
+ DWORD err = GetLastError();
+ uv__free(dst_copy);
+ return err;
+ }
+ *ptr_copy++ = ptr;
+ ptr += len;
+ }
+ }
+ *ptr_copy = NULL;
+ assert(env_len == 0 || env_len == (size_t) (ptr - dst_copy));
+
+ /* sort our (UTF-16) copy */
+ qsort(env_copy, env_block_count-1, sizeof(wchar_t*), qsort_wcscmp);
+
+ /* third pass: check for required variables */
+ for (ptr_copy = env_copy, i = 0; i < ARRAY_SIZE(required_vars); ) {
+ int cmp;
+ if (!*ptr_copy) {
+ cmp = -1;
+ } else {
+ cmp = env_strncmp(required_vars[i].wide_eq,
+ required_vars[i].len,
+ *ptr_copy);
+ }
+ if (cmp < 0) {
+ /* missing required var */
+ var_size = GetEnvironmentVariableW(required_vars[i].wide, NULL, 0);
+ required_vars_value_len[i] = var_size;
+ if (var_size != 0) {
+ env_len += required_vars[i].len;
+ env_len += var_size;
+ }
+ i++;
+ } else {
+ ptr_copy++;
+ if (cmp == 0)
+ i++;
+ }
+ }
+
+ /* final pass: copy, in sort order, and inserting required variables */
+ dst = uv__malloc((1+env_len) * sizeof(WCHAR));
+ if (!dst) {
+ uv__free(dst_copy);
+ return ERROR_OUTOFMEMORY;
+ }
+
+ for (ptr = dst, ptr_copy = env_copy, i = 0;
+ *ptr_copy || i < ARRAY_SIZE(required_vars);
+ ptr += len) {
+ int cmp;
+ if (i >= ARRAY_SIZE(required_vars)) {
+ cmp = 1;
+ } else if (!*ptr_copy) {
+ cmp = -1;
+ } else {
+ cmp = env_strncmp(required_vars[i].wide_eq,
+ required_vars[i].len,
+ *ptr_copy);
+ }
+ if (cmp < 0) {
+ /* missing required var */
+ len = required_vars_value_len[i];
+ if (len) {
+ wcscpy(ptr, required_vars[i].wide_eq);
+ ptr += required_vars[i].len;
+ var_size = GetEnvironmentVariableW(required_vars[i].wide,
+ ptr,
+ (int) (env_len - (ptr - dst)));
+ if (var_size != (DWORD) (len - 1)) { /* TODO: handle race condition? */
+ uv_fatal_error(GetLastError(), "GetEnvironmentVariableW");
+ }
+ }
+ i++;
+ } else {
+ /* copy var from env_block */
+ len = wcslen(*ptr_copy) + 1;
+ wmemcpy(ptr, *ptr_copy, len);
+ ptr_copy++;
+ if (cmp == 0)
+ i++;
+ }
+ }
+
+ /* Terminate with an extra NULL. */
+ assert(env_len == (size_t) (ptr - dst));
+ *ptr = L'\0';
+
+ uv__free(dst_copy);
+ *dst_ptr = dst;
+ return 0;
+}
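+
+/* For illustration (hypothetical values), an input block of
+ * { "FOO=bar", "PATH=C:\\tools", NULL } ends up as a single UTF-16 buffer of
+ * the form
+ *
+ *   L"FOO=bar\0...\0PATH=C:\\tools\0...\0SYSTEMROOT=C:\\Windows\0...\0\0"
+ *
+ * i.e. entries sorted case-insensitively, each terminated by L'\0', the whole
+ * block terminated by an extra L'\0', and any of the required variables that
+ * the caller did not supply copied in from this process's own environment.
+ */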
+
+/*
+ * Attempt to find the value of the PATH environment variable in the child's
+ * preprocessed environment.
+ *
+ * If found, a pointer into `env` is returned. If not found, NULL is returned.
+ */
+static WCHAR* find_path(WCHAR *env) {
+ for (; env != NULL && *env != 0; env += wcslen(env) + 1) {
+ if ((env[0] == L'P' || env[0] == L'p') &&
+ (env[1] == L'A' || env[1] == L'a') &&
+ (env[2] == L'T' || env[2] == L't') &&
+ (env[3] == L'H' || env[3] == L'h') &&
+ (env[4] == L'=')) {
+ return &env[5];
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * Called on Windows thread-pool thread to indicate that
+ * a child process has exited.
+ */
+static void CALLBACK exit_wait_callback(void* data, BOOLEAN didTimeout) {
+ uv_process_t* process = (uv_process_t*) data;
+ uv_loop_t* loop = process->loop;
+
+ assert(didTimeout == FALSE);
+ assert(process);
+ assert(!process->exit_cb_pending);
+
+ process->exit_cb_pending = 1;
+
+ /* Post completed */
+ POST_COMPLETION_FOR_REQ(loop, &process->exit_req);
+}
+
+
+/* Called on main thread after a child process has exited. */
+void uv_process_proc_exit(uv_loop_t* loop, uv_process_t* handle) {
+ int64_t exit_code;
+ DWORD status;
+
+ assert(handle->exit_cb_pending);
+ handle->exit_cb_pending = 0;
+
+ /* If we're closing, don't call the exit callback. Just schedule a close
+ * callback now. */
+ if (handle->flags & UV_HANDLE_CLOSING) {
+ uv_want_endgame(loop, (uv_handle_t*) handle);
+ return;
+ }
+
+ /* Unregister from process notification. */
+ if (handle->wait_handle != INVALID_HANDLE_VALUE) {
+ UnregisterWait(handle->wait_handle);
+ handle->wait_handle = INVALID_HANDLE_VALUE;
+ }
+
+ /* Set the handle to inactive: no callbacks will be made after the exit
+ * callback. */
+ uv__handle_stop(handle);
+
+ if (GetExitCodeProcess(handle->process_handle, &status)) {
+ exit_code = status;
+ } else {
+ /* Unable to obtain the exit code. This should never happen. */
+ exit_code = uv_translate_sys_error(GetLastError());
+ }
+
+ /* Fire the exit callback. */
+ if (handle->exit_cb) {
+ handle->exit_cb(handle, exit_code, handle->exit_signal);
+ }
+}
+
+
+void uv_process_close(uv_loop_t* loop, uv_process_t* handle) {
+ uv__handle_closing(handle);
+
+ if (handle->wait_handle != INVALID_HANDLE_VALUE) {
+ /* This blocks until either the wait was cancelled, or the callback has
+ * completed. */
+ BOOL r = UnregisterWaitEx(handle->wait_handle, INVALID_HANDLE_VALUE);
+ if (!r) {
+ /* This should never happen, and if it happens, we can't recover... */
+ uv_fatal_error(GetLastError(), "UnregisterWaitEx");
+ }
+
+ handle->wait_handle = INVALID_HANDLE_VALUE;
+ }
+
+ if (!handle->exit_cb_pending) {
+ uv_want_endgame(loop, (uv_handle_t*)handle);
+ }
+}
+
+
+void uv_process_endgame(uv_loop_t* loop, uv_process_t* handle) {
+ assert(!handle->exit_cb_pending);
+ assert(handle->flags & UV_HANDLE_CLOSING);
+ assert(!(handle->flags & UV_HANDLE_CLOSED));
+
+ /* Clean-up the process handle. */
+ CloseHandle(handle->process_handle);
+
+ uv__handle_close(handle);
+}
+
+
+int uv_spawn(uv_loop_t* loop,
+ uv_process_t* process,
+ const uv_process_options_t* options) {
+ int i;
+ int err = 0;
+ WCHAR* path = NULL, *alloc_path = NULL;
+ BOOL result;
+ WCHAR* application_path = NULL, *application = NULL, *arguments = NULL,
+ *env = NULL, *cwd = NULL;
+ STARTUPINFOW startup;
+ PROCESS_INFORMATION info;
+ DWORD process_flags;
+
+ uv_process_init(loop, process);
+ process->exit_cb = options->exit_cb;
+
+ if (options->flags & (UV_PROCESS_SETGID | UV_PROCESS_SETUID)) {
+ return UV_ENOTSUP;
+ }
+
+ if (options->file == NULL ||
+ options->args == NULL) {
+ return UV_EINVAL;
+ }
+
+ if (options->cpumask != NULL) {
+ if (options->cpumask_size < (size_t)uv_cpumask_size()) {
+ return UV_EINVAL;
+ }
+ }
+
+ assert(options->file != NULL);
+ assert(!(options->flags & ~(UV_PROCESS_DETACHED |
+ UV_PROCESS_SETGID |
+ UV_PROCESS_SETUID |
+ UV_PROCESS_WINDOWS_HIDE |
+ UV_PROCESS_WINDOWS_HIDE_CONSOLE |
+ UV_PROCESS_WINDOWS_HIDE_GUI |
+ UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS)));
+
+ err = uv_utf8_to_utf16_alloc(options->file, &application);
+ if (err)
+ goto done;
+
+ err = make_program_args(
+ options->args,
+ options->flags & UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS,
+ &arguments);
+ if (err)
+ goto done;
+
+ if (options->env) {
+ err = make_program_env(options->env, &env);
+ if (err)
+ goto done;
+ }
+
+ if (options->cwd) {
+ /* Explicit cwd */
+ err = uv_utf8_to_utf16_alloc(options->cwd, &cwd);
+ if (err)
+ goto done;
+
+ } else {
+ /* Inherit cwd */
+ DWORD cwd_len, r;
+
+ cwd_len = GetCurrentDirectoryW(0, NULL);
+ if (!cwd_len) {
+ err = GetLastError();
+ goto done;
+ }
+
+ cwd = (WCHAR*) uv__malloc(cwd_len * sizeof(WCHAR));
+ if (cwd == NULL) {
+ err = ERROR_OUTOFMEMORY;
+ goto done;
+ }
+
+ r = GetCurrentDirectoryW(cwd_len, cwd);
+ if (r == 0 || r >= cwd_len) {
+ err = GetLastError();
+ goto done;
+ }
+ }
+
+ /* Get PATH environment variable. */
+ path = find_path(env);
+ if (path == NULL) {
+ DWORD path_len, r;
+
+ path_len = GetEnvironmentVariableW(L"PATH", NULL, 0);
+ if (path_len == 0) {
+ err = GetLastError();
+ goto done;
+ }
+
+ alloc_path = (WCHAR*) uv__malloc(path_len * sizeof(WCHAR));
+ if (alloc_path == NULL) {
+ err = ERROR_OUTOFMEMORY;
+ goto done;
+ }
+ path = alloc_path;
+
+ r = GetEnvironmentVariableW(L"PATH", path, path_len);
+ if (r == 0 || r >= path_len) {
+ err = GetLastError();
+ goto done;
+ }
+ }
+
+ err = uv__stdio_create(loop, options, &process->child_stdio_buffer);
+ if (err)
+ goto done;
+
+ application_path = search_path(application,
+ cwd,
+ path);
+ if (application_path == NULL) {
+ /* Not found. */
+ err = ERROR_FILE_NOT_FOUND;
+ goto done;
+ }
+
+ startup.cb = sizeof(startup);
+ startup.lpReserved = NULL;
+ startup.lpDesktop = NULL;
+ startup.lpTitle = NULL;
+ startup.dwFlags = STARTF_USESTDHANDLES | STARTF_USESHOWWINDOW;
+
+ startup.cbReserved2 = uv__stdio_size(process->child_stdio_buffer);
+ startup.lpReserved2 = (BYTE*) process->child_stdio_buffer;
+
+ startup.hStdInput = uv__stdio_handle(process->child_stdio_buffer, 0);
+ startup.hStdOutput = uv__stdio_handle(process->child_stdio_buffer, 1);
+ startup.hStdError = uv__stdio_handle(process->child_stdio_buffer, 2);
+
+ process_flags = CREATE_UNICODE_ENVIRONMENT;
+
+ if ((options->flags & UV_PROCESS_WINDOWS_HIDE_CONSOLE) ||
+ (options->flags & UV_PROCESS_WINDOWS_HIDE)) {
+ /* Avoid creating console window if stdio is not inherited. */
+ for (i = 0; i < options->stdio_count; i++) {
+ if (options->stdio[i].flags & UV_INHERIT_FD)
+ break;
+ if (i == options->stdio_count - 1)
+ process_flags |= CREATE_NO_WINDOW;
+ }
+ }
+ if ((options->flags & UV_PROCESS_WINDOWS_HIDE_GUI) ||
+ (options->flags & UV_PROCESS_WINDOWS_HIDE)) {
+ /* Use SW_HIDE to avoid any potential process window. */
+ startup.wShowWindow = SW_HIDE;
+ } else {
+ startup.wShowWindow = SW_SHOWDEFAULT;
+ }
+
+ if (options->flags & UV_PROCESS_DETACHED) {
+ /* Note that we're not setting the CREATE_BREAKAWAY_FROM_JOB flag. That
+ * means that libuv might not let you create a fully daemonized process
+ * when run under job control. However the type of job control that libuv
+ * itself creates doesn't trickle down to subprocesses so they can still
+ * daemonize.
+ *
+ * A reason to not do this is that CREATE_BREAKAWAY_FROM_JOB makes the
+ * CreateProcess call fail if we're under job control that doesn't allow
+ * breakaway.
+ */
+ process_flags |= DETACHED_PROCESS | CREATE_NEW_PROCESS_GROUP;
+ }
+
+ if (options->cpumask != NULL) {
+ /* Create the child in a suspended state so we have a chance to set
+ its process affinity before it runs. */
+ process_flags |= CREATE_SUSPENDED;
+ }
+
+ if (!CreateProcessW(application_path,
+ arguments,
+ NULL,
+ NULL,
+ 1,
+ process_flags,
+ env,
+ cwd,
+ &startup,
+ &info)) {
+ /* CreateProcessW failed. */
+ err = GetLastError();
+ goto done;
+ }
+
+ if (options->cpumask != NULL) {
+ /* The child is currently suspended. Set its process affinity
+ or terminate it if we can't. */
+ int i;
+ int cpumasksize;
+ DWORD_PTR sysmask;
+ DWORD_PTR oldmask;
+ DWORD_PTR newmask;
+
+ cpumasksize = uv_cpumask_size();
+
+ if (!GetProcessAffinityMask(info.hProcess, &oldmask, &sysmask)) {
+ err = GetLastError();
+ TerminateProcess(info.hProcess, 1);
+ goto done;
+ }
+
+ newmask = 0;
+ for (i = 0; i < cpumasksize; i++) {
+ if (options->cpumask[i]) {
+ if (oldmask & (((DWORD_PTR)1) << i)) {
+ newmask |= ((DWORD_PTR)1) << i;
+ } else {
+ err = UV_EINVAL;
+ TerminateProcess(info.hProcess, 1);
+ goto done;
+ }
+ }
+ }
+
+ if (!SetProcessAffinityMask(info.hProcess, newmask)) {
+ err = GetLastError();
+ TerminateProcess(info.hProcess, 1);
+ goto done;
+ }
+
+ /* The process affinity of the child is set. Let it run. */
+ if (ResumeThread(info.hThread) == ((DWORD)-1)) {
+ err = GetLastError();
+ TerminateProcess(info.hProcess, 1);
+ goto done;
+ }
+ }
+
+ /* Spawn succeeded. Beyond this point, failure is reported asynchronously. */
+
+ process->process_handle = info.hProcess;
+ process->pid = info.dwProcessId;
+
+ /* If the process isn't spawned as detached, assign to the global job object
+ * so Windows will kill it when the parent process dies. */
+ if (!(options->flags & UV_PROCESS_DETACHED)) {
+ uv_once(&uv_global_job_handle_init_guard_, uv__init_global_job_handle);
+
+ if (!AssignProcessToJobObject(uv_global_job_handle_, info.hProcess)) {
+ /* AssignProcessToJobObject might fail if this process is under job
+ * control and the job doesn't have the
+ * JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK flag set, on a Windows version
+ * that doesn't support nested jobs.
+ *
+ * When that happens we just swallow the error and continue without
+ * establishing a kill-child-on-parent-exit relationship, otherwise
+ * there would be no way for libuv applications run under job control
+ * to spawn processes at all.
+ */
+ DWORD err = GetLastError();
+ if (err != ERROR_ACCESS_DENIED)
+ uv_fatal_error(err, "AssignProcessToJobObject");
+ }
+ }
+
+ /* Set IPC pid to all IPC pipes. */
+ for (i = 0; i < options->stdio_count; i++) {
+ const uv_stdio_container_t* fdopt = &options->stdio[i];
+ if (fdopt->flags & UV_CREATE_PIPE &&
+ fdopt->data.stream->type == UV_NAMED_PIPE &&
+ ((uv_pipe_t*) fdopt->data.stream)->ipc) {
+ ((uv_pipe_t*) fdopt->data.stream)->pipe.conn.ipc_remote_pid =
+ info.dwProcessId;
+ }
+ }
+
+ /* Setup notifications for when the child process exits. */
+ result = RegisterWaitForSingleObject(&process->wait_handle,
+ process->process_handle, exit_wait_callback, (void*)process, INFINITE,
+ WT_EXECUTEINWAITTHREAD | WT_EXECUTEONLYONCE);
+ if (!result) {
+ uv_fatal_error(GetLastError(), "RegisterWaitForSingleObject");
+ }
+
+ CloseHandle(info.hThread);
+
+ assert(!err);
+
+ /* Make the handle active. It will remain active until the exit callback is
+ * made or the handle is closed, whichever happens first. */
+ uv__handle_start(process);
+
+ /* Cleanup, whether we succeeded or failed. */
+ done:
+ uv__free(application);
+ uv__free(application_path);
+ uv__free(arguments);
+ uv__free(cwd);
+ uv__free(env);
+ uv__free(alloc_path);
+
+ if (process->child_stdio_buffer != NULL) {
+ /* Clean up child stdio handles. */
+ uv__stdio_destroy(process->child_stdio_buffer);
+ process->child_stdio_buffer = NULL;
+ }
+
+ return uv_translate_sys_error(err);
+}
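+
+/* A minimal caller-side sketch of the public API implemented above; the names
+ * on_child_exit, loop and child are placeholders:
+ *
+ *   static void on_child_exit(uv_process_t* req,
+ *                             int64_t exit_status,
+ *                             int term_signal) {
+ *     uv_close((uv_handle_t*) req, NULL);
+ *   }
+ *
+ *   uv_process_t child;
+ *   uv_process_options_t options;
+ *   char* args[] = { "cmake", "--version", NULL };
+ *   memset(&options, 0, sizeof(options));
+ *   options.file = "cmake";
+ *   options.args = args;
+ *   options.exit_cb = on_child_exit;
+ *   int err = uv_spawn(loop, &child, &options);   (0 on success)
+ *   uv_run(loop, UV_RUN_DEFAULT);
+ */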
+
+
+static int uv__kill(HANDLE process_handle, int signum) {
+ if (signum < 0 || signum >= NSIG) {
+ return UV_EINVAL;
+ }
+
+ switch (signum) {
+ case SIGTERM:
+ case SIGKILL:
+ case SIGINT: {
+ /* Unconditionally terminate the process. On Windows, killed processes
+ * normally return 1. */
+ DWORD status;
+ int err;
+
+ if (TerminateProcess(process_handle, 1))
+ return 0;
+
+ /* If the process already exited before TerminateProcess was called,
+ * TerminateProcess will fail with ERROR_ACCESS_DENIED. */
+ err = GetLastError();
+ if (err == ERROR_ACCESS_DENIED &&
+ GetExitCodeProcess(process_handle, &status) &&
+ status != STILL_ACTIVE) {
+ return UV_ESRCH;
+ }
+
+ return uv_translate_sys_error(err);
+ }
+
+ case 0: {
+ /* Health check: is the process still alive? */
+ DWORD status;
+
+ if (!GetExitCodeProcess(process_handle, &status))
+ return uv_translate_sys_error(GetLastError());
+
+ if (status != STILL_ACTIVE)
+ return UV_ESRCH;
+
+ return 0;
+ }
+
+ default:
+ /* Unsupported signal. */
+ return UV_ENOSYS;
+ }
+}
+
+
+int uv_process_kill(uv_process_t* process, int signum) {
+ int err;
+
+ if (process->process_handle == INVALID_HANDLE_VALUE) {
+ return UV_EINVAL;
+ }
+
+ err = uv__kill(process->process_handle, signum);
+ if (err) {
+ return err; /* err is already translated. */
+ }
+
+ process->exit_signal = signum;
+
+ return 0;
+}
+
+
+int uv_kill(int pid, int signum) {
+ int err;
+ HANDLE process_handle;
+
+ if (pid == 0) {
+ process_handle = GetCurrentProcess();
+ } else {
+ process_handle = OpenProcess(PROCESS_TERMINATE | PROCESS_QUERY_INFORMATION,
+ FALSE,
+ pid);
+ }
+
+ if (process_handle == NULL) {
+ err = GetLastError();
+ if (err == ERROR_INVALID_PARAMETER) {
+ return UV_ESRCH;
+ } else {
+ return uv_translate_sys_error(err);
+ }
+ }
+
+ err = uv__kill(process_handle, signum);
+ CloseHandle(process_handle);
+
+ return err; /* err is already translated. */
+}
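+
+/* Caller-side sketch (pid is a placeholder for a real process id):
+ *
+ *   uv_kill(pid, SIGTERM);    terminates the process; its exit code will be 1
+ *   uv_kill(pid, 0);          liveness probe only: returns 0 while the process
+ *                             is still running, UV_ESRCH once it has exited
+ */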
diff --git a/Utilities/cmlibuv/src/win/req-inl.h b/Utilities/cmlibuv/src/win/req-inl.h
new file mode 100644
index 0000000..f2513b7
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/req-inl.h
@@ -0,0 +1,221 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_WIN_REQ_INL_H_
+#define UV_WIN_REQ_INL_H_
+
+#include <assert.h>
+
+#include "uv.h"
+#include "internal.h"
+
+
+#define SET_REQ_STATUS(req, status) \
+ (req)->u.io.overlapped.Internal = (ULONG_PTR) (status)
+
+#define SET_REQ_ERROR(req, error) \
+ SET_REQ_STATUS((req), NTSTATUS_FROM_WIN32((error)))
+
+/* Note: used open-coded in UV_REQ_INIT() because of a circular dependency
+ * between src/uv-common.h and src/win/internal.h.
+ */
+#define SET_REQ_SUCCESS(req) \
+ SET_REQ_STATUS((req), STATUS_SUCCESS)
+
+#define GET_REQ_STATUS(req) \
+ ((NTSTATUS) (req)->u.io.overlapped.Internal)
+
+#define REQ_SUCCESS(req) \
+ (NT_SUCCESS(GET_REQ_STATUS((req))))
+
+#define GET_REQ_ERROR(req) \
+ (pRtlNtStatusToDosError(GET_REQ_STATUS((req))))
+
+#define GET_REQ_SOCK_ERROR(req) \
+ (uv_ntstatus_to_winsock_error(GET_REQ_STATUS((req))))
+
+
+#define REGISTER_HANDLE_REQ(loop, handle, req) \
+ do { \
+ INCREASE_ACTIVE_COUNT((loop), (handle)); \
+ uv__req_register((loop), (req)); \
+ } while (0)
+
+#define UNREGISTER_HANDLE_REQ(loop, handle, req) \
+ do { \
+ DECREASE_ACTIVE_COUNT((loop), (handle)); \
+ uv__req_unregister((loop), (req)); \
+ } while (0)
+
+
+#define UV_SUCCEEDED_WITHOUT_IOCP(result) \
+ ((result) && (handle->flags & UV_HANDLE_SYNC_BYPASS_IOCP))
+
+#define UV_SUCCEEDED_WITH_IOCP(result) \
+ ((result) || (GetLastError() == ERROR_IO_PENDING))
+
+
+#define POST_COMPLETION_FOR_REQ(loop, req) \
+ if (!PostQueuedCompletionStatus((loop)->iocp, \
+ 0, \
+ 0, \
+ &((req)->u.io.overlapped))) { \
+ uv_fatal_error(GetLastError(), "PostQueuedCompletionStatus"); \
+ }
+
+
+INLINE static uv_req_t* uv_overlapped_to_req(OVERLAPPED* overlapped) {
+ return CONTAINING_RECORD(overlapped, uv_req_t, u.io.overlapped);
+}
+
+
+INLINE static void uv_insert_pending_req(uv_loop_t* loop, uv_req_t* req) {
+ req->next_req = NULL;
+ if (loop->pending_reqs_tail) {
+#ifdef _DEBUG
+ /* Ensure the request is not already in the queue, or the queue
+ * will get corrupted.
+ */
+ uv_req_t* current = loop->pending_reqs_tail;
+ do {
+ assert(req != current);
+ current = current->next_req;
+ } while(current != loop->pending_reqs_tail);
+#endif
+
+ req->next_req = loop->pending_reqs_tail->next_req;
+ loop->pending_reqs_tail->next_req = req;
+ loop->pending_reqs_tail = req;
+ } else {
+ req->next_req = req;
+ loop->pending_reqs_tail = req;
+ }
+}
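+
+/* Note: the pending request queue is a circular singly-linked list addressed
+ * by its tail; loop->pending_reqs_tail->next_req is the oldest entry. After
+ * inserting A and then B into an empty queue the layout is
+ *
+ *   pending_reqs_tail == B,  B->next_req == A,  A->next_req == B
+ *
+ * which uv_process_reqs() below walks starting from tail->next_req.
+ */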
+
+
+#define DELEGATE_STREAM_REQ(loop, req, method, handle_at) \
+ do { \
+ switch (((uv_handle_t*) (req)->handle_at)->type) { \
+ case UV_TCP: \
+ uv_process_tcp_##method##_req(loop, \
+ (uv_tcp_t*) ((req)->handle_at), \
+ req); \
+ break; \
+ \
+ case UV_NAMED_PIPE: \
+ uv_process_pipe_##method##_req(loop, \
+ (uv_pipe_t*) ((req)->handle_at), \
+ req); \
+ break; \
+ \
+ case UV_TTY: \
+ uv_process_tty_##method##_req(loop, \
+ (uv_tty_t*) ((req)->handle_at), \
+ req); \
+ break; \
+ \
+ default: \
+ assert(0); \
+ } \
+ } while (0)
+
+
+INLINE static int uv_process_reqs(uv_loop_t* loop) {
+ uv_req_t* req;
+ uv_req_t* first;
+ uv_req_t* next;
+
+ if (loop->pending_reqs_tail == NULL)
+ return 0;
+
+ first = loop->pending_reqs_tail->next_req;
+ next = first;
+ loop->pending_reqs_tail = NULL;
+
+ while (next != NULL) {
+ req = next;
+ next = req->next_req != first ? req->next_req : NULL;
+
+ switch (req->type) {
+ case UV_READ:
+ DELEGATE_STREAM_REQ(loop, req, read, data);
+ break;
+
+ case UV_WRITE:
+ DELEGATE_STREAM_REQ(loop, (uv_write_t*) req, write, handle);
+ break;
+
+ case UV_ACCEPT:
+ DELEGATE_STREAM_REQ(loop, req, accept, data);
+ break;
+
+ case UV_CONNECT:
+ DELEGATE_STREAM_REQ(loop, (uv_connect_t*) req, connect, handle);
+ break;
+
+ case UV_SHUTDOWN:
+ /* Tcp shutdown requests don't come here. */
+ assert(((uv_shutdown_t*) req)->handle->type == UV_NAMED_PIPE);
+ uv_process_pipe_shutdown_req(
+ loop,
+ (uv_pipe_t*) ((uv_shutdown_t*) req)->handle,
+ (uv_shutdown_t*) req);
+ break;
+
+ case UV_UDP_RECV:
+ uv_process_udp_recv_req(loop, (uv_udp_t*) req->data, req);
+ break;
+
+ case UV_UDP_SEND:
+ uv_process_udp_send_req(loop,
+ ((uv_udp_send_t*) req)->handle,
+ (uv_udp_send_t*) req);
+ break;
+
+ case UV_WAKEUP:
+ uv_process_async_wakeup_req(loop, (uv_async_t*) req->data, req);
+ break;
+
+ case UV_SIGNAL_REQ:
+ uv_process_signal_req(loop, (uv_signal_t*) req->data, req);
+ break;
+
+ case UV_POLL_REQ:
+ uv_process_poll_req(loop, (uv_poll_t*) req->data, req);
+ break;
+
+ case UV_PROCESS_EXIT:
+ uv_process_proc_exit(loop, (uv_process_t*) req->data);
+ break;
+
+ case UV_FS_EVENT_REQ:
+ uv_process_fs_event_req(loop, req, (uv_fs_event_t*) req->data);
+ break;
+
+ default:
+ assert(0);
+ }
+ }
+
+ return 1;
+}
+
+#endif /* UV_WIN_REQ_INL_H_ */
diff --git a/Utilities/cmlibuv/src/win/signal.c b/Utilities/cmlibuv/src/win/signal.c
new file mode 100644
index 0000000..3d9f92c
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/signal.c
@@ -0,0 +1,282 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <signal.h>
+
+#include "uv.h"
+#include "internal.h"
+#include "handle-inl.h"
+#include "req-inl.h"
+
+
+RB_HEAD(uv_signal_tree_s, uv_signal_s);
+
+static struct uv_signal_tree_s uv__signal_tree = RB_INITIALIZER(uv__signal_tree);
+static CRITICAL_SECTION uv__signal_lock;
+
+static BOOL WINAPI uv__signal_control_handler(DWORD type);
+
+int uv__signal_start(uv_signal_t* handle,
+ uv_signal_cb signal_cb,
+ int signum,
+ int oneshot);
+
+void uv_signals_init(void) {
+ InitializeCriticalSection(&uv__signal_lock);
+ if (!SetConsoleCtrlHandler(uv__signal_control_handler, TRUE))
+ abort();
+}
+
+
+void uv__signal_cleanup(void) {
+ /* TODO(bnoordhuis) Undo effects of uv_signal_init()? */
+}
+
+
+static int uv__signal_compare(uv_signal_t* w1, uv_signal_t* w2) {
+ /* Compare signums first so all watchers with the same signum end up
+ * adjacent. */
+ if (w1->signum < w2->signum) return -1;
+ if (w1->signum > w2->signum) return 1;
+
+ /* Sort by loop pointer, so we can easily look up the first item after
+ * { .signum = x, .loop = NULL }. */
+ if ((uintptr_t) w1->loop < (uintptr_t) w2->loop) return -1;
+ if ((uintptr_t) w1->loop > (uintptr_t) w2->loop) return 1;
+
+ if ((uintptr_t) w1 < (uintptr_t) w2) return -1;
+ if ((uintptr_t) w1 > (uintptr_t) w2) return 1;
+
+ return 0;
+}
+
+
+RB_GENERATE_STATIC(uv_signal_tree_s, uv_signal_s, tree_entry, uv__signal_compare)
+
+
+/*
+ * Dispatches signal {signum} to all active uv_signal_t watchers in all loops.
+ * Returns 1 if the signal was dispatched to any watcher, or 0 if there were
+ * no active signal watchers observing this signal.
+ */
+int uv__signal_dispatch(int signum) {
+ uv_signal_t lookup;
+ uv_signal_t* handle;
+ int dispatched;
+
+ dispatched = 0;
+
+ EnterCriticalSection(&uv__signal_lock);
+
+ lookup.signum = signum;
+ lookup.loop = NULL;
+
+ for (handle = RB_NFIND(uv_signal_tree_s, &uv__signal_tree, &lookup);
+ handle != NULL && handle->signum == signum;
+ handle = RB_NEXT(uv_signal_tree_s, &uv__signal_tree, handle)) {
+ unsigned long previous = InterlockedExchange(
+ (volatile LONG*) &handle->pending_signum, signum);
+
+ if (handle->flags & UV_SIGNAL_ONE_SHOT_DISPATCHED)
+ continue;
+
+ if (!previous) {
+ POST_COMPLETION_FOR_REQ(handle->loop, &handle->signal_req);
+ }
+
+ dispatched = 1;
+ if (handle->flags & UV_SIGNAL_ONE_SHOT)
+ handle->flags |= UV_SIGNAL_ONE_SHOT_DISPATCHED;
+ }
+
+ LeaveCriticalSection(&uv__signal_lock);
+
+ return dispatched;
+}
+
+
+static BOOL WINAPI uv__signal_control_handler(DWORD type) {
+ switch (type) {
+ case CTRL_C_EVENT:
+ return uv__signal_dispatch(SIGINT);
+
+ case CTRL_BREAK_EVENT:
+ return uv__signal_dispatch(SIGBREAK);
+
+ case CTRL_CLOSE_EVENT:
+ if (uv__signal_dispatch(SIGHUP)) {
+ /* Windows terminates the process shortly after the control handler
+ * returns, so block here; that gives the main loop a few seconds to
+ * pick up the signal and react before this process goes away. */
+ Sleep(INFINITE);
+ return TRUE;
+ }
+ return FALSE;
+
+ case CTRL_LOGOFF_EVENT:
+ case CTRL_SHUTDOWN_EVENT:
+ /* These signals are only sent to services. Services have their own
+ * notification mechanism, so there's no point in handling these. */
+
+ default:
+ /* We don't handle these. */
+ return FALSE;
+ }
+}
+
+
+int uv_signal_init(uv_loop_t* loop, uv_signal_t* handle) {
+ uv__handle_init(loop, (uv_handle_t*) handle, UV_SIGNAL);
+ handle->pending_signum = 0;
+ handle->signum = 0;
+ handle->signal_cb = NULL;
+
+ UV_REQ_INIT(&handle->signal_req, UV_SIGNAL_REQ);
+ handle->signal_req.data = handle;
+
+ return 0;
+}
+
+
+int uv_signal_stop(uv_signal_t* handle) {
+ uv_signal_t* removed_handle;
+
+ /* If the watcher wasn't started, this is a no-op. */
+ if (handle->signum == 0)
+ return 0;
+
+ EnterCriticalSection(&uv__signal_lock);
+
+ removed_handle = RB_REMOVE(uv_signal_tree_s, &uv__signal_tree, handle);
+ assert(removed_handle == handle);
+
+ LeaveCriticalSection(&uv__signal_lock);
+
+ handle->signum = 0;
+ uv__handle_stop(handle);
+
+ return 0;
+}
+
+
+int uv_signal_start(uv_signal_t* handle, uv_signal_cb signal_cb, int signum) {
+ return uv__signal_start(handle, signal_cb, signum, 0);
+}
+
+
+int uv_signal_start_oneshot(uv_signal_t* handle,
+ uv_signal_cb signal_cb,
+ int signum) {
+ return uv__signal_start(handle, signal_cb, signum, 1);
+}
+
+
+int uv__signal_start(uv_signal_t* handle,
+ uv_signal_cb signal_cb,
+ int signum,
+ int oneshot) {
+ /* Test for invalid signal values. */
+ if (signum <= 0 || signum >= NSIG)
+ return UV_EINVAL;
+
+ /* Short circuit: if the signal watcher is already watching {signum} don't go
+ * through the process of deregistering and registering the handler.
+ * Additionally, this avoids pending signals getting lost in the (small) time
+ * frame that handle->signum == 0. */
+ if (signum == handle->signum) {
+ handle->signal_cb = signal_cb;
+ return 0;
+ }
+
+ /* If the signal handler was already active, stop it first. */
+ if (handle->signum != 0) {
+ int r = uv_signal_stop(handle);
+ /* uv_signal_stop is infallible. */
+ assert(r == 0);
+ }
+
+ EnterCriticalSection(&uv__signal_lock);
+
+ handle->signum = signum;
+ if (oneshot)
+ handle->flags |= UV_SIGNAL_ONE_SHOT;
+
+ RB_INSERT(uv_signal_tree_s, &uv__signal_tree, handle);
+
+ LeaveCriticalSection(&uv__signal_lock);
+
+ handle->signal_cb = signal_cb;
+ uv__handle_start(handle);
+
+ return 0;
+}
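+
+/* A minimal caller-side sketch of the public signal API; on_signal and loop
+ * are placeholder names:
+ *
+ *   static void on_signal(uv_signal_t* handle, int signum) {
+ *     uv_signal_stop(handle);
+ *   }
+ *
+ *   uv_signal_t sig;
+ *   uv_signal_init(loop, &sig);
+ *   uv_signal_start(&sig, on_signal, SIGINT);     fires on every Ctrl+C
+ *
+ * uv_signal_start_oneshot() behaves the same but stops the watcher after the
+ * first delivery, as handled in uv_process_signal_req() below.
+ */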
+
+
+void uv_process_signal_req(uv_loop_t* loop, uv_signal_t* handle,
+ uv_req_t* req) {
+ long dispatched_signum;
+
+ assert(handle->type == UV_SIGNAL);
+ assert(req->type == UV_SIGNAL_REQ);
+
+ dispatched_signum = InterlockedExchange(
+ (volatile LONG*) &handle->pending_signum, 0);
+ assert(dispatched_signum != 0);
+
+ /* Check if the pending signal equals the signum that we are watching for.
+ * These can get out of sync when the handler is stopped and restarted while
+ * the signal_req is pending. */
+ if (dispatched_signum == handle->signum)
+ handle->signal_cb(handle, dispatched_signum);
+
+ if (handle->flags & UV_SIGNAL_ONE_SHOT)
+ uv_signal_stop(handle);
+
+ if (handle->flags & UV_HANDLE_CLOSING) {
+ /* When it is closing, it must be stopped at this point. */
+ assert(handle->signum == 0);
+ uv_want_endgame(loop, (uv_handle_t*) handle);
+ }
+}
+
+
+void uv_signal_close(uv_loop_t* loop, uv_signal_t* handle) {
+ uv_signal_stop(handle);
+ uv__handle_closing(handle);
+
+ if (handle->pending_signum == 0) {
+ uv_want_endgame(loop, (uv_handle_t*) handle);
+ }
+}
+
+
+void uv_signal_endgame(uv_loop_t* loop, uv_signal_t* handle) {
+ assert(handle->flags & UV_HANDLE_CLOSING);
+ assert(!(handle->flags & UV_HANDLE_CLOSED));
+
+ assert(handle->signum == 0);
+ assert(handle->pending_signum == 0);
+
+ handle->flags |= UV_HANDLE_CLOSED;
+
+ uv__handle_close(handle);
+}
diff --git a/Utilities/cmlibuv/src/win/snprintf.c b/Utilities/cmlibuv/src/win/snprintf.c
new file mode 100644
index 0000000..776c0e3
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/snprintf.c
@@ -0,0 +1,42 @@
+/* Copyright the libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#if defined(_MSC_VER) && _MSC_VER < 1900
+
+#include <stdio.h>
+#include <stdarg.h>
+
+/* Emulate snprintf() on MSVC < 2015; its _snprintf() doesn't zero-terminate
+ * the buffer on overflow.
+ */
+int snprintf(char* buf, size_t len, const char* fmt, ...) {
+ int n;
+ va_list ap;
+ va_start(ap, fmt);
+
+ n = _vscprintf(fmt, ap);
+ vsnprintf_s(buf, len, _TRUNCATE, fmt, ap);
+
+ va_end(ap);
+ return n;
+}
+
+#endif
diff --git a/Utilities/cmlibuv/src/win/stream-inl.h b/Utilities/cmlibuv/src/win/stream-inl.h
new file mode 100644
index 0000000..40f5ddd
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/stream-inl.h
@@ -0,0 +1,54 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_WIN_STREAM_INL_H_
+#define UV_WIN_STREAM_INL_H_
+
+#include <assert.h>
+
+#include "uv.h"
+#include "internal.h"
+#include "handle-inl.h"
+#include "req-inl.h"
+
+
+INLINE static void uv_stream_init(uv_loop_t* loop,
+ uv_stream_t* handle,
+ uv_handle_type type) {
+ uv__handle_init(loop, (uv_handle_t*) handle, type);
+ handle->write_queue_size = 0;
+ handle->activecnt = 0;
+ handle->stream.conn.shutdown_req = NULL;
+ handle->stream.conn.write_reqs_pending = 0;
+
+ UV_REQ_INIT(&handle->read_req, UV_READ);
+ handle->read_req.event_handle = NULL;
+ handle->read_req.wait_handle = INVALID_HANDLE_VALUE;
+ handle->read_req.data = handle;
+}
+
+
+INLINE static void uv_connection_init(uv_stream_t* handle) {
+ handle->flags |= UV_HANDLE_CONNECTION;
+}
+
+
+#endif /* UV_WIN_STREAM_INL_H_ */
diff --git a/Utilities/cmlibuv/src/win/stream.c b/Utilities/cmlibuv/src/win/stream.c
new file mode 100644
index 0000000..46a0709
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/stream.c
@@ -0,0 +1,243 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+
+#include "uv.h"
+#include "internal.h"
+#include "handle-inl.h"
+#include "req-inl.h"
+
+
+int uv_listen(uv_stream_t* stream, int backlog, uv_connection_cb cb) {
+ int err;
+
+ err = ERROR_INVALID_PARAMETER;
+ switch (stream->type) {
+ case UV_TCP:
+ err = uv_tcp_listen((uv_tcp_t*)stream, backlog, cb);
+ break;
+ case UV_NAMED_PIPE:
+ err = uv_pipe_listen((uv_pipe_t*)stream, backlog, cb);
+ break;
+ default:
+ assert(0);
+ }
+
+ return uv_translate_sys_error(err);
+}
+
+
+int uv_accept(uv_stream_t* server, uv_stream_t* client) {
+ int err;
+
+ err = ERROR_INVALID_PARAMETER;
+ switch (server->type) {
+ case UV_TCP:
+ err = uv_tcp_accept((uv_tcp_t*)server, (uv_tcp_t*)client);
+ break;
+ case UV_NAMED_PIPE:
+ err = uv_pipe_accept((uv_pipe_t*)server, client);
+ break;
+ default:
+ assert(0);
+ }
+
+ return uv_translate_sys_error(err);
+}
+
+
+int uv_read_start(uv_stream_t* handle, uv_alloc_cb alloc_cb,
+ uv_read_cb read_cb) {
+ int err;
+
+ if (handle->flags & UV_HANDLE_READING) {
+ return UV_EALREADY;
+ }
+
+ if (!(handle->flags & UV_HANDLE_READABLE)) {
+ return UV_ENOTCONN;
+ }
+
+ err = ERROR_INVALID_PARAMETER;
+ switch (handle->type) {
+ case UV_TCP:
+ err = uv_tcp_read_start((uv_tcp_t*)handle, alloc_cb, read_cb);
+ break;
+ case UV_NAMED_PIPE:
+ err = uv_pipe_read_start((uv_pipe_t*)handle, alloc_cb, read_cb);
+ break;
+ case UV_TTY:
+ err = uv_tty_read_start((uv_tty_t*) handle, alloc_cb, read_cb);
+ break;
+ default:
+ assert(0);
+ }
+
+ return uv_translate_sys_error(err);
+}
+
+
+int uv_read_stop(uv_stream_t* handle) {
+ int err;
+
+ if (!(handle->flags & UV_HANDLE_READING))
+ return 0;
+
+ err = 0;
+ if (handle->type == UV_TTY) {
+ err = uv_tty_read_stop((uv_tty_t*) handle);
+ } else if (handle->type == UV_NAMED_PIPE) {
+ uv__pipe_read_stop((uv_pipe_t*) handle);
+ } else {
+ handle->flags &= ~UV_HANDLE_READING;
+ DECREASE_ACTIVE_COUNT(handle->loop, handle);
+ }
+
+ return uv_translate_sys_error(err);
+}
+
+
+int uv_write(uv_write_t* req,
+ uv_stream_t* handle,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ uv_write_cb cb) {
+ uv_loop_t* loop = handle->loop;
+ int err;
+
+ if (!(handle->flags & UV_HANDLE_WRITABLE)) {
+ return UV_EPIPE;
+ }
+
+ err = ERROR_INVALID_PARAMETER;
+ switch (handle->type) {
+ case UV_TCP:
+ err = uv_tcp_write(loop, req, (uv_tcp_t*) handle, bufs, nbufs, cb);
+ break;
+ case UV_NAMED_PIPE:
+ err = uv__pipe_write(
+ loop, req, (uv_pipe_t*) handle, bufs, nbufs, NULL, cb);
+ break;
+ case UV_TTY:
+ err = uv_tty_write(loop, req, (uv_tty_t*) handle, bufs, nbufs, cb);
+ break;
+ default:
+ assert(0);
+ }
+
+ return uv_translate_sys_error(err);
+}
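+
+/* A minimal caller-side sketch; stream is a placeholder for an already
+ * connected uv_tcp_t/uv_pipe_t/uv_tty_t cast to uv_stream_t*:
+ *
+ *   static void on_write(uv_write_t* req, int status) {
+ *     if (status < 0)
+ *       fprintf(stderr, "write error: %s\n", uv_strerror(status));
+ *   }
+ *
+ *   uv_write_t req;
+ *   uv_buf_t buf = uv_buf_init("hello\n", 6);
+ *   uv_write(&req, stream, &buf, 1, on_write);
+ */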
+
+
+int uv_write2(uv_write_t* req,
+ uv_stream_t* handle,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ uv_stream_t* send_handle,
+ uv_write_cb cb) {
+ uv_loop_t* loop = handle->loop;
+ int err;
+
+ if (send_handle == NULL) {
+ return uv_write(req, handle, bufs, nbufs, cb);
+ }
+
+ if (handle->type != UV_NAMED_PIPE || !((uv_pipe_t*) handle)->ipc) {
+ return UV_EINVAL;
+ } else if (!(handle->flags & UV_HANDLE_WRITABLE)) {
+ return UV_EPIPE;
+ }
+
+ err = uv__pipe_write(
+ loop, req, (uv_pipe_t*) handle, bufs, nbufs, send_handle, cb);
+ return uv_translate_sys_error(err);
+}
+
+
+int uv_try_write(uv_stream_t* stream,
+ const uv_buf_t bufs[],
+ unsigned int nbufs) {
+ if (stream->flags & UV_HANDLE_CLOSING)
+ return UV_EBADF;
+ if (!(stream->flags & UV_HANDLE_WRITABLE))
+ return UV_EPIPE;
+
+ switch (stream->type) {
+ case UV_TCP:
+ return uv__tcp_try_write((uv_tcp_t*) stream, bufs, nbufs);
+ case UV_TTY:
+ return uv__tty_try_write((uv_tty_t*) stream, bufs, nbufs);
+ case UV_NAMED_PIPE:
+ return UV_EAGAIN;
+ default:
+ assert(0);
+ return UV_ENOSYS;
+ }
+}
+
+
+int uv_shutdown(uv_shutdown_t* req, uv_stream_t* handle, uv_shutdown_cb cb) {
+ uv_loop_t* loop = handle->loop;
+
+ if (!(handle->flags & UV_HANDLE_WRITABLE) ||
+ handle->flags & UV_HANDLE_SHUTTING ||
+ uv__is_closing(handle)) {
+ return UV_ENOTCONN;
+ }
+
+ UV_REQ_INIT(req, UV_SHUTDOWN);
+ req->handle = handle;
+ req->cb = cb;
+
+ handle->flags &= ~UV_HANDLE_WRITABLE;
+ handle->flags |= UV_HANDLE_SHUTTING;
+ handle->stream.conn.shutdown_req = req;
+ handle->reqs_pending++;
+ REGISTER_HANDLE_REQ(loop, handle, req);
+
+ uv_want_endgame(loop, (uv_handle_t*)handle);
+
+ return 0;
+}
+
+
+int uv_is_readable(const uv_stream_t* handle) {
+ return !!(handle->flags & UV_HANDLE_READABLE);
+}
+
+
+int uv_is_writable(const uv_stream_t* handle) {
+ return !!(handle->flags & UV_HANDLE_WRITABLE);
+}
+
+
+int uv_stream_set_blocking(uv_stream_t* handle, int blocking) {
+ if (handle->type != UV_NAMED_PIPE)
+ return UV_EINVAL;
+
+ if (blocking != 0)
+ handle->flags |= UV_HANDLE_BLOCKING_WRITES;
+ else
+ handle->flags &= ~UV_HANDLE_BLOCKING_WRITES;
+
+ return 0;
+}
diff --git a/Utilities/cmlibuv/src/win/tcp.c b/Utilities/cmlibuv/src/win/tcp.c
new file mode 100644
index 0000000..0dcaa97
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/tcp.c
@@ -0,0 +1,1573 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+
+#include "uv.h"
+#include "internal.h"
+#include "handle-inl.h"
+#include "stream-inl.h"
+#include "req-inl.h"
+
+
+/*
+ * Threshold of active tcp streams for which to preallocate tcp read buffers.
+ * (Due to the node slab allocator performing poorly under this pattern,
+ * the optimization is temporarily disabled (threshold=0). This will be
+ * revisited once the node allocator is improved.)
+ */
+const unsigned int uv_active_tcp_streams_threshold = 0;
+
+/*
+ * Number of simultaneous pending AcceptEx calls.
+ */
+const unsigned int uv_simultaneous_server_accepts = 32;
+
+/* A zero-size buffer for use by uv_tcp_read */
+static char uv_zero_[] = "";
+
+static int uv__tcp_nodelay(uv_tcp_t* handle, SOCKET socket, int enable) {
+ if (setsockopt(socket,
+ IPPROTO_TCP,
+ TCP_NODELAY,
+ (const char*)&enable,
+ sizeof enable) == -1) {
+ return WSAGetLastError();
+ }
+ return 0;
+}
+
+
+static int uv__tcp_keepalive(uv_tcp_t* handle, SOCKET socket, int enable, unsigned int delay) {
+ if (setsockopt(socket,
+ SOL_SOCKET,
+ SO_KEEPALIVE,
+ (const char*)&enable,
+ sizeof enable) == -1) {
+ return WSAGetLastError();
+ }
+
+ if (enable && setsockopt(socket,
+ IPPROTO_TCP,
+ TCP_KEEPALIVE,
+ (const char*)&delay,
+ sizeof delay) == -1) {
+ return WSAGetLastError();
+ }
+
+ return 0;
+}
+
+
+static int uv_tcp_set_socket(uv_loop_t* loop,
+ uv_tcp_t* handle,
+ SOCKET socket,
+ int family,
+ int imported) {
+ DWORD yes = 1;
+ int non_ifs_lsp;
+ int err;
+
+ if (handle->socket != INVALID_SOCKET)
+ return UV_EBUSY;
+
+ /* Set the socket to nonblocking mode */
+ if (ioctlsocket(socket, FIONBIO, &yes) == SOCKET_ERROR) {
+ return WSAGetLastError();
+ }
+
+ /* Make the socket non-inheritable */
+ if (!SetHandleInformation((HANDLE) socket, HANDLE_FLAG_INHERIT, 0))
+ return GetLastError();
+
+ /* Associate it with the I/O completion port. Use uv_handle_t pointer as
+ * completion key. */
+ if (CreateIoCompletionPort((HANDLE)socket,
+ loop->iocp,
+ (ULONG_PTR)socket,
+ 0) == NULL) {
+ if (imported) {
+ handle->flags |= UV_HANDLE_EMULATE_IOCP;
+ } else {
+ return GetLastError();
+ }
+ }
+
+ if (family == AF_INET6) {
+ non_ifs_lsp = uv_tcp_non_ifs_lsp_ipv6;
+ } else {
+ non_ifs_lsp = uv_tcp_non_ifs_lsp_ipv4;
+ }
+
+ if (!(handle->flags & UV_HANDLE_EMULATE_IOCP) && !non_ifs_lsp) {
+ UCHAR sfcnm_flags =
+ FILE_SKIP_SET_EVENT_ON_HANDLE | FILE_SKIP_COMPLETION_PORT_ON_SUCCESS;
+ if (!SetFileCompletionNotificationModes((HANDLE) socket, sfcnm_flags))
+ return GetLastError();
+ handle->flags |= UV_HANDLE_SYNC_BYPASS_IOCP;
+ }
+
+ if (handle->flags & UV_HANDLE_TCP_NODELAY) {
+ err = uv__tcp_nodelay(handle, socket, 1);
+ if (err)
+ return err;
+ }
+
+ /* TODO: Use stored delay. */
+ if (handle->flags & UV_HANDLE_TCP_KEEPALIVE) {
+ err = uv__tcp_keepalive(handle, socket, 1, 60);
+ if (err)
+ return err;
+ }
+
+ handle->socket = socket;
+
+ if (family == AF_INET6) {
+ handle->flags |= UV_HANDLE_IPV6;
+ } else {
+ assert(!(handle->flags & UV_HANDLE_IPV6));
+ }
+
+ return 0;
+}
+
+
+int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* handle, unsigned int flags) {
+ int domain;
+
+ /* Use the lower 8 bits for the domain */
+ domain = flags & 0xFF;
+ if (domain != AF_INET && domain != AF_INET6 && domain != AF_UNSPEC)
+ return UV_EINVAL;
+
+ if (flags & ~0xFF)
+ return UV_EINVAL;
+
+ uv_stream_init(loop, (uv_stream_t*) handle, UV_TCP);
+ handle->tcp.serv.accept_reqs = NULL;
+ handle->tcp.serv.pending_accepts = NULL;
+ handle->socket = INVALID_SOCKET;
+ handle->reqs_pending = 0;
+ handle->tcp.serv.func_acceptex = NULL;
+ handle->tcp.conn.func_connectex = NULL;
+ handle->tcp.serv.processed_accepts = 0;
+ handle->delayed_error = 0;
+
+ /* If anything fails beyond this point we need to remove the handle from
+ * the handle queue, since it was added by uv__handle_init in uv_stream_init.
+ */
+
+ if (domain != AF_UNSPEC) {
+ SOCKET sock;
+ DWORD err;
+
+ sock = socket(domain, SOCK_STREAM, 0);
+ if (sock == INVALID_SOCKET) {
+ err = WSAGetLastError();
+ QUEUE_REMOVE(&handle->handle_queue);
+ return uv_translate_sys_error(err);
+ }
+
+ err = uv_tcp_set_socket(handle->loop, handle, sock, domain, 0);
+ if (err) {
+ closesocket(sock);
+ QUEUE_REMOVE(&handle->handle_queue);
+ return uv_translate_sys_error(err);
+ }
+
+ }
+
+ return 0;
+}
+
+
+int uv_tcp_init(uv_loop_t* loop, uv_tcp_t* handle) {
+ return uv_tcp_init_ex(loop, handle, AF_UNSPEC);
+}
+
+
+void uv_tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle) {
+ int err;
+ unsigned int i;
+ uv_tcp_accept_t* req;
+
+ if (handle->flags & UV_HANDLE_CONNECTION &&
+ handle->stream.conn.shutdown_req != NULL &&
+ handle->stream.conn.write_reqs_pending == 0) {
+
+ UNREGISTER_HANDLE_REQ(loop, handle, handle->stream.conn.shutdown_req);
+
+ err = 0;
+ if (handle->flags & UV_HANDLE_CLOSING) {
+ err = ERROR_OPERATION_ABORTED;
+ } else if (shutdown(handle->socket, SD_SEND) == SOCKET_ERROR) {
+ err = WSAGetLastError();
+ }
+
+ if (handle->stream.conn.shutdown_req->cb) {
+ handle->stream.conn.shutdown_req->cb(handle->stream.conn.shutdown_req,
+ uv_translate_sys_error(err));
+ }
+
+ handle->stream.conn.shutdown_req = NULL;
+ DECREASE_PENDING_REQ_COUNT(handle);
+ return;
+ }
+
+ if (handle->flags & UV_HANDLE_CLOSING &&
+ handle->reqs_pending == 0) {
+ assert(!(handle->flags & UV_HANDLE_CLOSED));
+
+ if (!(handle->flags & UV_HANDLE_TCP_SOCKET_CLOSED)) {
+ closesocket(handle->socket);
+ handle->socket = INVALID_SOCKET;
+ handle->flags |= UV_HANDLE_TCP_SOCKET_CLOSED;
+ }
+
+ if (!(handle->flags & UV_HANDLE_CONNECTION) && handle->tcp.serv.accept_reqs) {
+ if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+ for (i = 0; i < uv_simultaneous_server_accepts; i++) {
+ req = &handle->tcp.serv.accept_reqs[i];
+ if (req->wait_handle != INVALID_HANDLE_VALUE) {
+ UnregisterWait(req->wait_handle);
+ req->wait_handle = INVALID_HANDLE_VALUE;
+ }
+ if (req->event_handle != NULL) {
+ CloseHandle(req->event_handle);
+ req->event_handle = NULL;
+ }
+ }
+ }
+
+ uv__free(handle->tcp.serv.accept_reqs);
+ handle->tcp.serv.accept_reqs = NULL;
+ }
+
+ if (handle->flags & UV_HANDLE_CONNECTION &&
+ handle->flags & UV_HANDLE_EMULATE_IOCP) {
+ if (handle->read_req.wait_handle != INVALID_HANDLE_VALUE) {
+ UnregisterWait(handle->read_req.wait_handle);
+ handle->read_req.wait_handle = INVALID_HANDLE_VALUE;
+ }
+ if (handle->read_req.event_handle != NULL) {
+ CloseHandle(handle->read_req.event_handle);
+ handle->read_req.event_handle = NULL;
+ }
+ }
+
+ uv__handle_close(handle);
+ loop->active_tcp_streams--;
+ }
+}
+
+
+/* Unlike on Unix, here we don't set SO_REUSEADDR, because it doesn't just
+ * allow binding to addresses that are in use by sockets in TIME_WAIT; it
+ * effectively allows 'stealing' a port which is in use by another application.
+ *
+ * SO_EXCLUSIVEADDRUSE is also not good here because it does check all sockets,
+ * regardless of state, so we'd get an error even if the port is in use by a
+ * socket in TIME_WAIT state.
+ *
+ * See issue #1360.
+ *
+ */
+static int uv_tcp_try_bind(uv_tcp_t* handle,
+ const struct sockaddr* addr,
+ unsigned int addrlen,
+ unsigned int flags) {
+ DWORD err;
+ int r;
+
+ if (handle->socket == INVALID_SOCKET) {
+ SOCKET sock;
+
+ /* Cannot set IPv6-only mode on non-IPv6 socket. */
+ if ((flags & UV_TCP_IPV6ONLY) && addr->sa_family != AF_INET6)
+ return ERROR_INVALID_PARAMETER;
+
+ sock = socket(addr->sa_family, SOCK_STREAM, 0);
+ if (sock == INVALID_SOCKET) {
+ return WSAGetLastError();
+ }
+
+ err = uv_tcp_set_socket(handle->loop, handle, sock, addr->sa_family, 0);
+ if (err) {
+ closesocket(sock);
+ return err;
+ }
+ }
+
+#ifdef IPV6_V6ONLY
+ if (addr->sa_family == AF_INET6) {
+ int on;
+
+ on = (flags & UV_TCP_IPV6ONLY) != 0;
+
+ /* TODO: how to handle errors? This may fail if there is no ipv4 stack
+ * available, or when run on XP/2003 which have no support for dualstack
+ * sockets. For now we're silently ignoring the error. */
+ setsockopt(handle->socket,
+ IPPROTO_IPV6,
+ IPV6_V6ONLY,
+ (const char*)&on,
+ sizeof on);
+ }
+#endif
+
+ r = bind(handle->socket, addr, addrlen);
+
+ if (r == SOCKET_ERROR) {
+ err = WSAGetLastError();
+ if (err == WSAEADDRINUSE) {
+ /* Some errors are not to be reported until connect() or listen() */
+ handle->delayed_error = err;
+ } else {
+ return err;
+ }
+ }
+
+ handle->flags |= UV_HANDLE_BOUND;
+
+ return 0;
+}
+
+
+static void CALLBACK post_completion(void* context, BOOLEAN timed_out) {
+ uv_req_t* req;
+ uv_tcp_t* handle;
+
+ req = (uv_req_t*) context;
+ assert(req != NULL);
+ handle = (uv_tcp_t*)req->data;
+ assert(handle != NULL);
+ assert(!timed_out);
+
+ if (!PostQueuedCompletionStatus(handle->loop->iocp,
+ req->u.io.overlapped.InternalHigh,
+ 0,
+ &req->u.io.overlapped)) {
+ uv_fatal_error(GetLastError(), "PostQueuedCompletionStatus");
+ }
+}
+
+
+static void CALLBACK post_write_completion(void* context, BOOLEAN timed_out) {
+ uv_write_t* req;
+ uv_tcp_t* handle;
+
+ req = (uv_write_t*) context;
+ assert(req != NULL);
+ handle = (uv_tcp_t*)req->handle;
+ assert(handle != NULL);
+ assert(!timed_out);
+
+ if (!PostQueuedCompletionStatus(handle->loop->iocp,
+ req->u.io.overlapped.InternalHigh,
+ 0,
+ &req->u.io.overlapped)) {
+ uv_fatal_error(GetLastError(), "PostQueuedCompletionStatus");
+ }
+}
+
+
+static void uv_tcp_queue_accept(uv_tcp_t* handle, uv_tcp_accept_t* req) {
+ uv_loop_t* loop = handle->loop;
+ BOOL success;
+ DWORD bytes;
+ SOCKET accept_socket;
+ short family;
+
+ assert(handle->flags & UV_HANDLE_LISTENING);
+ assert(req->accept_socket == INVALID_SOCKET);
+
+ /* choose family and extension function */
+ if (handle->flags & UV_HANDLE_IPV6) {
+ family = AF_INET6;
+ } else {
+ family = AF_INET;
+ }
+
+ /* Open a socket for the accepted connection. */
+ accept_socket = socket(family, SOCK_STREAM, 0);
+ if (accept_socket == INVALID_SOCKET) {
+ SET_REQ_ERROR(req, WSAGetLastError());
+ uv_insert_pending_req(loop, (uv_req_t*)req);
+ handle->reqs_pending++;
+ return;
+ }
+
+ /* Make the socket non-inheritable */
+ if (!SetHandleInformation((HANDLE) accept_socket, HANDLE_FLAG_INHERIT, 0)) {
+ SET_REQ_ERROR(req, GetLastError());
+ uv_insert_pending_req(loop, (uv_req_t*)req);
+ handle->reqs_pending++;
+ closesocket(accept_socket);
+ return;
+ }
+
+ /* Prepare the overlapped structure. */
+ memset(&(req->u.io.overlapped), 0, sizeof(req->u.io.overlapped));
+ if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+ assert(req->event_handle != NULL);
+ req->u.io.overlapped.hEvent = (HANDLE) ((ULONG_PTR) req->event_handle | 1);
+ }
+
+ success = handle->tcp.serv.func_acceptex(handle->socket,
+ accept_socket,
+ (void*)req->accept_buffer,
+ 0,
+ sizeof(struct sockaddr_storage),
+ sizeof(struct sockaddr_storage),
+ &bytes,
+ &req->u.io.overlapped);
+
+ if (UV_SUCCEEDED_WITHOUT_IOCP(success)) {
+ /* Process the req without IOCP. */
+ req->accept_socket = accept_socket;
+ handle->reqs_pending++;
+ uv_insert_pending_req(loop, (uv_req_t*)req);
+ } else if (UV_SUCCEEDED_WITH_IOCP(success)) {
+ /* The req will be processed with IOCP. */
+ req->accept_socket = accept_socket;
+ handle->reqs_pending++;
+ if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
+ req->wait_handle == INVALID_HANDLE_VALUE &&
+ !RegisterWaitForSingleObject(&req->wait_handle,
+ req->event_handle, post_completion, (void*) req,
+ INFINITE, WT_EXECUTEINWAITTHREAD)) {
+ SET_REQ_ERROR(req, GetLastError());
+ uv_insert_pending_req(loop, (uv_req_t*)req);
+ }
+ } else {
+    /* Make this req pending, reporting an error. */
+ SET_REQ_ERROR(req, WSAGetLastError());
+ uv_insert_pending_req(loop, (uv_req_t*)req);
+ handle->reqs_pending++;
+ /* Destroy the preallocated client socket. */
+ closesocket(accept_socket);
+ /* Destroy the event handle */
+ if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+ CloseHandle(req->event_handle);
+ req->event_handle = NULL;
+ }
+ }
+}
+
+
+static void uv_tcp_queue_read(uv_loop_t* loop, uv_tcp_t* handle) {
+ uv_read_t* req;
+ uv_buf_t buf;
+ int result;
+ DWORD bytes, flags;
+
+ assert(handle->flags & UV_HANDLE_READING);
+ assert(!(handle->flags & UV_HANDLE_READ_PENDING));
+
+ req = &handle->read_req;
+ memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));
+
+ /*
+ * Preallocate a read buffer if the number of active streams is below
+ * the threshold.
+ */
+ if (loop->active_tcp_streams < uv_active_tcp_streams_threshold) {
+ handle->flags &= ~UV_HANDLE_ZERO_READ;
+ handle->tcp.conn.read_buffer = uv_buf_init(NULL, 0);
+ handle->alloc_cb((uv_handle_t*) handle, 65536, &handle->tcp.conn.read_buffer);
+ if (handle->tcp.conn.read_buffer.base == NULL ||
+ handle->tcp.conn.read_buffer.len == 0) {
+ handle->read_cb((uv_stream_t*) handle, UV_ENOBUFS, &handle->tcp.conn.read_buffer);
+ return;
+ }
+ assert(handle->tcp.conn.read_buffer.base != NULL);
+ buf = handle->tcp.conn.read_buffer;
+ } else {
+ handle->flags |= UV_HANDLE_ZERO_READ;
+ buf.base = (char*) &uv_zero_;
+ buf.len = 0;
+ }
+
+ /* Prepare the overlapped structure. */
+ memset(&(req->u.io.overlapped), 0, sizeof(req->u.io.overlapped));
+ if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+ assert(req->event_handle != NULL);
+ req->u.io.overlapped.hEvent = (HANDLE) ((ULONG_PTR) req->event_handle | 1);
+ }
+
+ flags = 0;
+ result = WSARecv(handle->socket,
+ (WSABUF*)&buf,
+ 1,
+ &bytes,
+ &flags,
+ &req->u.io.overlapped,
+ NULL);
+
+ handle->flags |= UV_HANDLE_READ_PENDING;
+ handle->reqs_pending++;
+
+ if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
+ /* Process the req without IOCP. */
+ req->u.io.overlapped.InternalHigh = bytes;
+ uv_insert_pending_req(loop, (uv_req_t*)req);
+ } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
+ /* The req will be processed with IOCP. */
+ if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
+ req->wait_handle == INVALID_HANDLE_VALUE &&
+ !RegisterWaitForSingleObject(&req->wait_handle,
+ req->event_handle, post_completion, (void*) req,
+ INFINITE, WT_EXECUTEINWAITTHREAD)) {
+ SET_REQ_ERROR(req, GetLastError());
+ uv_insert_pending_req(loop, (uv_req_t*)req);
+ }
+ } else {
+    /* Make this req pending, reporting an error. */
+ SET_REQ_ERROR(req, WSAGetLastError());
+ uv_insert_pending_req(loop, (uv_req_t*)req);
+ }
+}
+
+
+int uv_tcp_close_reset(uv_tcp_t* handle, uv_close_cb close_cb) {
+ struct linger l = { 1, 0 };
+
+ /* Disallow setting SO_LINGER to zero due to some platform inconsistencies */
+ if (handle->flags & UV_HANDLE_SHUTTING)
+ return UV_EINVAL;
+
+ if (0 != setsockopt(handle->socket, SOL_SOCKET, SO_LINGER, (const char*)&l, sizeof(l)))
+ return uv_translate_sys_error(WSAGetLastError());
+
+ uv_close((uv_handle_t*) handle, close_cb);
+ return 0;
+}
+
+
+int uv_tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb) {
+ unsigned int i, simultaneous_accepts;
+ uv_tcp_accept_t* req;
+ int err;
+
+ assert(backlog > 0);
+
+ if (handle->flags & UV_HANDLE_LISTENING) {
+ handle->stream.serv.connection_cb = cb;
+ }
+
+ if (handle->flags & UV_HANDLE_READING) {
+ return WSAEISCONN;
+ }
+
+ if (handle->delayed_error) {
+ return handle->delayed_error;
+ }
+
+ if (!(handle->flags & UV_HANDLE_BOUND)) {
+ err = uv_tcp_try_bind(handle,
+ (const struct sockaddr*) &uv_addr_ip4_any_,
+ sizeof(uv_addr_ip4_any_),
+ 0);
+ if (err)
+ return err;
+ if (handle->delayed_error)
+ return handle->delayed_error;
+ }
+
+ if (!handle->tcp.serv.func_acceptex) {
+ if (!uv_get_acceptex_function(handle->socket, &handle->tcp.serv.func_acceptex)) {
+ return WSAEAFNOSUPPORT;
+ }
+ }
+
+ if (!(handle->flags & UV_HANDLE_SHARED_TCP_SOCKET) &&
+ listen(handle->socket, backlog) == SOCKET_ERROR) {
+ return WSAGetLastError();
+ }
+
+ handle->flags |= UV_HANDLE_LISTENING;
+ handle->stream.serv.connection_cb = cb;
+ INCREASE_ACTIVE_COUNT(loop, handle);
+
+ simultaneous_accepts = handle->flags & UV_HANDLE_TCP_SINGLE_ACCEPT ? 1
+ : uv_simultaneous_server_accepts;
+
+ if (handle->tcp.serv.accept_reqs == NULL) {
+ handle->tcp.serv.accept_reqs =
+ uv__malloc(uv_simultaneous_server_accepts * sizeof(uv_tcp_accept_t));
+ if (!handle->tcp.serv.accept_reqs) {
+ uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+ }
+
+ for (i = 0; i < simultaneous_accepts; i++) {
+ req = &handle->tcp.serv.accept_reqs[i];
+ UV_REQ_INIT(req, UV_ACCEPT);
+ req->accept_socket = INVALID_SOCKET;
+ req->data = handle;
+
+ req->wait_handle = INVALID_HANDLE_VALUE;
+ if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+ req->event_handle = CreateEvent(NULL, 0, 0, NULL);
+ if (req->event_handle == NULL) {
+ uv_fatal_error(GetLastError(), "CreateEvent");
+ }
+ } else {
+ req->event_handle = NULL;
+ }
+
+ uv_tcp_queue_accept(handle, req);
+ }
+
+ /* Initialize other unused requests too, because uv_tcp_endgame doesn't
+ * know how many requests were initialized, so it will try to clean up
+ * {uv_simultaneous_server_accepts} requests. */
+ for (i = simultaneous_accepts; i < uv_simultaneous_server_accepts; i++) {
+ req = &handle->tcp.serv.accept_reqs[i];
+ UV_REQ_INIT(req, UV_ACCEPT);
+ req->accept_socket = INVALID_SOCKET;
+ req->data = handle;
+ req->wait_handle = INVALID_HANDLE_VALUE;
+ req->event_handle = NULL;
+ }
+ }
+
+ return 0;
+}
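+
+/* Illustrative usage sketch (not part of this file): a minimal caller-side
+ * view of the accept path above, using the public libuv API. The callback
+ * name on_new_connection and the port number are hypothetical placeholders.
+ *
+ *   uv_loop_t* loop = uv_default_loop();
+ *   uv_tcp_t server;
+ *   struct sockaddr_in addr;
+ *
+ *   uv_tcp_init(loop, &server);
+ *   uv_ip4_addr("0.0.0.0", 7000, &addr);
+ *   uv_tcp_bind(&server, (const struct sockaddr*) &addr, 0);
+ *   uv_listen((uv_stream_t*) &server, 128, on_new_connection);
+ *   uv_run(loop, UV_RUN_DEFAULT);
+ *
+ * Each AcceptEx request queued by uv_tcp_listen corresponds to one pending
+ * connection that on_new_connection can pick up with uv_accept().
+ */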
+
+
+int uv_tcp_accept(uv_tcp_t* server, uv_tcp_t* client) {
+ uv_loop_t* loop = server->loop;
+ int err = 0;
+ int family;
+
+ uv_tcp_accept_t* req = server->tcp.serv.pending_accepts;
+
+ if (!req) {
+ /* No valid connections found, so we error out. */
+ return WSAEWOULDBLOCK;
+ }
+
+ if (req->accept_socket == INVALID_SOCKET) {
+ return WSAENOTCONN;
+ }
+
+ if (server->flags & UV_HANDLE_IPV6) {
+ family = AF_INET6;
+ } else {
+ family = AF_INET;
+ }
+
+ err = uv_tcp_set_socket(client->loop,
+ client,
+ req->accept_socket,
+ family,
+ 0);
+ if (err) {
+ closesocket(req->accept_socket);
+ } else {
+ uv_connection_init((uv_stream_t*) client);
+ /* AcceptEx() implicitly binds the accepted socket. */
+ client->flags |= UV_HANDLE_BOUND | UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
+ }
+
+ /* Prepare the req to pick up a new connection */
+ server->tcp.serv.pending_accepts = req->next_pending;
+ req->next_pending = NULL;
+ req->accept_socket = INVALID_SOCKET;
+
+ if (!(server->flags & UV_HANDLE_CLOSING)) {
+    /* Check if we're in the middle of changing the number of pending accepts. */
+ if (!(server->flags & UV_HANDLE_TCP_ACCEPT_STATE_CHANGING)) {
+ uv_tcp_queue_accept(server, req);
+ } else {
+ /* We better be switching to a single pending accept. */
+ assert(server->flags & UV_HANDLE_TCP_SINGLE_ACCEPT);
+
+ server->tcp.serv.processed_accepts++;
+
+ if (server->tcp.serv.processed_accepts >= uv_simultaneous_server_accepts) {
+ server->tcp.serv.processed_accepts = 0;
+ /*
+ * All previously queued accept requests are now processed.
+ * We now switch to queueing just a single accept.
+ */
+ uv_tcp_queue_accept(server, &server->tcp.serv.accept_reqs[0]);
+ server->flags &= ~UV_HANDLE_TCP_ACCEPT_STATE_CHANGING;
+ server->flags |= UV_HANDLE_TCP_SINGLE_ACCEPT;
+ }
+ }
+ }
+
+ loop->active_tcp_streams++;
+
+ return err;
+}
+
+
+int uv_tcp_read_start(uv_tcp_t* handle, uv_alloc_cb alloc_cb,
+ uv_read_cb read_cb) {
+ uv_loop_t* loop = handle->loop;
+
+ handle->flags |= UV_HANDLE_READING;
+ handle->read_cb = read_cb;
+ handle->alloc_cb = alloc_cb;
+ INCREASE_ACTIVE_COUNT(loop, handle);
+
+ /* If reading was stopped and then started again, there could still be a read
+ * request pending. */
+ if (!(handle->flags & UV_HANDLE_READ_PENDING)) {
+ if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
+ handle->read_req.event_handle == NULL) {
+ handle->read_req.event_handle = CreateEvent(NULL, 0, 0, NULL);
+ if (handle->read_req.event_handle == NULL) {
+ uv_fatal_error(GetLastError(), "CreateEvent");
+ }
+ }
+ uv_tcp_queue_read(loop, handle);
+ }
+
+ return 0;
+}
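+
+/* Illustrative usage sketch (not part of this file): the alloc/read callback
+ * pair expected by uv_read_start(), which for TCP handles ends up in
+ * uv_tcp_read_start() above. The names on_alloc and on_read are hypothetical
+ * placeholders; a negative nread value is a libuv error code such as UV_EOF.
+ *
+ *   static void on_alloc(uv_handle_t* handle, size_t suggested, uv_buf_t* buf) {
+ *     buf->base = (char*) malloc(suggested);
+ *     buf->len = buf->base == NULL ? 0 : (unsigned int) suggested;
+ *   }
+ *
+ *   static void on_read(uv_stream_t* stream, ssize_t nread, const uv_buf_t* buf) {
+ *     if (nread < 0)
+ *       uv_close((uv_handle_t*) stream, NULL);
+ *     free(buf->base);
+ *   }
+ *
+ *   uv_read_start((uv_stream_t*) client, on_alloc, on_read);
+ */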
+
+static int uv__is_loopback(const struct sockaddr_storage* storage) {
+ const struct sockaddr_in* in4;
+ const struct sockaddr_in6* in6;
+ int i;
+
+ if (storage->ss_family == AF_INET) {
+ in4 = (const struct sockaddr_in*) storage;
+ return in4->sin_addr.S_un.S_un_b.s_b1 == 127;
+ }
+ if (storage->ss_family == AF_INET6) {
+ in6 = (const struct sockaddr_in6*) storage;
+ for (i = 0; i < 7; ++i) {
+ if (in6->sin6_addr.u.Word[i] != 0)
+ return 0;
+ }
+ return in6->sin6_addr.u.Word[7] == htons(1);
+ }
+ return 0;
+}
+
+/* Check if Windows version is 10.0.16299 or later. */
+static int uv__is_fast_loopback_fail_supported(void) {
+ OSVERSIONINFOW os_info;
+ if (!pRtlGetVersion)
+ return 0;
+ pRtlGetVersion(&os_info);
+ if (os_info.dwMajorVersion < 10)
+ return 0;
+ if (os_info.dwMajorVersion > 10)
+ return 1;
+ if (os_info.dwMinorVersion > 0)
+ return 1;
+ return os_info.dwBuildNumber >= 16299;
+}
+
+static int uv_tcp_try_connect(uv_connect_t* req,
+ uv_tcp_t* handle,
+ const struct sockaddr* addr,
+ unsigned int addrlen,
+ uv_connect_cb cb) {
+ uv_loop_t* loop = handle->loop;
+ TCP_INITIAL_RTO_PARAMETERS retransmit_ioctl;
+ const struct sockaddr* bind_addr;
+ struct sockaddr_storage converted;
+ BOOL success;
+ DWORD bytes;
+ int err;
+
+ err = uv__convert_to_localhost_if_unspecified(addr, &converted);
+ if (err)
+ return err;
+
+ if (handle->delayed_error) {
+ return handle->delayed_error;
+ }
+
+ if (!(handle->flags & UV_HANDLE_BOUND)) {
+ if (addrlen == sizeof(uv_addr_ip4_any_)) {
+ bind_addr = (const struct sockaddr*) &uv_addr_ip4_any_;
+ } else if (addrlen == sizeof(uv_addr_ip6_any_)) {
+ bind_addr = (const struct sockaddr*) &uv_addr_ip6_any_;
+ } else {
+ abort();
+ }
+ err = uv_tcp_try_bind(handle, bind_addr, addrlen, 0);
+ if (err)
+ return err;
+ if (handle->delayed_error)
+ return handle->delayed_error;
+ }
+
+ if (!handle->tcp.conn.func_connectex) {
+ if (!uv_get_connectex_function(handle->socket, &handle->tcp.conn.func_connectex)) {
+ return WSAEAFNOSUPPORT;
+ }
+ }
+
+ /* This makes connect() fail instantly if the target port on the localhost
+ * is not reachable, instead of waiting for 2s. We do not care if this fails.
+ * This only works on Windows version 10.0.16299 and later.
+ */
+ if (uv__is_fast_loopback_fail_supported() && uv__is_loopback(&converted)) {
+ memset(&retransmit_ioctl, 0, sizeof(retransmit_ioctl));
+ retransmit_ioctl.Rtt = TCP_INITIAL_RTO_NO_SYN_RETRANSMISSIONS;
+ retransmit_ioctl.MaxSynRetransmissions = TCP_INITIAL_RTO_NO_SYN_RETRANSMISSIONS;
+ WSAIoctl(handle->socket,
+ SIO_TCP_INITIAL_RTO,
+ &retransmit_ioctl,
+ sizeof(retransmit_ioctl),
+ NULL,
+ 0,
+ &bytes,
+ NULL,
+ NULL);
+ }
+
+ UV_REQ_INIT(req, UV_CONNECT);
+ req->handle = (uv_stream_t*) handle;
+ req->cb = cb;
+ memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));
+
+ success = handle->tcp.conn.func_connectex(handle->socket,
+ (const struct sockaddr*) &converted,
+ addrlen,
+ NULL,
+ 0,
+ &bytes,
+ &req->u.io.overlapped);
+
+ if (UV_SUCCEEDED_WITHOUT_IOCP(success)) {
+ /* Process the req without IOCP. */
+ handle->reqs_pending++;
+ REGISTER_HANDLE_REQ(loop, handle, req);
+ uv_insert_pending_req(loop, (uv_req_t*)req);
+ } else if (UV_SUCCEEDED_WITH_IOCP(success)) {
+ /* The req will be processed with IOCP. */
+ handle->reqs_pending++;
+ REGISTER_HANDLE_REQ(loop, handle, req);
+ } else {
+ return WSAGetLastError();
+ }
+
+ return 0;
+}
+
+
+int uv_tcp_getsockname(const uv_tcp_t* handle,
+ struct sockaddr* name,
+ int* namelen) {
+
+ return uv__getsockpeername((const uv_handle_t*) handle,
+ getsockname,
+ name,
+ namelen,
+ handle->delayed_error);
+}
+
+
+int uv_tcp_getpeername(const uv_tcp_t* handle,
+ struct sockaddr* name,
+ int* namelen) {
+
+ return uv__getsockpeername((const uv_handle_t*) handle,
+ getpeername,
+ name,
+ namelen,
+ handle->delayed_error);
+}
+
+
+int uv_tcp_write(uv_loop_t* loop,
+ uv_write_t* req,
+ uv_tcp_t* handle,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ uv_write_cb cb) {
+ int result;
+ DWORD bytes;
+
+ UV_REQ_INIT(req, UV_WRITE);
+ req->handle = (uv_stream_t*) handle;
+ req->cb = cb;
+
+ /* Prepare the overlapped structure. */
+ memset(&(req->u.io.overlapped), 0, sizeof(req->u.io.overlapped));
+ if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+ req->event_handle = CreateEvent(NULL, 0, 0, NULL);
+ if (req->event_handle == NULL) {
+ uv_fatal_error(GetLastError(), "CreateEvent");
+ }
+ req->u.io.overlapped.hEvent = (HANDLE) ((ULONG_PTR) req->event_handle | 1);
+ req->wait_handle = INVALID_HANDLE_VALUE;
+ }
+
+ result = WSASend(handle->socket,
+ (WSABUF*) bufs,
+ nbufs,
+ &bytes,
+ 0,
+ &req->u.io.overlapped,
+ NULL);
+
+ if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
+ /* Request completed immediately. */
+ req->u.io.queued_bytes = 0;
+ handle->reqs_pending++;
+ handle->stream.conn.write_reqs_pending++;
+ REGISTER_HANDLE_REQ(loop, handle, req);
+ uv_insert_pending_req(loop, (uv_req_t*) req);
+ } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
+ /* Request queued by the kernel. */
+ req->u.io.queued_bytes = uv__count_bufs(bufs, nbufs);
+ handle->reqs_pending++;
+ handle->stream.conn.write_reqs_pending++;
+ REGISTER_HANDLE_REQ(loop, handle, req);
+ handle->write_queue_size += req->u.io.queued_bytes;
+ if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
+ !RegisterWaitForSingleObject(&req->wait_handle,
+ req->event_handle, post_write_completion, (void*) req,
+ INFINITE, WT_EXECUTEINWAITTHREAD | WT_EXECUTEONLYONCE)) {
+ SET_REQ_ERROR(req, GetLastError());
+ uv_insert_pending_req(loop, (uv_req_t*)req);
+ }
+ } else {
+ /* Send failed due to an error, report it later */
+ req->u.io.queued_bytes = 0;
+ handle->reqs_pending++;
+ handle->stream.conn.write_reqs_pending++;
+ REGISTER_HANDLE_REQ(loop, handle, req);
+ SET_REQ_ERROR(req, WSAGetLastError());
+ uv_insert_pending_req(loop, (uv_req_t*) req);
+ }
+
+ return 0;
+}
+
+
+int uv__tcp_try_write(uv_tcp_t* handle,
+ const uv_buf_t bufs[],
+ unsigned int nbufs) {
+ int result;
+ DWORD bytes;
+
+ if (handle->stream.conn.write_reqs_pending > 0)
+ return UV_EAGAIN;
+
+ result = WSASend(handle->socket,
+ (WSABUF*) bufs,
+ nbufs,
+ &bytes,
+ 0,
+ NULL,
+ NULL);
+
+ if (result == SOCKET_ERROR)
+ return uv_translate_sys_error(WSAGetLastError());
+ else
+ return bytes;
+}
+
+
+void uv_process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle,
+ uv_req_t* req) {
+ DWORD bytes, flags, err;
+ uv_buf_t buf;
+ int count;
+
+ assert(handle->type == UV_TCP);
+
+ handle->flags &= ~UV_HANDLE_READ_PENDING;
+
+ if (!REQ_SUCCESS(req)) {
+ /* An error occurred doing the read. */
+ if ((handle->flags & UV_HANDLE_READING) ||
+ !(handle->flags & UV_HANDLE_ZERO_READ)) {
+ handle->flags &= ~UV_HANDLE_READING;
+ DECREASE_ACTIVE_COUNT(loop, handle);
+ buf = (handle->flags & UV_HANDLE_ZERO_READ) ?
+ uv_buf_init(NULL, 0) : handle->tcp.conn.read_buffer;
+
+ err = GET_REQ_SOCK_ERROR(req);
+
+ if (err == WSAECONNABORTED) {
+ /* Turn WSAECONNABORTED into UV_ECONNRESET to be consistent with Unix.
+ */
+ err = WSAECONNRESET;
+ }
+
+ handle->read_cb((uv_stream_t*)handle,
+ uv_translate_sys_error(err),
+ &buf);
+ }
+ } else {
+ if (!(handle->flags & UV_HANDLE_ZERO_READ)) {
+ /* The read was done with a non-zero buffer length. */
+ if (req->u.io.overlapped.InternalHigh > 0) {
+ /* Successful read */
+ handle->read_cb((uv_stream_t*)handle,
+ req->u.io.overlapped.InternalHigh,
+ &handle->tcp.conn.read_buffer);
+ /* Read again only if bytes == buf.len */
+ if (req->u.io.overlapped.InternalHigh < handle->tcp.conn.read_buffer.len) {
+ goto done;
+ }
+ } else {
+ /* Connection closed */
+ if (handle->flags & UV_HANDLE_READING) {
+ handle->flags &= ~UV_HANDLE_READING;
+ DECREASE_ACTIVE_COUNT(loop, handle);
+ }
+ handle->flags &= ~UV_HANDLE_READABLE;
+
+ buf.base = 0;
+ buf.len = 0;
+ handle->read_cb((uv_stream_t*)handle, UV_EOF, &handle->tcp.conn.read_buffer);
+ goto done;
+ }
+ }
+
+ /* Do nonblocking reads until the buffer is empty */
+ count = 32;
+ while ((handle->flags & UV_HANDLE_READING) && (count-- > 0)) {
+ buf = uv_buf_init(NULL, 0);
+ handle->alloc_cb((uv_handle_t*) handle, 65536, &buf);
+ if (buf.base == NULL || buf.len == 0) {
+ handle->read_cb((uv_stream_t*) handle, UV_ENOBUFS, &buf);
+ break;
+ }
+ assert(buf.base != NULL);
+
+ flags = 0;
+ if (WSARecv(handle->socket,
+ (WSABUF*)&buf,
+ 1,
+ &bytes,
+ &flags,
+ NULL,
+ NULL) != SOCKET_ERROR) {
+ if (bytes > 0) {
+ /* Successful read */
+ handle->read_cb((uv_stream_t*)handle, bytes, &buf);
+ /* Read again only if bytes == buf.len */
+ if (bytes < buf.len) {
+ break;
+ }
+ } else {
+ /* Connection closed */
+ handle->flags &= ~(UV_HANDLE_READING | UV_HANDLE_READABLE);
+ DECREASE_ACTIVE_COUNT(loop, handle);
+
+ handle->read_cb((uv_stream_t*)handle, UV_EOF, &buf);
+ break;
+ }
+ } else {
+ err = WSAGetLastError();
+ if (err == WSAEWOULDBLOCK) {
+ /* Read buffer was completely empty, report a 0-byte read. */
+ handle->read_cb((uv_stream_t*)handle, 0, &buf);
+ } else {
+ /* Ouch! serious error. */
+ handle->flags &= ~UV_HANDLE_READING;
+ DECREASE_ACTIVE_COUNT(loop, handle);
+
+ if (err == WSAECONNABORTED) {
+ /* Turn WSAECONNABORTED into UV_ECONNRESET to be consistent with
+ * Unix. */
+ err = WSAECONNRESET;
+ }
+
+ handle->read_cb((uv_stream_t*)handle,
+ uv_translate_sys_error(err),
+ &buf);
+ }
+ break;
+ }
+ }
+
+done:
+ /* Post another read if still reading and not closing. */
+ if ((handle->flags & UV_HANDLE_READING) &&
+ !(handle->flags & UV_HANDLE_READ_PENDING)) {
+ uv_tcp_queue_read(loop, handle);
+ }
+ }
+
+ DECREASE_PENDING_REQ_COUNT(handle);
+}
+
+
+void uv_process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle,
+ uv_write_t* req) {
+ int err;
+
+ assert(handle->type == UV_TCP);
+
+ assert(handle->write_queue_size >= req->u.io.queued_bytes);
+ handle->write_queue_size -= req->u.io.queued_bytes;
+
+ UNREGISTER_HANDLE_REQ(loop, handle, req);
+
+ if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+ if (req->wait_handle != INVALID_HANDLE_VALUE) {
+ UnregisterWait(req->wait_handle);
+ req->wait_handle = INVALID_HANDLE_VALUE;
+ }
+ if (req->event_handle != NULL) {
+ CloseHandle(req->event_handle);
+ req->event_handle = NULL;
+ }
+ }
+
+ if (req->cb) {
+ err = uv_translate_sys_error(GET_REQ_SOCK_ERROR(req));
+ if (err == UV_ECONNABORTED) {
+ /* use UV_ECANCELED for consistency with Unix */
+ err = UV_ECANCELED;
+ }
+ req->cb(req, err);
+ }
+
+ handle->stream.conn.write_reqs_pending--;
+ if (handle->stream.conn.shutdown_req != NULL &&
+ handle->stream.conn.write_reqs_pending == 0) {
+ uv_want_endgame(loop, (uv_handle_t*)handle);
+ }
+
+ DECREASE_PENDING_REQ_COUNT(handle);
+}
+
+
+void uv_process_tcp_accept_req(uv_loop_t* loop, uv_tcp_t* handle,
+ uv_req_t* raw_req) {
+ uv_tcp_accept_t* req = (uv_tcp_accept_t*) raw_req;
+ int err;
+
+ assert(handle->type == UV_TCP);
+
+  /* If req->accept_socket is not a valid socket, then uv_tcp_queue_accept
+ * must have failed. This is a serious error. We stop accepting connections
+ * and report this error to the connection callback. */
+ if (req->accept_socket == INVALID_SOCKET) {
+ if (handle->flags & UV_HANDLE_LISTENING) {
+ handle->flags &= ~UV_HANDLE_LISTENING;
+ DECREASE_ACTIVE_COUNT(loop, handle);
+ if (handle->stream.serv.connection_cb) {
+ err = GET_REQ_SOCK_ERROR(req);
+ handle->stream.serv.connection_cb((uv_stream_t*)handle,
+ uv_translate_sys_error(err));
+ }
+ }
+ } else if (REQ_SUCCESS(req) &&
+ setsockopt(req->accept_socket,
+ SOL_SOCKET,
+ SO_UPDATE_ACCEPT_CONTEXT,
+ (char*)&handle->socket,
+ sizeof(handle->socket)) == 0) {
+ req->next_pending = handle->tcp.serv.pending_accepts;
+ handle->tcp.serv.pending_accepts = req;
+
+ /* Accept and SO_UPDATE_ACCEPT_CONTEXT were successful. */
+ if (handle->stream.serv.connection_cb) {
+ handle->stream.serv.connection_cb((uv_stream_t*)handle, 0);
+ }
+ } else {
+    /* Errors related to the accepted socket are ignored because the server
+     * socket may still be healthy. If the server socket is broken,
+     * uv_tcp_queue_accept will detect it. */
+ closesocket(req->accept_socket);
+ req->accept_socket = INVALID_SOCKET;
+ if (handle->flags & UV_HANDLE_LISTENING) {
+ uv_tcp_queue_accept(handle, req);
+ }
+ }
+
+ DECREASE_PENDING_REQ_COUNT(handle);
+}
+
+
+void uv_process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle,
+ uv_connect_t* req) {
+ int err;
+
+ assert(handle->type == UV_TCP);
+
+ UNREGISTER_HANDLE_REQ(loop, handle, req);
+
+ err = 0;
+ if (REQ_SUCCESS(req)) {
+ if (handle->flags & UV_HANDLE_CLOSING) {
+ /* use UV_ECANCELED for consistency with Unix */
+ err = ERROR_OPERATION_ABORTED;
+ } else if (setsockopt(handle->socket,
+ SOL_SOCKET,
+ SO_UPDATE_CONNECT_CONTEXT,
+ NULL,
+ 0) == 0) {
+ uv_connection_init((uv_stream_t*)handle);
+ handle->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
+ loop->active_tcp_streams++;
+ } else {
+ err = WSAGetLastError();
+ }
+ } else {
+ err = GET_REQ_SOCK_ERROR(req);
+ }
+ req->cb(req, uv_translate_sys_error(err));
+
+ DECREASE_PENDING_REQ_COUNT(handle);
+}
+
+
+int uv__tcp_xfer_export(uv_tcp_t* handle,
+ int target_pid,
+ uv__ipc_socket_xfer_type_t* xfer_type,
+ uv__ipc_socket_xfer_info_t* xfer_info) {
+ if (handle->flags & UV_HANDLE_CONNECTION) {
+ *xfer_type = UV__IPC_SOCKET_XFER_TCP_CONNECTION;
+ } else {
+ *xfer_type = UV__IPC_SOCKET_XFER_TCP_SERVER;
+ /* We're about to share the socket with another process. Because this is a
+ * listening socket, we assume that the other process will be accepting
+ * connections on it. Thus, before sharing the socket with another process,
+ * we call listen here in the parent process. */
+ if (!(handle->flags & UV_HANDLE_LISTENING)) {
+ if (!(handle->flags & UV_HANDLE_BOUND)) {
+ return ERROR_NOT_SUPPORTED;
+ }
+ if (handle->delayed_error == 0 &&
+ listen(handle->socket, SOMAXCONN) == SOCKET_ERROR) {
+ handle->delayed_error = WSAGetLastError();
+ }
+ }
+ }
+
+ if (WSADuplicateSocketW(handle->socket, target_pid, &xfer_info->socket_info))
+ return WSAGetLastError();
+ xfer_info->delayed_error = handle->delayed_error;
+
+ /* Mark the local copy of the handle as 'shared' so we behave in a way that's
+ * friendly to the process(es) that we share the socket with. */
+ handle->flags |= UV_HANDLE_SHARED_TCP_SOCKET;
+
+ return 0;
+}
+
+
+int uv__tcp_xfer_import(uv_tcp_t* tcp,
+ uv__ipc_socket_xfer_type_t xfer_type,
+ uv__ipc_socket_xfer_info_t* xfer_info) {
+ int err;
+ SOCKET socket;
+
+ assert(xfer_type == UV__IPC_SOCKET_XFER_TCP_SERVER ||
+ xfer_type == UV__IPC_SOCKET_XFER_TCP_CONNECTION);
+
+ socket = WSASocketW(FROM_PROTOCOL_INFO,
+ FROM_PROTOCOL_INFO,
+ FROM_PROTOCOL_INFO,
+ &xfer_info->socket_info,
+ 0,
+ WSA_FLAG_OVERLAPPED);
+
+ if (socket == INVALID_SOCKET) {
+ return WSAGetLastError();
+ }
+
+ err = uv_tcp_set_socket(
+ tcp->loop, tcp, socket, xfer_info->socket_info.iAddressFamily, 1);
+ if (err) {
+ closesocket(socket);
+ return err;
+ }
+
+ tcp->delayed_error = xfer_info->delayed_error;
+ tcp->flags |= UV_HANDLE_BOUND | UV_HANDLE_SHARED_TCP_SOCKET;
+
+ if (xfer_type == UV__IPC_SOCKET_XFER_TCP_CONNECTION) {
+ uv_connection_init((uv_stream_t*)tcp);
+ tcp->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
+ }
+
+ tcp->loop->active_tcp_streams++;
+ return 0;
+}
+
+
+int uv_tcp_nodelay(uv_tcp_t* handle, int enable) {
+ int err;
+
+ if (handle->socket != INVALID_SOCKET) {
+ err = uv__tcp_nodelay(handle, handle->socket, enable);
+ if (err)
+ return err;
+ }
+
+ if (enable) {
+ handle->flags |= UV_HANDLE_TCP_NODELAY;
+ } else {
+ handle->flags &= ~UV_HANDLE_TCP_NODELAY;
+ }
+
+ return 0;
+}
+
+
+int uv_tcp_keepalive(uv_tcp_t* handle, int enable, unsigned int delay) {
+ int err;
+
+ if (handle->socket != INVALID_SOCKET) {
+ err = uv__tcp_keepalive(handle, handle->socket, enable, delay);
+ if (err)
+ return err;
+ }
+
+ if (enable) {
+ handle->flags |= UV_HANDLE_TCP_KEEPALIVE;
+ } else {
+ handle->flags &= ~UV_HANDLE_TCP_KEEPALIVE;
+ }
+
+ /* TODO: Store delay if handle->socket isn't created yet. */
+
+ return 0;
+}
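+
+/* Illustrative usage sketch (not part of this file): both options may be set
+ * before a socket exists; the UV_HANDLE_TCP_NODELAY / UV_HANDLE_TCP_KEEPALIVE
+ * flags make uv_tcp_set_socket re-apply them once a socket is created (the
+ * keepalive delay itself is not yet stored, per the TODO above). The 60
+ * second delay is an arbitrary example value.
+ *
+ *   uv_tcp_t handle;
+ *   uv_tcp_init(loop, &handle);
+ *   uv_tcp_nodelay(&handle, 1);
+ *   uv_tcp_keepalive(&handle, 1, 60);
+ */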
+
+
+int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) {
+ if (handle->flags & UV_HANDLE_CONNECTION) {
+ return UV_EINVAL;
+ }
+
+ /* Check if we're already in the desired mode. */
+ if ((enable && !(handle->flags & UV_HANDLE_TCP_SINGLE_ACCEPT)) ||
+ (!enable && handle->flags & UV_HANDLE_TCP_SINGLE_ACCEPT)) {
+ return 0;
+ }
+
+ /* Don't allow switching from single pending accept to many. */
+ if (enable) {
+ return UV_ENOTSUP;
+ }
+
+  /* Check if we're in the middle of changing the number of pending accepts. */
+ if (handle->flags & UV_HANDLE_TCP_ACCEPT_STATE_CHANGING) {
+ return 0;
+ }
+
+ handle->flags |= UV_HANDLE_TCP_SINGLE_ACCEPT;
+
+ /* Flip the changing flag if we have already queued multiple accepts. */
+ if (handle->flags & UV_HANDLE_LISTENING) {
+ handle->flags |= UV_HANDLE_TCP_ACCEPT_STATE_CHANGING;
+ }
+
+ return 0;
+}
+
+
+static int uv_tcp_try_cancel_io(uv_tcp_t* tcp) {
+ SOCKET socket = tcp->socket;
+ int non_ifs_lsp;
+
+ /* Check if we have any non-IFS LSPs stacked on top of TCP */
+ non_ifs_lsp = (tcp->flags & UV_HANDLE_IPV6) ? uv_tcp_non_ifs_lsp_ipv6 :
+ uv_tcp_non_ifs_lsp_ipv4;
+
+ /* If there are non-ifs LSPs then try to obtain a base handle for the socket.
+ * This will always fail on Windows XP/3k. */
+ if (non_ifs_lsp) {
+ DWORD bytes;
+ if (WSAIoctl(socket,
+ SIO_BASE_HANDLE,
+ NULL,
+ 0,
+ &socket,
+ sizeof socket,
+ &bytes,
+ NULL,
+ NULL) != 0) {
+ /* Failed. We can't do CancelIo. */
+ return -1;
+ }
+ }
+
+ assert(socket != 0 && socket != INVALID_SOCKET);
+
+ if (!CancelIo((HANDLE) socket)) {
+ return GetLastError();
+ }
+
+ /* It worked. */
+ return 0;
+}
+
+
+void uv_tcp_close(uv_loop_t* loop, uv_tcp_t* tcp) {
+ int close_socket = 1;
+
+ if (tcp->flags & UV_HANDLE_READ_PENDING) {
+    /* In order for winsock to do a graceful close there must not be any
+     * pending reads, or the socket must be shut down for writing. */
+ if (!(tcp->flags & UV_HANDLE_SHARED_TCP_SOCKET)) {
+ /* Just do shutdown on non-shared sockets, which ensures graceful close. */
+ shutdown(tcp->socket, SD_SEND);
+
+ } else if (uv_tcp_try_cancel_io(tcp) == 0) {
+      /* In case of a shared socket, we try to cancel all outstanding I/O. If
+ * that works, don't close the socket yet - wait for the read req to
+ * return and close the socket in uv_tcp_endgame. */
+ close_socket = 0;
+
+ } else {
+ /* When cancelling isn't possible - which could happen when an LSP is
+       * present on an old Windows version - we will have to close the socket
+ * with a read pending. That is not nice because trailing sent bytes may
+ * not make it to the other side. */
+ }
+
+ } else if ((tcp->flags & UV_HANDLE_SHARED_TCP_SOCKET) &&
+ tcp->tcp.serv.accept_reqs != NULL) {
+ /* Under normal circumstances closesocket() will ensure that all pending
+ * accept reqs are canceled. However, when the socket is shared the
+ * presence of another reference to the socket in another process will keep
+ * the accept reqs going, so we have to ensure that these are canceled. */
+ if (uv_tcp_try_cancel_io(tcp) != 0) {
+ /* When cancellation is not possible, there is another option: we can
+ * close the incoming sockets, which will also cancel the accept
+ * operations. However this is not cool because we might inadvertently
+ * close a socket that just accepted a new connection, which will cause
+ * the connection to be aborted. */
+ unsigned int i;
+ for (i = 0; i < uv_simultaneous_server_accepts; i++) {
+ uv_tcp_accept_t* req = &tcp->tcp.serv.accept_reqs[i];
+ if (req->accept_socket != INVALID_SOCKET &&
+ !HasOverlappedIoCompleted(&req->u.io.overlapped)) {
+ closesocket(req->accept_socket);
+ req->accept_socket = INVALID_SOCKET;
+ }
+ }
+ }
+ }
+
+ if (tcp->flags & UV_HANDLE_READING) {
+ tcp->flags &= ~UV_HANDLE_READING;
+ DECREASE_ACTIVE_COUNT(loop, tcp);
+ }
+
+ if (tcp->flags & UV_HANDLE_LISTENING) {
+ tcp->flags &= ~UV_HANDLE_LISTENING;
+ DECREASE_ACTIVE_COUNT(loop, tcp);
+ }
+
+ if (close_socket) {
+ closesocket(tcp->socket);
+ tcp->socket = INVALID_SOCKET;
+ tcp->flags |= UV_HANDLE_TCP_SOCKET_CLOSED;
+ }
+
+ tcp->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
+ uv__handle_closing(tcp);
+
+ if (tcp->reqs_pending == 0) {
+ uv_want_endgame(tcp->loop, (uv_handle_t*)tcp);
+ }
+}
+
+
+int uv_tcp_open(uv_tcp_t* handle, uv_os_sock_t sock) {
+ WSAPROTOCOL_INFOW protocol_info;
+ int opt_len;
+ int err;
+ struct sockaddr_storage saddr;
+ int saddr_len;
+
+ /* Detect the address family of the socket. */
+ opt_len = (int) sizeof protocol_info;
+ if (getsockopt(sock,
+ SOL_SOCKET,
+ SO_PROTOCOL_INFOW,
+ (char*) &protocol_info,
+ &opt_len) == SOCKET_ERROR) {
+ return uv_translate_sys_error(GetLastError());
+ }
+
+ err = uv_tcp_set_socket(handle->loop,
+ handle,
+ sock,
+ protocol_info.iAddressFamily,
+ 1);
+ if (err) {
+ return uv_translate_sys_error(err);
+ }
+
+ /* Support already active socket. */
+ saddr_len = sizeof(saddr);
+ if (!uv_tcp_getsockname(handle, (struct sockaddr*) &saddr, &saddr_len)) {
+ /* Socket is already bound. */
+ handle->flags |= UV_HANDLE_BOUND;
+ saddr_len = sizeof(saddr);
+ if (!uv_tcp_getpeername(handle, (struct sockaddr*) &saddr, &saddr_len)) {
+ /* Socket is already connected. */
+ uv_connection_init((uv_stream_t*) handle);
+ handle->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
+ }
+ }
+
+ return 0;
+}
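+
+/* Illustrative usage sketch (not part of this file): wrapping a socket that
+ * was created outside libuv. The WSASocketW call shown is only one way such
+ * a socket might be obtained; any overlapped SOCKET works.
+ *
+ *   SOCKET sock = WSASocketW(AF_INET, SOCK_STREAM, IPPROTO_TCP,
+ *                            NULL, 0, WSA_FLAG_OVERLAPPED);
+ *   uv_tcp_t handle;
+ *   uv_tcp_init(loop, &handle);
+ *   uv_tcp_open(&handle, sock);
+ *
+ * As the code above shows, uv_tcp_open() detects whether the socket is
+ * already bound or connected and initializes the handle accordingly.
+ */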
+
+
+/* This function is an egress point, i.e. it returns libuv errors rather than
+ * system errors.
+ */
+int uv__tcp_bind(uv_tcp_t* handle,
+ const struct sockaddr* addr,
+ unsigned int addrlen,
+ unsigned int flags) {
+ int err;
+
+ err = uv_tcp_try_bind(handle, addr, addrlen, flags);
+ if (err)
+ return uv_translate_sys_error(err);
+
+ return 0;
+}
+
+
+/* This function is an egress point, i.e. it returns libuv errors rather than
+ * system errors.
+ */
+int uv__tcp_connect(uv_connect_t* req,
+ uv_tcp_t* handle,
+ const struct sockaddr* addr,
+ unsigned int addrlen,
+ uv_connect_cb cb) {
+ int err;
+
+ err = uv_tcp_try_connect(req, handle, addr, addrlen, cb);
+ if (err)
+ return uv_translate_sys_error(err);
+
+ return 0;
+}
diff --git a/Utilities/cmlibuv/src/win/thread.c b/Utilities/cmlibuv/src/win/thread.c
new file mode 100644
index 0000000..89c53ad
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/thread.c
@@ -0,0 +1,520 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <limits.h>
+#include <stdlib.h>
+
+#if defined(__MINGW64_VERSION_MAJOR)
+/* MemoryBarrier expands to _mm_mfence in some cases (x86+sse2), which may
+ * require this header in some versions of mingw64. */
+#include <intrin.h>
+#endif
+
+#include "uv.h"
+#include "internal.h"
+
+static void uv__once_inner(uv_once_t* guard, void (*callback)(void)) {
+ DWORD result;
+ HANDLE existing_event, created_event;
+
+ created_event = CreateEvent(NULL, 1, 0, NULL);
+ if (created_event == 0) {
+ /* Could fail in a low-memory situation? */
+ uv_fatal_error(GetLastError(), "CreateEvent");
+ }
+
+ existing_event = InterlockedCompareExchangePointer(&guard->event,
+ created_event,
+ NULL);
+
+ if (existing_event == NULL) {
+ /* We won the race */
+ callback();
+
+ result = SetEvent(created_event);
+ assert(result);
+ guard->ran = 1;
+
+ } else {
+ /* We lost the race. Destroy the event we created and wait for the existing
+ * one to become signaled. */
+ CloseHandle(created_event);
+ result = WaitForSingleObject(existing_event, INFINITE);
+ assert(result == WAIT_OBJECT_0);
+ }
+}
+
+
+void uv_once(uv_once_t* guard, void (*callback)(void)) {
+ /* Fast case - avoid WaitForSingleObject. */
+ if (guard->ran) {
+ return;
+ }
+
+ uv__once_inner(guard, callback);
+}
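+
+/* Illustrative usage sketch (not part of this file): the guard must be
+ * statically initialized with UV_ONCE_INIT so the fast path above can read
+ * guard->ran without synchronization. init_once is a hypothetical name; it
+ * runs exactly once no matter how many threads call uv_once concurrently.
+ *
+ *   static uv_once_t once_guard = UV_ONCE_INIT;
+ *
+ *   static void init_once(void) {
+ *     ... one-time initialization ...
+ *   }
+ *
+ *   uv_once(&once_guard, init_once);
+ */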
+
+
+/* Verify that uv_thread_t can be stored in a TLS slot. */
+STATIC_ASSERT(sizeof(uv_thread_t) <= sizeof(void*));
+
+static uv_key_t uv__current_thread_key;
+static uv_once_t uv__current_thread_init_guard = UV_ONCE_INIT;
+
+
+static void uv__init_current_thread_key(void) {
+ if (uv_key_create(&uv__current_thread_key))
+ abort();
+}
+
+
+struct thread_ctx {
+ void (*entry)(void* arg);
+ void* arg;
+ uv_thread_t self;
+};
+
+
+static UINT __stdcall uv__thread_start(void* arg) {
+ struct thread_ctx *ctx_p;
+ struct thread_ctx ctx;
+
+ ctx_p = arg;
+ ctx = *ctx_p;
+ uv__free(ctx_p);
+
+ uv_once(&uv__current_thread_init_guard, uv__init_current_thread_key);
+ uv_key_set(&uv__current_thread_key, (void*) ctx.self);
+
+ ctx.entry(ctx.arg);
+
+ return 0;
+}
+
+
+int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) {
+ uv_thread_options_t params;
+ params.flags = UV_THREAD_NO_FLAGS;
+ return uv_thread_create_ex(tid, &params, entry, arg);
+}
+
+int uv_thread_create_ex(uv_thread_t* tid,
+ const uv_thread_options_t* params,
+ void (*entry)(void *arg),
+ void *arg) {
+ struct thread_ctx* ctx;
+ int err;
+ HANDLE thread;
+ SYSTEM_INFO sysinfo;
+ size_t stack_size;
+ size_t pagesize;
+
+ stack_size =
+ params->flags & UV_THREAD_HAS_STACK_SIZE ? params->stack_size : 0;
+
+ if (stack_size != 0) {
+ GetNativeSystemInfo(&sysinfo);
+ pagesize = (size_t)sysinfo.dwPageSize;
+ /* Round up to the nearest page boundary. */
+ stack_size = (stack_size + pagesize - 1) &~ (pagesize - 1);
+
+ if ((unsigned)stack_size != stack_size)
+ return UV_EINVAL;
+ }
+
+ ctx = uv__malloc(sizeof(*ctx));
+ if (ctx == NULL)
+ return UV_ENOMEM;
+
+ ctx->entry = entry;
+ ctx->arg = arg;
+
+ /* Create the thread in suspended state so we have a chance to pass
+ * its own creation handle to it */
+ thread = (HANDLE) _beginthreadex(NULL,
+ (unsigned)stack_size,
+ uv__thread_start,
+ ctx,
+ CREATE_SUSPENDED,
+ NULL);
+ if (thread == NULL) {
+ err = errno;
+ uv__free(ctx);
+ } else {
+ err = 0;
+ *tid = thread;
+ ctx->self = thread;
+ ResumeThread(thread);
+ }
+
+ switch (err) {
+ case 0:
+ return 0;
+ case EACCES:
+ return UV_EACCES;
+ case EAGAIN:
+ return UV_EAGAIN;
+ case EINVAL:
+ return UV_EINVAL;
+ }
+
+ return UV_EIO;
+}
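+
+/* Illustrative usage sketch (not part of this file): requesting an explicit
+ * stack size, which the code above rounds up to a page boundary. The worker
+ * function name and the 1 MiB size are hypothetical example values.
+ *
+ *   static void worker(void* arg) {
+ *     ... thread body ...
+ *   }
+ *
+ *   uv_thread_t tid;
+ *   uv_thread_options_t options;
+ *   options.flags = UV_THREAD_HAS_STACK_SIZE;
+ *   options.stack_size = 1024 * 1024;
+ *
+ *   uv_thread_create_ex(&tid, &options, worker, NULL);
+ *   uv_thread_join(&tid);
+ */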
+
+
+uv_thread_t uv_thread_self(void) {
+ uv_once(&uv__current_thread_init_guard, uv__init_current_thread_key);
+ return (uv_thread_t) uv_key_get(&uv__current_thread_key);
+}
+
+
+int uv_thread_join(uv_thread_t *tid) {
+ if (WaitForSingleObject(*tid, INFINITE))
+ return uv_translate_sys_error(GetLastError());
+ else {
+ CloseHandle(*tid);
+ *tid = 0;
+ MemoryBarrier(); /* For feature parity with pthread_join(). */
+ return 0;
+ }
+}
+
+
+int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) {
+ return *t1 == *t2;
+}
+
+
+int uv_mutex_init(uv_mutex_t* mutex) {
+ InitializeCriticalSection(mutex);
+ return 0;
+}
+
+
+int uv_mutex_init_recursive(uv_mutex_t* mutex) {
+ return uv_mutex_init(mutex);
+}
+
+
+void uv_mutex_destroy(uv_mutex_t* mutex) {
+ DeleteCriticalSection(mutex);
+}
+
+
+void uv_mutex_lock(uv_mutex_t* mutex) {
+ EnterCriticalSection(mutex);
+}
+
+
+int uv_mutex_trylock(uv_mutex_t* mutex) {
+ if (TryEnterCriticalSection(mutex))
+ return 0;
+ else
+ return UV_EBUSY;
+}
+
+
+void uv_mutex_unlock(uv_mutex_t* mutex) {
+ LeaveCriticalSection(mutex);
+}
+
+
+int uv_rwlock_init(uv_rwlock_t* rwlock) {
+ /* Initialize the semaphore that acts as the write lock. */
+ HANDLE handle = CreateSemaphoreW(NULL, 1, 1, NULL);
+ if (handle == NULL)
+ return uv_translate_sys_error(GetLastError());
+ rwlock->state_.write_semaphore_ = handle;
+
+ /* Initialize the critical section protecting the reader count. */
+ InitializeCriticalSection(&rwlock->state_.num_readers_lock_);
+
+ /* Initialize the reader count. */
+ rwlock->state_.num_readers_ = 0;
+
+ return 0;
+}
+
+
+void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
+ DeleteCriticalSection(&rwlock->state_.num_readers_lock_);
+ CloseHandle(rwlock->state_.write_semaphore_);
+}
+
+
+void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
+ /* Acquire the lock that protects the reader count. */
+ EnterCriticalSection(&rwlock->state_.num_readers_lock_);
+
+ /* Increase the reader count, and lock for write if this is the first
+ * reader.
+ */
+ if (++rwlock->state_.num_readers_ == 1) {
+ DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, INFINITE);
+ if (r != WAIT_OBJECT_0)
+ uv_fatal_error(GetLastError(), "WaitForSingleObject");
+ }
+
+ /* Release the lock that protects the reader count. */
+ LeaveCriticalSection(&rwlock->state_.num_readers_lock_);
+}
+
+
+int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) {
+ int err;
+
+ if (!TryEnterCriticalSection(&rwlock->state_.num_readers_lock_))
+ return UV_EBUSY;
+
+ err = 0;
+
+ if (rwlock->state_.num_readers_ == 0) {
+ /* Currently there are no other readers, which means that the write lock
+ * needs to be acquired.
+ */
+ DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, 0);
+ if (r == WAIT_OBJECT_0)
+ rwlock->state_.num_readers_++;
+ else if (r == WAIT_TIMEOUT)
+ err = UV_EBUSY;
+ else if (r == WAIT_FAILED)
+ uv_fatal_error(GetLastError(), "WaitForSingleObject");
+
+ } else {
+ /* The write lock has already been acquired because there are other
+ * active readers.
+ */
+ rwlock->state_.num_readers_++;
+ }
+
+ LeaveCriticalSection(&rwlock->state_.num_readers_lock_);
+ return err;
+}
+
+
+void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
+ EnterCriticalSection(&rwlock->state_.num_readers_lock_);
+
+ if (--rwlock->state_.num_readers_ == 0) {
+ if (!ReleaseSemaphore(rwlock->state_.write_semaphore_, 1, NULL))
+ uv_fatal_error(GetLastError(), "ReleaseSemaphore");
+ }
+
+ LeaveCriticalSection(&rwlock->state_.num_readers_lock_);
+}
+
+
+void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
+ DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, INFINITE);
+ if (r != WAIT_OBJECT_0)
+ uv_fatal_error(GetLastError(), "WaitForSingleObject");
+}
+
+
+int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {
+ DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, 0);
+ if (r == WAIT_OBJECT_0)
+ return 0;
+ else if (r == WAIT_TIMEOUT)
+ return UV_EBUSY;
+ else
+ uv_fatal_error(GetLastError(), "WaitForSingleObject");
+}
+
+
+void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
+ if (!ReleaseSemaphore(rwlock->state_.write_semaphore_, 1, NULL))
+ uv_fatal_error(GetLastError(), "ReleaseSemaphore");
+}
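+
+/* Illustrative usage sketch (not part of this file): the semaphore-based lock
+ * above allows any number of concurrent readers but only one writer; the
+ * first reader in and the last reader out are the ones that acquire and
+ * release the write semaphore.
+ *
+ *   uv_rwlock_t lock;
+ *   uv_rwlock_init(&lock);
+ *
+ *   uv_rwlock_rdlock(&lock);
+ *   uv_rwlock_rdunlock(&lock);
+ *
+ *   uv_rwlock_wrlock(&lock);
+ *   uv_rwlock_wrunlock(&lock);
+ *
+ *   uv_rwlock_destroy(&lock);
+ */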
+
+
+int uv_sem_init(uv_sem_t* sem, unsigned int value) {
+ *sem = CreateSemaphore(NULL, value, INT_MAX, NULL);
+ if (*sem == NULL)
+ return uv_translate_sys_error(GetLastError());
+ else
+ return 0;
+}
+
+
+void uv_sem_destroy(uv_sem_t* sem) {
+ if (!CloseHandle(*sem))
+ abort();
+}
+
+
+void uv_sem_post(uv_sem_t* sem) {
+ if (!ReleaseSemaphore(*sem, 1, NULL))
+ abort();
+}
+
+
+void uv_sem_wait(uv_sem_t* sem) {
+ if (WaitForSingleObject(*sem, INFINITE) != WAIT_OBJECT_0)
+ abort();
+}
+
+
+int uv_sem_trywait(uv_sem_t* sem) {
+ DWORD r = WaitForSingleObject(*sem, 0);
+
+ if (r == WAIT_OBJECT_0)
+ return 0;
+
+ if (r == WAIT_TIMEOUT)
+ return UV_EAGAIN;
+
+ abort();
+ return -1; /* Satisfy the compiler. */
+}
+
+
+int uv_cond_init(uv_cond_t* cond) {
+ InitializeConditionVariable(&cond->cond_var);
+ return 0;
+}
+
+
+void uv_cond_destroy(uv_cond_t* cond) {
+ /* nothing to do */
+ (void) &cond;
+}
+
+
+void uv_cond_signal(uv_cond_t* cond) {
+ WakeConditionVariable(&cond->cond_var);
+}
+
+
+void uv_cond_broadcast(uv_cond_t* cond) {
+ WakeAllConditionVariable(&cond->cond_var);
+}
+
+
+void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
+ if (!SleepConditionVariableCS(&cond->cond_var, mutex, INFINITE))
+ abort();
+}
+
+int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
+ if (SleepConditionVariableCS(&cond->cond_var, mutex, (DWORD)(timeout / 1e6)))
+ return 0;
+ if (GetLastError() != ERROR_TIMEOUT)
+ abort();
+ return UV_ETIMEDOUT;
+}
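+
+/* Illustrative usage sketch (not part of this file): the timeout is given in
+ * nanoseconds (converted to milliseconds above), the mutex must be held, and
+ * the predicate should be re-checked in a loop because the wait can wake
+ * spuriously. The variable names are hypothetical.
+ *
+ *   uv_mutex_lock(&mutex);
+ *   while (!ready) {
+ *     if (uv_cond_timedwait(&cond, &mutex, (uint64_t) 100 * 1000 * 1000) ==
+ *         UV_ETIMEDOUT)
+ *       break;
+ *   }
+ *   uv_mutex_unlock(&mutex);
+ */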
+
+
+int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
+ int err;
+
+ barrier->n = count;
+ barrier->count = 0;
+
+ err = uv_mutex_init(&barrier->mutex);
+ if (err)
+ return err;
+
+ err = uv_sem_init(&barrier->turnstile1, 0);
+ if (err)
+ goto error2;
+
+ err = uv_sem_init(&barrier->turnstile2, 1);
+ if (err)
+ goto error;
+
+ return 0;
+
+error:
+ uv_sem_destroy(&barrier->turnstile1);
+error2:
+ uv_mutex_destroy(&barrier->mutex);
+ return err;
+
+}
+
+
+void uv_barrier_destroy(uv_barrier_t* barrier) {
+ uv_sem_destroy(&barrier->turnstile2);
+ uv_sem_destroy(&barrier->turnstile1);
+ uv_mutex_destroy(&barrier->mutex);
+}
+
+
+int uv_barrier_wait(uv_barrier_t* barrier) {
+ int serial_thread;
+
+ uv_mutex_lock(&barrier->mutex);
+ if (++barrier->count == barrier->n) {
+ uv_sem_wait(&barrier->turnstile2);
+ uv_sem_post(&barrier->turnstile1);
+ }
+ uv_mutex_unlock(&barrier->mutex);
+
+ uv_sem_wait(&barrier->turnstile1);
+ uv_sem_post(&barrier->turnstile1);
+
+ uv_mutex_lock(&barrier->mutex);
+ serial_thread = (--barrier->count == 0);
+ if (serial_thread) {
+ uv_sem_wait(&barrier->turnstile1);
+ uv_sem_post(&barrier->turnstile2);
+ }
+ uv_mutex_unlock(&barrier->mutex);
+
+ uv_sem_wait(&barrier->turnstile2);
+ uv_sem_post(&barrier->turnstile2);
+ return serial_thread;
+}
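+
+/* Illustrative usage sketch (not part of this file): this is the classic
+ * reusable two-turnstile barrier. Exactly one waiting thread sees a non-zero
+ * return and can take care of cleanup. num_workers and do_per_thread_setup
+ * are hypothetical placeholders.
+ *
+ *   uv_barrier_t barrier;
+ *   uv_barrier_init(&barrier, num_workers);
+ *
+ *   in each of the num_workers threads:
+ *     do_per_thread_setup();
+ *     if (uv_barrier_wait(&barrier) > 0)
+ *       uv_barrier_destroy(&barrier);
+ */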
+
+
+int uv_key_create(uv_key_t* key) {
+ key->tls_index = TlsAlloc();
+ if (key->tls_index == TLS_OUT_OF_INDEXES)
+ return UV_ENOMEM;
+ return 0;
+}
+
+
+void uv_key_delete(uv_key_t* key) {
+ if (TlsFree(key->tls_index) == FALSE)
+ abort();
+ key->tls_index = TLS_OUT_OF_INDEXES;
+}
+
+
+void* uv_key_get(uv_key_t* key) {
+ void* value;
+
+ value = TlsGetValue(key->tls_index);
+ if (value == NULL)
+ if (GetLastError() != ERROR_SUCCESS)
+ abort();
+
+ return value;
+}
+
+
+void uv_key_set(uv_key_t* key, void* value) {
+ if (TlsSetValue(key->tls_index, value) == FALSE)
+ abort();
+}
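+
+/* Illustrative usage sketch (not part of this file): a thread-specific value,
+ * the same mechanism uv_thread_self() above uses to remember each thread's
+ * handle. The key and value names are hypothetical.
+ *
+ *   uv_key_t key;
+ *   uv_key_create(&key);
+ *
+ *   in each thread:
+ *     uv_key_set(&key, per_thread_pointer);
+ *     void* value = uv_key_get(&key);
+ *
+ *   uv_key_delete(&key);
+ */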
diff --git a/Utilities/cmlibuv/src/win/tty.c b/Utilities/cmlibuv/src/win/tty.c
new file mode 100644
index 0000000..1b9d4f8
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/tty.c
@@ -0,0 +1,2452 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <io.h>
+#include <string.h>
+#include <stdlib.h>
+
+#if defined(_MSC_VER) && _MSC_VER < 1600
+# include "uv/stdint-msvc2008.h"
+#else
+# include <stdint.h>
+#endif
+
+#ifndef COMMON_LVB_REVERSE_VIDEO
+# define COMMON_LVB_REVERSE_VIDEO 0x4000
+#endif
+
+#include "uv.h"
+#include "internal.h"
+#include "handle-inl.h"
+#include "stream-inl.h"
+#include "req-inl.h"
+
+#ifndef InterlockedOr
+# define InterlockedOr _InterlockedOr
+#endif
+
+#define UNICODE_REPLACEMENT_CHARACTER (0xfffd)
+
+#define ANSI_NORMAL 0x0000
+#define ANSI_ESCAPE_SEEN 0x0002
+#define ANSI_CSI 0x0004
+#define ANSI_ST_CONTROL 0x0008
+#define ANSI_IGNORE 0x0010
+#define ANSI_IN_ARG 0x0020
+#define ANSI_IN_STRING 0x0040
+#define ANSI_BACKSLASH_SEEN 0x0080
+#define ANSI_EXTENSION 0x0100
+#define ANSI_DECSCUSR 0x0200
+
+#define MAX_INPUT_BUFFER_LENGTH 8192
+#define MAX_CONSOLE_CHAR 8192
+
+#ifndef ENABLE_VIRTUAL_TERMINAL_PROCESSING
+#define ENABLE_VIRTUAL_TERMINAL_PROCESSING 0x0004
+#endif
+
+#define CURSOR_SIZE_SMALL 25
+#define CURSOR_SIZE_LARGE 100
+
+static void uv_tty_capture_initial_style(
+ CONSOLE_SCREEN_BUFFER_INFO* screen_buffer_info,
+ CONSOLE_CURSOR_INFO* cursor_info);
+static void uv_tty_update_virtual_window(CONSOLE_SCREEN_BUFFER_INFO* info);
+static int uv__cancel_read_console(uv_tty_t* handle);
+
+
+/* Null uv_buf_t */
+static const uv_buf_t uv_null_buf_ = { 0, NULL };
+
+enum uv__read_console_status_e {
+ NOT_STARTED,
+ IN_PROGRESS,
+ TRAP_REQUESTED,
+ COMPLETED
+};
+
+static volatile LONG uv__read_console_status = NOT_STARTED;
+static volatile LONG uv__restore_screen_state;
+static CONSOLE_SCREEN_BUFFER_INFO uv__saved_screen_state;
+
+
+/*
+ * The console virtual window.
+ *
+ * Normally cursor movement in windows is relative to the console screen buffer,
+ * e.g. the application is allowed to overwrite the 'history'. This is very
+ * inconvenient; it makes absolute cursor movement pretty useless. There is
+ * also the concept of 'client rect' which is defined by the actual size of
+ * the console window and the scroll position of the screen buffer, but it's
+ * very volatile because it changes when the user scrolls.
+ *
+ * To make cursor movement behave sensibly we define a virtual window to which
+ * cursor movement is confined. The virtual window is always as wide as the
+ * console screen buffer, but its height is defined by the size of the
+ * console window. The top of the virtual window aligns with the position
+ * of the caret when the first stdout/err handle is created, unless that would
+ * mean that it would extend beyond the bottom of the screen buffer - in that
+ * case it's located as far down as possible.
+ *
+ * When the user writes a long text or many newlines, such that the output
+ * reaches beyond the bottom of the virtual window, the virtual window is
+ * shifted downwards, but not resized.
+ *
+ * Since all tty i/o happens on the same console, this window is shared
+ * between all stdout/stderr handles.
+ */
+
+static int uv_tty_virtual_offset = -1;
+static int uv_tty_virtual_height = -1;
+static int uv_tty_virtual_width = -1;
+
+/* The console window size.
+ * We keep this separate from uv_tty_virtual_*; these values are used only
+ * for signalling SIGWINCH.
+ */
+
+static HANDLE uv__tty_console_handle = INVALID_HANDLE_VALUE;
+static int uv__tty_console_height = -1;
+static int uv__tty_console_width = -1;
+static HANDLE uv__tty_console_resized = INVALID_HANDLE_VALUE;
+static uv_mutex_t uv__tty_console_resize_mutex;
+
+static DWORD WINAPI uv__tty_console_resize_message_loop_thread(void* param);
+static void CALLBACK uv__tty_console_resize_event(HWINEVENTHOOK hWinEventHook,
+ DWORD event,
+ HWND hwnd,
+ LONG idObject,
+ LONG idChild,
+ DWORD dwEventThread,
+ DWORD dwmsEventTime);
+static DWORD WINAPI uv__tty_console_resize_watcher_thread(void* param);
+static void uv__tty_console_signal_resize(void);
+
+/* We use a semaphore rather than a mutex or critical section because in some
+   cases (uv__cancel_read_console) we need to take the lock in the main thread
+   and release it in another thread. Using a semaphore ensures that in such a
+   scenario the main thread will still block when trying to acquire the lock. */
+static uv_sem_t uv_tty_output_lock;
+
+static WORD uv_tty_default_text_attributes =
+ FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE;
+
+static char uv_tty_default_fg_color = 7;
+static char uv_tty_default_bg_color = 0;
+static char uv_tty_default_fg_bright = 0;
+static char uv_tty_default_bg_bright = 0;
+static char uv_tty_default_inverse = 0;
+
+static CONSOLE_CURSOR_INFO uv_tty_default_cursor_info;
+
+/* Determine whether or not ANSI support is enabled. */
+static BOOL uv__need_check_vterm_state = TRUE;
+static uv_tty_vtermstate_t uv__vterm_state = UV_TTY_UNSUPPORTED;
+static void uv__determine_vterm_state(HANDLE handle);
+
+void uv_console_init(void) {
+ if (uv_sem_init(&uv_tty_output_lock, 1))
+ abort();
+ uv__tty_console_handle = CreateFileW(L"CONOUT$",
+ GENERIC_READ | GENERIC_WRITE,
+ FILE_SHARE_WRITE,
+ 0,
+ OPEN_EXISTING,
+ 0,
+ 0);
+ if (uv__tty_console_handle != INVALID_HANDLE_VALUE) {
+ CONSOLE_SCREEN_BUFFER_INFO sb_info;
+ QueueUserWorkItem(uv__tty_console_resize_message_loop_thread,
+ NULL,
+ WT_EXECUTELONGFUNCTION);
+ uv_mutex_init(&uv__tty_console_resize_mutex);
+ if (GetConsoleScreenBufferInfo(uv__tty_console_handle, &sb_info)) {
+ uv__tty_console_width = sb_info.dwSize.X;
+ uv__tty_console_height = sb_info.srWindow.Bottom - sb_info.srWindow.Top + 1;
+ }
+ }
+}
+
+
+int uv_tty_init(uv_loop_t* loop, uv_tty_t* tty, uv_file fd, int unused) {
+ BOOL readable;
+ DWORD NumberOfEvents;
+ HANDLE handle;
+ CONSOLE_SCREEN_BUFFER_INFO screen_buffer_info;
+ CONSOLE_CURSOR_INFO cursor_info;
+ (void)unused;
+
+ uv__once_init();
+ handle = (HANDLE) uv__get_osfhandle(fd);
+ if (handle == INVALID_HANDLE_VALUE)
+ return UV_EBADF;
+
+ if (fd <= 2) {
+ /* In order to avoid closing a stdio file descriptor 0-2, duplicate the
+ * underlying OS handle and forget about the original fd.
+ * We could also opt to use the original OS handle and just never close it,
+ * but then there would be no reliable way to cancel pending read operations
+ * upon close.
+ */
+ if (!DuplicateHandle(INVALID_HANDLE_VALUE,
+ handle,
+ INVALID_HANDLE_VALUE,
+ &handle,
+ 0,
+ FALSE,
+ DUPLICATE_SAME_ACCESS))
+ return uv_translate_sys_error(GetLastError());
+ fd = -1;
+ }
+
+ readable = GetNumberOfConsoleInputEvents(handle, &NumberOfEvents);
+ if (!readable) {
+ /* Obtain the screen buffer info with the output handle. */
+ if (!GetConsoleScreenBufferInfo(handle, &screen_buffer_info)) {
+ return uv_translate_sys_error(GetLastError());
+ }
+
+ /* Obtain the cursor info with the output handle. */
+ if (!GetConsoleCursorInfo(handle, &cursor_info)) {
+ return uv_translate_sys_error(GetLastError());
+ }
+
+ /* Obtain the tty_output_lock because the virtual window state is shared
+ * between all uv_tty_t handles. */
+ uv_sem_wait(&uv_tty_output_lock);
+
+ if (uv__need_check_vterm_state)
+ uv__determine_vterm_state(handle);
+
+ /* Remember the original console text attributes and cursor info. */
+ uv_tty_capture_initial_style(&screen_buffer_info, &cursor_info);
+
+ uv_tty_update_virtual_window(&screen_buffer_info);
+
+ uv_sem_post(&uv_tty_output_lock);
+ }
+
+
+ uv_stream_init(loop, (uv_stream_t*) tty, UV_TTY);
+ uv_connection_init((uv_stream_t*) tty);
+
+ tty->handle = handle;
+ tty->u.fd = fd;
+ tty->reqs_pending = 0;
+ tty->flags |= UV_HANDLE_BOUND;
+
+ if (readable) {
+ /* Initialize TTY input specific fields. */
+ tty->flags |= UV_HANDLE_TTY_READABLE | UV_HANDLE_READABLE;
+ /* TODO: remove me in v2.x. */
+ tty->tty.rd.unused_ = NULL;
+ tty->tty.rd.read_line_buffer = uv_null_buf_;
+ tty->tty.rd.read_raw_wait = NULL;
+
+ /* Init keycode-to-vt100 mapper state. */
+ tty->tty.rd.last_key_len = 0;
+ tty->tty.rd.last_key_offset = 0;
+ tty->tty.rd.last_utf16_high_surrogate = 0;
+ memset(&tty->tty.rd.last_input_record, 0, sizeof tty->tty.rd.last_input_record);
+ } else {
+ /* TTY output specific fields. */
+ tty->flags |= UV_HANDLE_WRITABLE;
+
+ /* Init utf8-to-utf16 conversion state. */
+ tty->tty.wr.utf8_bytes_left = 0;
+ tty->tty.wr.utf8_codepoint = 0;
+
+ /* Initialize eol conversion state */
+ tty->tty.wr.previous_eol = 0;
+
+ /* Init ANSI parser state. */
+ tty->tty.wr.ansi_parser_state = ANSI_NORMAL;
+ }
+
+ return 0;
+}
+
+
+/* Set the default console text attributes based on how the console was
+ * configured when libuv started.
+ */
+static void uv_tty_capture_initial_style(
+ CONSOLE_SCREEN_BUFFER_INFO* screen_buffer_info,
+ CONSOLE_CURSOR_INFO* cursor_info) {
+ static int style_captured = 0;
+
+ /* Only do this once.
+ Assumption: Caller has acquired uv_tty_output_lock. */
+ if (style_captured)
+ return;
+
+ /* Save raw win32 attributes. */
+ uv_tty_default_text_attributes = screen_buffer_info->wAttributes;
+
+ /* Convert black text on black background to use white text. */
+ if (uv_tty_default_text_attributes == 0)
+ uv_tty_default_text_attributes = 7;
+
+ /* Convert Win32 attributes to ANSI colors. */
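+ /* For example, FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_INTENSITY
+ maps to fg_color 3 (ANSI yellow) with fg_bright set. */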
+ uv_tty_default_fg_color = 0;
+ uv_tty_default_bg_color = 0;
+ uv_tty_default_fg_bright = 0;
+ uv_tty_default_bg_bright = 0;
+ uv_tty_default_inverse = 0;
+
+ if (uv_tty_default_text_attributes & FOREGROUND_RED)
+ uv_tty_default_fg_color |= 1;
+
+ if (uv_tty_default_text_attributes & FOREGROUND_GREEN)
+ uv_tty_default_fg_color |= 2;
+
+ if (uv_tty_default_text_attributes & FOREGROUND_BLUE)
+ uv_tty_default_fg_color |= 4;
+
+ if (uv_tty_default_text_attributes & BACKGROUND_RED)
+ uv_tty_default_bg_color |= 1;
+
+ if (uv_tty_default_text_attributes & BACKGROUND_GREEN)
+ uv_tty_default_bg_color |= 2;
+
+ if (uv_tty_default_text_attributes & BACKGROUND_BLUE)
+ uv_tty_default_bg_color |= 4;
+
+ if (uv_tty_default_text_attributes & FOREGROUND_INTENSITY)
+ uv_tty_default_fg_bright = 1;
+
+ if (uv_tty_default_text_attributes & BACKGROUND_INTENSITY)
+ uv_tty_default_bg_bright = 1;
+
+ if (uv_tty_default_text_attributes & COMMON_LVB_REVERSE_VIDEO)
+ uv_tty_default_inverse = 1;
+
+ /* Save the cursor size and the cursor state. */
+ uv_tty_default_cursor_info = *cursor_info;
+
+ style_captured = 1;
+}
+
+
+int uv_tty_set_mode(uv_tty_t* tty, uv_tty_mode_t mode) {
+ DWORD flags;
+ unsigned char was_reading;
+ uv_alloc_cb alloc_cb;
+ uv_read_cb read_cb;
+ int err;
+
+ if (!(tty->flags & UV_HANDLE_TTY_READABLE)) {
+ return UV_EINVAL;
+ }
+
+ if (!!mode == !!(tty->flags & UV_HANDLE_TTY_RAW)) {
+ return 0;
+ }
+
+ switch (mode) {
+ case UV_TTY_MODE_NORMAL:
+ flags = ENABLE_ECHO_INPUT | ENABLE_LINE_INPUT | ENABLE_PROCESSED_INPUT;
+ break;
+ case UV_TTY_MODE_RAW:
+ flags = ENABLE_WINDOW_INPUT;
+ break;
+ case UV_TTY_MODE_IO:
+ return UV_ENOTSUP;
+ default:
+ return UV_EINVAL;
+ }
+
+ /* If currently reading, stop; reading is restarted below after the mode
+ * change. */
+ if (tty->flags & UV_HANDLE_READING) {
+ was_reading = 1;
+ alloc_cb = tty->alloc_cb;
+ read_cb = tty->read_cb;
+ err = uv_tty_read_stop(tty);
+ if (err) {
+ return uv_translate_sys_error(err);
+ }
+ } else {
+ was_reading = 0;
+ alloc_cb = NULL;
+ read_cb = NULL;
+ }
+
+ uv_sem_wait(&uv_tty_output_lock);
+ if (!SetConsoleMode(tty->handle, flags)) {
+ err = uv_translate_sys_error(GetLastError());
+ uv_sem_post(&uv_tty_output_lock);
+ return err;
+ }
+ uv_sem_post(&uv_tty_output_lock);
+
+ /* Update flag. */
+ tty->flags &= ~UV_HANDLE_TTY_RAW;
+ tty->flags |= mode ? UV_HANDLE_TTY_RAW : 0;
+
+ /* If we just stopped reading, restart. */
+ if (was_reading) {
+ err = uv_tty_read_start(tty, alloc_cb, read_cb);
+ if (err) {
+ return uv_translate_sys_error(err);
+ }
+ }
+
+ return 0;
+}
+
+
+int uv_tty_get_winsize(uv_tty_t* tty, int* width, int* height) {
+ CONSOLE_SCREEN_BUFFER_INFO info;
+
+ if (!GetConsoleScreenBufferInfo(tty->handle, &info)) {
+ return uv_translate_sys_error(GetLastError());
+ }
+
+ uv_sem_wait(&uv_tty_output_lock);
+ uv_tty_update_virtual_window(&info);
+ uv_sem_post(&uv_tty_output_lock);
+
+ *width = uv_tty_virtual_width;
+ *height = uv_tty_virtual_height;
+
+ return 0;
+}
+
+
+static void CALLBACK uv_tty_post_raw_read(void* data, BOOLEAN didTimeout) {
+ uv_loop_t* loop;
+ uv_tty_t* handle;
+ uv_req_t* req;
+
+ assert(data);
+ assert(!didTimeout);
+
+ req = (uv_req_t*) data;
+ handle = (uv_tty_t*) req->data;
+ loop = handle->loop;
+
+ UnregisterWait(handle->tty.rd.read_raw_wait);
+ handle->tty.rd.read_raw_wait = NULL;
+
+ SET_REQ_SUCCESS(req);
+ POST_COMPLETION_FOR_REQ(loop, req);
+}
+
+
+static void uv_tty_queue_read_raw(uv_loop_t* loop, uv_tty_t* handle) {
+ uv_read_t* req;
+ BOOL r;
+
+ assert(handle->flags & UV_HANDLE_READING);
+ assert(!(handle->flags & UV_HANDLE_READ_PENDING));
+
+ assert(handle->handle && handle->handle != INVALID_HANDLE_VALUE);
+
+ handle->tty.rd.read_line_buffer = uv_null_buf_;
+
+ req = &handle->read_req;
+ memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));
+
+ r = RegisterWaitForSingleObject(&handle->tty.rd.read_raw_wait,
+ handle->handle,
+ uv_tty_post_raw_read,
+ (void*) req,
+ INFINITE,
+ WT_EXECUTEINWAITTHREAD | WT_EXECUTEONLYONCE);
+ if (!r) {
+ handle->tty.rd.read_raw_wait = NULL;
+ SET_REQ_ERROR(req, GetLastError());
+ uv_insert_pending_req(loop, (uv_req_t*)req);
+ }
+
+ handle->flags |= UV_HANDLE_READ_PENDING;
+ handle->reqs_pending++;
+}
+
+
+static DWORD CALLBACK uv_tty_line_read_thread(void* data) {
+ uv_loop_t* loop;
+ uv_tty_t* handle;
+ uv_req_t* req;
+ DWORD bytes, read_bytes;
+ WCHAR utf16[MAX_INPUT_BUFFER_LENGTH / 3];
+ DWORD chars, read_chars;
+ LONG status;
+ COORD pos;
+ BOOL read_console_success;
+
+ assert(data);
+
+ req = (uv_req_t*) data;
+ handle = (uv_tty_t*) req->data;
+ loop = handle->loop;
+
+ assert(handle->tty.rd.read_line_buffer.base != NULL);
+ assert(handle->tty.rd.read_line_buffer.len > 0);
+
+ /* ReadConsole can't handle big buffers. */
+ if (handle->tty.rd.read_line_buffer.len < MAX_INPUT_BUFFER_LENGTH) {
+ bytes = handle->tty.rd.read_line_buffer.len;
+ } else {
+ bytes = MAX_INPUT_BUFFER_LENGTH;
+ }
+
+ /* At last, unicode! One utf-16 codeunit never takes more than 3 utf-8
+ * codeunits to encode. */
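+ /* Reading at most bytes / 3 utf-16 code units therefore guarantees that
+ the utf-8 conversion below fits in the caller's buffer. */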
+ chars = bytes / 3;
+
+ status = InterlockedExchange(&uv__read_console_status, IN_PROGRESS);
+ if (status == TRAP_REQUESTED) {
+ SET_REQ_SUCCESS(req);
+ InterlockedExchange(&uv__read_console_status, COMPLETED);
+ req->u.io.overlapped.InternalHigh = 0;
+ POST_COMPLETION_FOR_REQ(loop, req);
+ return 0;
+ }
+
+ read_console_success = ReadConsoleW(handle->handle,
+ (void*) utf16,
+ chars,
+ &read_chars,
+ NULL);
+
+ if (read_console_success) {
+ read_bytes = WideCharToMultiByte(CP_UTF8,
+ 0,
+ utf16,
+ read_chars,
+ handle->tty.rd.read_line_buffer.base,
+ bytes,
+ NULL,
+ NULL);
+ SET_REQ_SUCCESS(req);
+ req->u.io.overlapped.InternalHigh = read_bytes;
+ } else {
+ SET_REQ_ERROR(req, GetLastError());
+ }
+
+ status = InterlockedExchange(&uv__read_console_status, COMPLETED);
+
+ if (status == TRAP_REQUESTED) {
+ /* If we canceled the read by sending a VK_RETURN event, restore the
+ screen state to undo the visual effect of the VK_RETURN */
+ if (read_console_success && InterlockedOr(&uv__restore_screen_state, 0)) {
+ HANDLE active_screen_buffer;
+ active_screen_buffer = CreateFileA("conout$",
+ GENERIC_READ | GENERIC_WRITE,
+ FILE_SHARE_READ | FILE_SHARE_WRITE,
+ NULL,
+ OPEN_EXISTING,
+ FILE_ATTRIBUTE_NORMAL,
+ NULL);
+ if (active_screen_buffer != INVALID_HANDLE_VALUE) {
+ pos = uv__saved_screen_state.dwCursorPosition;
+
+ /* If the cursor was at the bottom line of the screen buffer, the
+ VK_RETURN would have caused the buffer contents to scroll up by one
+ line. The right position to reset the cursor to is therefore one line
+ higher */
+ if (pos.Y == uv__saved_screen_state.dwSize.Y - 1)
+ pos.Y--;
+
+ SetConsoleCursorPosition(active_screen_buffer, pos);
+ CloseHandle(active_screen_buffer);
+ }
+ }
+ uv_sem_post(&uv_tty_output_lock);
+ }
+ POST_COMPLETION_FOR_REQ(loop, req);
+ return 0;
+}
+
+
+static void uv_tty_queue_read_line(uv_loop_t* loop, uv_tty_t* handle) {
+ uv_read_t* req;
+ BOOL r;
+
+ assert(handle->flags & UV_HANDLE_READING);
+ assert(!(handle->flags & UV_HANDLE_READ_PENDING));
+ assert(handle->handle && handle->handle != INVALID_HANDLE_VALUE);
+
+ req = &handle->read_req;
+ memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));
+
+ handle->tty.rd.read_line_buffer = uv_buf_init(NULL, 0);
+ handle->alloc_cb((uv_handle_t*) handle, 8192, &handle->tty.rd.read_line_buffer);
+ if (handle->tty.rd.read_line_buffer.base == NULL ||
+ handle->tty.rd.read_line_buffer.len == 0) {
+ handle->read_cb((uv_stream_t*) handle,
+ UV_ENOBUFS,
+ &handle->tty.rd.read_line_buffer);
+ return;
+ }
+ assert(handle->tty.rd.read_line_buffer.base != NULL);
+
+ /* Reset flags. No locking is required since there cannot be a line read
+ in progress. We are also relying on the memory barrier provided by
+ QueueUserWorkItem. */
+ uv__restore_screen_state = FALSE;
+ uv__read_console_status = NOT_STARTED;
+ r = QueueUserWorkItem(uv_tty_line_read_thread,
+ (void*) req,
+ WT_EXECUTELONGFUNCTION);
+ if (!r) {
+ SET_REQ_ERROR(req, GetLastError());
+ uv_insert_pending_req(loop, (uv_req_t*)req);
+ }
+
+ handle->flags |= UV_HANDLE_READ_PENDING;
+ handle->reqs_pending++;
+}
+
+
+static void uv_tty_queue_read(uv_loop_t* loop, uv_tty_t* handle) {
+ if (handle->flags & UV_HANDLE_TTY_RAW) {
+ uv_tty_queue_read_raw(loop, handle);
+ } else {
+ uv_tty_queue_read_line(loop, handle);
+ }
+}
+
+
+static const char* get_vt100_fn_key(DWORD code, char shift, char ctrl,
+ size_t* len) {
+#define VK_CASE(vk, normal_str, shift_str, ctrl_str, shift_ctrl_str) \
+ case (vk): \
+ if (shift && ctrl) { \
+ *len = sizeof shift_ctrl_str; \
+ return "\033" shift_ctrl_str; \
+ } else if (shift) { \
+ *len = sizeof shift_str ; \
+ return "\033" shift_str; \
+ } else if (ctrl) { \
+ *len = sizeof ctrl_str; \
+ return "\033" ctrl_str; \
+ } else { \
+ *len = sizeof normal_str; \
+ return "\033" normal_str; \
+ }
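+
+ /* Note: each VK_CASE sets *len to the sizeof of the bare string, which
+ equals strlen() of the returned string: the NUL terminator counted by
+ sizeof makes room for the "\033" prepended in the macro. */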
+
+ switch (code) {
+ /* These mappings are the same as Cygwin's. Unmodified and alt-modified
+ * keypad keys comply with linux console, modifiers comply with xterm
+ * modifier usage. F1..F12 and shift-F1..F10 comply with linux console,
+ * F6..F12 with and without modifiers comply with rxvt. */
+ VK_CASE(VK_INSERT, "[2~", "[2;2~", "[2;5~", "[2;6~")
+ VK_CASE(VK_END, "[4~", "[4;2~", "[4;5~", "[4;6~")
+ VK_CASE(VK_DOWN, "[B", "[1;2B", "[1;5B", "[1;6B")
+ VK_CASE(VK_NEXT, "[6~", "[6;2~", "[6;5~", "[6;6~")
+ VK_CASE(VK_LEFT, "[D", "[1;2D", "[1;5D", "[1;6D")
+ VK_CASE(VK_CLEAR, "[G", "[1;2G", "[1;5G", "[1;6G")
+ VK_CASE(VK_RIGHT, "[C", "[1;2C", "[1;5C", "[1;6C")
+ VK_CASE(VK_UP, "[A", "[1;2A", "[1;5A", "[1;6A")
+ VK_CASE(VK_HOME, "[1~", "[1;2~", "[1;5~", "[1;6~")
+ VK_CASE(VK_PRIOR, "[5~", "[5;2~", "[5;5~", "[5;6~")
+ VK_CASE(VK_DELETE, "[3~", "[3;2~", "[3;5~", "[3;6~")
+ VK_CASE(VK_NUMPAD0, "[2~", "[2;2~", "[2;5~", "[2;6~")
+ VK_CASE(VK_NUMPAD1, "[4~", "[4;2~", "[4;5~", "[4;6~")
+ VK_CASE(VK_NUMPAD2, "[B", "[1;2B", "[1;5B", "[1;6B")
+ VK_CASE(VK_NUMPAD3, "[6~", "[6;2~", "[6;5~", "[6;6~")
+ VK_CASE(VK_NUMPAD4, "[D", "[1;2D", "[1;5D", "[1;6D")
+ VK_CASE(VK_NUMPAD5, "[G", "[1;2G", "[1;5G", "[1;6G")
+ VK_CASE(VK_NUMPAD6, "[C", "[1;2C", "[1;5C", "[1;6C")
+ VK_CASE(VK_NUMPAD7, "[A", "[1;2A", "[1;5A", "[1;6A")
+ VK_CASE(VK_NUMPAD8, "[1~", "[1;2~", "[1;5~", "[1;6~")
+ VK_CASE(VK_NUMPAD9, "[5~", "[5;2~", "[5;5~", "[5;6~")
+ VK_CASE(VK_DECIMAL, "[3~", "[3;2~", "[3;5~", "[3;6~")
+ VK_CASE(VK_F1, "[[A", "[23~", "[11^", "[23^" )
+ VK_CASE(VK_F2, "[[B", "[24~", "[12^", "[24^" )
+ VK_CASE(VK_F3, "[[C", "[25~", "[13^", "[25^" )
+ VK_CASE(VK_F4, "[[D", "[26~", "[14^", "[26^" )
+ VK_CASE(VK_F5, "[[E", "[28~", "[15^", "[28^" )
+ VK_CASE(VK_F6, "[17~", "[29~", "[17^", "[29^" )
+ VK_CASE(VK_F7, "[18~", "[31~", "[18^", "[31^" )
+ VK_CASE(VK_F8, "[19~", "[32~", "[19^", "[32^" )
+ VK_CASE(VK_F9, "[20~", "[33~", "[20^", "[33^" )
+ VK_CASE(VK_F10, "[21~", "[34~", "[21^", "[34^" )
+ VK_CASE(VK_F11, "[23~", "[23$", "[23^", "[23@" )
+ VK_CASE(VK_F12, "[24~", "[24$", "[24^", "[24@" )
+
+ default:
+ *len = 0;
+ return NULL;
+ }
+#undef VK_CASE
+}
+
+
+void uv_process_tty_read_raw_req(uv_loop_t* loop, uv_tty_t* handle,
+ uv_req_t* req) {
+ /* Shortcut for handle->tty.rd.last_input_record.Event.KeyEvent. */
+#define KEV handle->tty.rd.last_input_record.Event.KeyEvent
+
+ DWORD records_left, records_read;
+ uv_buf_t buf;
+ off_t buf_used;
+
+ assert(handle->type == UV_TTY);
+ assert(handle->flags & UV_HANDLE_TTY_READABLE);
+ handle->flags &= ~UV_HANDLE_READ_PENDING;
+
+ if (!(handle->flags & UV_HANDLE_READING) ||
+ !(handle->flags & UV_HANDLE_TTY_RAW)) {
+ goto out;
+ }
+
+ if (!REQ_SUCCESS(req)) {
+ /* An error occurred while waiting for the event. */
+ if ((handle->flags & UV_HANDLE_READING)) {
+ handle->flags &= ~UV_HANDLE_READING;
+ handle->read_cb((uv_stream_t*)handle,
+ uv_translate_sys_error(GET_REQ_ERROR(req)),
+ &uv_null_buf_);
+ }
+ goto out;
+ }
+
+ /* Fetch the number of events */
+ if (!GetNumberOfConsoleInputEvents(handle->handle, &records_left)) {
+ handle->flags &= ~UV_HANDLE_READING;
+ DECREASE_ACTIVE_COUNT(loop, handle);
+ handle->read_cb((uv_stream_t*)handle,
+ uv_translate_sys_error(GetLastError()),
+ &uv_null_buf_);
+ goto out;
+ }
+
+ /* Windows sends a lot of events that we're not interested in, so buf will be
+ * allocated on demand, when there's actually something to emit. */
+ buf = uv_null_buf_;
+ buf_used = 0;
+
+ while ((records_left > 0 || handle->tty.rd.last_key_len > 0) &&
+ (handle->flags & UV_HANDLE_READING)) {
+ if (handle->tty.rd.last_key_len == 0) {
+ /* Read the next input record */
+ if (!ReadConsoleInputW(handle->handle,
+ &handle->tty.rd.last_input_record,
+ 1,
+ &records_read)) {
+ handle->flags &= ~UV_HANDLE_READING;
+ DECREASE_ACTIVE_COUNT(loop, handle);
+ handle->read_cb((uv_stream_t*) handle,
+ uv_translate_sys_error(GetLastError()),
+ &buf);
+ goto out;
+ }
+ records_left--;
+
+ /* We might not be subscribed to EVENT_CONSOLE_LAYOUT or we might be
+ * running under some TTY emulator that does not send those events. */
+ if (handle->tty.rd.last_input_record.EventType == WINDOW_BUFFER_SIZE_EVENT) {
+ uv__tty_console_signal_resize();
+ }
+
+ /* Ignore other events that are not key events. */
+ if (handle->tty.rd.last_input_record.EventType != KEY_EVENT) {
+ continue;
+ }
+
+ /* Ignore keyup events, unless the left alt key was held and a valid
+ * unicode character was emitted. */
+ if (!KEV.bKeyDown &&
+ (KEV.wVirtualKeyCode != VK_MENU ||
+ KEV.uChar.UnicodeChar == 0)) {
+ continue;
+ }
+
+ /* Ignore keypresses on numpad number keys if the left alt key is held,
+ * because the user is composing a character, or Windows is simulating
+ * this. */
+ if ((KEV.dwControlKeyState & LEFT_ALT_PRESSED) &&
+ !(KEV.dwControlKeyState & ENHANCED_KEY) &&
+ (KEV.wVirtualKeyCode == VK_INSERT ||
+ KEV.wVirtualKeyCode == VK_END ||
+ KEV.wVirtualKeyCode == VK_DOWN ||
+ KEV.wVirtualKeyCode == VK_NEXT ||
+ KEV.wVirtualKeyCode == VK_LEFT ||
+ KEV.wVirtualKeyCode == VK_CLEAR ||
+ KEV.wVirtualKeyCode == VK_RIGHT ||
+ KEV.wVirtualKeyCode == VK_HOME ||
+ KEV.wVirtualKeyCode == VK_UP ||
+ KEV.wVirtualKeyCode == VK_PRIOR ||
+ KEV.wVirtualKeyCode == VK_NUMPAD0 ||
+ KEV.wVirtualKeyCode == VK_NUMPAD1 ||
+ KEV.wVirtualKeyCode == VK_NUMPAD2 ||
+ KEV.wVirtualKeyCode == VK_NUMPAD3 ||
+ KEV.wVirtualKeyCode == VK_NUMPAD4 ||
+ KEV.wVirtualKeyCode == VK_NUMPAD5 ||
+ KEV.wVirtualKeyCode == VK_NUMPAD6 ||
+ KEV.wVirtualKeyCode == VK_NUMPAD7 ||
+ KEV.wVirtualKeyCode == VK_NUMPAD8 ||
+ KEV.wVirtualKeyCode == VK_NUMPAD9)) {
+ continue;
+ }
+
+ if (KEV.uChar.UnicodeChar != 0) {
+ int prefix_len, char_len;
+
+ /* Character key pressed */
+ if (KEV.uChar.UnicodeChar >= 0xD800 &&
+ KEV.uChar.UnicodeChar < 0xDC00) {
+ /* UTF-16 high surrogate */
+ handle->tty.rd.last_utf16_high_surrogate = KEV.uChar.UnicodeChar;
+ continue;
+ }
+
+ /* Prefix with \033 if alt was held, but alt was not used as part of a
+ * compose sequence. */
+ if ((KEV.dwControlKeyState & (LEFT_ALT_PRESSED | RIGHT_ALT_PRESSED))
+ && !(KEV.dwControlKeyState & (LEFT_CTRL_PRESSED |
+ RIGHT_CTRL_PRESSED)) && KEV.bKeyDown) {
+ handle->tty.rd.last_key[0] = '\033';
+ prefix_len = 1;
+ } else {
+ prefix_len = 0;
+ }
+
+ if (KEV.uChar.UnicodeChar >= 0xDC00 &&
+ KEV.uChar.UnicodeChar < 0xE000) {
+ /* UTF-16 surrogate pair */
+ WCHAR utf16_buffer[2];
+ utf16_buffer[0] = handle->tty.rd.last_utf16_high_surrogate;
+ utf16_buffer[1] = KEV.uChar.UnicodeChar;
+ char_len = WideCharToMultiByte(CP_UTF8,
+ 0,
+ utf16_buffer,
+ 2,
+ &handle->tty.rd.last_key[prefix_len],
+ sizeof handle->tty.rd.last_key,
+ NULL,
+ NULL);
+ } else {
+ /* Single UTF-16 character */
+ char_len = WideCharToMultiByte(CP_UTF8,
+ 0,
+ &KEV.uChar.UnicodeChar,
+ 1,
+ &handle->tty.rd.last_key[prefix_len],
+ sizeof handle->tty.rd.last_key,
+ NULL,
+ NULL);
+ }
+
+ /* Whatever happened, the last character wasn't a high surrogate. */
+ handle->tty.rd.last_utf16_high_surrogate = 0;
+
+ /* If the utf16 character(s) couldn't be converted something must be
+ * wrong. */
+ if (!char_len) {
+ handle->flags &= ~UV_HANDLE_READING;
+ DECREASE_ACTIVE_COUNT(loop, handle);
+ handle->read_cb((uv_stream_t*) handle,
+ uv_translate_sys_error(GetLastError()),
+ &buf);
+ goto out;
+ }
+
+ handle->tty.rd.last_key_len = (unsigned char) (prefix_len + char_len);
+ handle->tty.rd.last_key_offset = 0;
+ continue;
+
+ } else {
+ /* Function key pressed */
+ const char* vt100;
+ size_t prefix_len, vt100_len;
+
+ vt100 = get_vt100_fn_key(KEV.wVirtualKeyCode,
+ !!(KEV.dwControlKeyState & SHIFT_PRESSED),
+ !!(KEV.dwControlKeyState & (
+ LEFT_CTRL_PRESSED |
+ RIGHT_CTRL_PRESSED)),
+ &vt100_len);
+
+ /* If we were unable to map to a vt100 sequence, just ignore. */
+ if (!vt100) {
+ continue;
+ }
+
+ /* Prefix with \033 when the alt key was held. */
+ if (KEV.dwControlKeyState & (LEFT_ALT_PRESSED | RIGHT_ALT_PRESSED)) {
+ handle->tty.rd.last_key[0] = '\033';
+ prefix_len = 1;
+ } else {
+ prefix_len = 0;
+ }
+
+ /* Copy the vt100 sequence to the handle buffer. */
+ assert(prefix_len + vt100_len < sizeof handle->tty.rd.last_key);
+ memcpy(&handle->tty.rd.last_key[prefix_len], vt100, vt100_len);
+
+ handle->tty.rd.last_key_len = (unsigned char) (prefix_len + vt100_len);
+ handle->tty.rd.last_key_offset = 0;
+ continue;
+ }
+ } else {
+ /* Copy any bytes left from the last keypress to the user buffer. */
+ if (handle->tty.rd.last_key_offset < handle->tty.rd.last_key_len) {
+ /* Allocate a buffer if needed */
+ if (buf_used == 0) {
+ buf = uv_buf_init(NULL, 0);
+ handle->alloc_cb((uv_handle_t*) handle, 1024, &buf);
+ if (buf.base == NULL || buf.len == 0) {
+ handle->read_cb((uv_stream_t*) handle, UV_ENOBUFS, &buf);
+ goto out;
+ }
+ assert(buf.base != NULL);
+ }
+
+ buf.base[buf_used++] = handle->tty.rd.last_key[handle->tty.rd.last_key_offset++];
+
+ /* If the buffer is full, emit it */
+ if ((size_t) buf_used == buf.len) {
+ handle->read_cb((uv_stream_t*) handle, buf_used, &buf);
+ buf = uv_null_buf_;
+ buf_used = 0;
+ }
+
+ continue;
+ }
+
+ /* Apply wRepeatCount from the last input record. */
+ if (--KEV.wRepeatCount > 0) {
+ handle->tty.rd.last_key_offset = 0;
+ continue;
+ }
+
+ handle->tty.rd.last_key_len = 0;
+ continue;
+ }
+ }
+
+ /* Send the buffer back to the user */
+ if (buf_used > 0) {
+ handle->read_cb((uv_stream_t*) handle, buf_used, &buf);
+ }
+
+ out:
+ /* Wait for more input events. */
+ if ((handle->flags & UV_HANDLE_READING) &&
+ !(handle->flags & UV_HANDLE_READ_PENDING)) {
+ uv_tty_queue_read(loop, handle);
+ }
+
+ DECREASE_PENDING_REQ_COUNT(handle);
+
+#undef KEV
+}
+
+
+
+void uv_process_tty_read_line_req(uv_loop_t* loop, uv_tty_t* handle,
+ uv_req_t* req) {
+ uv_buf_t buf;
+
+ assert(handle->type == UV_TTY);
+ assert(handle->flags & UV_HANDLE_TTY_READABLE);
+
+ buf = handle->tty.rd.read_line_buffer;
+
+ handle->flags &= ~UV_HANDLE_READ_PENDING;
+ handle->tty.rd.read_line_buffer = uv_null_buf_;
+
+ if (!REQ_SUCCESS(req)) {
+ /* Read was not successful */
+ if (handle->flags & UV_HANDLE_READING) {
+ /* Real error */
+ handle->flags &= ~UV_HANDLE_READING;
+ DECREASE_ACTIVE_COUNT(loop, handle);
+ handle->read_cb((uv_stream_t*) handle,
+ uv_translate_sys_error(GET_REQ_ERROR(req)),
+ &buf);
+ }
+ } else {
+ if (!(handle->flags & UV_HANDLE_CANCELLATION_PENDING) &&
+ req->u.io.overlapped.InternalHigh != 0) {
+ /* Read successful. TODO: read unicode, convert to utf-8 */
+ DWORD bytes = req->u.io.overlapped.InternalHigh;
+ handle->read_cb((uv_stream_t*) handle, bytes, &buf);
+ }
+ handle->flags &= ~UV_HANDLE_CANCELLATION_PENDING;
+ }
+
+ /* Wait for more input events. */
+ if ((handle->flags & UV_HANDLE_READING) &&
+ !(handle->flags & UV_HANDLE_READ_PENDING)) {
+ uv_tty_queue_read(loop, handle);
+ }
+
+ DECREASE_PENDING_REQ_COUNT(handle);
+}
+
+
+void uv_process_tty_read_req(uv_loop_t* loop, uv_tty_t* handle,
+ uv_req_t* req) {
+ assert(handle->type == UV_TTY);
+ assert(handle->flags & UV_HANDLE_TTY_READABLE);
+
+ /* If the read_line_buffer length is zero, it must have been a raw read.
+ * Otherwise it was a line-buffered read. FIXME: This is quite obscure. Use a
+ * flag or something. */
+ if (handle->tty.rd.read_line_buffer.len == 0) {
+ uv_process_tty_read_raw_req(loop, handle, req);
+ } else {
+ uv_process_tty_read_line_req(loop, handle, req);
+ }
+}
+
+
+int uv_tty_read_start(uv_tty_t* handle, uv_alloc_cb alloc_cb,
+ uv_read_cb read_cb) {
+ uv_loop_t* loop = handle->loop;
+
+ if (!(handle->flags & UV_HANDLE_TTY_READABLE)) {
+ return ERROR_INVALID_PARAMETER;
+ }
+
+ handle->flags |= UV_HANDLE_READING;
+ INCREASE_ACTIVE_COUNT(loop, handle);
+ handle->read_cb = read_cb;
+ handle->alloc_cb = alloc_cb;
+
+ /* If reading was stopped and then started again, there could still be a read
+ * request pending. */
+ if (handle->flags & UV_HANDLE_READ_PENDING) {
+ return 0;
+ }
+
+ /* Maybe the user stopped reading half-way while processing key events.
+ * Short-circuit if this could be the case. */
+ if (handle->tty.rd.last_key_len > 0) {
+ SET_REQ_SUCCESS(&handle->read_req);
+ uv_insert_pending_req(handle->loop, (uv_req_t*) &handle->read_req);
+ /* Make sure no attempt is made to insert it again until it's handled. */
+ handle->flags |= UV_HANDLE_READ_PENDING;
+ handle->reqs_pending++;
+ return 0;
+ }
+
+ uv_tty_queue_read(loop, handle);
+
+ return 0;
+}
+
+
+int uv_tty_read_stop(uv_tty_t* handle) {
+ INPUT_RECORD record;
+ DWORD written, err;
+
+ handle->flags &= ~UV_HANDLE_READING;
+ DECREASE_ACTIVE_COUNT(handle->loop, handle);
+
+ if (!(handle->flags & UV_HANDLE_READ_PENDING))
+ return 0;
+
+ if (handle->flags & UV_HANDLE_TTY_RAW) {
+ /* Cancel raw read. Write a dummy event to force the console wait to
+ * return. */
+ memset(&record, 0, sizeof record);
+ record.EventType = FOCUS_EVENT;
+ if (!WriteConsoleInputW(handle->handle, &record, 1, &written)) {
+ return GetLastError();
+ }
+ } else if (!(handle->flags & UV_HANDLE_CANCELLATION_PENDING)) {
+ /* Cancel line-buffered read if not already pending */
+ err = uv__cancel_read_console(handle);
+ if (err)
+ return err;
+
+ handle->flags |= UV_HANDLE_CANCELLATION_PENDING;
+ }
+
+ return 0;
+}
+
+static int uv__cancel_read_console(uv_tty_t* handle) {
+ HANDLE active_screen_buffer = INVALID_HANDLE_VALUE;
+ INPUT_RECORD record;
+ DWORD written;
+ DWORD err = 0;
+ LONG status;
+
+ assert(!(handle->flags & UV_HANDLE_CANCELLATION_PENDING));
+
+ /* Hold the output lock during the cancellation, to ensure that further
+ writes don't interfere with the screen state. It will be the ReadConsole
+ thread's responsibility to release the lock. */
+ uv_sem_wait(&uv_tty_output_lock);
+ status = InterlockedExchange(&uv__read_console_status, TRAP_REQUESTED);
+ if (status != IN_PROGRESS) {
+ /* Either we have managed to set a trap for the other thread before
+ ReadConsole is called, or ReadConsole has returned because the user
+ has pressed ENTER. In either case, there is nothing else to do. */
+ uv_sem_post(&uv_tty_output_lock);
+ return 0;
+ }
+
+ /* Save screen state before sending the VK_RETURN event */
+ active_screen_buffer = CreateFileA("conout$",
+ GENERIC_READ | GENERIC_WRITE,
+ FILE_SHARE_READ | FILE_SHARE_WRITE,
+ NULL,
+ OPEN_EXISTING,
+ FILE_ATTRIBUTE_NORMAL,
+ NULL);
+
+ if (active_screen_buffer != INVALID_HANDLE_VALUE &&
+ GetConsoleScreenBufferInfo(active_screen_buffer,
+ &uv__saved_screen_state)) {
+ InterlockedOr(&uv__restore_screen_state, 1);
+ }
+
+ /* Write enter key event to force the console wait to return. */
+ record.EventType = KEY_EVENT;
+ record.Event.KeyEvent.bKeyDown = TRUE;
+ record.Event.KeyEvent.wRepeatCount = 1;
+ record.Event.KeyEvent.wVirtualKeyCode = VK_RETURN;
+ record.Event.KeyEvent.wVirtualScanCode =
+ MapVirtualKeyW(VK_RETURN, MAPVK_VK_TO_VSC);
+ record.Event.KeyEvent.uChar.UnicodeChar = L'\r';
+ record.Event.KeyEvent.dwControlKeyState = 0;
+ if (!WriteConsoleInputW(handle->handle, &record, 1, &written))
+ err = GetLastError();
+
+ if (active_screen_buffer != INVALID_HANDLE_VALUE)
+ CloseHandle(active_screen_buffer);
+
+ return err;
+}
+
+
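+/* Recompute the shared virtual window metrics from the current screen buffer
+ * info: the width tracks the buffer width, the height tracks the visible
+ * window, and the offset is moved down when the cursor falls below the
+ * window and clamped to the buffer bounds. */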
+static void uv_tty_update_virtual_window(CONSOLE_SCREEN_BUFFER_INFO* info) {
+ uv_tty_virtual_width = info->dwSize.X;
+ uv_tty_virtual_height = info->srWindow.Bottom - info->srWindow.Top + 1;
+
+ /* Recompute virtual window offset row. */
+ if (uv_tty_virtual_offset == -1) {
+ uv_tty_virtual_offset = info->dwCursorPosition.Y;
+ } else if (uv_tty_virtual_offset < info->dwCursorPosition.Y -
+ uv_tty_virtual_height + 1) {
+ /* If we suddenly find the cursor outside of the virtual window, it must have
+ * somehow scrolled. Update the virtual window offset. */
+ uv_tty_virtual_offset = info->dwCursorPosition.Y -
+ uv_tty_virtual_height + 1;
+ }
+ if (uv_tty_virtual_offset + uv_tty_virtual_height > info->dwSize.Y) {
+ uv_tty_virtual_offset = info->dwSize.Y - uv_tty_virtual_height;
+ }
+ if (uv_tty_virtual_offset < 0) {
+ uv_tty_virtual_offset = 0;
+ }
+}
+
+
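+/* Translate (possibly relative) virtual-window coordinates into a screen
+ * buffer COORD, clipping to the virtual window. For example, an absolute
+ * move to (0, 0) resolves to column 0 of buffer row uv_tty_virtual_offset,
+ * the top of the virtual window. */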
+static COORD uv_tty_make_real_coord(uv_tty_t* handle,
+ CONSOLE_SCREEN_BUFFER_INFO* info, int x, unsigned char x_relative, int y,
+ unsigned char y_relative) {
+ COORD result;
+
+ uv_tty_update_virtual_window(info);
+
+ /* Adjust y position */
+ if (y_relative) {
+ y = info->dwCursorPosition.Y + y;
+ } else {
+ y = uv_tty_virtual_offset + y;
+ }
+ /* Clip y to virtual client rectangle */
+ if (y < uv_tty_virtual_offset) {
+ y = uv_tty_virtual_offset;
+ } else if (y >= uv_tty_virtual_offset + uv_tty_virtual_height) {
+ y = uv_tty_virtual_offset + uv_tty_virtual_height - 1;
+ }
+
+ /* Adjust x */
+ if (x_relative) {
+ x = info->dwCursorPosition.X + x;
+ }
+ /* Clip x */
+ if (x < 0) {
+ x = 0;
+ } else if (x >= uv_tty_virtual_width) {
+ x = uv_tty_virtual_width - 1;
+ }
+
+ result.X = (unsigned short) x;
+ result.Y = (unsigned short) y;
+ return result;
+}
+
+
+static int uv_tty_emit_text(uv_tty_t* handle, WCHAR buffer[], DWORD length,
+ DWORD* error) {
+ DWORD written;
+
+ if (*error != ERROR_SUCCESS) {
+ return -1;
+ }
+
+ if (!WriteConsoleW(handle->handle,
+ (void*) buffer,
+ length,
+ &written,
+ NULL)) {
+ *error = GetLastError();
+ return -1;
+ }
+
+ return 0;
+}
+
+
+static int uv_tty_move_caret(uv_tty_t* handle, int x, unsigned char x_relative,
+ int y, unsigned char y_relative, DWORD* error) {
+ CONSOLE_SCREEN_BUFFER_INFO info;
+ COORD pos;
+
+ if (*error != ERROR_SUCCESS) {
+ return -1;
+ }
+
+ retry:
+ if (!GetConsoleScreenBufferInfo(handle->handle, &info)) {
+ *error = GetLastError();
+ }
+
+ pos = uv_tty_make_real_coord(handle, &info, x, x_relative, y, y_relative);
+
+ if (!SetConsoleCursorPosition(handle->handle, pos)) {
+ if (GetLastError() == ERROR_INVALID_PARAMETER) {
+ /* The console may be resized - retry */
+ goto retry;
+ } else {
+ *error = GetLastError();
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+
+static int uv_tty_reset(uv_tty_t* handle, DWORD* error) {
+ const COORD origin = {0, 0};
+ const WORD char_attrs = uv_tty_default_text_attributes;
+ CONSOLE_SCREEN_BUFFER_INFO screen_buffer_info;
+ DWORD count, written;
+
+ if (*error != ERROR_SUCCESS) {
+ return -1;
+ }
+
+ /* Reset original text attributes. */
+ if (!SetConsoleTextAttribute(handle->handle, char_attrs)) {
+ *error = GetLastError();
+ return -1;
+ }
+
+ /* Move the cursor position to (0, 0). */
+ if (!SetConsoleCursorPosition(handle->handle, origin)) {
+ *error = GetLastError();
+ return -1;
+ }
+
+ /* Clear the screen buffer. */
+ retry:
+ if (!GetConsoleScreenBufferInfo(handle->handle, &screen_buffer_info)) {
+ *error = GetLastError();
+ return -1;
+ }
+
+ count = screen_buffer_info.dwSize.X * screen_buffer_info.dwSize.Y;
+
+ if (!(FillConsoleOutputCharacterW(handle->handle,
+ L'\x20',
+ count,
+ origin,
+ &written) &&
+ FillConsoleOutputAttribute(handle->handle,
+ char_attrs,
+ written,
+ origin,
+ &written))) {
+ if (GetLastError() == ERROR_INVALID_PARAMETER) {
+ /* The console may be resized - retry */
+ goto retry;
+ } else {
+ *error = GetLastError();
+ return -1;
+ }
+ }
+
+ /* Move the virtual window up to the top. */
+ uv_tty_virtual_offset = 0;
+ uv_tty_update_virtual_window(&screen_buffer_info);
+
+ /* Reset the cursor size and the cursor state. */
+ if (!SetConsoleCursorInfo(handle->handle, &uv_tty_default_cursor_info)) {
+ *error = GetLastError();
+ return -1;
+ }
+
+ return 0;
+}
+
+
+static int uv_tty_clear(uv_tty_t* handle, int dir, char entire_screen,
+ DWORD* error) {
+ CONSOLE_SCREEN_BUFFER_INFO info;
+ COORD start, end;
+ DWORD count, written;
+
+ int x1, x2, y1, y2;
+ int x1r, x2r, y1r, y2r;
+
+ if (*error != ERROR_SUCCESS) {
+ return -1;
+ }
+
+ if (dir == 0) {
+ /* Clear from current position */
+ x1 = 0;
+ x1r = 1;
+ } else {
+ /* Clear from column 0 */
+ x1 = 0;
+ x1r = 0;
+ }
+
+ if (dir == 1) {
+ /* Clear to current position */
+ x2 = 0;
+ x2r = 1;
+ } else {
+ /* Clear to end of row. We pretend the console is 65536 characters wide;
+ * uv_tty_make_real_coord will clip it to the actual console width. */
+ x2 = 0xffff;
+ x2r = 0;
+ }
+
+ if (!entire_screen) {
+ /* Stay on our own row */
+ y1 = y2 = 0;
+ y1r = y2r = 1;
+ } else {
+ /* Apply columns direction to row */
+ y1 = x1;
+ y1r = x1r;
+ y2 = x2;
+ y2r = x2r;
+ }
+
+ retry:
+ if (!GetConsoleScreenBufferInfo(handle->handle, &info)) {
+ *error = GetLastError();
+ return -1;
+ }
+
+ start = uv_tty_make_real_coord(handle, &info, x1, x1r, y1, y1r);
+ end = uv_tty_make_real_coord(handle, &info, x2, x2r, y2, y2r);
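+ /* Number of cells to fill, counting row-major from start to end inclusive
+ across the full buffer width. */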
+ count = (end.Y * info.dwSize.X + end.X) -
+ (start.Y * info.dwSize.X + start.X) + 1;
+
+ if (!(FillConsoleOutputCharacterW(handle->handle,
+ L'\x20',
+ count,
+ start,
+ &written) &&
+ FillConsoleOutputAttribute(handle->handle,
+ info.wAttributes,
+ written,
+ start,
+ &written))) {
+ if (GetLastError() == ERROR_INVALID_PARAMETER) {
+ /* The console may be resized - retry */
+ goto retry;
+ } else {
+ *error = GetLastError();
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+#define FLIP_FGBG \
+ do { \
+ WORD fg = info.wAttributes & 0xF; \
+ WORD bg = info.wAttributes & 0xF0; \
+ info.wAttributes &= 0xFF00; \
+ info.wAttributes |= fg << 4; \
+ info.wAttributes |= bg >> 4; \
+ } while (0)
+
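+/* Apply the parsed SGR (`ESC [ ... m`) arguments to the console text
+ * attributes. When COMMON_LVB_REVERSE_VIDEO is set, FLIP_FGBG un-swaps the
+ * color nibbles before the changes are applied and swaps them back
+ * afterwards, so the SGR parameters affect the logical foreground and
+ * background colors. */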
+static int uv_tty_set_style(uv_tty_t* handle, DWORD* error) {
+ unsigned short argc = handle->tty.wr.ansi_csi_argc;
+ unsigned short* argv = handle->tty.wr.ansi_csi_argv;
+ int i;
+ CONSOLE_SCREEN_BUFFER_INFO info;
+
+ char fg_color = -1, bg_color = -1;
+ char fg_bright = -1, bg_bright = -1;
+ char inverse = -1;
+
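+ /* Example: `ESC [ 1 ; 31 m` arrives here as argv = {1, 31}, which sets
+ fg_bright = 1 and fg_color = 1 (red), i.e. FOREGROUND_RED |
+ FOREGROUND_INTENSITY below. */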
+ if (argc == 0) {
+ /* Reset mode */
+ fg_color = uv_tty_default_fg_color;
+ bg_color = uv_tty_default_bg_color;
+ fg_bright = uv_tty_default_fg_bright;
+ bg_bright = uv_tty_default_bg_bright;
+ inverse = uv_tty_default_inverse;
+ }
+
+ for (i = 0; i < argc; i++) {
+ short arg = argv[i];
+
+ if (arg == 0) {
+ /* Reset mode */
+ fg_color = uv_tty_default_fg_color;
+ bg_color = uv_tty_default_bg_color;
+ fg_bright = uv_tty_default_fg_bright;
+ bg_bright = uv_tty_default_bg_bright;
+ inverse = uv_tty_default_inverse;
+
+ } else if (arg == 1) {
+ /* Foreground bright on */
+ fg_bright = 1;
+
+ } else if (arg == 2) {
+ /* Both bright off */
+ fg_bright = 0;
+ bg_bright = 0;
+
+ } else if (arg == 5) {
+ /* Background bright on */
+ bg_bright = 1;
+
+ } else if (arg == 7) {
+ /* Inverse: on */
+ inverse = 1;
+
+ } else if (arg == 21 || arg == 22) {
+ /* Foreground bright off */
+ fg_bright = 0;
+
+ } else if (arg == 25) {
+ /* Background bright off */
+ bg_bright = 0;
+
+ } else if (arg == 27) {
+ /* Inverse: off */
+ inverse = 0;
+
+ } else if (arg >= 30 && arg <= 37) {
+ /* Set foreground color */
+ fg_color = arg - 30;
+
+ } else if (arg == 39) {
+ /* Default text color */
+ fg_color = uv_tty_default_fg_color;
+ fg_bright = uv_tty_default_fg_bright;
+
+ } else if (arg >= 40 && arg <= 47) {
+ /* Set background color */
+ bg_color = arg - 40;
+
+ } else if (arg == 49) {
+ /* Default background color */
+ bg_color = uv_tty_default_bg_color;
+ bg_bright = uv_tty_default_bg_bright;
+
+ } else if (arg >= 90 && arg <= 97) {
+ /* Set bold foreground color */
+ fg_bright = 1;
+ fg_color = arg - 90;
+
+ } else if (arg >= 100 && arg <= 107) {
+ /* Set bold background color */
+ bg_bright = 1;
+ bg_color = arg - 100;
+
+ }
+ }
+
+ if (fg_color == -1 && bg_color == -1 && fg_bright == -1 &&
+ bg_bright == -1 && inverse == -1) {
+ /* Nothing changed */
+ return 0;
+ }
+
+ if (!GetConsoleScreenBufferInfo(handle->handle, &info)) {
+ *error = GetLastError();
+ return -1;
+ }
+
+ if ((info.wAttributes & COMMON_LVB_REVERSE_VIDEO) > 0) {
+ FLIP_FGBG;
+ }
+
+ if (fg_color != -1) {
+ info.wAttributes &= ~(FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE);
+ if (fg_color & 1) info.wAttributes |= FOREGROUND_RED;
+ if (fg_color & 2) info.wAttributes |= FOREGROUND_GREEN;
+ if (fg_color & 4) info.wAttributes |= FOREGROUND_BLUE;
+ }
+
+ if (fg_bright != -1) {
+ if (fg_bright) {
+ info.wAttributes |= FOREGROUND_INTENSITY;
+ } else {
+ info.wAttributes &= ~FOREGROUND_INTENSITY;
+ }
+ }
+
+ if (bg_color != -1) {
+ info.wAttributes &= ~(BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE);
+ if (bg_color & 1) info.wAttributes |= BACKGROUND_RED;
+ if (bg_color & 2) info.wAttributes |= BACKGROUND_GREEN;
+ if (bg_color & 4) info.wAttributes |= BACKGROUND_BLUE;
+ }
+
+ if (bg_bright != -1) {
+ if (bg_bright) {
+ info.wAttributes |= BACKGROUND_INTENSITY;
+ } else {
+ info.wAttributes &= ~BACKGROUND_INTENSITY;
+ }
+ }
+
+ if (inverse != -1) {
+ if (inverse) {
+ info.wAttributes |= COMMON_LVB_REVERSE_VIDEO;
+ } else {
+ info.wAttributes &= ~COMMON_LVB_REVERSE_VIDEO;
+ }
+ }
+
+ if ((info.wAttributes & COMMON_LVB_REVERSE_VIDEO) > 0) {
+ FLIP_FGBG;
+ }
+
+ if (!SetConsoleTextAttribute(handle->handle, info.wAttributes)) {
+ *error = GetLastError();
+ return -1;
+ }
+
+ return 0;
+}
+
+
+static int uv_tty_save_state(uv_tty_t* handle, unsigned char save_attributes,
+ DWORD* error) {
+ CONSOLE_SCREEN_BUFFER_INFO info;
+
+ if (*error != ERROR_SUCCESS) {
+ return -1;
+ }
+
+ if (!GetConsoleScreenBufferInfo(handle->handle, &info)) {
+ *error = GetLastError();
+ return -1;
+ }
+
+ uv_tty_update_virtual_window(&info);
+
+ handle->tty.wr.saved_position.X = info.dwCursorPosition.X;
+ handle->tty.wr.saved_position.Y = info.dwCursorPosition.Y - uv_tty_virtual_offset;
+ handle->flags |= UV_HANDLE_TTY_SAVED_POSITION;
+
+ if (save_attributes) {
+ handle->tty.wr.saved_attributes = info.wAttributes &
+ (FOREGROUND_INTENSITY | BACKGROUND_INTENSITY);
+ handle->flags |= UV_HANDLE_TTY_SAVED_ATTRIBUTES;
+ }
+
+ return 0;
+}
+
+
+static int uv_tty_restore_state(uv_tty_t* handle,
+ unsigned char restore_attributes, DWORD* error) {
+ CONSOLE_SCREEN_BUFFER_INFO info;
+ WORD new_attributes;
+
+ if (*error != ERROR_SUCCESS) {
+ return -1;
+ }
+
+ if (handle->flags & UV_HANDLE_TTY_SAVED_POSITION) {
+ if (uv_tty_move_caret(handle,
+ handle->tty.wr.saved_position.X,
+ 0,
+ handle->tty.wr.saved_position.Y,
+ 0,
+ error) != 0) {
+ return -1;
+ }
+ }
+
+ if (restore_attributes &&
+ (handle->flags & UV_HANDLE_TTY_SAVED_ATTRIBUTES)) {
+ if (!GetConsoleScreenBufferInfo(handle->handle, &info)) {
+ *error = GetLastError();
+ return -1;
+ }
+
+ new_attributes = info.wAttributes;
+ new_attributes &= ~(FOREGROUND_INTENSITY | BACKGROUND_INTENSITY);
+ new_attributes |= handle->tty.wr.saved_attributes;
+
+ if (!SetConsoleTextAttribute(handle->handle, new_attributes)) {
+ *error = GetLastError();
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int uv_tty_set_cursor_visibility(uv_tty_t* handle,
+ BOOL visible,
+ DWORD* error) {
+ CONSOLE_CURSOR_INFO cursor_info;
+
+ if (!GetConsoleCursorInfo(handle->handle, &cursor_info)) {
+ *error = GetLastError();
+ return -1;
+ }
+
+ cursor_info.bVisible = visible;
+
+ if (!SetConsoleCursorInfo(handle->handle, &cursor_info)) {
+ *error = GetLastError();
+ return -1;
+ }
+
+ return 0;
+}
+
+static int uv_tty_set_cursor_shape(uv_tty_t* handle, int style, DWORD* error) {
+ CONSOLE_CURSOR_INFO cursor_info;
+
+ if (!GetConsoleCursorInfo(handle->handle, &cursor_info)) {
+ *error = GetLastError();
+ return -1;
+ }
+
+ if (style == 0) {
+ cursor_info.dwSize = uv_tty_default_cursor_info.dwSize;
+ } else if (style <= 2) {
+ cursor_info.dwSize = CURSOR_SIZE_LARGE;
+ } else {
+ cursor_info.dwSize = CURSOR_SIZE_SMALL;
+ }
+
+ if (!SetConsoleCursorInfo(handle->handle, &cursor_info)) {
+ *error = GetLastError();
+ return -1;
+ }
+
+ return 0;
+}
+
+
+static int uv_tty_write_bufs(uv_tty_t* handle,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ DWORD* error) {
+ /* We can only write 8k characters at a time. Windows can't handle many more
+ * characters in a single console write anyway. */
+ WCHAR utf16_buf[MAX_CONSOLE_CHAR];
+ DWORD utf16_buf_used = 0;
+ unsigned int i;
+
+#define FLUSH_TEXT() \
+ do { \
+ if (utf16_buf_used > 0) { \
+ uv_tty_emit_text(handle, utf16_buf, utf16_buf_used, error); \
+ utf16_buf_used = 0; \
+ } \
+ } while (0)
+
+#define ENSURE_BUFFER_SPACE(wchars_needed) \
+ if (wchars_needed > ARRAY_SIZE(utf16_buf) - utf16_buf_used) { \
+ FLUSH_TEXT(); \
+ }
+
+ /* Cache for fast access */
+ unsigned char utf8_bytes_left = handle->tty.wr.utf8_bytes_left;
+ unsigned int utf8_codepoint = handle->tty.wr.utf8_codepoint;
+ unsigned char previous_eol = handle->tty.wr.previous_eol;
+ unsigned short ansi_parser_state = handle->tty.wr.ansi_parser_state;
+
+ /* Store the error here. If we encounter an error, stop trying to do i/o but
+ * keep parsing the buffer so we leave the parser in a consistent state. */
+ *error = ERROR_SUCCESS;
+
+ uv_sem_wait(&uv_tty_output_lock);
+
+ for (i = 0; i < nbufs; i++) {
+ uv_buf_t buf = bufs[i];
+ unsigned int j;
+
+ for (j = 0; j < buf.len; j++) {
+ unsigned char c = buf.base[j];
+
+ /* Run the character through the utf8 decoder. We happily accept
+ * non-shortest-form encodings and invalid code points - there's no real harm
+ * that can be done. */
+ if (utf8_bytes_left == 0) {
+ /* Read utf-8 start byte */
+ DWORD first_zero_bit;
+ unsigned char not_c = ~c;
+#ifdef _MSC_VER /* msvc */
+ if (_BitScanReverse(&first_zero_bit, not_c)) {
+#else /* assume gcc */
+ if (c != 0xff) {
+ first_zero_bit = (sizeof(int) * 8) - 1 - __builtin_clz(not_c);
+#endif
+ if (first_zero_bit == 7) {
+ /* Ascii - pass right through */
+ utf8_codepoint = (unsigned int) c;
+
+ } else if (first_zero_bit <= 5) {
+ /* Multibyte sequence */
+ utf8_codepoint = (0xff >> (8 - first_zero_bit)) & c;
+ utf8_bytes_left = (char) (6 - first_zero_bit);
+
+ } else {
+ /* Invalid continuation */
+ utf8_codepoint = UNICODE_REPLACEMENT_CHARACTER;
+ }
+
+ } else {
+ /* 0xff -- invalid */
+ utf8_codepoint = UNICODE_REPLACEMENT_CHARACTER;
+ }
+
+ } else if ((c & 0xc0) == 0x80) {
+ /* Valid continuation of utf-8 multibyte sequence */
+ utf8_bytes_left--;
+ utf8_codepoint <<= 6;
+ utf8_codepoint |= ((unsigned int) c & 0x3f);
+
+ } else {
+ /* Start byte where continuation was expected. */
+ utf8_bytes_left = 0;
+ utf8_codepoint = UNICODE_REPLACEMENT_CHARACTER;
+ /* Patch buf offset so this character will be parsed again as a start
+ * byte. */
+ j--;
+ }
+
+ /* Maybe we need to parse more bytes to find a character. */
+ if (utf8_bytes_left != 0) {
+ continue;
+ }
+
+ /* Parse vt100/ansi escape codes */
+ if (uv__vterm_state == UV_TTY_SUPPORTED) {
+ /* Pass through escape codes if conhost supports them. */
+ } else if (ansi_parser_state == ANSI_NORMAL) {
+ switch (utf8_codepoint) {
+ case '\033':
+ ansi_parser_state = ANSI_ESCAPE_SEEN;
+ continue;
+
+ case 0233:
+ ansi_parser_state = ANSI_CSI;
+ handle->tty.wr.ansi_csi_argc = 0;
+ continue;
+ }
+
+ } else if (ansi_parser_state == ANSI_ESCAPE_SEEN) {
+ switch (utf8_codepoint) {
+ case '[':
+ ansi_parser_state = ANSI_CSI;
+ handle->tty.wr.ansi_csi_argc = 0;
+ continue;
+
+ case '^':
+ case '_':
+ case 'P':
+ case ']':
+ /* Not supported, but we'll have to parse until we see a stop code,
+ * e.g. ESC \ or BEL. */
+ ansi_parser_state = ANSI_ST_CONTROL;
+ continue;
+
+ case '\033':
+ /* Ignore double escape. */
+ continue;
+
+ case 'c':
+ /* Full console reset. */
+ FLUSH_TEXT();
+ uv_tty_reset(handle, error);
+ ansi_parser_state = ANSI_NORMAL;
+ continue;
+
+ case '7':
+ /* Save the cursor position and text attributes. */
+ FLUSH_TEXT();
+ uv_tty_save_state(handle, 1, error);
+ ansi_parser_state = ANSI_NORMAL;
+ continue;
+
+ case '8':
+ /* Restore the cursor position and text attributes */
+ FLUSH_TEXT();
+ uv_tty_restore_state(handle, 1, error);
+ ansi_parser_state = ANSI_NORMAL;
+ continue;
+
+ default:
+ if (utf8_codepoint >= '@' && utf8_codepoint <= '_') {
+ /* Single-char control. */
+ ansi_parser_state = ANSI_NORMAL;
+ continue;
+ } else {
+ /* Invalid - proceed as normal. */
+ ansi_parser_state = ANSI_NORMAL;
+ }
+ }
+
+ } else if (ansi_parser_state == ANSI_IGNORE) {
+ /* We're ignoring this command. Stop only on command character. */
+ if (utf8_codepoint >= '@' && utf8_codepoint <= '~') {
+ ansi_parser_state = ANSI_NORMAL;
+ }
+ continue;
+
+ } else if (ansi_parser_state == ANSI_DECSCUSR) {
+ /* So far we've seen the sequence `ESC [ arg space`, and we're waiting for
+ * the final command byte. */
+ if (utf8_codepoint >= '@' && utf8_codepoint <= '~') {
+ /* Command byte */
+ if (utf8_codepoint == 'q') {
+ /* Change the cursor shape */
+ int style = handle->tty.wr.ansi_csi_argc
+ ? handle->tty.wr.ansi_csi_argv[0] : 1;
+ if (style >= 0 && style <= 6) {
+ FLUSH_TEXT();
+ uv_tty_set_cursor_shape(handle, style, error);
+ }
+ }
+
+ /* Sequence ended - go back to normal state. */
+ ansi_parser_state = ANSI_NORMAL;
+ continue;
+ }
+ /* Unexpected character, but sequence hasn't ended yet. Ignore the rest
+ * of the sequence. */
+ ansi_parser_state = ANSI_IGNORE;
+
+ } else if (ansi_parser_state & ANSI_CSI) {
+ /* So far we've seen `ESC [`, and we may or may not have already parsed
+ * some of the arguments that follow. */
+
+ if (utf8_codepoint >= '0' && utf8_codepoint <= '9') {
+ /* Parse a numerical argument. */
+ if (!(ansi_parser_state & ANSI_IN_ARG)) {
+ /* We were not currently parsing a number; start a new argument. */
+ /* Check whether there are too many arguments. */
+ if (handle->tty.wr.ansi_csi_argc >=
+ ARRAY_SIZE(handle->tty.wr.ansi_csi_argv)) {
+ ansi_parser_state = ANSI_IGNORE;
+ continue;
+ }
+ ansi_parser_state |= ANSI_IN_ARG;
+ handle->tty.wr.ansi_csi_argc++;
+ handle->tty.wr.ansi_csi_argv[handle->tty.wr.ansi_csi_argc - 1] =
+ (unsigned short) utf8_codepoint - '0';
+ continue;
+
+ } else {
+ /* We were already parsing a number. Parse next digit. */
+ uint32_t value = 10 *
+ handle->tty.wr.ansi_csi_argv[handle->tty.wr.ansi_csi_argc - 1];
+
+ /* Check for overflow. */
+ if (value > UINT16_MAX) {
+ ansi_parser_state = ANSI_IGNORE;
+ continue;
+ }
+
+ handle->tty.wr.ansi_csi_argv[handle->tty.wr.ansi_csi_argc - 1] =
+ (unsigned short) value + (utf8_codepoint - '0');
+ continue;
+ }
+
+ } else if (utf8_codepoint == ';') {
+ /* Denotes the end of an argument. */
+ if (ansi_parser_state & ANSI_IN_ARG) {
+ ansi_parser_state &= ~ANSI_IN_ARG;
+ continue;
+
+ } else {
+ /* If ANSI_IN_ARG is not set, add another argument and default
+ * it to 0. */
+
+ /* Check for too many arguments */
+ if (handle->tty.wr.ansi_csi_argc >=
+ ARRAY_SIZE(handle->tty.wr.ansi_csi_argv)) {
+ ansi_parser_state = ANSI_IGNORE;
+ continue;
+ }
+
+ handle->tty.wr.ansi_csi_argc++;
+ handle->tty.wr.ansi_csi_argv[handle->tty.wr.ansi_csi_argc - 1] = 0;
+ continue;
+ }
+
+ } else if (utf8_codepoint == '?' &&
+ !(ansi_parser_state & ANSI_IN_ARG) &&
+ !(ansi_parser_state & ANSI_EXTENSION) &&
+ handle->tty.wr.ansi_csi_argc == 0) {
+ /* Pass through '?' if it is the first character after CSI. This is an
+ * extension character from the VT100 codeset that is supported and used
+ * by most ANSI terminals today. */
+ ansi_parser_state |= ANSI_EXTENSION;
+ continue;
+
+ } else if (utf8_codepoint == ' ' &&
+ !(ansi_parser_state & ANSI_EXTENSION)) {
+ /* We expect a command byte to follow after this space. The only
+ * command that we currently support is 'set cursor style'. */
+ ansi_parser_state = ANSI_DECSCUSR;
+ continue;
+
+ } else if (utf8_codepoint >= '@' && utf8_codepoint <= '~') {
+ /* Command byte */
+ if (ansi_parser_state & ANSI_EXTENSION) {
+ /* Sequence is `ESC [ ? args command`. */
+ switch (utf8_codepoint) {
+ case 'l':
+ /* Hide the cursor */
+ if (handle->tty.wr.ansi_csi_argc == 1 &&
+ handle->tty.wr.ansi_csi_argv[0] == 25) {
+ FLUSH_TEXT();
+ uv_tty_set_cursor_visibility(handle, 0, error);
+ }
+ break;
+
+ case 'h':
+ /* Show the cursor */
+ if (handle->tty.wr.ansi_csi_argc == 1 &&
+ handle->tty.wr.ansi_csi_argv[0] == 25) {
+ FLUSH_TEXT();
+ uv_tty_set_cursor_visibility(handle, 1, error);
+ }
+ break;
+ }
+
+ } else {
+ /* Sequence is `ESC [ args command`. */
+ int x, y, d;
+ switch (utf8_codepoint) {
+ case 'A':
+ /* cursor up */
+ FLUSH_TEXT();
+ y = -(handle->tty.wr.ansi_csi_argc
+ ? handle->tty.wr.ansi_csi_argv[0] : 1);
+ uv_tty_move_caret(handle, 0, 1, y, 1, error);
+ break;
+
+ case 'B':
+ /* cursor down */
+ FLUSH_TEXT();
+ y = handle->tty.wr.ansi_csi_argc
+ ? handle->tty.wr.ansi_csi_argv[0] : 1;
+ uv_tty_move_caret(handle, 0, 1, y, 1, error);
+ break;
+
+ case 'C':
+ /* cursor forward */
+ FLUSH_TEXT();
+ x = handle->tty.wr.ansi_csi_argc
+ ? handle->tty.wr.ansi_csi_argv[0] : 1;
+ uv_tty_move_caret(handle, x, 1, 0, 1, error);
+ break;
+
+ case 'D':
+ /* cursor back */
+ FLUSH_TEXT();
+ x = -(handle->tty.wr.ansi_csi_argc
+ ? handle->tty.wr.ansi_csi_argv[0] : 1);
+ uv_tty_move_caret(handle, x, 1, 0, 1, error);
+ break;
+
+ case 'E':
+ /* cursor next line */
+ FLUSH_TEXT();
+ y = handle->tty.wr.ansi_csi_argc
+ ? handle->tty.wr.ansi_csi_argv[0] : 1;
+ uv_tty_move_caret(handle, 0, 0, y, 1, error);
+ break;
+
+ case 'F':
+ /* cursor previous line */
+ FLUSH_TEXT();
+ y = -(handle->tty.wr.ansi_csi_argc
+ ? handle->tty.wr.ansi_csi_argv[0] : 1);
+ uv_tty_move_caret(handle, 0, 0, y, 1, error);
+ break;
+
+ case 'G':
+ /* cursor horizontal move absolute */
+ FLUSH_TEXT();
+ x = (handle->tty.wr.ansi_csi_argc >= 1 &&
+ handle->tty.wr.ansi_csi_argv[0])
+ ? handle->tty.wr.ansi_csi_argv[0] - 1 : 0;
+ uv_tty_move_caret(handle, x, 0, 0, 1, error);
+ break;
+
+ case 'H':
+ case 'f':
+ /* cursor move absolute */
+ FLUSH_TEXT();
+ y = (handle->tty.wr.ansi_csi_argc >= 1 &&
+ handle->tty.wr.ansi_csi_argv[0])
+ ? handle->tty.wr.ansi_csi_argv[0] - 1 : 0;
+ x = (handle->tty.wr.ansi_csi_argc >= 2 &&
+ handle->tty.wr.ansi_csi_argv[1])
+ ? handle->tty.wr.ansi_csi_argv[1] - 1 : 0;
+ uv_tty_move_caret(handle, x, 0, y, 0, error);
+ break;
+
+ case 'J':
+ /* Erase screen */
+ FLUSH_TEXT();
+ d = handle->tty.wr.ansi_csi_argc
+ ? handle->tty.wr.ansi_csi_argv[0] : 0;
+ if (d >= 0 && d <= 2) {
+ uv_tty_clear(handle, d, 1, error);
+ }
+ break;
+
+ case 'K':
+ /* Erase line */
+ FLUSH_TEXT();
+ d = handle->tty.wr.ansi_csi_argc
+ ? handle->tty.wr.ansi_csi_argv[0] : 0;
+ if (d >= 0 && d <= 2) {
+ uv_tty_clear(handle, d, 0, error);
+ }
+ break;
+
+ case 'm':
+ /* Set style */
+ FLUSH_TEXT();
+ uv_tty_set_style(handle, error);
+ break;
+
+ case 's':
+ /* Save the cursor position. */
+ FLUSH_TEXT();
+ uv_tty_save_state(handle, 0, error);
+ break;
+
+ case 'u':
+ /* Restore the cursor position */
+ FLUSH_TEXT();
+ uv_tty_restore_state(handle, 0, error);
+ break;
+ }
+ }
+
+ /* Sequence ended - go back to normal state. */
+ ansi_parser_state = ANSI_NORMAL;
+ continue;
+
+ } else {
+ /* We don't support commands that use private mode characters or
+ * intermediate characters. Ignore the rest of the sequence. */
+ ansi_parser_state = ANSI_IGNORE;
+ continue;
+ }
+
+ } else if (ansi_parser_state & ANSI_ST_CONTROL) {
+ /* Unsupported control code.
+ * Ignore everything until we see `BEL` or `ESC \`. */
+ if (ansi_parser_state & ANSI_IN_STRING) {
+ if (!(ansi_parser_state & ANSI_BACKSLASH_SEEN)) {
+ if (utf8_codepoint == '"') {
+ ansi_parser_state &= ~ANSI_IN_STRING;
+ } else if (utf8_codepoint == '\\') {
+ ansi_parser_state |= ANSI_BACKSLASH_SEEN;
+ }
+ } else {
+ ansi_parser_state &= ~ANSI_BACKSLASH_SEEN;
+ }
+ } else {
+ if (utf8_codepoint == '\007' || (utf8_codepoint == '\\' &&
+ (ansi_parser_state & ANSI_ESCAPE_SEEN))) {
+ /* End of sequence */
+ ansi_parser_state = ANSI_NORMAL;
+ } else if (utf8_codepoint == '\033') {
+ /* Escape character */
+ ansi_parser_state |= ANSI_ESCAPE_SEEN;
+ } else if (utf8_codepoint == '"') {
+ /* String starting */
+ ansi_parser_state |= ANSI_IN_STRING;
+ ansi_parser_state &= ~ANSI_ESCAPE_SEEN;
+ ansi_parser_state &= ~ANSI_BACKSLASH_SEEN;
+ } else {
+ ansi_parser_state &= ~ANSI_ESCAPE_SEEN;
+ }
+ }
+ continue;
+ } else {
+ /* Inconsistent state */
+ abort();
+ }
+
+ if (utf8_codepoint == 0x0a || utf8_codepoint == 0x0d) {
+ /* EOL conversion - emit \r\n when we see \n. */
+
+ if (utf8_codepoint == 0x0a && previous_eol != 0x0d) {
+ /* \n was not preceded by \r; print \r\n. */
+ ENSURE_BUFFER_SPACE(2);
+ utf16_buf[utf16_buf_used++] = L'\r';
+ utf16_buf[utf16_buf_used++] = L'\n';
+ } else if (utf8_codepoint == 0x0d && previous_eol == 0x0a) {
+ /* \n was followed by \r; do not print the \r, since the source was
+ * either \r\n\r (so the second \r is redundant) or was \n\r (so the
+ * \n was processed by the previous case and an \r was automatically
+ * inserted). */
+ } else {
+ /* Either a \r not preceded by \n, or a \n that already followed a \r;
+ * print the character as-is. */
+ ENSURE_BUFFER_SPACE(1);
+ utf16_buf[utf16_buf_used++] = (WCHAR) utf8_codepoint;
+ }
+
+ previous_eol = (char) utf8_codepoint;
+
+ } else if (utf8_codepoint <= 0xffff) {
+ /* Encode character into utf-16 buffer. */
+ ENSURE_BUFFER_SPACE(1);
+ utf16_buf[utf16_buf_used++] = (WCHAR) utf8_codepoint;
+ previous_eol = 0;
+ } else {
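+ /* Code point above the BMP: emit a UTF-16 surrogate pair. After
+ subtracting 0x10000, the top 10 bits select the high surrogate
+ (0xD800 + value / 0x400) and the low 10 bits the low surrogate
+ (0xDC00 + value % 0x400). */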
+ ENSURE_BUFFER_SPACE(2);
+ utf8_codepoint -= 0x10000;
+ utf16_buf[utf16_buf_used++] = (WCHAR) (utf8_codepoint / 0x400 + 0xD800);
+ utf16_buf[utf16_buf_used++] = (WCHAR) (utf8_codepoint % 0x400 + 0xDC00);
+ previous_eol = 0;
+ }
+ }
+ }
+
+ /* Flush remaining characters */
+ FLUSH_TEXT();
+
+ /* Copy cached values back to struct. */
+ handle->tty.wr.utf8_bytes_left = utf8_bytes_left;
+ handle->tty.wr.utf8_codepoint = utf8_codepoint;
+ handle->tty.wr.previous_eol = previous_eol;
+ handle->tty.wr.ansi_parser_state = ansi_parser_state;
+
+ uv_sem_post(&uv_tty_output_lock);
+
+ if (*error == ERROR_SUCCESS) {
+ return 0;
+ } else {
+ return -1;
+ }
+
+#undef FLUSH_TEXT
+#undef ENSURE_BUFFER_SPACE
+}
+
+
+int uv_tty_write(uv_loop_t* loop,
+ uv_write_t* req,
+ uv_tty_t* handle,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ uv_write_cb cb) {
+ DWORD error;
+
+ UV_REQ_INIT(req, UV_WRITE);
+ req->handle = (uv_stream_t*) handle;
+ req->cb = cb;
+
+ handle->reqs_pending++;
+ handle->stream.conn.write_reqs_pending++;
+ REGISTER_HANDLE_REQ(loop, handle, req);
+
+ req->u.io.queued_bytes = 0;
+
+ if (!uv_tty_write_bufs(handle, bufs, nbufs, &error)) {
+ SET_REQ_SUCCESS(req);
+ } else {
+ SET_REQ_ERROR(req, error);
+ }
+
+ uv_insert_pending_req(loop, (uv_req_t*) req);
+
+ return 0;
+}
+
+
+int uv__tty_try_write(uv_tty_t* handle,
+ const uv_buf_t bufs[],
+ unsigned int nbufs) {
+ DWORD error;
+
+ if (handle->stream.conn.write_reqs_pending > 0)
+ return UV_EAGAIN;
+
+ if (uv_tty_write_bufs(handle, bufs, nbufs, &error))
+ return uv_translate_sys_error(error);
+
+ return uv__count_bufs(bufs, nbufs);
+}
+
+
+void uv_process_tty_write_req(uv_loop_t* loop, uv_tty_t* handle,
+ uv_write_t* req) {
+ int err;
+
+ handle->write_queue_size -= req->u.io.queued_bytes;
+ UNREGISTER_HANDLE_REQ(loop, handle, req);
+
+ if (req->cb) {
+ err = GET_REQ_ERROR(req);
+ req->cb(req, uv_translate_sys_error(err));
+ }
+
+ handle->stream.conn.write_reqs_pending--;
+ if (handle->stream.conn.shutdown_req != NULL &&
+ handle->stream.conn.write_reqs_pending == 0) {
+ uv_want_endgame(loop, (uv_handle_t*)handle);
+ }
+
+ DECREASE_PENDING_REQ_COUNT(handle);
+}
+
+
+void uv_tty_close(uv_tty_t* handle) {
+ assert(handle->u.fd == -1 || handle->u.fd > 2);
+ if (handle->flags & UV_HANDLE_READING)
+ uv_tty_read_stop(handle);
+
+ if (handle->u.fd == -1)
+ CloseHandle(handle->handle);
+ else
+ close(handle->u.fd);
+
+ handle->u.fd = -1;
+ handle->handle = INVALID_HANDLE_VALUE;
+ handle->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
+ uv__handle_closing(handle);
+
+ if (handle->reqs_pending == 0) {
+ uv_want_endgame(handle->loop, (uv_handle_t*) handle);
+ }
+}
+
+
+void uv_tty_endgame(uv_loop_t* loop, uv_tty_t* handle) {
+ if (!(handle->flags & UV_HANDLE_TTY_READABLE) &&
+ handle->stream.conn.shutdown_req != NULL &&
+ handle->stream.conn.write_reqs_pending == 0) {
+ UNREGISTER_HANDLE_REQ(loop, handle, handle->stream.conn.shutdown_req);
+
+ /* TTY shutdown is really just a no-op */
+ if (handle->stream.conn.shutdown_req->cb) {
+ if (handle->flags & UV_HANDLE_CLOSING) {
+ handle->stream.conn.shutdown_req->cb(handle->stream.conn.shutdown_req, UV_ECANCELED);
+ } else {
+ handle->stream.conn.shutdown_req->cb(handle->stream.conn.shutdown_req, 0);
+ }
+ }
+
+ handle->stream.conn.shutdown_req = NULL;
+
+ DECREASE_PENDING_REQ_COUNT(handle);
+ return;
+ }
+
+ if (handle->flags & UV_HANDLE_CLOSING &&
+ handle->reqs_pending == 0) {
+ /* The wait handle used for raw reading should be unregistered when the
+ * wait callback runs. */
+ assert(!(handle->flags & UV_HANDLE_TTY_READABLE) ||
+ handle->tty.rd.read_raw_wait == NULL);
+
+ assert(!(handle->flags & UV_HANDLE_CLOSED));
+ uv__handle_close(handle);
+ }
+}
+
+
+/*
+ * uv_process_tty_accept_req() is a stub to keep DELEGATE_STREAM_REQ working
+ * TODO: find a way to remove it
+ */
+void uv_process_tty_accept_req(uv_loop_t* loop, uv_tty_t* handle,
+ uv_req_t* raw_req) {
+ abort();
+}
+
+
+/*
+ * uv_process_tty_connect_req() is a stub to keep DELEGATE_STREAM_REQ working
+ * TODO: find a way to remove it
+ */
+void uv_process_tty_connect_req(uv_loop_t* loop, uv_tty_t* handle,
+ uv_connect_t* req) {
+ abort();
+}
+
+
+int uv_tty_reset_mode(void) {
+ /* Not necessary to do anything. */
+ return 0;
+}
+
+/* Determine whether or not this version of Windows supports proper ANSI
+ * color codes. They should be supported as of Windows 10 version 1511,
+ * build number 10.0.10586.
+ */
+static void uv__determine_vterm_state(HANDLE handle) {
+ DWORD dwMode = 0;
+
+ uv__need_check_vterm_state = FALSE;
+ if (!GetConsoleMode(handle, &dwMode)) {
+ return;
+ }
+
+ dwMode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING;
+ if (!SetConsoleMode(handle, dwMode)) {
+ return;
+ }
+
+ uv__vterm_state = UV_TTY_SUPPORTED;
+}
+
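+/* Watch the console host for EVENT_CONSOLE_LAYOUT notifications. The WinEvent
+ * callback below only sets the uv__tty_console_resized event; a separate
+ * watcher thread throttles the notifications (at most ~30 per second) and
+ * raises SIGWINCH only when the screen buffer size actually changed. */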
+static DWORD WINAPI uv__tty_console_resize_message_loop_thread(void* param) {
+ NTSTATUS status;
+ ULONG_PTR conhost_pid;
+ MSG msg;
+
+ if (pSetWinEventHook == NULL || pNtQueryInformationProcess == NULL)
+ return 0;
+
+ status = pNtQueryInformationProcess(GetCurrentProcess(),
+ ProcessConsoleHostProcess,
+ &conhost_pid,
+ sizeof(conhost_pid),
+ NULL);
+
+ if (!NT_SUCCESS(status)) {
+ /* We couldn't retrieve our console host process, probably because this
+ * is a 32-bit process running on 64-bit Windows. Fall back to receiving
+ * console events from the input stream only. */
+ return 0;
+ }
+
+ /* Ensure the PID is a multiple of 4, which is required by SetWinEventHook */
+ conhost_pid &= ~(ULONG_PTR)0x3;
+
+ uv__tty_console_resized = CreateEvent(NULL, TRUE, FALSE, NULL);
+ if (uv__tty_console_resized == NULL)
+ return 0;
+ if (QueueUserWorkItem(uv__tty_console_resize_watcher_thread,
+ NULL,
+ WT_EXECUTELONGFUNCTION) == 0)
+ return 0;
+
+ if (!pSetWinEventHook(EVENT_CONSOLE_LAYOUT,
+ EVENT_CONSOLE_LAYOUT,
+ NULL,
+ uv__tty_console_resize_event,
+ (DWORD)conhost_pid,
+ 0,
+ WINEVENT_OUTOFCONTEXT))
+ return 0;
+
+ while (GetMessage(&msg, NULL, 0, 0)) {
+ TranslateMessage(&msg);
+ DispatchMessage(&msg);
+ }
+ return 0;
+}
+
+static void CALLBACK uv__tty_console_resize_event(HWINEVENTHOOK hWinEventHook,
+ DWORD event,
+ HWND hwnd,
+ LONG idObject,
+ LONG idChild,
+ DWORD dwEventThread,
+ DWORD dwmsEventTime) {
+ SetEvent(uv__tty_console_resized);
+}
+
+static DWORD WINAPI uv__tty_console_resize_watcher_thread(void* param) {
+ for (;;) {
+ /* Make sure to not overwhelm the system with resize events */
+ Sleep(33);
+ WaitForSingleObject(uv__tty_console_resized, INFINITE);
+ uv__tty_console_signal_resize();
+ ResetEvent(uv__tty_console_resized);
+ }
+ return 0;
+}
+
+static void uv__tty_console_signal_resize(void) {
+ CONSOLE_SCREEN_BUFFER_INFO sb_info;
+ int width, height;
+
+ if (!GetConsoleScreenBufferInfo(uv__tty_console_handle, &sb_info))
+ return;
+
+ width = sb_info.dwSize.X;
+ height = sb_info.srWindow.Bottom - sb_info.srWindow.Top + 1;
+
+ uv_mutex_lock(&uv__tty_console_resize_mutex);
+ assert(uv__tty_console_width != -1 && uv__tty_console_height != -1);
+ if (width != uv__tty_console_width || height != uv__tty_console_height) {
+ uv__tty_console_width = width;
+ uv__tty_console_height = height;
+ uv_mutex_unlock(&uv__tty_console_resize_mutex);
+ uv__signal_dispatch(SIGWINCH);
+ } else {
+ uv_mutex_unlock(&uv__tty_console_resize_mutex);
+ }
+}
+
+void uv_tty_set_vterm_state(uv_tty_vtermstate_t state) {
+ uv_sem_wait(&uv_tty_output_lock);
+ uv__need_check_vterm_state = FALSE;
+ uv__vterm_state = state;
+ uv_sem_post(&uv_tty_output_lock);
+}
+
+int uv_tty_get_vterm_state(uv_tty_vtermstate_t* state) {
+ uv_sem_wait(&uv_tty_output_lock);
+ *state = uv__vterm_state;
+ uv_sem_post(&uv_tty_output_lock);
+ return 0;
+}
diff --git a/Utilities/cmlibuv/src/win/udp.c b/Utilities/cmlibuv/src/win/udp.c
new file mode 100644
index 0000000..68ca728
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/udp.c
@@ -0,0 +1,1181 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+
+#include "uv.h"
+#include "internal.h"
+#include "handle-inl.h"
+#include "stream-inl.h"
+#include "req-inl.h"
+
+
+/*
+ * Threshold of active udp streams for which to preallocate udp read buffers.
+ */
+const unsigned int uv_active_udp_streams_threshold = 0;
+
+/* A zero-size buffer used for zero-length (MSG_PEEK) reads. */
+static char uv_zero_[] = "";
+
+
+int uv_udp_getpeername(const uv_udp_t* handle,
+ struct sockaddr* name,
+ int* namelen) {
+
+ return uv__getsockpeername((const uv_handle_t*) handle,
+ getpeername,
+ name,
+ namelen,
+ 0);
+}
+
+
+int uv_udp_getsockname(const uv_udp_t* handle,
+ struct sockaddr* name,
+ int* namelen) {
+
+ return uv__getsockpeername((const uv_handle_t*) handle,
+ getsockname,
+ name,
+ namelen,
+ 0);
+}
+
+
+static int uv_udp_set_socket(uv_loop_t* loop, uv_udp_t* handle, SOCKET socket,
+ int family) {
+ DWORD yes = 1;
+ WSAPROTOCOL_INFOW info;
+ int opt_len;
+
+ if (handle->socket != INVALID_SOCKET)
+ return UV_EBUSY;
+
+ /* Set the socket to nonblocking mode */
+ if (ioctlsocket(socket, FIONBIO, &yes) == SOCKET_ERROR) {
+ return WSAGetLastError();
+ }
+
+ /* Make the socket non-inheritable */
+ if (!SetHandleInformation((HANDLE)socket, HANDLE_FLAG_INHERIT, 0)) {
+ return GetLastError();
+ }
+
+ /* Associate it with the I/O completion port. Use uv_handle_t pointer as
+ * completion key. */
+ if (CreateIoCompletionPort((HANDLE)socket,
+ loop->iocp,
+ (ULONG_PTR)socket,
+ 0) == NULL) {
+ return GetLastError();
+ }
+
+ /* All known Windows that support SetFileCompletionNotificationModes have a
+ * bug that makes it impossible to use this function in conjunction with
+ * datagram sockets. We can work around that but only if the user is using
+   * the default UDP driver (AFD) and has no other LSPs stacked on top. Here
+ * we check whether that is the case. */
+ opt_len = (int) sizeof info;
+ if (getsockopt(
+ socket, SOL_SOCKET, SO_PROTOCOL_INFOW, (char*) &info, &opt_len) ==
+ SOCKET_ERROR) {
+ return GetLastError();
+ }
+
+ if (info.ProtocolChain.ChainLen == 1) {
+ if (SetFileCompletionNotificationModes(
+ (HANDLE) socket,
+ FILE_SKIP_SET_EVENT_ON_HANDLE |
+ FILE_SKIP_COMPLETION_PORT_ON_SUCCESS)) {
+ handle->flags |= UV_HANDLE_SYNC_BYPASS_IOCP;
+ handle->func_wsarecv = uv_wsarecv_workaround;
+ handle->func_wsarecvfrom = uv_wsarecvfrom_workaround;
+ } else if (GetLastError() != ERROR_INVALID_FUNCTION) {
+ return GetLastError();
+ }
+ }
+
+ handle->socket = socket;
+
+ if (family == AF_INET6) {
+ handle->flags |= UV_HANDLE_IPV6;
+ } else {
+ assert(!(handle->flags & UV_HANDLE_IPV6));
+ }
+
+ return 0;
+}
+
+
+int uv__udp_init_ex(uv_loop_t* loop,
+ uv_udp_t* handle,
+ unsigned flags,
+ int domain) {
+ uv__handle_init(loop, (uv_handle_t*) handle, UV_UDP);
+ handle->socket = INVALID_SOCKET;
+ handle->reqs_pending = 0;
+ handle->activecnt = 0;
+ handle->func_wsarecv = WSARecv;
+ handle->func_wsarecvfrom = WSARecvFrom;
+ handle->send_queue_size = 0;
+ handle->send_queue_count = 0;
+ UV_REQ_INIT(&handle->recv_req, UV_UDP_RECV);
+ handle->recv_req.data = handle;
+
+ /* If anything fails beyond this point we need to remove the handle from
+ * the handle queue, since it was added by uv__handle_init.
+ */
+
+ if (domain != AF_UNSPEC) {
+ SOCKET sock;
+ DWORD err;
+
+ sock = socket(domain, SOCK_DGRAM, 0);
+ if (sock == INVALID_SOCKET) {
+ err = WSAGetLastError();
+ QUEUE_REMOVE(&handle->handle_queue);
+ return uv_translate_sys_error(err);
+ }
+
+ err = uv_udp_set_socket(handle->loop, handle, sock, domain);
+ if (err) {
+ closesocket(sock);
+ QUEUE_REMOVE(&handle->handle_queue);
+ return uv_translate_sys_error(err);
+ }
+ }
+
+ return 0;
+}
+
+
+void uv_udp_close(uv_loop_t* loop, uv_udp_t* handle) {
+ uv_udp_recv_stop(handle);
+ closesocket(handle->socket);
+ handle->socket = INVALID_SOCKET;
+
+ uv__handle_closing(handle);
+
+ if (handle->reqs_pending == 0) {
+ uv_want_endgame(loop, (uv_handle_t*) handle);
+ }
+}
+
+
+void uv_udp_endgame(uv_loop_t* loop, uv_udp_t* handle) {
+ if (handle->flags & UV_HANDLE_CLOSING &&
+ handle->reqs_pending == 0) {
+ assert(!(handle->flags & UV_HANDLE_CLOSED));
+ uv__handle_close(handle);
+ }
+}
+
+
+int uv_udp_using_recvmmsg(const uv_udp_t* handle) {
+ return 0;
+}
+
+
+static int uv_udp_maybe_bind(uv_udp_t* handle,
+ const struct sockaddr* addr,
+ unsigned int addrlen,
+ unsigned int flags) {
+ int r;
+ int err;
+ DWORD no = 0;
+
+ if (handle->flags & UV_HANDLE_BOUND)
+ return 0;
+
+ if ((flags & UV_UDP_IPV6ONLY) && addr->sa_family != AF_INET6) {
+ /* UV_UDP_IPV6ONLY is supported only for IPV6 sockets */
+ return ERROR_INVALID_PARAMETER;
+ }
+
+ if (handle->socket == INVALID_SOCKET) {
+ SOCKET sock = socket(addr->sa_family, SOCK_DGRAM, 0);
+ if (sock == INVALID_SOCKET) {
+ return WSAGetLastError();
+ }
+
+ err = uv_udp_set_socket(handle->loop, handle, sock, addr->sa_family);
+ if (err) {
+ closesocket(sock);
+ return err;
+ }
+ }
+
+ if (flags & UV_UDP_REUSEADDR) {
+ DWORD yes = 1;
+ /* Set SO_REUSEADDR on the socket. */
+ if (setsockopt(handle->socket,
+ SOL_SOCKET,
+ SO_REUSEADDR,
+ (char*) &yes,
+ sizeof yes) == SOCKET_ERROR) {
+ err = WSAGetLastError();
+ return err;
+ }
+ }
+
+ if (addr->sa_family == AF_INET6)
+ handle->flags |= UV_HANDLE_IPV6;
+
+ if (addr->sa_family == AF_INET6 && !(flags & UV_UDP_IPV6ONLY)) {
+    /* On Windows IPV6ONLY is on by default. If the user doesn't specify it,
+     * libuv turns it off. */
+
+ /* TODO: how to handle errors? This may fail if there is no ipv4 stack
+ * available, or when run on XP/2003 which have no support for dualstack
+ * sockets. For now we're silently ignoring the error. */
+ setsockopt(handle->socket,
+ IPPROTO_IPV6,
+ IPV6_V6ONLY,
+ (char*) &no,
+ sizeof no);
+ }
+
+ r = bind(handle->socket, addr, addrlen);
+ if (r == SOCKET_ERROR) {
+ return WSAGetLastError();
+ }
+
+ handle->flags |= UV_HANDLE_BOUND;
+
+ return 0;
+}
+
+
+static void uv_udp_queue_recv(uv_loop_t* loop, uv_udp_t* handle) {
+ uv_req_t* req;
+ uv_buf_t buf;
+ DWORD bytes, flags;
+ int result;
+
+ assert(handle->flags & UV_HANDLE_READING);
+ assert(!(handle->flags & UV_HANDLE_READ_PENDING));
+
+ req = &handle->recv_req;
+ memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));
+
+ /*
+ * Preallocate a read buffer if the number of active streams is below
+ * the threshold.
+ */
+ if (loop->active_udp_streams < uv_active_udp_streams_threshold) {
+ handle->flags &= ~UV_HANDLE_ZERO_READ;
+
+ handle->recv_buffer = uv_buf_init(NULL, 0);
+ handle->alloc_cb((uv_handle_t*) handle, 65536, &handle->recv_buffer);
+ if (handle->recv_buffer.base == NULL || handle->recv_buffer.len == 0) {
+ handle->recv_cb(handle, UV_ENOBUFS, &handle->recv_buffer, NULL, 0);
+ return;
+ }
+ assert(handle->recv_buffer.base != NULL);
+
+ buf = handle->recv_buffer;
+ memset(&handle->recv_from, 0, sizeof handle->recv_from);
+ handle->recv_from_len = sizeof handle->recv_from;
+ flags = 0;
+
+ result = handle->func_wsarecvfrom(handle->socket,
+ (WSABUF*) &buf,
+ 1,
+ &bytes,
+ &flags,
+ (struct sockaddr*) &handle->recv_from,
+ &handle->recv_from_len,
+ &req->u.io.overlapped,
+ NULL);
+
+ if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
+ /* Process the req without IOCP. */
+ handle->flags |= UV_HANDLE_READ_PENDING;
+ req->u.io.overlapped.InternalHigh = bytes;
+ handle->reqs_pending++;
+ uv_insert_pending_req(loop, req);
+ } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
+ /* The req will be processed with IOCP. */
+ handle->flags |= UV_HANDLE_READ_PENDING;
+ handle->reqs_pending++;
+ } else {
+ /* Make this req pending reporting an error. */
+ SET_REQ_ERROR(req, WSAGetLastError());
+ uv_insert_pending_req(loop, req);
+ handle->reqs_pending++;
+ }
+
+ } else {
+ handle->flags |= UV_HANDLE_ZERO_READ;
+
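+    /* Issue a zero-length MSG_PEEK read: its completion merely signals that
+     * a datagram is available. The datagram itself is fetched later with a
+     * nonblocking WSARecvFrom in uv_process_udp_recv_req(). */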
+ buf.base = (char*) uv_zero_;
+ buf.len = 0;
+ flags = MSG_PEEK;
+
+ result = handle->func_wsarecv(handle->socket,
+ (WSABUF*) &buf,
+ 1,
+ &bytes,
+ &flags,
+ &req->u.io.overlapped,
+ NULL);
+
+ if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
+ /* Process the req without IOCP. */
+ handle->flags |= UV_HANDLE_READ_PENDING;
+ req->u.io.overlapped.InternalHigh = bytes;
+ handle->reqs_pending++;
+ uv_insert_pending_req(loop, req);
+ } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
+ /* The req will be processed with IOCP. */
+ handle->flags |= UV_HANDLE_READ_PENDING;
+ handle->reqs_pending++;
+ } else {
+ /* Make this req pending reporting an error. */
+ SET_REQ_ERROR(req, WSAGetLastError());
+ uv_insert_pending_req(loop, req);
+ handle->reqs_pending++;
+ }
+ }
+}
+
+
+int uv__udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloc_cb,
+ uv_udp_recv_cb recv_cb) {
+ uv_loop_t* loop = handle->loop;
+ int err;
+
+ if (handle->flags & UV_HANDLE_READING) {
+ return UV_EALREADY;
+ }
+
+ err = uv_udp_maybe_bind(handle,
+ (const struct sockaddr*) &uv_addr_ip4_any_,
+ sizeof(uv_addr_ip4_any_),
+ 0);
+ if (err)
+ return uv_translate_sys_error(err);
+
+ handle->flags |= UV_HANDLE_READING;
+ INCREASE_ACTIVE_COUNT(loop, handle);
+ loop->active_udp_streams++;
+
+ handle->recv_cb = recv_cb;
+ handle->alloc_cb = alloc_cb;
+
+ /* If reading was stopped and then started again, there could still be a recv
+ * request pending. */
+ if (!(handle->flags & UV_HANDLE_READ_PENDING))
+ uv_udp_queue_recv(loop, handle);
+
+ return 0;
+}
+
+
+int uv__udp_recv_stop(uv_udp_t* handle) {
+ if (handle->flags & UV_HANDLE_READING) {
+ handle->flags &= ~UV_HANDLE_READING;
+ handle->loop->active_udp_streams--;
+ DECREASE_ACTIVE_COUNT(loop, handle);
+ }
+
+ return 0;
+}
+
+
+static int uv__send(uv_udp_send_t* req,
+ uv_udp_t* handle,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ const struct sockaddr* addr,
+ unsigned int addrlen,
+ uv_udp_send_cb cb) {
+ uv_loop_t* loop = handle->loop;
+ DWORD result, bytes;
+
+ UV_REQ_INIT(req, UV_UDP_SEND);
+ req->handle = handle;
+ req->cb = cb;
+ memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));
+
+ result = WSASendTo(handle->socket,
+ (WSABUF*)bufs,
+ nbufs,
+ &bytes,
+ 0,
+ addr,
+ addrlen,
+ &req->u.io.overlapped,
+ NULL);
+
+ if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
+ /* Request completed immediately. */
+ req->u.io.queued_bytes = 0;
+ handle->reqs_pending++;
+ handle->send_queue_size += req->u.io.queued_bytes;
+ handle->send_queue_count++;
+ REGISTER_HANDLE_REQ(loop, handle, req);
+ uv_insert_pending_req(loop, (uv_req_t*)req);
+ } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
+ /* Request queued by the kernel. */
+ req->u.io.queued_bytes = uv__count_bufs(bufs, nbufs);
+ handle->reqs_pending++;
+ handle->send_queue_size += req->u.io.queued_bytes;
+ handle->send_queue_count++;
+ REGISTER_HANDLE_REQ(loop, handle, req);
+ } else {
+ /* Send failed due to an error. */
+ return WSAGetLastError();
+ }
+
+ return 0;
+}
+
+
+void uv_process_udp_recv_req(uv_loop_t* loop, uv_udp_t* handle,
+ uv_req_t* req) {
+ uv_buf_t buf;
+ int partial;
+
+ assert(handle->type == UV_UDP);
+
+ handle->flags &= ~UV_HANDLE_READ_PENDING;
+
+ if (!REQ_SUCCESS(req)) {
+ DWORD err = GET_REQ_SOCK_ERROR(req);
+ if (err == WSAEMSGSIZE) {
+ /* Not a real error, it just indicates that the received packet was
+ * bigger than the receive buffer. */
+ } else if (err == WSAECONNRESET || err == WSAENETRESET) {
+ /* A previous sendto operation failed; ignore this error. If zero-reading
+       * we need to call WSARecv/WSARecvFrom _without_ the MSG_PEEK flag to
+ * clear out the error queue. For nonzero reads, immediately queue a new
+ * receive. */
+ if (!(handle->flags & UV_HANDLE_ZERO_READ)) {
+ goto done;
+ }
+ } else {
+ /* A real error occurred. Report the error to the user only if we're
+ * currently reading. */
+ if (handle->flags & UV_HANDLE_READING) {
+ uv_udp_recv_stop(handle);
+ buf = (handle->flags & UV_HANDLE_ZERO_READ) ?
+ uv_buf_init(NULL, 0) : handle->recv_buffer;
+ handle->recv_cb(handle, uv_translate_sys_error(err), &buf, NULL, 0);
+ }
+ goto done;
+ }
+ }
+
+ if (!(handle->flags & UV_HANDLE_ZERO_READ)) {
+ /* Successful read */
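+    /* A WSAEMSGSIZE "failure" means the datagram was truncated; report it
+     * to the user with the UV_UDP_PARTIAL flag. */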
+ partial = !REQ_SUCCESS(req);
+ handle->recv_cb(handle,
+ req->u.io.overlapped.InternalHigh,
+ &handle->recv_buffer,
+ (const struct sockaddr*) &handle->recv_from,
+ partial ? UV_UDP_PARTIAL : 0);
+ } else if (handle->flags & UV_HANDLE_READING) {
+ DWORD bytes, err, flags;
+ struct sockaddr_storage from;
+ int from_len;
+
+ /* Do a nonblocking receive.
+ * TODO: try to read multiple datagrams at once. FIONREAD maybe? */
+ buf = uv_buf_init(NULL, 0);
+ handle->alloc_cb((uv_handle_t*) handle, 65536, &buf);
+ if (buf.base == NULL || buf.len == 0) {
+ handle->recv_cb(handle, UV_ENOBUFS, &buf, NULL, 0);
+ goto done;
+ }
+ assert(buf.base != NULL);
+
+ memset(&from, 0, sizeof from);
+ from_len = sizeof from;
+
+ flags = 0;
+
+ if (WSARecvFrom(handle->socket,
+ (WSABUF*)&buf,
+ 1,
+ &bytes,
+ &flags,
+ (struct sockaddr*) &from,
+ &from_len,
+ NULL,
+ NULL) != SOCKET_ERROR) {
+
+ /* Message received */
+ handle->recv_cb(handle, bytes, &buf, (const struct sockaddr*) &from, 0);
+ } else {
+ err = WSAGetLastError();
+ if (err == WSAEMSGSIZE) {
+ /* Message truncated */
+ handle->recv_cb(handle,
+ bytes,
+ &buf,
+ (const struct sockaddr*) &from,
+ UV_UDP_PARTIAL);
+ } else if (err == WSAEWOULDBLOCK) {
+ /* Kernel buffer empty */
+ handle->recv_cb(handle, 0, &buf, NULL, 0);
+ } else if (err == WSAECONNRESET || err == WSAENETRESET) {
+        /* WSAECONNRESET/WSAENETRESET are ignored because they just indicate
+         * that a previous sendto operation failed.
+         */
+ handle->recv_cb(handle, 0, &buf, NULL, 0);
+ } else {
+ /* Any other error that we want to report back to the user. */
+ uv_udp_recv_stop(handle);
+ handle->recv_cb(handle, uv_translate_sys_error(err), &buf, NULL, 0);
+ }
+ }
+ }
+
+done:
+ /* Post another read if still reading and not closing. */
+ if ((handle->flags & UV_HANDLE_READING) &&
+ !(handle->flags & UV_HANDLE_READ_PENDING)) {
+ uv_udp_queue_recv(loop, handle);
+ }
+
+ DECREASE_PENDING_REQ_COUNT(handle);
+}
+
+
+void uv_process_udp_send_req(uv_loop_t* loop, uv_udp_t* handle,
+ uv_udp_send_t* req) {
+ int err;
+
+ assert(handle->type == UV_UDP);
+
+ assert(handle->send_queue_size >= req->u.io.queued_bytes);
+ assert(handle->send_queue_count >= 1);
+ handle->send_queue_size -= req->u.io.queued_bytes;
+ handle->send_queue_count--;
+
+ UNREGISTER_HANDLE_REQ(loop, handle, req);
+
+ if (req->cb) {
+ err = 0;
+ if (!REQ_SUCCESS(req)) {
+ err = GET_REQ_SOCK_ERROR(req);
+ }
+ req->cb(req, uv_translate_sys_error(err));
+ }
+
+ DECREASE_PENDING_REQ_COUNT(handle);
+}
+
+
+static int uv__udp_set_membership4(uv_udp_t* handle,
+ const struct sockaddr_in* multicast_addr,
+ const char* interface_addr,
+ uv_membership membership) {
+ int err;
+ int optname;
+ struct ip_mreq mreq;
+
+ if (handle->flags & UV_HANDLE_IPV6)
+ return UV_EINVAL;
+
+ /* If the socket is unbound, bind to inaddr_any. */
+ err = uv_udp_maybe_bind(handle,
+ (const struct sockaddr*) &uv_addr_ip4_any_,
+ sizeof(uv_addr_ip4_any_),
+ UV_UDP_REUSEADDR);
+ if (err)
+ return uv_translate_sys_error(err);
+
+ memset(&mreq, 0, sizeof mreq);
+
+ if (interface_addr) {
+ err = uv_inet_pton(AF_INET, interface_addr, &mreq.imr_interface.s_addr);
+ if (err)
+ return err;
+ } else {
+ mreq.imr_interface.s_addr = htonl(INADDR_ANY);
+ }
+
+ mreq.imr_multiaddr.s_addr = multicast_addr->sin_addr.s_addr;
+
+ switch (membership) {
+ case UV_JOIN_GROUP:
+ optname = IP_ADD_MEMBERSHIP;
+ break;
+ case UV_LEAVE_GROUP:
+ optname = IP_DROP_MEMBERSHIP;
+ break;
+ default:
+ return UV_EINVAL;
+ }
+
+ if (setsockopt(handle->socket,
+ IPPROTO_IP,
+ optname,
+ (char*) &mreq,
+ sizeof mreq) == SOCKET_ERROR) {
+ return uv_translate_sys_error(WSAGetLastError());
+ }
+
+ return 0;
+}
+
+
+int uv__udp_set_membership6(uv_udp_t* handle,
+ const struct sockaddr_in6* multicast_addr,
+ const char* interface_addr,
+ uv_membership membership) {
+ int optname;
+ int err;
+ struct ipv6_mreq mreq;
+ struct sockaddr_in6 addr6;
+
+ if ((handle->flags & UV_HANDLE_BOUND) && !(handle->flags & UV_HANDLE_IPV6))
+ return UV_EINVAL;
+
+ err = uv_udp_maybe_bind(handle,
+ (const struct sockaddr*) &uv_addr_ip6_any_,
+ sizeof(uv_addr_ip6_any_),
+ UV_UDP_REUSEADDR);
+
+ if (err)
+ return uv_translate_sys_error(err);
+
+ memset(&mreq, 0, sizeof(mreq));
+
+ if (interface_addr) {
+ if (uv_ip6_addr(interface_addr, 0, &addr6))
+ return UV_EINVAL;
+ mreq.ipv6mr_interface = addr6.sin6_scope_id;
+ } else {
+ mreq.ipv6mr_interface = 0;
+ }
+
+ mreq.ipv6mr_multiaddr = multicast_addr->sin6_addr;
+
+ switch (membership) {
+ case UV_JOIN_GROUP:
+ optname = IPV6_ADD_MEMBERSHIP;
+ break;
+ case UV_LEAVE_GROUP:
+ optname = IPV6_DROP_MEMBERSHIP;
+ break;
+ default:
+ return UV_EINVAL;
+ }
+
+ if (setsockopt(handle->socket,
+ IPPROTO_IPV6,
+ optname,
+ (char*) &mreq,
+ sizeof mreq) == SOCKET_ERROR) {
+ return uv_translate_sys_error(WSAGetLastError());
+ }
+
+ return 0;
+}
+
+
+static int uv__udp_set_source_membership4(uv_udp_t* handle,
+ const struct sockaddr_in* multicast_addr,
+ const char* interface_addr,
+ const struct sockaddr_in* source_addr,
+ uv_membership membership) {
+ struct ip_mreq_source mreq;
+ int optname;
+ int err;
+
+ if (handle->flags & UV_HANDLE_IPV6)
+ return UV_EINVAL;
+
+ /* If the socket is unbound, bind to inaddr_any. */
+ err = uv_udp_maybe_bind(handle,
+ (const struct sockaddr*) &uv_addr_ip4_any_,
+ sizeof(uv_addr_ip4_any_),
+ UV_UDP_REUSEADDR);
+ if (err)
+ return uv_translate_sys_error(err);
+
+ memset(&mreq, 0, sizeof(mreq));
+
+ if (interface_addr != NULL) {
+ err = uv_inet_pton(AF_INET, interface_addr, &mreq.imr_interface.s_addr);
+ if (err)
+ return err;
+ } else {
+ mreq.imr_interface.s_addr = htonl(INADDR_ANY);
+ }
+
+ mreq.imr_multiaddr.s_addr = multicast_addr->sin_addr.s_addr;
+ mreq.imr_sourceaddr.s_addr = source_addr->sin_addr.s_addr;
+
+ if (membership == UV_JOIN_GROUP)
+ optname = IP_ADD_SOURCE_MEMBERSHIP;
+ else if (membership == UV_LEAVE_GROUP)
+ optname = IP_DROP_SOURCE_MEMBERSHIP;
+ else
+ return UV_EINVAL;
+
+ if (setsockopt(handle->socket,
+ IPPROTO_IP,
+ optname,
+ (char*) &mreq,
+ sizeof(mreq)) == SOCKET_ERROR) {
+ return uv_translate_sys_error(WSAGetLastError());
+ }
+
+ return 0;
+}
+
+
+int uv__udp_set_source_membership6(uv_udp_t* handle,
+ const struct sockaddr_in6* multicast_addr,
+ const char* interface_addr,
+ const struct sockaddr_in6* source_addr,
+ uv_membership membership) {
+ struct group_source_req mreq;
+ struct sockaddr_in6 addr6;
+ int optname;
+ int err;
+
+ STATIC_ASSERT(sizeof(mreq.gsr_group) >= sizeof(*multicast_addr));
+ STATIC_ASSERT(sizeof(mreq.gsr_source) >= sizeof(*source_addr));
+
+ if ((handle->flags & UV_HANDLE_BOUND) && !(handle->flags & UV_HANDLE_IPV6))
+ return UV_EINVAL;
+
+ err = uv_udp_maybe_bind(handle,
+ (const struct sockaddr*) &uv_addr_ip6_any_,
+ sizeof(uv_addr_ip6_any_),
+ UV_UDP_REUSEADDR);
+
+ if (err)
+ return uv_translate_sys_error(err);
+
+ memset(&mreq, 0, sizeof(mreq));
+
+ if (interface_addr != NULL) {
+ err = uv_ip6_addr(interface_addr, 0, &addr6);
+ if (err)
+ return err;
+ mreq.gsr_interface = addr6.sin6_scope_id;
+ } else {
+ mreq.gsr_interface = 0;
+ }
+
+ memcpy(&mreq.gsr_group, multicast_addr, sizeof(*multicast_addr));
+ memcpy(&mreq.gsr_source, source_addr, sizeof(*source_addr));
+
+ if (membership == UV_JOIN_GROUP)
+ optname = MCAST_JOIN_SOURCE_GROUP;
+ else if (membership == UV_LEAVE_GROUP)
+ optname = MCAST_LEAVE_SOURCE_GROUP;
+ else
+ return UV_EINVAL;
+
+ if (setsockopt(handle->socket,
+ IPPROTO_IPV6,
+ optname,
+ (char*) &mreq,
+ sizeof(mreq)) == SOCKET_ERROR) {
+ return uv_translate_sys_error(WSAGetLastError());
+ }
+
+ return 0;
+}
+
+
+int uv_udp_set_membership(uv_udp_t* handle,
+ const char* multicast_addr,
+ const char* interface_addr,
+ uv_membership membership) {
+ struct sockaddr_in addr4;
+ struct sockaddr_in6 addr6;
+
+ if (uv_ip4_addr(multicast_addr, 0, &addr4) == 0)
+ return uv__udp_set_membership4(handle, &addr4, interface_addr, membership);
+ else if (uv_ip6_addr(multicast_addr, 0, &addr6) == 0)
+ return uv__udp_set_membership6(handle, &addr6, interface_addr, membership);
+ else
+ return UV_EINVAL;
+}
+
+
+int uv_udp_set_source_membership(uv_udp_t* handle,
+ const char* multicast_addr,
+ const char* interface_addr,
+ const char* source_addr,
+ uv_membership membership) {
+ int err;
+ struct sockaddr_storage mcast_addr;
+ struct sockaddr_in* mcast_addr4;
+ struct sockaddr_in6* mcast_addr6;
+ struct sockaddr_storage src_addr;
+ struct sockaddr_in* src_addr4;
+ struct sockaddr_in6* src_addr6;
+
+ mcast_addr4 = (struct sockaddr_in*)&mcast_addr;
+ mcast_addr6 = (struct sockaddr_in6*)&mcast_addr;
+ src_addr4 = (struct sockaddr_in*)&src_addr;
+ src_addr6 = (struct sockaddr_in6*)&src_addr;
+
+ err = uv_ip4_addr(multicast_addr, 0, mcast_addr4);
+ if (err) {
+ err = uv_ip6_addr(multicast_addr, 0, mcast_addr6);
+ if (err)
+ return err;
+ err = uv_ip6_addr(source_addr, 0, src_addr6);
+ if (err)
+ return err;
+ return uv__udp_set_source_membership6(handle,
+ mcast_addr6,
+ interface_addr,
+ src_addr6,
+ membership);
+ }
+
+ err = uv_ip4_addr(source_addr, 0, src_addr4);
+ if (err)
+ return err;
+ return uv__udp_set_source_membership4(handle,
+ mcast_addr4,
+ interface_addr,
+ src_addr4,
+ membership);
+}
+
+
+int uv_udp_set_multicast_interface(uv_udp_t* handle, const char* interface_addr) {
+ struct sockaddr_storage addr_st;
+ struct sockaddr_in* addr4;
+ struct sockaddr_in6* addr6;
+
+ addr4 = (struct sockaddr_in*) &addr_st;
+ addr6 = (struct sockaddr_in6*) &addr_st;
+
+ if (!interface_addr) {
+ memset(&addr_st, 0, sizeof addr_st);
+ if (handle->flags & UV_HANDLE_IPV6) {
+ addr_st.ss_family = AF_INET6;
+ addr6->sin6_scope_id = 0;
+ } else {
+ addr_st.ss_family = AF_INET;
+ addr4->sin_addr.s_addr = htonl(INADDR_ANY);
+ }
+ } else if (uv_ip4_addr(interface_addr, 0, addr4) == 0) {
+ /* nothing, address was parsed */
+ } else if (uv_ip6_addr(interface_addr, 0, addr6) == 0) {
+ /* nothing, address was parsed */
+ } else {
+ return UV_EINVAL;
+ }
+
+ if (handle->socket == INVALID_SOCKET)
+ return UV_EBADF;
+
+ if (addr_st.ss_family == AF_INET) {
+ if (setsockopt(handle->socket,
+ IPPROTO_IP,
+ IP_MULTICAST_IF,
+ (char*) &addr4->sin_addr,
+ sizeof(addr4->sin_addr)) == SOCKET_ERROR) {
+ return uv_translate_sys_error(WSAGetLastError());
+ }
+ } else if (addr_st.ss_family == AF_INET6) {
+ if (setsockopt(handle->socket,
+ IPPROTO_IPV6,
+ IPV6_MULTICAST_IF,
+ (char*) &addr6->sin6_scope_id,
+ sizeof(addr6->sin6_scope_id)) == SOCKET_ERROR) {
+ return uv_translate_sys_error(WSAGetLastError());
+ }
+ } else {
+ assert(0 && "unexpected address family");
+ abort();
+ }
+
+ return 0;
+}
+
+
+int uv_udp_set_broadcast(uv_udp_t* handle, int value) {
+ BOOL optval = (BOOL) value;
+
+ if (handle->socket == INVALID_SOCKET)
+ return UV_EBADF;
+
+ if (setsockopt(handle->socket,
+ SOL_SOCKET,
+ SO_BROADCAST,
+ (char*) &optval,
+ sizeof optval)) {
+ return uv_translate_sys_error(WSAGetLastError());
+ }
+
+ return 0;
+}
+
+
+int uv__udp_is_bound(uv_udp_t* handle) {
+ struct sockaddr_storage addr;
+ int addrlen;
+
+ addrlen = sizeof(addr);
+ if (uv_udp_getsockname(handle, (struct sockaddr*) &addr, &addrlen) != 0)
+ return 0;
+
+ return addrlen > 0;
+}
+
+
+int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock) {
+ WSAPROTOCOL_INFOW protocol_info;
+ int opt_len;
+ int err;
+
+ /* Detect the address family of the socket. */
+ opt_len = (int) sizeof protocol_info;
+ if (getsockopt(sock,
+ SOL_SOCKET,
+ SO_PROTOCOL_INFOW,
+ (char*) &protocol_info,
+ &opt_len) == SOCKET_ERROR) {
+ return uv_translate_sys_error(GetLastError());
+ }
+
+ err = uv_udp_set_socket(handle->loop,
+ handle,
+ sock,
+ protocol_info.iAddressFamily);
+ if (err)
+ return uv_translate_sys_error(err);
+
+ if (uv__udp_is_bound(handle))
+ handle->flags |= UV_HANDLE_BOUND;
+
+ if (uv__udp_is_connected(handle))
+ handle->flags |= UV_HANDLE_UDP_CONNECTED;
+
+ return 0;
+}
+
+
+#define SOCKOPT_SETTER(name, option4, option6, validate) \
+ int uv_udp_set_##name(uv_udp_t* handle, int value) { \
+ DWORD optval = (DWORD) value; \
+ \
+ if (!(validate(value))) { \
+ return UV_EINVAL; \
+ } \
+ \
+ if (handle->socket == INVALID_SOCKET) \
+ return UV_EBADF; \
+ \
+ if (!(handle->flags & UV_HANDLE_IPV6)) { \
+ /* Set IPv4 socket option */ \
+ if (setsockopt(handle->socket, \
+ IPPROTO_IP, \
+ option4, \
+ (char*) &optval, \
+ sizeof optval)) { \
+ return uv_translate_sys_error(WSAGetLastError()); \
+ } \
+ } else { \
+ /* Set IPv6 socket option */ \
+ if (setsockopt(handle->socket, \
+ IPPROTO_IPV6, \
+ option6, \
+ (char*) &optval, \
+ sizeof optval)) { \
+ return uv_translate_sys_error(WSAGetLastError()); \
+ } \
+ } \
+ return 0; \
+ }
+
+#define VALIDATE_TTL(value) ((value) >= 1 && (value) <= 255)
+#define VALIDATE_MULTICAST_TTL(value) ((value) >= -1 && (value) <= 255)
+#define VALIDATE_MULTICAST_LOOP(value) (1)
+
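+/* Instantiate uv_udp_set_ttl(), uv_udp_set_multicast_ttl() and
+ * uv_udp_set_multicast_loop() from the SOCKOPT_SETTER template above. */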
+SOCKOPT_SETTER(ttl,
+ IP_TTL,
+ IPV6_HOPLIMIT,
+ VALIDATE_TTL)
+SOCKOPT_SETTER(multicast_ttl,
+ IP_MULTICAST_TTL,
+ IPV6_MULTICAST_HOPS,
+ VALIDATE_MULTICAST_TTL)
+SOCKOPT_SETTER(multicast_loop,
+ IP_MULTICAST_LOOP,
+ IPV6_MULTICAST_LOOP,
+ VALIDATE_MULTICAST_LOOP)
+
+#undef SOCKOPT_SETTER
+#undef VALIDATE_TTL
+#undef VALIDATE_MULTICAST_TTL
+#undef VALIDATE_MULTICAST_LOOP
+
+
+/* This function is an egress point, i.e. it returns libuv errors rather than
+ * system errors.
+ */
+int uv__udp_bind(uv_udp_t* handle,
+ const struct sockaddr* addr,
+ unsigned int addrlen,
+ unsigned int flags) {
+ int err;
+
+ err = uv_udp_maybe_bind(handle, addr, addrlen, flags);
+ if (err)
+ return uv_translate_sys_error(err);
+
+ return 0;
+}
+
+
+int uv__udp_connect(uv_udp_t* handle,
+ const struct sockaddr* addr,
+ unsigned int addrlen) {
+ const struct sockaddr* bind_addr;
+ int err;
+
+ if (!(handle->flags & UV_HANDLE_BOUND)) {
+ if (addrlen == sizeof(uv_addr_ip4_any_))
+ bind_addr = (const struct sockaddr*) &uv_addr_ip4_any_;
+ else if (addrlen == sizeof(uv_addr_ip6_any_))
+ bind_addr = (const struct sockaddr*) &uv_addr_ip6_any_;
+ else
+ return UV_EINVAL;
+
+ err = uv_udp_maybe_bind(handle, bind_addr, addrlen, 0);
+ if (err)
+ return uv_translate_sys_error(err);
+ }
+
+ err = connect(handle->socket, addr, addrlen);
+ if (err)
+ return uv_translate_sys_error(WSAGetLastError());
+
+ handle->flags |= UV_HANDLE_UDP_CONNECTED;
+
+ return 0;
+}
+
+
+int uv__udp_disconnect(uv_udp_t* handle) {
+ int err;
+ struct sockaddr addr;
+
+ memset(&addr, 0, sizeof(addr));
+
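+  /* Connecting a datagram socket to an all-zero (AF_UNSPEC) address
+   * dissolves the existing association, i.e. disconnects the socket. */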
+ err = connect(handle->socket, &addr, sizeof(addr));
+ if (err)
+ return uv_translate_sys_error(WSAGetLastError());
+
+ handle->flags &= ~UV_HANDLE_UDP_CONNECTED;
+ return 0;
+}
+
+
+/* This function is an egress point, i.e. it returns libuv errors rather than
+ * system errors.
+ */
+int uv__udp_send(uv_udp_send_t* req,
+ uv_udp_t* handle,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ const struct sockaddr* addr,
+ unsigned int addrlen,
+ uv_udp_send_cb send_cb) {
+ const struct sockaddr* bind_addr;
+ int err;
+
+ if (!(handle->flags & UV_HANDLE_BOUND)) {
+ if (addrlen == sizeof(uv_addr_ip4_any_))
+ bind_addr = (const struct sockaddr*) &uv_addr_ip4_any_;
+ else if (addrlen == sizeof(uv_addr_ip6_any_))
+ bind_addr = (const struct sockaddr*) &uv_addr_ip6_any_;
+ else
+ return UV_EINVAL;
+
+ err = uv_udp_maybe_bind(handle, bind_addr, addrlen, 0);
+ if (err)
+ return uv_translate_sys_error(err);
+ }
+
+ err = uv__send(req, handle, bufs, nbufs, addr, addrlen, send_cb);
+ if (err)
+ return uv_translate_sys_error(err);
+
+ return 0;
+}
+
+
+int uv__udp_try_send(uv_udp_t* handle,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ const struct sockaddr* addr,
+ unsigned int addrlen) {
+ DWORD bytes;
+ const struct sockaddr* bind_addr;
+ struct sockaddr_storage converted;
+ int err;
+
+ assert(nbufs > 0);
+
+ if (addr != NULL) {
+ err = uv__convert_to_localhost_if_unspecified(addr, &converted);
+ if (err)
+ return err;
+ }
+
+  /* Already sending a message. */
+ if (handle->send_queue_count != 0)
+ return UV_EAGAIN;
+
+ if (!(handle->flags & UV_HANDLE_BOUND)) {
+ if (addrlen == sizeof(uv_addr_ip4_any_))
+ bind_addr = (const struct sockaddr*) &uv_addr_ip4_any_;
+ else if (addrlen == sizeof(uv_addr_ip6_any_))
+ bind_addr = (const struct sockaddr*) &uv_addr_ip6_any_;
+ else
+ return UV_EINVAL;
+ err = uv_udp_maybe_bind(handle, bind_addr, addrlen, 0);
+ if (err)
+ return uv_translate_sys_error(err);
+ }
+
+ err = WSASendTo(handle->socket,
+ (WSABUF*)bufs,
+ nbufs,
+ &bytes,
+ 0,
+ (const struct sockaddr*) &converted,
+ addrlen,
+ NULL,
+ NULL);
+
+ if (err)
+ return uv_translate_sys_error(WSAGetLastError());
+
+ return bytes;
+}
diff --git a/Utilities/cmlibuv/src/win/util.c b/Utilities/cmlibuv/src/win/util.c
new file mode 100644
index 0000000..aad8f1a
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/util.c
@@ -0,0 +1,1977 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <direct.h>
+#include <limits.h>
+#include <stdio.h>
+#include <string.h>
+#include <time.h>
+#include <wchar.h>
+
+#include "uv.h"
+#include "internal.h"
+
+/* clang-format off */
+#include <winsock2.h>
+#include <winperf.h>
+#include <iphlpapi.h>
+#include <psapi.h>
+#include <tlhelp32.h>
+#include <windows.h>
+/* clang-format on */
+#include <userenv.h>
+#include <math.h>
+
+/*
+ * Max title length; the only thing MSDN tells us about the maximum length
+ * of the console title is that it is smaller than 64K. However in practice
+ * it is much smaller, and there is no way to figure out what the exact length
+ * of the title is or can be, at least not on XP. To make it even more
+ * annoying, GetConsoleTitle fails when the buffer to be read into is bigger
+ * than the actual maximum length. So we make a conservative guess here;
+ * just don't put the novel you're writing in the title, unless the plot
+ * survives truncation.
+ */
+#define MAX_TITLE_LENGTH 8192
+
+/* The number of nanoseconds in one second. */
+#define UV__NANOSEC 1000000000
+
+/* Max user name length, from iphlpapi.h */
+#ifndef UNLEN
+# define UNLEN 256
+#endif
+
+
+/* A RtlGenRandom() by any other name... */
+extern BOOLEAN NTAPI SystemFunction036(PVOID Buffer, ULONG BufferLength);
+
+/* Cached copy of the process title, plus a mutex guarding it. */
+static char *process_title;
+static CRITICAL_SECTION process_title_lock;
+
+/* Frequency of the high-resolution clock. */
+static uint64_t hrtime_frequency_ = 0;
+
+
+/*
+ * One-time initialization code for functionality defined in util.c.
+ */
+void uv__util_init(void) {
+ LARGE_INTEGER perf_frequency;
+
+ /* Initialize process title access mutex. */
+ InitializeCriticalSection(&process_title_lock);
+
+  /* Retrieve the frequency of the high-resolution timer; it is used by
+   * uv__hrtime() to convert counter values to nanoseconds.
+   */
+ if (QueryPerformanceFrequency(&perf_frequency)) {
+ hrtime_frequency_ = perf_frequency.QuadPart;
+ } else {
+ uv_fatal_error(GetLastError(), "QueryPerformanceFrequency");
+ }
+}
+
+
+int uv_exepath(char* buffer, size_t* size_ptr) {
+ int utf8_len, utf16_buffer_len, utf16_len;
+ WCHAR* utf16_buffer;
+ int err;
+
+ if (buffer == NULL || size_ptr == NULL || *size_ptr == 0) {
+ return UV_EINVAL;
+ }
+
+ if (*size_ptr > 32768) {
+ /* Windows paths can never be longer than this. */
+ utf16_buffer_len = 32768;
+ } else {
+ utf16_buffer_len = (int) *size_ptr;
+ }
+
+ utf16_buffer = (WCHAR*) uv__malloc(sizeof(WCHAR) * utf16_buffer_len);
+ if (!utf16_buffer) {
+ return UV_ENOMEM;
+ }
+
+ /* Get the path as UTF-16. */
+ utf16_len = GetModuleFileNameW(NULL, utf16_buffer, utf16_buffer_len);
+ if (utf16_len <= 0) {
+ err = GetLastError();
+ goto error;
+ }
+
+ /* utf16_len contains the length, *not* including the terminating null. */
+ utf16_buffer[utf16_len] = L'\0';
+
+ /* Convert to UTF-8 */
+ utf8_len = WideCharToMultiByte(CP_UTF8,
+ 0,
+ utf16_buffer,
+ -1,
+ buffer,
+ (int) *size_ptr,
+ NULL,
+ NULL);
+ if (utf8_len == 0) {
+ err = GetLastError();
+ goto error;
+ }
+
+ uv__free(utf16_buffer);
+
+ /* utf8_len *does* include the terminating null at this point, but the
+ * returned size shouldn't. */
+ *size_ptr = utf8_len - 1;
+ return 0;
+
+ error:
+ uv__free(utf16_buffer);
+ return uv_translate_sys_error(err);
+}
+
+
+int uv_cwd(char* buffer, size_t* size) {
+ DWORD utf16_len;
+ WCHAR *utf16_buffer;
+ int r;
+
+ if (buffer == NULL || size == NULL) {
+ return UV_EINVAL;
+ }
+
+ utf16_len = GetCurrentDirectoryW(0, NULL);
+ if (utf16_len == 0) {
+ return uv_translate_sys_error(GetLastError());
+ }
+ utf16_buffer = uv__malloc(utf16_len * sizeof(WCHAR));
+ if (utf16_buffer == NULL) {
+ return UV_ENOMEM;
+ }
+
+ utf16_len = GetCurrentDirectoryW(utf16_len, utf16_buffer);
+ if (utf16_len == 0) {
+ uv__free(utf16_buffer);
+ return uv_translate_sys_error(GetLastError());
+ }
+
+ /* utf16_len contains the length, *not* including the terminating null. */
+ utf16_buffer[utf16_len] = L'\0';
+
+ /* The returned directory should not have a trailing slash, unless it points
+ * at a drive root, like c:\. Remove it if needed. */
+ if (utf16_buffer[utf16_len - 1] == L'\\' &&
+ !(utf16_len == 3 && utf16_buffer[1] == L':')) {
+ utf16_len--;
+ utf16_buffer[utf16_len] = L'\0';
+ }
+
+ /* Check how much space we need */
+ r = WideCharToMultiByte(CP_UTF8,
+ 0,
+ utf16_buffer,
+ -1,
+ NULL,
+ 0,
+ NULL,
+ NULL);
+ if (r == 0) {
+ uv__free(utf16_buffer);
+ return uv_translate_sys_error(GetLastError());
+ } else if (r > (int) *size) {
+ uv__free(utf16_buffer);
+ *size = r;
+ return UV_ENOBUFS;
+ }
+
+ /* Convert to UTF-8 */
+ r = WideCharToMultiByte(CP_UTF8,
+ 0,
+ utf16_buffer,
+ -1,
+ buffer,
+ *size > INT_MAX ? INT_MAX : (int) *size,
+ NULL,
+ NULL);
+ uv__free(utf16_buffer);
+
+ if (r == 0) {
+ return uv_translate_sys_error(GetLastError());
+ }
+
+ *size = r - 1;
+ return 0;
+}
+
+
+int uv_chdir(const char* dir) {
+ WCHAR *utf16_buffer;
+ size_t utf16_len, new_utf16_len;
+ WCHAR drive_letter, env_var[4];
+
+ if (dir == NULL) {
+ return UV_EINVAL;
+ }
+
+ utf16_len = MultiByteToWideChar(CP_UTF8,
+ 0,
+ dir,
+ -1,
+ NULL,
+ 0);
+ if (utf16_len == 0) {
+ return uv_translate_sys_error(GetLastError());
+ }
+ utf16_buffer = uv__malloc(utf16_len * sizeof(WCHAR));
+ if (utf16_buffer == NULL) {
+ return UV_ENOMEM;
+ }
+
+ if (MultiByteToWideChar(CP_UTF8,
+ 0,
+ dir,
+ -1,
+ utf16_buffer,
+ utf16_len) == 0) {
+ uv__free(utf16_buffer);
+ return uv_translate_sys_error(GetLastError());
+ }
+
+ if (!SetCurrentDirectoryW(utf16_buffer)) {
+ uv__free(utf16_buffer);
+ return uv_translate_sys_error(GetLastError());
+ }
+
+  /* Windows stores the drive-local path in a "hidden" environment variable,
+ * which has the form "=C:=C:\Windows". SetCurrentDirectory does not update
+ * this, so we'll have to do it. */
+ new_utf16_len = GetCurrentDirectoryW(utf16_len, utf16_buffer);
+ if (new_utf16_len > utf16_len ) {
+ uv__free(utf16_buffer);
+ utf16_buffer = uv__malloc(new_utf16_len * sizeof(WCHAR));
+ if (utf16_buffer == NULL) {
+      /* When updating the environment variable fails, return 0 (success)
+       * anyway. We did successfully change the current working directory;
+       * only updating the hidden env variable failed. */
+ return 0;
+ }
+ new_utf16_len = GetCurrentDirectoryW(new_utf16_len, utf16_buffer);
+ }
+ if (utf16_len == 0) {
+ uv__free(utf16_buffer);
+ return 0;
+ }
+
+ /* The returned directory should not have a trailing slash, unless it points
+ * at a drive root, like c:\. Remove it if needed. */
+ if (utf16_buffer[utf16_len - 1] == L'\\' &&
+ !(utf16_len == 3 && utf16_buffer[1] == L':')) {
+ utf16_len--;
+ utf16_buffer[utf16_len] = L'\0';
+ }
+
+ if (utf16_len < 2 || utf16_buffer[1] != L':') {
+    /* Doesn't look like a drive letter could be there - probably a UNC path.
+ * TODO: Need to handle win32 namespaces like \\?\C:\ ? */
+ drive_letter = 0;
+ } else if (utf16_buffer[0] >= L'A' && utf16_buffer[0] <= L'Z') {
+ drive_letter = utf16_buffer[0];
+ } else if (utf16_buffer[0] >= L'a' && utf16_buffer[0] <= L'z') {
+ /* Convert to uppercase. */
+ drive_letter = utf16_buffer[0] - L'a' + L'A';
+ } else {
+ /* Not valid. */
+ drive_letter = 0;
+ }
+
+ if (drive_letter != 0) {
+ /* Construct the environment variable name and set it. */
+ env_var[0] = L'=';
+ env_var[1] = drive_letter;
+ env_var[2] = L':';
+ env_var[3] = L'\0';
+
+ SetEnvironmentVariableW(env_var, utf16_buffer);
+ }
+
+ uv__free(utf16_buffer);
+ return 0;
+}
+
+
+void uv_loadavg(double avg[3]) {
+ /* Can't be implemented */
+ avg[0] = avg[1] = avg[2] = 0;
+}
+
+
+uint64_t uv_get_free_memory(void) {
+ MEMORYSTATUSEX memory_status;
+ memory_status.dwLength = sizeof(memory_status);
+
+ if (!GlobalMemoryStatusEx(&memory_status)) {
+ return -1;
+ }
+
+ return (uint64_t)memory_status.ullAvailPhys;
+}
+
+
+uint64_t uv_get_total_memory(void) {
+ MEMORYSTATUSEX memory_status;
+ memory_status.dwLength = sizeof(memory_status);
+
+ if (!GlobalMemoryStatusEx(&memory_status)) {
+ return -1;
+ }
+
+ return (uint64_t)memory_status.ullTotalPhys;
+}
+
+
+uint64_t uv_get_constrained_memory(void) {
+ return 0; /* Memory constraints are unknown. */
+}
+
+
+uv_pid_t uv_os_getpid(void) {
+ return GetCurrentProcessId();
+}
+
+
+uv_pid_t uv_os_getppid(void) {
+ int parent_pid = -1;
+ HANDLE handle;
+ PROCESSENTRY32 pe;
+ DWORD current_pid = GetCurrentProcessId();
+
+ pe.dwSize = sizeof(PROCESSENTRY32);
+ handle = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0);
+
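+  /* Walk the process snapshot until we find our own entry, then report the
+   * parent process id recorded there. */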
+ if (Process32First(handle, &pe)) {
+ do {
+ if (pe.th32ProcessID == current_pid) {
+ parent_pid = pe.th32ParentProcessID;
+ break;
+ }
+    } while (Process32Next(handle, &pe));
+ }
+
+ CloseHandle(handle);
+ return parent_pid;
+}
+
+
+char** uv_setup_args(int argc, char** argv) {
+ return argv;
+}
+
+
+void uv__process_title_cleanup(void) {
+}
+
+
+int uv_set_process_title(const char* title) {
+ int err;
+ int length;
+ WCHAR* title_w = NULL;
+
+ uv__once_init();
+
+ /* Find out how big the buffer for the wide-char title must be */
+ length = MultiByteToWideChar(CP_UTF8, 0, title, -1, NULL, 0);
+ if (!length) {
+ err = GetLastError();
+ goto done;
+ }
+
+ /* Convert to wide-char string */
+ title_w = (WCHAR*)uv__malloc(sizeof(WCHAR) * length);
+ if (!title_w) {
+ uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+ }
+
+ length = MultiByteToWideChar(CP_UTF8, 0, title, -1, title_w, length);
+ if (!length) {
+ err = GetLastError();
+ goto done;
+ }
+
+  /* If the title must be truncated, insert a \0 terminator there. */
+ if (length > MAX_TITLE_LENGTH) {
+ title_w[MAX_TITLE_LENGTH - 1] = L'\0';
+ }
+
+ if (!SetConsoleTitleW(title_w)) {
+ err = GetLastError();
+ goto done;
+ }
+
+ EnterCriticalSection(&process_title_lock);
+ uv__free(process_title);
+ process_title = uv__strdup(title);
+ LeaveCriticalSection(&process_title_lock);
+
+ err = 0;
+
+done:
+ uv__free(title_w);
+ return uv_translate_sys_error(err);
+}
+
+
+static int uv__get_process_title(void) {
+ WCHAR title_w[MAX_TITLE_LENGTH];
+
+ if (!GetConsoleTitleW(title_w, sizeof(title_w) / sizeof(WCHAR))) {
+ return -1;
+ }
+
+ if (uv__convert_utf16_to_utf8(title_w, -1, &process_title) != 0)
+ return -1;
+
+ return 0;
+}
+
+
+int uv_get_process_title(char* buffer, size_t size) {
+ size_t len;
+
+ if (buffer == NULL || size == 0)
+ return UV_EINVAL;
+
+ uv__once_init();
+
+ EnterCriticalSection(&process_title_lock);
+  /*
+   * If the process_title was never read before nor explicitly set,
+   * we must query it with GetConsoleTitleW.
+   */
+ if (!process_title && uv__get_process_title() == -1) {
+ LeaveCriticalSection(&process_title_lock);
+ return uv_translate_sys_error(GetLastError());
+ }
+
+ assert(process_title);
+ len = strlen(process_title) + 1;
+
+ if (size < len) {
+ LeaveCriticalSection(&process_title_lock);
+ return UV_ENOBUFS;
+ }
+
+ memcpy(buffer, process_title, len);
+ LeaveCriticalSection(&process_title_lock);
+
+ return 0;
+}
+
+
+uint64_t uv_hrtime(void) {
+ uv__once_init();
+ return uv__hrtime(UV__NANOSEC);
+}
+
+uint64_t uv__hrtime(unsigned int scale) {
+ LARGE_INTEGER counter;
+ double scaled_freq;
+ double result;
+
+ assert(hrtime_frequency_ != 0);
+ assert(scale != 0);
+ if (!QueryPerformanceCounter(&counter)) {
+ uv_fatal_error(GetLastError(), "QueryPerformanceCounter");
+ }
+ assert(counter.QuadPart != 0);
+
+ /* Because we have no guarantee about the order of magnitude of the
+ * performance counter interval, integer math could cause this computation
+ * to overflow. Therefore we resort to floating point math.
+ */
+ scaled_freq = (double) hrtime_frequency_ / scale;
+ result = (double) counter.QuadPart / scaled_freq;
+ return (uint64_t) result;
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+ HANDLE current_process;
+ PROCESS_MEMORY_COUNTERS pmc;
+
+ current_process = GetCurrentProcess();
+
+ if (!GetProcessMemoryInfo(current_process, &pmc, sizeof(pmc))) {
+ return uv_translate_sys_error(GetLastError());
+ }
+
+ *rss = pmc.WorkingSetSize;
+
+ return 0;
+}
+
+
+int uv_uptime(double* uptime) {
+ BYTE stack_buffer[4096];
+ BYTE* malloced_buffer = NULL;
+ BYTE* buffer = (BYTE*) stack_buffer;
+ size_t buffer_size = sizeof(stack_buffer);
+ DWORD data_size;
+
+ PERF_DATA_BLOCK* data_block;
+ PERF_OBJECT_TYPE* object_type;
+ PERF_COUNTER_DEFINITION* counter_definition;
+
+ DWORD i;
+
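+  /* Query the registry's performance data for object index 2 ("System") and
+   * scan its counter definitions for title index 674, the system uptime
+   * counter. */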
+ for (;;) {
+ LONG result;
+
+ data_size = (DWORD) buffer_size;
+ result = RegQueryValueExW(HKEY_PERFORMANCE_DATA,
+ L"2",
+ NULL,
+ NULL,
+ buffer,
+ &data_size);
+ if (result == ERROR_SUCCESS) {
+ break;
+ } else if (result != ERROR_MORE_DATA) {
+ *uptime = 0;
+ return uv_translate_sys_error(result);
+ }
+
+ buffer_size *= 2;
+ /* Don't let the buffer grow infinitely. */
+ if (buffer_size > 1 << 20) {
+ goto internalError;
+ }
+
+ uv__free(malloced_buffer);
+
+ buffer = malloced_buffer = (BYTE*) uv__malloc(buffer_size);
+ if (malloced_buffer == NULL) {
+ *uptime = 0;
+ return UV_ENOMEM;
+ }
+ }
+
+ if (data_size < sizeof(*data_block))
+ goto internalError;
+
+ data_block = (PERF_DATA_BLOCK*) buffer;
+
+ if (wmemcmp(data_block->Signature, L"PERF", 4) != 0)
+ goto internalError;
+
+ if (data_size < data_block->HeaderLength + sizeof(*object_type))
+ goto internalError;
+
+ object_type = (PERF_OBJECT_TYPE*) (buffer + data_block->HeaderLength);
+
+ if (object_type->NumInstances != PERF_NO_INSTANCES)
+ goto internalError;
+
+ counter_definition = (PERF_COUNTER_DEFINITION*) (buffer +
+ data_block->HeaderLength + object_type->HeaderLength);
+ for (i = 0; i < object_type->NumCounters; i++) {
+ if ((BYTE*) counter_definition + sizeof(*counter_definition) >
+ buffer + data_size) {
+ break;
+ }
+
+ if (counter_definition->CounterNameTitleIndex == 674 &&
+ counter_definition->CounterSize == sizeof(uint64_t)) {
+ if (counter_definition->CounterOffset + sizeof(uint64_t) > data_size ||
+ !(counter_definition->CounterType & PERF_OBJECT_TIMER)) {
+ goto internalError;
+ } else {
+ BYTE* address = (BYTE*) object_type + object_type->DefinitionLength +
+ counter_definition->CounterOffset;
+ uint64_t value = *((uint64_t*) address);
+ *uptime = floor((double) (object_type->PerfTime.QuadPart - value) /
+ (double) object_type->PerfFreq.QuadPart);
+ uv__free(malloced_buffer);
+ return 0;
+ }
+ }
+
+ counter_definition = (PERF_COUNTER_DEFINITION*)
+ ((BYTE*) counter_definition + counter_definition->ByteLength);
+ }
+
+ /* If we get here, the uptime value was not found. */
+ uv__free(malloced_buffer);
+ *uptime = 0;
+ return UV_ENOSYS;
+
+ internalError:
+ uv__free(malloced_buffer);
+ *uptime = 0;
+ return UV_EIO;
+}
+
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos_ptr, int* cpu_count_ptr) {
+ uv_cpu_info_t* cpu_infos;
+ SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION* sppi;
+ DWORD sppi_size;
+ SYSTEM_INFO system_info;
+ DWORD cpu_count, i;
+ NTSTATUS status;
+ ULONG result_size;
+ int err;
+ uv_cpu_info_t* cpu_info;
+
+ cpu_infos = NULL;
+ cpu_count = 0;
+ sppi = NULL;
+
+ uv__once_init();
+
+ GetSystemInfo(&system_info);
+ cpu_count = system_info.dwNumberOfProcessors;
+
+ cpu_infos = uv__calloc(cpu_count, sizeof *cpu_infos);
+ if (cpu_infos == NULL) {
+ err = ERROR_OUTOFMEMORY;
+ goto error;
+ }
+
+ sppi_size = cpu_count * sizeof(*sppi);
+ sppi = uv__malloc(sppi_size);
+ if (sppi == NULL) {
+ err = ERROR_OUTOFMEMORY;
+ goto error;
+ }
+
+ status = pNtQuerySystemInformation(SystemProcessorPerformanceInformation,
+ sppi,
+ sppi_size,
+ &result_size);
+ if (!NT_SUCCESS(status)) {
+ err = pRtlNtStatusToDosError(status);
+ goto error;
+ }
+
+ assert(result_size == sppi_size);
+
+ for (i = 0; i < cpu_count; i++) {
+ WCHAR key_name[128];
+ HKEY processor_key;
+ DWORD cpu_speed;
+ DWORD cpu_speed_size = sizeof(cpu_speed);
+ WCHAR cpu_brand[256];
+ DWORD cpu_brand_size = sizeof(cpu_brand);
+ size_t len;
+
+ len = _snwprintf(key_name,
+ ARRAY_SIZE(key_name),
+ L"HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\%d",
+ i);
+
+ assert(len > 0 && len < ARRAY_SIZE(key_name));
+
+ err = RegOpenKeyExW(HKEY_LOCAL_MACHINE,
+ key_name,
+ 0,
+ KEY_QUERY_VALUE,
+ &processor_key);
+ if (err != ERROR_SUCCESS) {
+ goto error;
+ }
+
+ err = RegQueryValueExW(processor_key,
+ L"~MHz",
+ NULL,
+ NULL,
+ (BYTE*)&cpu_speed,
+ &cpu_speed_size);
+ if (err != ERROR_SUCCESS) {
+ RegCloseKey(processor_key);
+ goto error;
+ }
+
+ err = RegQueryValueExW(processor_key,
+ L"ProcessorNameString",
+ NULL,
+ NULL,
+ (BYTE*)&cpu_brand,
+ &cpu_brand_size);
+ RegCloseKey(processor_key);
+ if (err != ERROR_SUCCESS)
+ goto error;
+
+ cpu_info = &cpu_infos[i];
+ cpu_info->speed = cpu_speed;
+ cpu_info->cpu_times.user = sppi[i].UserTime.QuadPart / 10000;
+ cpu_info->cpu_times.sys = (sppi[i].KernelTime.QuadPart -
+ sppi[i].IdleTime.QuadPart) / 10000;
+ cpu_info->cpu_times.idle = sppi[i].IdleTime.QuadPart / 10000;
+ cpu_info->cpu_times.irq = sppi[i].InterruptTime.QuadPart / 10000;
+ cpu_info->cpu_times.nice = 0;
+
+ uv__convert_utf16_to_utf8(cpu_brand,
+ cpu_brand_size / sizeof(WCHAR),
+ &(cpu_info->model));
+ }
+
+ uv__free(sppi);
+
+ *cpu_count_ptr = cpu_count;
+ *cpu_infos_ptr = cpu_infos;
+
+ return 0;
+
+ error:
+ if (cpu_infos != NULL) {
+ /* This is safe because the cpu_infos array is zeroed on allocation. */
+ for (i = 0; i < cpu_count; i++)
+ uv__free(cpu_infos[i].model);
+ }
+
+ uv__free(cpu_infos);
+ uv__free(sppi);
+
+ return uv_translate_sys_error(err);
+}
+
+
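+/* Return nonzero if the running Windows version is greater than or equal to
+ * the given version and service pack levels, using VerifyVersionInfo(). */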
+static int is_windows_version_or_greater(DWORD os_major,
+ DWORD os_minor,
+ WORD service_pack_major,
+ WORD service_pack_minor) {
+ OSVERSIONINFOEX osvi;
+ DWORDLONG condition_mask = 0;
+ int op = VER_GREATER_EQUAL;
+
+ /* Initialize the OSVERSIONINFOEX structure. */
+ ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
+ osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
+ osvi.dwMajorVersion = os_major;
+ osvi.dwMinorVersion = os_minor;
+ osvi.wServicePackMajor = service_pack_major;
+ osvi.wServicePackMinor = service_pack_minor;
+
+ /* Initialize the condition mask. */
+ VER_SET_CONDITION(condition_mask, VER_MAJORVERSION, op);
+ VER_SET_CONDITION(condition_mask, VER_MINORVERSION, op);
+ VER_SET_CONDITION(condition_mask, VER_SERVICEPACKMAJOR, op);
+ VER_SET_CONDITION(condition_mask, VER_SERVICEPACKMINOR, op);
+
+ /* Perform the test. */
+ return (int) VerifyVersionInfo(
+ &osvi,
+ VER_MAJORVERSION | VER_MINORVERSION |
+ VER_SERVICEPACKMAJOR | VER_SERVICEPACKMINOR,
+ condition_mask);
+}
+
+
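+/* Return 1 if the first prefix_len bits of address and prefix_address match,
+ * 0 otherwise. Both addresses must belong to the given family. */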
+static int address_prefix_match(int family,
+ struct sockaddr* address,
+ struct sockaddr* prefix_address,
+ int prefix_len) {
+ uint8_t* address_data;
+ uint8_t* prefix_address_data;
+ int i;
+
+ assert(address->sa_family == family);
+ assert(prefix_address->sa_family == family);
+
+ if (family == AF_INET6) {
+ address_data = (uint8_t*) &(((struct sockaddr_in6 *) address)->sin6_addr);
+ prefix_address_data =
+ (uint8_t*) &(((struct sockaddr_in6 *) prefix_address)->sin6_addr);
+ } else {
+ address_data = (uint8_t*) &(((struct sockaddr_in *) address)->sin_addr);
+ prefix_address_data =
+ (uint8_t*) &(((struct sockaddr_in *) prefix_address)->sin_addr);
+ }
+
+ for (i = 0; i < prefix_len >> 3; i++) {
+ if (address_data[i] != prefix_address_data[i])
+ return 0;
+ }
+
+ if (prefix_len % 8)
+ return prefix_address_data[i] ==
+ (address_data[i] & (0xff << (8 - prefix_len % 8)));
+
+ return 1;
+}
+
+
+int uv_interface_addresses(uv_interface_address_t** addresses_ptr,
+ int* count_ptr) {
+ IP_ADAPTER_ADDRESSES* win_address_buf;
+ ULONG win_address_buf_size;
+ IP_ADAPTER_ADDRESSES* adapter;
+
+ uv_interface_address_t* uv_address_buf;
+ char* name_buf;
+ size_t uv_address_buf_size;
+ uv_interface_address_t* uv_address;
+
+ int count;
+
+ int is_vista_or_greater;
+ ULONG flags;
+
+ *addresses_ptr = NULL;
+ *count_ptr = 0;
+
+ is_vista_or_greater = is_windows_version_or_greater(6, 0, 0, 0);
+ if (is_vista_or_greater) {
+ flags = GAA_FLAG_SKIP_ANYCAST | GAA_FLAG_SKIP_MULTICAST |
+ GAA_FLAG_SKIP_DNS_SERVER;
+ } else {
+ /* We need at least XP SP1. */
+ if (!is_windows_version_or_greater(5, 1, 1, 0))
+ return UV_ENOTSUP;
+
+ flags = GAA_FLAG_SKIP_ANYCAST | GAA_FLAG_SKIP_MULTICAST |
+ GAA_FLAG_SKIP_DNS_SERVER | GAA_FLAG_INCLUDE_PREFIX;
+ }
+
+
+ /* Fetch the size of the adapters reported by windows, and then get the list
+ * itself. */
+ win_address_buf_size = 0;
+ win_address_buf = NULL;
+
+ for (;;) {
+ ULONG r;
+
+    /* If win_address_buf is NULL, then GetAdaptersAddresses will fail with
+ * ERROR_BUFFER_OVERFLOW, and the required buffer size will be stored in
+ * win_address_buf_size. */
+ r = GetAdaptersAddresses(AF_UNSPEC,
+ flags,
+ NULL,
+ win_address_buf,
+ &win_address_buf_size);
+
+ if (r == ERROR_SUCCESS)
+ break;
+
+ uv__free(win_address_buf);
+
+ switch (r) {
+ case ERROR_BUFFER_OVERFLOW:
+ /* This happens when win_address_buf is NULL or too small to hold all
+ * adapters. */
+ win_address_buf = uv__malloc(win_address_buf_size);
+ if (win_address_buf == NULL)
+ return UV_ENOMEM;
+
+ continue;
+
+ case ERROR_NO_DATA: {
+ /* No adapters were found. */
+ uv_address_buf = uv__malloc(1);
+ if (uv_address_buf == NULL)
+ return UV_ENOMEM;
+
+ *count_ptr = 0;
+ *addresses_ptr = uv_address_buf;
+
+ return 0;
+ }
+
+ case ERROR_ADDRESS_NOT_ASSOCIATED:
+ return UV_EAGAIN;
+
+ case ERROR_INVALID_PARAMETER:
+ /* MSDN says:
+ * "This error is returned for any of the following conditions: the
+ * SizePointer parameter is NULL, the Address parameter is not
+ * AF_INET, AF_INET6, or AF_UNSPEC, or the address information for
+ * the parameters requested is greater than ULONG_MAX."
+ * Since the first two conditions are not met, it must be that the
+ * adapter data is too big.
+ */
+ return UV_ENOBUFS;
+
+ default:
+        /* Other (unspecified) errors can happen, but we don't assign any
+         * special meaning to them. */
+ assert(r != ERROR_SUCCESS);
+ return uv_translate_sys_error(r);
+ }
+ }
+
+ /* Count the number of enabled interfaces and compute how much space is
+ * needed to store their info. */
+ count = 0;
+ uv_address_buf_size = 0;
+
+ for (adapter = win_address_buf;
+ adapter != NULL;
+ adapter = adapter->Next) {
+ IP_ADAPTER_UNICAST_ADDRESS* unicast_address;
+ int name_size;
+
+    /* Interfaces that are not 'up' should not be reported. Also skip
+     * interfaces that have no associated unicast address, to avoid
+     * allocating space for the name of this interface. */
+ if (adapter->OperStatus != IfOperStatusUp ||
+ adapter->FirstUnicastAddress == NULL)
+ continue;
+
+ /* Compute the size of the interface name. */
+ name_size = WideCharToMultiByte(CP_UTF8,
+ 0,
+ adapter->FriendlyName,
+ -1,
+ NULL,
+ 0,
+ NULL,
+ FALSE);
+ if (name_size <= 0) {
+ uv__free(win_address_buf);
+ return uv_translate_sys_error(GetLastError());
+ }
+ uv_address_buf_size += name_size;
+
+ /* Count the number of addresses associated with this interface, and
+ * compute the size. */
+ for (unicast_address = (IP_ADAPTER_UNICAST_ADDRESS*)
+ adapter->FirstUnicastAddress;
+ unicast_address != NULL;
+ unicast_address = unicast_address->Next) {
+ count++;
+ uv_address_buf_size += sizeof(uv_interface_address_t);
+ }
+ }
+
+ /* Allocate space to store interface data plus adapter names. */
+ uv_address_buf = uv__malloc(uv_address_buf_size);
+ if (uv_address_buf == NULL) {
+ uv__free(win_address_buf);
+ return UV_ENOMEM;
+ }
+
+ /* Compute the start of the uv_interface_address_t array, and the place in
+ * the buffer where the interface names will be stored. */
+ uv_address = uv_address_buf;
+ name_buf = (char*) (uv_address_buf + count);
+
+ /* Fill out the output buffer. */
+ for (adapter = win_address_buf;
+ adapter != NULL;
+ adapter = adapter->Next) {
+ IP_ADAPTER_UNICAST_ADDRESS* unicast_address;
+ int name_size;
+ size_t max_name_size;
+
+ if (adapter->OperStatus != IfOperStatusUp ||
+ adapter->FirstUnicastAddress == NULL)
+ continue;
+
+ /* Convert the interface name to UTF8. */
+ max_name_size = (char*) uv_address_buf + uv_address_buf_size - name_buf;
+ if (max_name_size > (size_t) INT_MAX)
+ max_name_size = INT_MAX;
+ name_size = WideCharToMultiByte(CP_UTF8,
+ 0,
+ adapter->FriendlyName,
+ -1,
+ name_buf,
+ (int) max_name_size,
+ NULL,
+ FALSE);
+ if (name_size <= 0) {
+ uv__free(win_address_buf);
+ uv__free(uv_address_buf);
+ return uv_translate_sys_error(GetLastError());
+ }
+
+ /* Add an uv_interface_address_t element for every unicast address. */
+ for (unicast_address = (IP_ADAPTER_UNICAST_ADDRESS*)
+ adapter->FirstUnicastAddress;
+ unicast_address != NULL;
+ unicast_address = unicast_address->Next) {
+ struct sockaddr* sa;
+ ULONG prefix_len;
+
+ sa = unicast_address->Address.lpSockaddr;
+
+ /* XP has no OnLinkPrefixLength field. */
+ if (is_vista_or_greater) {
+ prefix_len =
+ ((IP_ADAPTER_UNICAST_ADDRESS_LH*) unicast_address)->OnLinkPrefixLength;
+ } else {
+        /* Prior to Windows Vista, FirstPrefix pointed to a list containing a
+         * single prefix for each IP address assigned to the adapter.
+         * The order of FirstPrefix does not match the order of
+         * FirstUnicastAddress, so we need to find the corresponding prefix.
+         */
+ IP_ADAPTER_PREFIX* prefix;
+ prefix_len = 0;
+
+ for (prefix = adapter->FirstPrefix; prefix; prefix = prefix->Next) {
+ /* We want the longest matching prefix. */
+ if (prefix->Address.lpSockaddr->sa_family != sa->sa_family ||
+ prefix->PrefixLength <= prefix_len)
+ continue;
+
+ if (address_prefix_match(sa->sa_family, sa,
+ prefix->Address.lpSockaddr, prefix->PrefixLength)) {
+ prefix_len = prefix->PrefixLength;
+ }
+ }
+
+        /* If there is no matching prefix information, fall back to a
+         * single-host subnet mask (e.g. 255.255.255.255 for IPv4).
+         */
+ if (!prefix_len)
+ prefix_len = (sa->sa_family == AF_INET6) ? 128 : 32;
+ }
+
+ memset(uv_address, 0, sizeof *uv_address);
+
+ uv_address->name = name_buf;
+
+ if (adapter->PhysicalAddressLength == sizeof(uv_address->phys_addr)) {
+ memcpy(uv_address->phys_addr,
+ adapter->PhysicalAddress,
+ sizeof(uv_address->phys_addr));
+ }
+
+ uv_address->is_internal =
+ (adapter->IfType == IF_TYPE_SOFTWARE_LOOPBACK);
+
+ if (sa->sa_family == AF_INET6) {
+ uv_address->address.address6 = *((struct sockaddr_in6 *) sa);
+
+ uv_address->netmask.netmask6.sin6_family = AF_INET6;
+ memset(uv_address->netmask.netmask6.sin6_addr.s6_addr, 0xff, prefix_len >> 3);
+ /* This check ensures that we don't write past the size of the data. */
+ if (prefix_len % 8) {
+ uv_address->netmask.netmask6.sin6_addr.s6_addr[prefix_len >> 3] =
+ 0xff << (8 - prefix_len % 8);
+ }
+
+ } else {
+ uv_address->address.address4 = *((struct sockaddr_in *) sa);
+
+ uv_address->netmask.netmask4.sin_family = AF_INET;
+ uv_address->netmask.netmask4.sin_addr.s_addr = (prefix_len > 0) ?
+ htonl(0xffffffff << (32 - prefix_len)) : 0;
+ }
+
+ uv_address++;
+ }
+
+ name_buf += name_size;
+ }
+
+ uv__free(win_address_buf);
+
+ *addresses_ptr = uv_address_buf;
+ *count_ptr = count;
+
+ return 0;
+}
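The netmask construction at the end of the loop above can be sanity-checked by hand: for IPv4, a prefix_len of 24 gives htonl(0xffffffff << 8), i.e. 255.255.255.0, while for IPv6 a prefix_len of 65 writes eight 0xff bytes (65 >> 3) followed by 0xff << 7 == 0x80 in the ninth byte. A hedged sketch of the IPv4 mapping in host byte order (the helper name is illustrative only):

#include <stdint.h>

/* Sketch only: the prefix-length-to-IPv4-netmask mapping used above,
 * in host byte order (the code above applies htonl() before storing it). */
static uint32_t prefix_to_v4_mask(int prefix_len) {
  return (prefix_len > 0) ? 0xffffffffu << (32 - prefix_len) : 0;
}

/* prefix_to_v4_mask(24) == 0xffffff00 (255.255.255.0);
 * prefix_to_v4_mask(32) == 0xffffffff (single host);
 * prefix_to_v4_mask(0)  == 0. */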
+
+
+void uv_free_interface_addresses(uv_interface_address_t* addresses,
+ int count) {
+ uv__free(addresses);
+}
+
+
+int uv_getrusage(uv_rusage_t *uv_rusage) {
+ FILETIME createTime, exitTime, kernelTime, userTime;
+ SYSTEMTIME kernelSystemTime, userSystemTime;
+ PROCESS_MEMORY_COUNTERS memCounters;
+ IO_COUNTERS ioCounters;
+ int ret;
+
+ ret = GetProcessTimes(GetCurrentProcess(), &createTime, &exitTime, &kernelTime, &userTime);
+ if (ret == 0) {
+ return uv_translate_sys_error(GetLastError());
+ }
+
+ ret = FileTimeToSystemTime(&kernelTime, &kernelSystemTime);
+ if (ret == 0) {
+ return uv_translate_sys_error(GetLastError());
+ }
+
+ ret = FileTimeToSystemTime(&userTime, &userSystemTime);
+ if (ret == 0) {
+ return uv_translate_sys_error(GetLastError());
+ }
+
+ ret = GetProcessMemoryInfo(GetCurrentProcess(),
+ &memCounters,
+ sizeof(memCounters));
+ if (ret == 0) {
+ return uv_translate_sys_error(GetLastError());
+ }
+
+ ret = GetProcessIoCounters(GetCurrentProcess(), &ioCounters);
+ if (ret == 0) {
+ return uv_translate_sys_error(GetLastError());
+ }
+
+ memset(uv_rusage, 0, sizeof(*uv_rusage));
+
+ uv_rusage->ru_utime.tv_sec = userSystemTime.wHour * 3600 +
+ userSystemTime.wMinute * 60 +
+ userSystemTime.wSecond;
+ uv_rusage->ru_utime.tv_usec = userSystemTime.wMilliseconds * 1000;
+
+ uv_rusage->ru_stime.tv_sec = kernelSystemTime.wHour * 3600 +
+ kernelSystemTime.wMinute * 60 +
+ kernelSystemTime.wSecond;
+ uv_rusage->ru_stime.tv_usec = kernelSystemTime.wMilliseconds * 1000;
+
+ uv_rusage->ru_majflt = (uint64_t) memCounters.PageFaultCount;
+ uv_rusage->ru_maxrss = (uint64_t) memCounters.PeakWorkingSetSize / 1024;
+
+ uv_rusage->ru_oublock = (uint64_t) ioCounters.WriteOperationCount;
+ uv_rusage->ru_inblock = (uint64_t) ioCounters.ReadOperationCount;
+
+ return 0;
+}
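Note that the SYSTEMTIME-based conversion above only reads the hour, minute, second and millisecond fields, so CPU times of a day or more lose the day component. A hedged sketch of deriving the same fields directly from the FILETIME's 100-nanosecond ticks, shown only to illustrate the unit conversion and not as part of the patch:

#include <stdint.h>
#include <windows.h>

/* Sketch: convert a FILETIME duration (100-ns ticks) into the seconds and
 * microseconds stored in uv_timeval_t, without going through SYSTEMTIME. */
static void filetime_to_timeval(const FILETIME* ft,
                                int64_t* sec,
                                int32_t* usec) {
  ULARGE_INTEGER v;
  v.LowPart = ft->dwLowDateTime;
  v.HighPart = ft->dwHighDateTime;
  *sec = (int64_t) (v.QuadPart / 10000000u);           /* 1e7 ticks per second */
  *usec = (int32_t) ((v.QuadPart % 10000000u) / 10u);  /* 10 ticks per usec */
}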
+
+
+int uv_os_homedir(char* buffer, size_t* size) {
+ uv_passwd_t pwd;
+ size_t len;
+ int r;
+
+ /* Check if the USERPROFILE environment variable is set first. The task of
+ performing input validation on buffer and size is taken care of by
+ uv_os_getenv(). */
+ r = uv_os_getenv("USERPROFILE", buffer, size);
+
+ /* Don't return an error if USERPROFILE was not found. */
+ if (r != UV_ENOENT)
+ return r;
+
+ /* USERPROFILE is not set, so call uv__getpwuid_r() */
+ r = uv__getpwuid_r(&pwd);
+
+ if (r != 0) {
+ return r;
+ }
+
+ len = strlen(pwd.homedir);
+
+ if (len >= *size) {
+ *size = len + 1;
+ uv_os_free_passwd(&pwd);
+ return UV_ENOBUFS;
+ }
+
+ memcpy(buffer, pwd.homedir, len + 1);
+ *size = len;
+ uv_os_free_passwd(&pwd);
+
+ return 0;
+}
+
+
+int uv_os_tmpdir(char* buffer, size_t* size) {
+ wchar_t *path;
+ DWORD bufsize;
+ size_t len;
+
+ if (buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+  len = GetTempPathW(0, NULL);
+ if (len == 0) {
+ return uv_translate_sys_error(GetLastError());
+ }
+ /* Include space for terminating null char. */
+ len += 1;
+ path = uv__malloc(len * sizeof(wchar_t));
+ if (path == NULL) {
+ return UV_ENOMEM;
+ }
+ len = GetTempPathW(len, path);
+
+ if (len == 0) {
+ uv__free(path);
+ return uv_translate_sys_error(GetLastError());
+ }
+
+ /* The returned directory should not have a trailing slash, unless it points
+ * at a drive root, like c:\. Remove it if needed. */
+ if (path[len - 1] == L'\\' &&
+ !(len == 3 && path[1] == L':')) {
+ len--;
+ path[len] = L'\0';
+ }
+
+ /* Check how much space we need */
+ bufsize = WideCharToMultiByte(CP_UTF8, 0, path, -1, NULL, 0, NULL, NULL);
+
+ if (bufsize == 0) {
+ uv__free(path);
+ return uv_translate_sys_error(GetLastError());
+ } else if (bufsize > *size) {
+ uv__free(path);
+ *size = bufsize;
+ return UV_ENOBUFS;
+ }
+
+ /* Convert to UTF-8 */
+ bufsize = WideCharToMultiByte(CP_UTF8,
+ 0,
+ path,
+ -1,
+ buffer,
+ *size,
+ NULL,
+ NULL);
+ uv__free(path);
+
+ if (bufsize == 0)
+ return uv_translate_sys_error(GetLastError());
+
+ *size = bufsize - 1;
+ return 0;
+}
+
+
+void uv_os_free_passwd(uv_passwd_t* pwd) {
+ if (pwd == NULL)
+ return;
+
+ uv__free(pwd->username);
+ uv__free(pwd->homedir);
+ pwd->username = NULL;
+ pwd->homedir = NULL;
+}
+
+
+/*
+ * Converts a UTF-16 string into a UTF-8 one. The resulting string is
+ * null-terminated.
+ *
+ * If utf16 is null terminated, utf16len can be set to -1, otherwise it must
+ * be specified.
+ */
+int uv__convert_utf16_to_utf8(const WCHAR* utf16, int utf16len, char** utf8) {
+ DWORD bufsize;
+
+ if (utf16 == NULL)
+ return UV_EINVAL;
+
+ /* Check how much space we need */
+ bufsize = WideCharToMultiByte(CP_UTF8,
+ 0,
+ utf16,
+ utf16len,
+ NULL,
+ 0,
+ NULL,
+ NULL);
+
+ if (bufsize == 0)
+ return uv_translate_sys_error(GetLastError());
+
+  /* Allocate the destination buffer, adding an extra byte for the terminating
+   * NUL. If utf16len is not -1, WideCharToMultiByte will not add it, so we
+   * always add it ourselves, just in case. */
+ *utf8 = uv__malloc(bufsize + 1);
+
+ if (*utf8 == NULL)
+ return UV_ENOMEM;
+
+ /* Convert to UTF-8 */
+ bufsize = WideCharToMultiByte(CP_UTF8,
+ 0,
+ utf16,
+ utf16len,
+ *utf8,
+ bufsize,
+ NULL,
+ NULL);
+
+ if (bufsize == 0) {
+ uv__free(*utf8);
+ *utf8 = NULL;
+ return uv_translate_sys_error(GetLastError());
+ }
+
+ (*utf8)[bufsize] = '\0';
+ return 0;
+}
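A hedged usage sketch of the helper above; the path literal is made up, the internal declarations in this file are assumed to be in scope, and the caller owns the returned buffer, which must be released with uv__free():

/* Sketch: convert a null-terminated UTF-16 string and release the result. */
static int convert_example(void) {
  char* utf8 = NULL;
  int r;

  r = uv__convert_utf16_to_utf8(L"C:\\Users\\example", -1, &utf8);
  if (r != 0)
    return r;  /* UV_ENOMEM or a translated system error. */

  /* ... use the UTF-8 string ... */
  uv__free(utf8);
  return 0;
}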
+
+
+/*
+ * Converts a UTF-8 string into a UTF-16 one. The resulting string is
+ * null-terminated.
+ *
+ * If utf8 is null terminated, utf8len can be set to -1, otherwise it must
+ * be specified.
+ */
+int uv__convert_utf8_to_utf16(const char* utf8, int utf8len, WCHAR** utf16) {
+ int bufsize;
+
+ if (utf8 == NULL)
+ return UV_EINVAL;
+
+ /* Check how much space we need */
+ bufsize = MultiByteToWideChar(CP_UTF8, 0, utf8, utf8len, NULL, 0);
+
+ if (bufsize == 0)
+ return uv_translate_sys_error(GetLastError());
+
+  /* Allocate the destination buffer, adding an extra WCHAR for the terminating
+   * NUL. If utf8len is not -1, MultiByteToWideChar will not add it, so we
+   * always add it ourselves, just in case. */
+ *utf16 = uv__malloc(sizeof(WCHAR) * (bufsize + 1));
+
+ if (*utf16 == NULL)
+ return UV_ENOMEM;
+
+ /* Convert to UTF-16 */
+ bufsize = MultiByteToWideChar(CP_UTF8, 0, utf8, utf8len, *utf16, bufsize);
+
+ if (bufsize == 0) {
+ uv__free(*utf16);
+ *utf16 = NULL;
+ return uv_translate_sys_error(GetLastError());
+ }
+
+ (*utf16)[bufsize] = L'\0';
+ return 0;
+}
+
+
+int uv__getpwuid_r(uv_passwd_t* pwd) {
+ HANDLE token;
+ wchar_t username[UNLEN + 1];
+ wchar_t *path;
+ DWORD bufsize;
+ int r;
+
+ if (pwd == NULL)
+ return UV_EINVAL;
+
+ /* Get the home directory using GetUserProfileDirectoryW() */
+ if (OpenProcessToken(GetCurrentProcess(), TOKEN_READ, &token) == 0)
+ return uv_translate_sys_error(GetLastError());
+
+ bufsize = 0;
+ GetUserProfileDirectoryW(token, NULL, &bufsize);
+ if (GetLastError() != ERROR_INSUFFICIENT_BUFFER) {
+ r = GetLastError();
+ CloseHandle(token);
+ return uv_translate_sys_error(r);
+ }
+
+ path = uv__malloc(bufsize * sizeof(wchar_t));
+ if (path == NULL) {
+ CloseHandle(token);
+ return UV_ENOMEM;
+ }
+
+ if (!GetUserProfileDirectoryW(token, path, &bufsize)) {
+ r = GetLastError();
+ CloseHandle(token);
+ uv__free(path);
+ return uv_translate_sys_error(r);
+ }
+
+ CloseHandle(token);
+
+ /* Get the username using GetUserNameW() */
+ bufsize = ARRAY_SIZE(username);
+ if (!GetUserNameW(username, &bufsize)) {
+ r = GetLastError();
+ uv__free(path);
+
+ /* This should not be possible */
+ if (r == ERROR_INSUFFICIENT_BUFFER)
+ return UV_ENOMEM;
+
+ return uv_translate_sys_error(r);
+ }
+
+ pwd->homedir = NULL;
+ r = uv__convert_utf16_to_utf8(path, -1, &pwd->homedir);
+ uv__free(path);
+
+ if (r != 0)
+ return r;
+
+ pwd->username = NULL;
+ r = uv__convert_utf16_to_utf8(username, -1, &pwd->username);
+
+ if (r != 0) {
+ uv__free(pwd->homedir);
+ return r;
+ }
+
+ pwd->shell = NULL;
+ pwd->uid = -1;
+ pwd->gid = -1;
+
+ return 0;
+}
+
+
+int uv_os_get_passwd(uv_passwd_t* pwd) {
+ return uv__getpwuid_r(pwd);
+}
+
+
+int uv_os_environ(uv_env_item_t** envitems, int* count) {
+ wchar_t* env;
+ wchar_t* penv;
+ int i, cnt;
+ uv_env_item_t* envitem;
+
+ *envitems = NULL;
+ *count = 0;
+
+ env = GetEnvironmentStringsW();
+ if (env == NULL)
+ return 0;
+
+ for (penv = env, i = 0; *penv != L'\0'; penv += wcslen(penv) + 1, i++);
+
+ *envitems = uv__calloc(i, sizeof(**envitems));
+ if (*envitems == NULL) {
+ FreeEnvironmentStringsW(env);
+ return UV_ENOMEM;
+ }
+
+ penv = env;
+ cnt = 0;
+
+ while (*penv != L'\0' && cnt < i) {
+ char* buf;
+ char* ptr;
+
+ if (uv__convert_utf16_to_utf8(penv, -1, &buf) != 0)
+ goto fail;
+
+ /* Using buf + 1 here because we know that `buf` has length at least 1,
+ * and some special environment variables on Windows start with a = sign. */
+ ptr = strchr(buf + 1, '=');
+ if (ptr == NULL) {
+ uv__free(buf);
+ goto do_continue;
+ }
+
+ *ptr = '\0';
+
+ envitem = &(*envitems)[cnt];
+ envitem->name = buf;
+ envitem->value = ptr + 1;
+
+ cnt++;
+
+ do_continue:
+ penv += wcslen(penv) + 1;
+ }
+
+ FreeEnvironmentStringsW(env);
+
+ *count = cnt;
+ return 0;
+
+fail:
+ FreeEnvironmentStringsW(env);
+
+  for (i = 0; i < cnt; i++) {
+    envitem = &(*envitems)[i];
+    uv__free(envitem->name);
+  }
+ uv__free(*envitems);
+
+ *envitems = NULL;
+ *count = 0;
+ return UV_ENOMEM;
+}
+
+
+int uv_os_getenv(const char* name, char* buffer, size_t* size) {
+ wchar_t fastvar[512];
+ wchar_t* var;
+ DWORD varlen;
+ wchar_t* name_w;
+ DWORD bufsize;
+ size_t len;
+ int r;
+
+ if (name == NULL || buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+ r = uv__convert_utf8_to_utf16(name, -1, &name_w);
+
+ if (r != 0)
+ return r;
+
+ var = fastvar;
+ varlen = ARRAY_SIZE(fastvar);
+
+ for (;;) {
+ SetLastError(ERROR_SUCCESS);
+ len = GetEnvironmentVariableW(name_w, var, varlen);
+
+ if (len < varlen)
+ break;
+
+ /* Try repeatedly because we might have been preempted by another thread
+ * modifying the environment variable just as we're trying to read it.
+ */
+ if (var != fastvar)
+ uv__free(var);
+
+ varlen = 1 + len;
+ var = uv__malloc(varlen * sizeof(*var));
+
+ if (var == NULL) {
+ r = UV_ENOMEM;
+ goto fail;
+ }
+ }
+
+ uv__free(name_w);
+ name_w = NULL;
+
+ if (len == 0) {
+ r = GetLastError();
+ if (r != ERROR_SUCCESS) {
+ r = uv_translate_sys_error(r);
+ goto fail;
+ }
+ }
+
+ /* Check how much space we need */
+ bufsize = WideCharToMultiByte(CP_UTF8, 0, var, -1, NULL, 0, NULL, NULL);
+
+ if (bufsize == 0) {
+ r = uv_translate_sys_error(GetLastError());
+ goto fail;
+ } else if (bufsize > *size) {
+ *size = bufsize;
+ r = UV_ENOBUFS;
+ goto fail;
+ }
+
+ /* Convert to UTF-8 */
+ bufsize = WideCharToMultiByte(CP_UTF8,
+ 0,
+ var,
+ -1,
+ buffer,
+ *size,
+ NULL,
+ NULL);
+
+ if (bufsize == 0) {
+ r = uv_translate_sys_error(GetLastError());
+ goto fail;
+ }
+
+ *size = bufsize - 1;
+ r = 0;
+
+fail:
+
+ if (name_w != NULL)
+ uv__free(name_w);
+
+ if (var != fastvar)
+ uv__free(var);
+
+ return r;
+}
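Callers are expected to retry with a larger buffer when uv_os_getenv() returns UV_ENOBUFS, since *size is updated to the required length. A hedged sketch of that calling pattern; the initial size and helper name are illustrative only:

#include <stdlib.h>
#include "uv.h"

/* Sketch: read an environment variable of unknown length. */
static int getenv_example(const char* name, char** out) {
  size_t size = 64;  /* initial guess */
  char* buf;
  char* bigger;
  int r;

  buf = malloc(size);
  if (buf == NULL)
    return UV_ENOMEM;

  r = uv_os_getenv(name, buf, &size);
  if (r == UV_ENOBUFS) {
    /* size now holds the required length, including the terminating NUL. */
    bigger = realloc(buf, size);
    if (bigger == NULL) {
      free(buf);
      return UV_ENOMEM;
    }
    buf = bigger;
    r = uv_os_getenv(name, buf, &size);
  }

  if (r != 0) {
    free(buf);
    return r;
  }

  *out = buf;
  return 0;
}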
+
+
+int uv_os_setenv(const char* name, const char* value) {
+ wchar_t* name_w;
+ wchar_t* value_w;
+ int r;
+
+ if (name == NULL || value == NULL)
+ return UV_EINVAL;
+
+ r = uv__convert_utf8_to_utf16(name, -1, &name_w);
+
+ if (r != 0)
+ return r;
+
+ r = uv__convert_utf8_to_utf16(value, -1, &value_w);
+
+ if (r != 0) {
+ uv__free(name_w);
+ return r;
+ }
+
+ r = SetEnvironmentVariableW(name_w, value_w);
+ uv__free(name_w);
+ uv__free(value_w);
+
+ if (r == 0)
+ return uv_translate_sys_error(GetLastError());
+
+ return 0;
+}
+
+
+int uv_os_unsetenv(const char* name) {
+ wchar_t* name_w;
+ int r;
+
+ if (name == NULL)
+ return UV_EINVAL;
+
+ r = uv__convert_utf8_to_utf16(name, -1, &name_w);
+
+ if (r != 0)
+ return r;
+
+ r = SetEnvironmentVariableW(name_w, NULL);
+ uv__free(name_w);
+
+ if (r == 0)
+ return uv_translate_sys_error(GetLastError());
+
+ return 0;
+}
+
+
+int uv_os_gethostname(char* buffer, size_t* size) {
+ char buf[UV_MAXHOSTNAMESIZE];
+ size_t len;
+
+ if (buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+ uv__once_init(); /* Initialize winsock */
+
+ if (gethostname(buf, sizeof(buf)) != 0)
+ return uv_translate_sys_error(WSAGetLastError());
+
+ buf[sizeof(buf) - 1] = '\0'; /* Null terminate, just to be safe. */
+ len = strlen(buf);
+
+ if (len >= *size) {
+ *size = len + 1;
+ return UV_ENOBUFS;
+ }
+
+ memcpy(buffer, buf, len + 1);
+ *size = len;
+ return 0;
+}
+
+
+static int uv__get_handle(uv_pid_t pid, int access, HANDLE* handle) {
+ int r;
+
+ if (pid == 0)
+ *handle = GetCurrentProcess();
+ else
+ *handle = OpenProcess(access, FALSE, pid);
+
+ if (*handle == NULL) {
+ r = GetLastError();
+
+ if (r == ERROR_INVALID_PARAMETER)
+ return UV_ESRCH;
+ else
+ return uv_translate_sys_error(r);
+ }
+
+ return 0;
+}
+
+
+int uv_os_getpriority(uv_pid_t pid, int* priority) {
+ HANDLE handle;
+ int r;
+
+ if (priority == NULL)
+ return UV_EINVAL;
+
+ r = uv__get_handle(pid, PROCESS_QUERY_LIMITED_INFORMATION, &handle);
+
+ if (r != 0)
+ return r;
+
+ r = GetPriorityClass(handle);
+
+ if (r == 0) {
+ r = uv_translate_sys_error(GetLastError());
+ } else {
+ /* Map Windows priority classes to Unix nice values. */
+ if (r == REALTIME_PRIORITY_CLASS)
+ *priority = UV_PRIORITY_HIGHEST;
+ else if (r == HIGH_PRIORITY_CLASS)
+ *priority = UV_PRIORITY_HIGH;
+ else if (r == ABOVE_NORMAL_PRIORITY_CLASS)
+ *priority = UV_PRIORITY_ABOVE_NORMAL;
+ else if (r == NORMAL_PRIORITY_CLASS)
+ *priority = UV_PRIORITY_NORMAL;
+ else if (r == BELOW_NORMAL_PRIORITY_CLASS)
+ *priority = UV_PRIORITY_BELOW_NORMAL;
+ else /* IDLE_PRIORITY_CLASS */
+ *priority = UV_PRIORITY_LOW;
+
+ r = 0;
+ }
+
+ CloseHandle(handle);
+ return r;
+}
+
+
+int uv_os_setpriority(uv_pid_t pid, int priority) {
+ HANDLE handle;
+ int priority_class;
+ int r;
+
+ /* Map Unix nice values to Windows priority classes. */
+ if (priority < UV_PRIORITY_HIGHEST || priority > UV_PRIORITY_LOW)
+ return UV_EINVAL;
+ else if (priority < UV_PRIORITY_HIGH)
+ priority_class = REALTIME_PRIORITY_CLASS;
+ else if (priority < UV_PRIORITY_ABOVE_NORMAL)
+ priority_class = HIGH_PRIORITY_CLASS;
+ else if (priority < UV_PRIORITY_NORMAL)
+ priority_class = ABOVE_NORMAL_PRIORITY_CLASS;
+ else if (priority < UV_PRIORITY_BELOW_NORMAL)
+ priority_class = NORMAL_PRIORITY_CLASS;
+ else if (priority < UV_PRIORITY_LOW)
+ priority_class = BELOW_NORMAL_PRIORITY_CLASS;
+ else
+ priority_class = IDLE_PRIORITY_CLASS;
+
+ r = uv__get_handle(pid, PROCESS_SET_INFORMATION, &handle);
+
+ if (r != 0)
+ return r;
+
+ if (SetPriorityClass(handle, priority_class) == 0)
+ r = uv_translate_sys_error(GetLastError());
+
+ CloseHandle(handle);
+ return r;
+}
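The mapping above buckets the POSIX-style nice range into the six Windows priority classes, and uv_os_getpriority() inverts it. A minimal sketch of a set-then-get round trip on the calling process (pid 0), assuming uv.h is included:

#include "uv.h"

/* Sketch: lower the priority of the calling process (pid 0) and read it
 * back; uv__get_handle() above maps pid 0 to GetCurrentProcess(). */
static int priority_example(void) {
  int priority;
  int r;

  r = uv_os_setpriority(0, UV_PRIORITY_BELOW_NORMAL);
  if (r != 0)
    return r;

  r = uv_os_getpriority(0, &priority);
  if (r != 0)
    return r;

  /* priority is now UV_PRIORITY_BELOW_NORMAL, the nice value that
   * corresponds to BELOW_NORMAL_PRIORITY_CLASS. */
  return 0;
}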
+
+
+int uv_os_uname(uv_utsname_t* buffer) {
+ /* Implementation loosely based on
+ https://github.com/gagern/gnulib/blob/master/lib/uname.c */
+ OSVERSIONINFOW os_info;
+ SYSTEM_INFO system_info;
+ HKEY registry_key;
+ WCHAR product_name_w[256];
+ DWORD product_name_w_size;
+ int version_size;
+ int processor_level;
+ int r;
+
+ if (buffer == NULL)
+ return UV_EINVAL;
+
+ uv__once_init();
+ os_info.dwOSVersionInfoSize = sizeof(os_info);
+ os_info.szCSDVersion[0] = L'\0';
+
+ /* Try calling RtlGetVersion(), and fall back to the deprecated GetVersionEx()
+ if RtlGetVersion() is not available. */
+ if (pRtlGetVersion) {
+ pRtlGetVersion(&os_info);
+ } else {
+ /* Silence GetVersionEx() deprecation warning. */
+ #ifdef _MSC_VER
+ #pragma warning(suppress : 4996)
+ #endif
+ if (GetVersionExW(&os_info) == 0) {
+ r = uv_translate_sys_error(GetLastError());
+ goto error;
+ }
+ }
+
+ /* Populate the version field. */
+ version_size = 0;
+ r = RegOpenKeyExW(HKEY_LOCAL_MACHINE,
+ L"SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion",
+ 0,
+ KEY_QUERY_VALUE,
+ &registry_key);
+
+ if (r == ERROR_SUCCESS) {
+ product_name_w_size = sizeof(product_name_w);
+ r = RegGetValueW(registry_key,
+ NULL,
+ L"ProductName",
+ RRF_RT_REG_SZ,
+ NULL,
+ (PVOID) product_name_w,
+ &product_name_w_size);
+ RegCloseKey(registry_key);
+
+ if (r == ERROR_SUCCESS) {
+ version_size = WideCharToMultiByte(CP_UTF8,
+ 0,
+ product_name_w,
+ -1,
+ buffer->version,
+ sizeof(buffer->version),
+ NULL,
+ NULL);
+ if (version_size == 0) {
+ r = uv_translate_sys_error(GetLastError());
+ goto error;
+ }
+ }
+ }
+
+ /* Append service pack information to the version if present. */
+ if (os_info.szCSDVersion[0] != L'\0') {
+ if (version_size > 0)
+ buffer->version[version_size - 1] = ' ';
+
+ if (WideCharToMultiByte(CP_UTF8,
+ 0,
+ os_info.szCSDVersion,
+ -1,
+ buffer->version + version_size,
+ sizeof(buffer->version) - version_size,
+ NULL,
+ NULL) == 0) {
+ r = uv_translate_sys_error(GetLastError());
+ goto error;
+ }
+ }
+
+ /* Populate the sysname field. */
+#ifdef __MINGW32__
+ r = snprintf(buffer->sysname,
+ sizeof(buffer->sysname),
+ "MINGW32_NT-%u.%u",
+ (unsigned int) os_info.dwMajorVersion,
+ (unsigned int) os_info.dwMinorVersion);
+ assert((size_t)r < sizeof(buffer->sysname));
+#else
+ uv__strscpy(buffer->sysname, "Windows_NT", sizeof(buffer->sysname));
+#endif
+
+ /* Populate the release field. */
+  r = snprintf(buffer->release,
+               sizeof(buffer->release),
+               "%u.%u.%u",
+               (unsigned int) os_info.dwMajorVersion,
+               (unsigned int) os_info.dwMinorVersion,
+               (unsigned int) os_info.dwBuildNumber);
+ assert((size_t)r < sizeof(buffer->release));
+
+ /* Populate the machine field. */
+ GetSystemInfo(&system_info);
+
+ switch (system_info.wProcessorArchitecture) {
+ case PROCESSOR_ARCHITECTURE_AMD64:
+ uv__strscpy(buffer->machine, "x86_64", sizeof(buffer->machine));
+ break;
+ case PROCESSOR_ARCHITECTURE_IA64:
+ uv__strscpy(buffer->machine, "ia64", sizeof(buffer->machine));
+ break;
+ case PROCESSOR_ARCHITECTURE_INTEL:
+ uv__strscpy(buffer->machine, "i386", sizeof(buffer->machine));
+
+ if (system_info.wProcessorLevel > 3) {
+ processor_level = system_info.wProcessorLevel < 6 ?
+ system_info.wProcessorLevel : 6;
+ buffer->machine[1] = '0' + processor_level;
+ }
+
+ break;
+ case PROCESSOR_ARCHITECTURE_IA32_ON_WIN64:
+ uv__strscpy(buffer->machine, "i686", sizeof(buffer->machine));
+ break;
+ case PROCESSOR_ARCHITECTURE_MIPS:
+ uv__strscpy(buffer->machine, "mips", sizeof(buffer->machine));
+ break;
+ case PROCESSOR_ARCHITECTURE_ALPHA:
+ case PROCESSOR_ARCHITECTURE_ALPHA64:
+ uv__strscpy(buffer->machine, "alpha", sizeof(buffer->machine));
+ break;
+ case PROCESSOR_ARCHITECTURE_PPC:
+ uv__strscpy(buffer->machine, "powerpc", sizeof(buffer->machine));
+ break;
+ case PROCESSOR_ARCHITECTURE_SHX:
+ uv__strscpy(buffer->machine, "sh", sizeof(buffer->machine));
+ break;
+ case PROCESSOR_ARCHITECTURE_ARM:
+ uv__strscpy(buffer->machine, "arm", sizeof(buffer->machine));
+ break;
+ default:
+ uv__strscpy(buffer->machine, "unknown", sizeof(buffer->machine));
+ break;
+ }
+
+ return 0;
+
+error:
+ buffer->sysname[0] = '\0';
+ buffer->release[0] = '\0';
+ buffer->version[0] = '\0';
+ buffer->machine[0] = '\0';
+ return r;
+}
+
+int uv_gettimeofday(uv_timeval64_t* tv) {
+ /* Based on https://doxygen.postgresql.org/gettimeofday_8c_source.html */
+ const uint64_t epoch = (uint64_t) 116444736000000000ULL;
+ FILETIME file_time;
+ ULARGE_INTEGER ularge;
+
+ if (tv == NULL)
+ return UV_EINVAL;
+
+ GetSystemTimeAsFileTime(&file_time);
+ ularge.LowPart = file_time.dwLowDateTime;
+ ularge.HighPart = file_time.dwHighDateTime;
+ tv->tv_sec = (int64_t) ((ularge.QuadPart - epoch) / 10000000L);
+ tv->tv_usec = (int32_t) (((ularge.QuadPart - epoch) % 10000000L) / 10);
+ return 0;
+}
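The epoch constant above is the number of 100-nanosecond intervals between the Windows epoch (1601-01-01) and the Unix epoch (1970-01-01): 369 years including 89 leap days is 134774 days, and 134774 * 86400 * 10^7 == 116444736000000000. A short sketch re-checking the arithmetic:

#include <assert.h>
#include <stdint.h>

/* Sketch: re-derive the 1601-to-1970 offset used by uv_gettimeofday(). */
static void check_epoch_constant(void) {
  const uint64_t days = 369u * 365u + 89u;             /* 134774 days */
  const uint64_t ticks = days * 86400u * 10000000ull;  /* 100-ns ticks */
  assert(ticks == 116444736000000000ull);
}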
+
+int uv__random_rtlgenrandom(void* buf, size_t buflen) {
+ if (buflen == 0)
+ return 0;
+
+ if (SystemFunction036(buf, buflen) == FALSE)
+ return UV_EIO;
+
+ return 0;
+}
+
+void uv_sleep(unsigned int msec) {
+ Sleep(msec);
+}
diff --git a/Utilities/cmlibuv/src/win/winapi.c b/Utilities/cmlibuv/src/win/winapi.c
new file mode 100644
index 0000000..bb86ec8
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/winapi.c
@@ -0,0 +1,137 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+
+#include "uv.h"
+#include "internal.h"
+
+
+/* Ntdll function pointers */
+sRtlGetVersion pRtlGetVersion;
+sRtlNtStatusToDosError pRtlNtStatusToDosError;
+sNtDeviceIoControlFile pNtDeviceIoControlFile;
+sNtQueryInformationFile pNtQueryInformationFile;
+sNtSetInformationFile pNtSetInformationFile;
+sNtQueryVolumeInformationFile pNtQueryVolumeInformationFile;
+sNtQueryDirectoryFile pNtQueryDirectoryFile;
+sNtQuerySystemInformation pNtQuerySystemInformation;
+sNtQueryInformationProcess pNtQueryInformationProcess;
+
+/* Kernel32 function pointers */
+sGetQueuedCompletionStatusEx pGetQueuedCompletionStatusEx;
+
+/* Powrprof.dll function pointer */
+sPowerRegisterSuspendResumeNotification pPowerRegisterSuspendResumeNotification;
+
+/* User32.dll function pointer */
+sSetWinEventHook pSetWinEventHook;
+
+
+void uv_winapi_init(void) {
+ HMODULE ntdll_module;
+ HMODULE powrprof_module;
+ HMODULE user32_module;
+ HMODULE kernel32_module;
+
+ ntdll_module = GetModuleHandleA("ntdll.dll");
+ if (ntdll_module == NULL) {
+ uv_fatal_error(GetLastError(), "GetModuleHandleA");
+ }
+
+ pRtlGetVersion = (sRtlGetVersion) GetProcAddress(ntdll_module,
+ "RtlGetVersion");
+
+ pRtlNtStatusToDosError = (sRtlNtStatusToDosError) GetProcAddress(
+ ntdll_module,
+ "RtlNtStatusToDosError");
+ if (pRtlNtStatusToDosError == NULL) {
+ uv_fatal_error(GetLastError(), "GetProcAddress");
+ }
+
+ pNtDeviceIoControlFile = (sNtDeviceIoControlFile) GetProcAddress(
+ ntdll_module,
+ "NtDeviceIoControlFile");
+ if (pNtDeviceIoControlFile == NULL) {
+ uv_fatal_error(GetLastError(), "GetProcAddress");
+ }
+
+ pNtQueryInformationFile = (sNtQueryInformationFile) GetProcAddress(
+ ntdll_module,
+ "NtQueryInformationFile");
+ if (pNtQueryInformationFile == NULL) {
+ uv_fatal_error(GetLastError(), "GetProcAddress");
+ }
+
+ pNtSetInformationFile = (sNtSetInformationFile) GetProcAddress(
+ ntdll_module,
+ "NtSetInformationFile");
+ if (pNtSetInformationFile == NULL) {
+ uv_fatal_error(GetLastError(), "GetProcAddress");
+ }
+
+ pNtQueryVolumeInformationFile = (sNtQueryVolumeInformationFile)
+ GetProcAddress(ntdll_module, "NtQueryVolumeInformationFile");
+ if (pNtQueryVolumeInformationFile == NULL) {
+ uv_fatal_error(GetLastError(), "GetProcAddress");
+ }
+
+ pNtQueryDirectoryFile = (sNtQueryDirectoryFile)
+ GetProcAddress(ntdll_module, "NtQueryDirectoryFile");
+  if (pNtQueryDirectoryFile == NULL) {
+ uv_fatal_error(GetLastError(), "GetProcAddress");
+ }
+
+ pNtQuerySystemInformation = (sNtQuerySystemInformation) GetProcAddress(
+ ntdll_module,
+ "NtQuerySystemInformation");
+ if (pNtQuerySystemInformation == NULL) {
+ uv_fatal_error(GetLastError(), "GetProcAddress");
+ }
+
+ pNtQueryInformationProcess = (sNtQueryInformationProcess) GetProcAddress(
+ ntdll_module,
+ "NtQueryInformationProcess");
+ if (pNtQueryInformationProcess == NULL) {
+ uv_fatal_error(GetLastError(), "GetProcAddress");
+ }
+
+ kernel32_module = GetModuleHandleA("kernel32.dll");
+ if (kernel32_module == NULL) {
+ uv_fatal_error(GetLastError(), "GetModuleHandleA");
+ }
+
+ pGetQueuedCompletionStatusEx = (sGetQueuedCompletionStatusEx) GetProcAddress(
+ kernel32_module,
+ "GetQueuedCompletionStatusEx");
+
+ powrprof_module = LoadLibraryA("powrprof.dll");
+ if (powrprof_module != NULL) {
+ pPowerRegisterSuspendResumeNotification = (sPowerRegisterSuspendResumeNotification)
+ GetProcAddress(powrprof_module, "PowerRegisterSuspendResumeNotification");
+ }
+
+ user32_module = LoadLibraryA("user32.dll");
+ if (user32_module != NULL) {
+ pSetWinEventHook = (sSetWinEventHook)
+ GetProcAddress(user32_module, "SetWinEventHook");
+ }
+}
diff --git a/Utilities/cmlibuv/src/win/winapi.h b/Utilities/cmlibuv/src/win/winapi.h
new file mode 100644
index 0000000..9bc4559
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/winapi.h
@@ -0,0 +1,4771 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_WIN_WINAPI_H_
+#define UV_WIN_WINAPI_H_
+
+#include <windows.h>
+
+
+/*
+ * Ntdll headers
+ */
+#ifndef STATUS_SEVERITY_SUCCESS
+# define STATUS_SEVERITY_SUCCESS 0x0
+#endif
+
+#ifndef STATUS_SEVERITY_INFORMATIONAL
+# define STATUS_SEVERITY_INFORMATIONAL 0x1
+#endif
+
+#ifndef STATUS_SEVERITY_WARNING
+# define STATUS_SEVERITY_WARNING 0x2
+#endif
+
+#ifndef STATUS_SEVERITY_ERROR
+# define STATUS_SEVERITY_ERROR 0x3
+#endif
+
+#ifndef FACILITY_NTWIN32
+# define FACILITY_NTWIN32 0x7
+#endif
+
+#ifndef NT_SUCCESS
+# define NT_SUCCESS(status) (((NTSTATUS) (status)) >= 0)
+#endif
+
+#ifndef NT_INFORMATION
+# define NT_INFORMATION(status) ((((ULONG) (status)) >> 30) == 1)
+#endif
+
+#ifndef NT_WARNING
+# define NT_WARNING(status) ((((ULONG) (status)) >> 30) == 2)
+#endif
+
+#ifndef NT_ERROR
+# define NT_ERROR(status) ((((ULONG) (status)) >> 30) == 3)
+#endif
+
+#ifndef STATUS_SUCCESS
+# define STATUS_SUCCESS ((NTSTATUS) 0x00000000L)
+#endif
+
+#ifndef STATUS_WAIT_0
+# define STATUS_WAIT_0 ((NTSTATUS) 0x00000000L)
+#endif
+
+#ifndef STATUS_WAIT_1
+# define STATUS_WAIT_1 ((NTSTATUS) 0x00000001L)
+#endif
+
+#ifndef STATUS_WAIT_2
+# define STATUS_WAIT_2 ((NTSTATUS) 0x00000002L)
+#endif
+
+#ifndef STATUS_WAIT_3
+# define STATUS_WAIT_3 ((NTSTATUS) 0x00000003L)
+#endif
+
+#ifndef STATUS_WAIT_63
+# define STATUS_WAIT_63 ((NTSTATUS) 0x0000003FL)
+#endif
+
+#ifndef STATUS_ABANDONED
+# define STATUS_ABANDONED ((NTSTATUS) 0x00000080L)
+#endif
+
+#ifndef STATUS_ABANDONED_WAIT_0
+# define STATUS_ABANDONED_WAIT_0 ((NTSTATUS) 0x00000080L)
+#endif
+
+#ifndef STATUS_ABANDONED_WAIT_63
+# define STATUS_ABANDONED_WAIT_63 ((NTSTATUS) 0x000000BFL)
+#endif
+
+#ifndef STATUS_USER_APC
+# define STATUS_USER_APC ((NTSTATUS) 0x000000C0L)
+#endif
+
+#ifndef STATUS_KERNEL_APC
+# define STATUS_KERNEL_APC ((NTSTATUS) 0x00000100L)
+#endif
+
+#ifndef STATUS_ALERTED
+# define STATUS_ALERTED ((NTSTATUS) 0x00000101L)
+#endif
+
+#ifndef STATUS_TIMEOUT
+# define STATUS_TIMEOUT ((NTSTATUS) 0x00000102L)
+#endif
+
+#ifndef STATUS_PENDING
+# define STATUS_PENDING ((NTSTATUS) 0x00000103L)
+#endif
+
+#ifndef STATUS_REPARSE
+# define STATUS_REPARSE ((NTSTATUS) 0x00000104L)
+#endif
+
+#ifndef STATUS_MORE_ENTRIES
+# define STATUS_MORE_ENTRIES ((NTSTATUS) 0x00000105L)
+#endif
+
+#ifndef STATUS_NOT_ALL_ASSIGNED
+# define STATUS_NOT_ALL_ASSIGNED ((NTSTATUS) 0x00000106L)
+#endif
+
+#ifndef STATUS_SOME_NOT_MAPPED
+# define STATUS_SOME_NOT_MAPPED ((NTSTATUS) 0x00000107L)
+#endif
+
+#ifndef STATUS_OPLOCK_BREAK_IN_PROGRESS
+# define STATUS_OPLOCK_BREAK_IN_PROGRESS ((NTSTATUS) 0x00000108L)
+#endif
+
+#ifndef STATUS_VOLUME_MOUNTED
+# define STATUS_VOLUME_MOUNTED ((NTSTATUS) 0x00000109L)
+#endif
+
+#ifndef STATUS_RXACT_COMMITTED
+# define STATUS_RXACT_COMMITTED ((NTSTATUS) 0x0000010AL)
+#endif
+
+#ifndef STATUS_NOTIFY_CLEANUP
+# define STATUS_NOTIFY_CLEANUP ((NTSTATUS) 0x0000010BL)
+#endif
+
+#ifndef STATUS_NOTIFY_ENUM_DIR
+# define STATUS_NOTIFY_ENUM_DIR ((NTSTATUS) 0x0000010CL)
+#endif
+
+#ifndef STATUS_NO_QUOTAS_FOR_ACCOUNT
+# define STATUS_NO_QUOTAS_FOR_ACCOUNT ((NTSTATUS) 0x0000010DL)
+#endif
+
+#ifndef STATUS_PRIMARY_TRANSPORT_CONNECT_FAILED
+# define STATUS_PRIMARY_TRANSPORT_CONNECT_FAILED ((NTSTATUS) 0x0000010EL)
+#endif
+
+#ifndef STATUS_PAGE_FAULT_TRANSITION
+# define STATUS_PAGE_FAULT_TRANSITION ((NTSTATUS) 0x00000110L)
+#endif
+
+#ifndef STATUS_PAGE_FAULT_DEMAND_ZERO
+# define STATUS_PAGE_FAULT_DEMAND_ZERO ((NTSTATUS) 0x00000111L)
+#endif
+
+#ifndef STATUS_PAGE_FAULT_COPY_ON_WRITE
+# define STATUS_PAGE_FAULT_COPY_ON_WRITE ((NTSTATUS) 0x00000112L)
+#endif
+
+#ifndef STATUS_PAGE_FAULT_GUARD_PAGE
+# define STATUS_PAGE_FAULT_GUARD_PAGE ((NTSTATUS) 0x00000113L)
+#endif
+
+#ifndef STATUS_PAGE_FAULT_PAGING_FILE
+# define STATUS_PAGE_FAULT_PAGING_FILE ((NTSTATUS) 0x00000114L)
+#endif
+
+#ifndef STATUS_CACHE_PAGE_LOCKED
+# define STATUS_CACHE_PAGE_LOCKED ((NTSTATUS) 0x00000115L)
+#endif
+
+#ifndef STATUS_CRASH_DUMP
+# define STATUS_CRASH_DUMP ((NTSTATUS) 0x00000116L)
+#endif
+
+#ifndef STATUS_BUFFER_ALL_ZEROS
+# define STATUS_BUFFER_ALL_ZEROS ((NTSTATUS) 0x00000117L)
+#endif
+
+#ifndef STATUS_REPARSE_OBJECT
+# define STATUS_REPARSE_OBJECT ((NTSTATUS) 0x00000118L)
+#endif
+
+#ifndef STATUS_RESOURCE_REQUIREMENTS_CHANGED
+# define STATUS_RESOURCE_REQUIREMENTS_CHANGED ((NTSTATUS) 0x00000119L)
+#endif
+
+#ifndef STATUS_TRANSLATION_COMPLETE
+# define STATUS_TRANSLATION_COMPLETE ((NTSTATUS) 0x00000120L)
+#endif
+
+#ifndef STATUS_DS_MEMBERSHIP_EVALUATED_LOCALLY
+# define STATUS_DS_MEMBERSHIP_EVALUATED_LOCALLY ((NTSTATUS) 0x00000121L)
+#endif
+
+#ifndef STATUS_NOTHING_TO_TERMINATE
+# define STATUS_NOTHING_TO_TERMINATE ((NTSTATUS) 0x00000122L)
+#endif
+
+#ifndef STATUS_PROCESS_NOT_IN_JOB
+# define STATUS_PROCESS_NOT_IN_JOB ((NTSTATUS) 0x00000123L)
+#endif
+
+#ifndef STATUS_PROCESS_IN_JOB
+# define STATUS_PROCESS_IN_JOB ((NTSTATUS) 0x00000124L)
+#endif
+
+#ifndef STATUS_VOLSNAP_HIBERNATE_READY
+# define STATUS_VOLSNAP_HIBERNATE_READY ((NTSTATUS) 0x00000125L)
+#endif
+
+#ifndef STATUS_FSFILTER_OP_COMPLETED_SUCCESSFULLY
+# define STATUS_FSFILTER_OP_COMPLETED_SUCCESSFULLY ((NTSTATUS) 0x00000126L)
+#endif
+
+#ifndef STATUS_INTERRUPT_VECTOR_ALREADY_CONNECTED
+# define STATUS_INTERRUPT_VECTOR_ALREADY_CONNECTED ((NTSTATUS) 0x00000127L)
+#endif
+
+#ifndef STATUS_INTERRUPT_STILL_CONNECTED
+# define STATUS_INTERRUPT_STILL_CONNECTED ((NTSTATUS) 0x00000128L)
+#endif
+
+#ifndef STATUS_PROCESS_CLONED
+# define STATUS_PROCESS_CLONED ((NTSTATUS) 0x00000129L)
+#endif
+
+#ifndef STATUS_FILE_LOCKED_WITH_ONLY_READERS
+# define STATUS_FILE_LOCKED_WITH_ONLY_READERS ((NTSTATUS) 0x0000012AL)
+#endif
+
+#ifndef STATUS_FILE_LOCKED_WITH_WRITERS
+# define STATUS_FILE_LOCKED_WITH_WRITERS ((NTSTATUS) 0x0000012BL)
+#endif
+
+#ifndef STATUS_RESOURCEMANAGER_READ_ONLY
+# define STATUS_RESOURCEMANAGER_READ_ONLY ((NTSTATUS) 0x00000202L)
+#endif
+
+#ifndef STATUS_RING_PREVIOUSLY_EMPTY
+# define STATUS_RING_PREVIOUSLY_EMPTY ((NTSTATUS) 0x00000210L)
+#endif
+
+#ifndef STATUS_RING_PREVIOUSLY_FULL
+# define STATUS_RING_PREVIOUSLY_FULL ((NTSTATUS) 0x00000211L)
+#endif
+
+#ifndef STATUS_RING_PREVIOUSLY_ABOVE_QUOTA
+# define STATUS_RING_PREVIOUSLY_ABOVE_QUOTA ((NTSTATUS) 0x00000212L)
+#endif
+
+#ifndef STATUS_RING_NEWLY_EMPTY
+# define STATUS_RING_NEWLY_EMPTY ((NTSTATUS) 0x00000213L)
+#endif
+
+#ifndef STATUS_RING_SIGNAL_OPPOSITE_ENDPOINT
+# define STATUS_RING_SIGNAL_OPPOSITE_ENDPOINT ((NTSTATUS) 0x00000214L)
+#endif
+
+#ifndef STATUS_OPLOCK_SWITCHED_TO_NEW_HANDLE
+# define STATUS_OPLOCK_SWITCHED_TO_NEW_HANDLE ((NTSTATUS) 0x00000215L)
+#endif
+
+#ifndef STATUS_OPLOCK_HANDLE_CLOSED
+# define STATUS_OPLOCK_HANDLE_CLOSED ((NTSTATUS) 0x00000216L)
+#endif
+
+#ifndef STATUS_WAIT_FOR_OPLOCK
+# define STATUS_WAIT_FOR_OPLOCK ((NTSTATUS) 0x00000367L)
+#endif
+
+#ifndef STATUS_OBJECT_NAME_EXISTS
+# define STATUS_OBJECT_NAME_EXISTS ((NTSTATUS) 0x40000000L)
+#endif
+
+#ifndef STATUS_THREAD_WAS_SUSPENDED
+# define STATUS_THREAD_WAS_SUSPENDED ((NTSTATUS) 0x40000001L)
+#endif
+
+#ifndef STATUS_WORKING_SET_LIMIT_RANGE
+# define STATUS_WORKING_SET_LIMIT_RANGE ((NTSTATUS) 0x40000002L)
+#endif
+
+#ifndef STATUS_IMAGE_NOT_AT_BASE
+# define STATUS_IMAGE_NOT_AT_BASE ((NTSTATUS) 0x40000003L)
+#endif
+
+#ifndef STATUS_RXACT_STATE_CREATED
+# define STATUS_RXACT_STATE_CREATED ((NTSTATUS) 0x40000004L)
+#endif
+
+#ifndef STATUS_SEGMENT_NOTIFICATION
+# define STATUS_SEGMENT_NOTIFICATION ((NTSTATUS) 0x40000005L)
+#endif
+
+#ifndef STATUS_LOCAL_USER_SESSION_KEY
+# define STATUS_LOCAL_USER_SESSION_KEY ((NTSTATUS) 0x40000006L)
+#endif
+
+#ifndef STATUS_BAD_CURRENT_DIRECTORY
+# define STATUS_BAD_CURRENT_DIRECTORY ((NTSTATUS) 0x40000007L)
+#endif
+
+#ifndef STATUS_SERIAL_MORE_WRITES
+# define STATUS_SERIAL_MORE_WRITES ((NTSTATUS) 0x40000008L)
+#endif
+
+#ifndef STATUS_REGISTRY_RECOVERED
+# define STATUS_REGISTRY_RECOVERED ((NTSTATUS) 0x40000009L)
+#endif
+
+#ifndef STATUS_FT_READ_RECOVERY_FROM_BACKUP
+# define STATUS_FT_READ_RECOVERY_FROM_BACKUP ((NTSTATUS) 0x4000000AL)
+#endif
+
+#ifndef STATUS_FT_WRITE_RECOVERY
+# define STATUS_FT_WRITE_RECOVERY ((NTSTATUS) 0x4000000BL)
+#endif
+
+#ifndef STATUS_SERIAL_COUNTER_TIMEOUT
+# define STATUS_SERIAL_COUNTER_TIMEOUT ((NTSTATUS) 0x4000000CL)
+#endif
+
+#ifndef STATUS_NULL_LM_PASSWORD
+# define STATUS_NULL_LM_PASSWORD ((NTSTATUS) 0x4000000DL)
+#endif
+
+#ifndef STATUS_IMAGE_MACHINE_TYPE_MISMATCH
+# define STATUS_IMAGE_MACHINE_TYPE_MISMATCH ((NTSTATUS) 0x4000000EL)
+#endif
+
+#ifndef STATUS_RECEIVE_PARTIAL
+# define STATUS_RECEIVE_PARTIAL ((NTSTATUS) 0x4000000FL)
+#endif
+
+#ifndef STATUS_RECEIVE_EXPEDITED
+# define STATUS_RECEIVE_EXPEDITED ((NTSTATUS) 0x40000010L)
+#endif
+
+#ifndef STATUS_RECEIVE_PARTIAL_EXPEDITED
+# define STATUS_RECEIVE_PARTIAL_EXPEDITED ((NTSTATUS) 0x40000011L)
+#endif
+
+#ifndef STATUS_EVENT_DONE
+# define STATUS_EVENT_DONE ((NTSTATUS) 0x40000012L)
+#endif
+
+#ifndef STATUS_EVENT_PENDING
+# define STATUS_EVENT_PENDING ((NTSTATUS) 0x40000013L)
+#endif
+
+#ifndef STATUS_CHECKING_FILE_SYSTEM
+# define STATUS_CHECKING_FILE_SYSTEM ((NTSTATUS) 0x40000014L)
+#endif
+
+#ifndef STATUS_FATAL_APP_EXIT
+# define STATUS_FATAL_APP_EXIT ((NTSTATUS) 0x40000015L)
+#endif
+
+#ifndef STATUS_PREDEFINED_HANDLE
+# define STATUS_PREDEFINED_HANDLE ((NTSTATUS) 0x40000016L)
+#endif
+
+#ifndef STATUS_WAS_UNLOCKED
+# define STATUS_WAS_UNLOCKED ((NTSTATUS) 0x40000017L)
+#endif
+
+#ifndef STATUS_SERVICE_NOTIFICATION
+# define STATUS_SERVICE_NOTIFICATION ((NTSTATUS) 0x40000018L)
+#endif
+
+#ifndef STATUS_WAS_LOCKED
+# define STATUS_WAS_LOCKED ((NTSTATUS) 0x40000019L)
+#endif
+
+#ifndef STATUS_LOG_HARD_ERROR
+# define STATUS_LOG_HARD_ERROR ((NTSTATUS) 0x4000001AL)
+#endif
+
+#ifndef STATUS_ALREADY_WIN32
+# define STATUS_ALREADY_WIN32 ((NTSTATUS) 0x4000001BL)
+#endif
+
+#ifndef STATUS_WX86_UNSIMULATE
+# define STATUS_WX86_UNSIMULATE ((NTSTATUS) 0x4000001CL)
+#endif
+
+#ifndef STATUS_WX86_CONTINUE
+# define STATUS_WX86_CONTINUE ((NTSTATUS) 0x4000001DL)
+#endif
+
+#ifndef STATUS_WX86_SINGLE_STEP
+# define STATUS_WX86_SINGLE_STEP ((NTSTATUS) 0x4000001EL)
+#endif
+
+#ifndef STATUS_WX86_BREAKPOINT
+# define STATUS_WX86_BREAKPOINT ((NTSTATUS) 0x4000001FL)
+#endif
+
+#ifndef STATUS_WX86_EXCEPTION_CONTINUE
+# define STATUS_WX86_EXCEPTION_CONTINUE ((NTSTATUS) 0x40000020L)
+#endif
+
+#ifndef STATUS_WX86_EXCEPTION_LASTCHANCE
+# define STATUS_WX86_EXCEPTION_LASTCHANCE ((NTSTATUS) 0x40000021L)
+#endif
+
+#ifndef STATUS_WX86_EXCEPTION_CHAIN
+# define STATUS_WX86_EXCEPTION_CHAIN ((NTSTATUS) 0x40000022L)
+#endif
+
+#ifndef STATUS_IMAGE_MACHINE_TYPE_MISMATCH_EXE
+# define STATUS_IMAGE_MACHINE_TYPE_MISMATCH_EXE ((NTSTATUS) 0x40000023L)
+#endif
+
+#ifndef STATUS_NO_YIELD_PERFORMED
+# define STATUS_NO_YIELD_PERFORMED ((NTSTATUS) 0x40000024L)
+#endif
+
+#ifndef STATUS_TIMER_RESUME_IGNORED
+# define STATUS_TIMER_RESUME_IGNORED ((NTSTATUS) 0x40000025L)
+#endif
+
+#ifndef STATUS_ARBITRATION_UNHANDLED
+# define STATUS_ARBITRATION_UNHANDLED ((NTSTATUS) 0x40000026L)
+#endif
+
+#ifndef STATUS_CARDBUS_NOT_SUPPORTED
+# define STATUS_CARDBUS_NOT_SUPPORTED ((NTSTATUS) 0x40000027L)
+#endif
+
+#ifndef STATUS_WX86_CREATEWX86TIB
+# define STATUS_WX86_CREATEWX86TIB ((NTSTATUS) 0x40000028L)
+#endif
+
+#ifndef STATUS_MP_PROCESSOR_MISMATCH
+# define STATUS_MP_PROCESSOR_MISMATCH ((NTSTATUS) 0x40000029L)
+#endif
+
+#ifndef STATUS_HIBERNATED
+# define STATUS_HIBERNATED ((NTSTATUS) 0x4000002AL)
+#endif
+
+#ifndef STATUS_RESUME_HIBERNATION
+# define STATUS_RESUME_HIBERNATION ((NTSTATUS) 0x4000002BL)
+#endif
+
+#ifndef STATUS_FIRMWARE_UPDATED
+# define STATUS_FIRMWARE_UPDATED ((NTSTATUS) 0x4000002CL)
+#endif
+
+#ifndef STATUS_DRIVERS_LEAKING_LOCKED_PAGES
+# define STATUS_DRIVERS_LEAKING_LOCKED_PAGES ((NTSTATUS) 0x4000002DL)
+#endif
+
+#ifndef STATUS_MESSAGE_RETRIEVED
+# define STATUS_MESSAGE_RETRIEVED ((NTSTATUS) 0x4000002EL)
+#endif
+
+#ifndef STATUS_SYSTEM_POWERSTATE_TRANSITION
+# define STATUS_SYSTEM_POWERSTATE_TRANSITION ((NTSTATUS) 0x4000002FL)
+#endif
+
+#ifndef STATUS_ALPC_CHECK_COMPLETION_LIST
+# define STATUS_ALPC_CHECK_COMPLETION_LIST ((NTSTATUS) 0x40000030L)
+#endif
+
+#ifndef STATUS_SYSTEM_POWERSTATE_COMPLEX_TRANSITION
+# define STATUS_SYSTEM_POWERSTATE_COMPLEX_TRANSITION ((NTSTATUS) 0x40000031L)
+#endif
+
+#ifndef STATUS_ACCESS_AUDIT_BY_POLICY
+# define STATUS_ACCESS_AUDIT_BY_POLICY ((NTSTATUS) 0x40000032L)
+#endif
+
+#ifndef STATUS_ABANDON_HIBERFILE
+# define STATUS_ABANDON_HIBERFILE ((NTSTATUS) 0x40000033L)
+#endif
+
+#ifndef STATUS_BIZRULES_NOT_ENABLED
+# define STATUS_BIZRULES_NOT_ENABLED ((NTSTATUS) 0x40000034L)
+#endif
+
+#ifndef STATUS_GUARD_PAGE_VIOLATION
+# define STATUS_GUARD_PAGE_VIOLATION ((NTSTATUS) 0x80000001L)
+#endif
+
+#ifndef STATUS_DATATYPE_MISALIGNMENT
+# define STATUS_DATATYPE_MISALIGNMENT ((NTSTATUS) 0x80000002L)
+#endif
+
+#ifndef STATUS_BREAKPOINT
+# define STATUS_BREAKPOINT ((NTSTATUS) 0x80000003L)
+#endif
+
+#ifndef STATUS_SINGLE_STEP
+# define STATUS_SINGLE_STEP ((NTSTATUS) 0x80000004L)
+#endif
+
+#ifndef STATUS_BUFFER_OVERFLOW
+# define STATUS_BUFFER_OVERFLOW ((NTSTATUS) 0x80000005L)
+#endif
+
+#ifndef STATUS_NO_MORE_FILES
+# define STATUS_NO_MORE_FILES ((NTSTATUS) 0x80000006L)
+#endif
+
+#ifndef STATUS_WAKE_SYSTEM_DEBUGGER
+# define STATUS_WAKE_SYSTEM_DEBUGGER ((NTSTATUS) 0x80000007L)
+#endif
+
+#ifndef STATUS_HANDLES_CLOSED
+# define STATUS_HANDLES_CLOSED ((NTSTATUS) 0x8000000AL)
+#endif
+
+#ifndef STATUS_NO_INHERITANCE
+# define STATUS_NO_INHERITANCE ((NTSTATUS) 0x8000000BL)
+#endif
+
+#ifndef STATUS_GUID_SUBSTITUTION_MADE
+# define STATUS_GUID_SUBSTITUTION_MADE ((NTSTATUS) 0x8000000CL)
+#endif
+
+#ifndef STATUS_PARTIAL_COPY
+# define STATUS_PARTIAL_COPY ((NTSTATUS) 0x8000000DL)
+#endif
+
+#ifndef STATUS_DEVICE_PAPER_EMPTY
+# define STATUS_DEVICE_PAPER_EMPTY ((NTSTATUS) 0x8000000EL)
+#endif
+
+#ifndef STATUS_DEVICE_POWERED_OFF
+# define STATUS_DEVICE_POWERED_OFF ((NTSTATUS) 0x8000000FL)
+#endif
+
+#ifndef STATUS_DEVICE_OFF_LINE
+# define STATUS_DEVICE_OFF_LINE ((NTSTATUS) 0x80000010L)
+#endif
+
+#ifndef STATUS_DEVICE_BUSY
+# define STATUS_DEVICE_BUSY ((NTSTATUS) 0x80000011L)
+#endif
+
+#ifndef STATUS_NO_MORE_EAS
+# define STATUS_NO_MORE_EAS ((NTSTATUS) 0x80000012L)
+#endif
+
+#ifndef STATUS_INVALID_EA_NAME
+# define STATUS_INVALID_EA_NAME ((NTSTATUS) 0x80000013L)
+#endif
+
+#ifndef STATUS_EA_LIST_INCONSISTENT
+# define STATUS_EA_LIST_INCONSISTENT ((NTSTATUS) 0x80000014L)
+#endif
+
+#ifndef STATUS_INVALID_EA_FLAG
+# define STATUS_INVALID_EA_FLAG ((NTSTATUS) 0x80000015L)
+#endif
+
+#ifndef STATUS_VERIFY_REQUIRED
+# define STATUS_VERIFY_REQUIRED ((NTSTATUS) 0x80000016L)
+#endif
+
+#ifndef STATUS_EXTRANEOUS_INFORMATION
+# define STATUS_EXTRANEOUS_INFORMATION ((NTSTATUS) 0x80000017L)
+#endif
+
+#ifndef STATUS_RXACT_COMMIT_NECESSARY
+# define STATUS_RXACT_COMMIT_NECESSARY ((NTSTATUS) 0x80000018L)
+#endif
+
+#ifndef STATUS_NO_MORE_ENTRIES
+# define STATUS_NO_MORE_ENTRIES ((NTSTATUS) 0x8000001AL)
+#endif
+
+#ifndef STATUS_FILEMARK_DETECTED
+# define STATUS_FILEMARK_DETECTED ((NTSTATUS) 0x8000001BL)
+#endif
+
+#ifndef STATUS_MEDIA_CHANGED
+# define STATUS_MEDIA_CHANGED ((NTSTATUS) 0x8000001CL)
+#endif
+
+#ifndef STATUS_BUS_RESET
+# define STATUS_BUS_RESET ((NTSTATUS) 0x8000001DL)
+#endif
+
+#ifndef STATUS_END_OF_MEDIA
+# define STATUS_END_OF_MEDIA ((NTSTATUS) 0x8000001EL)
+#endif
+
+#ifndef STATUS_BEGINNING_OF_MEDIA
+# define STATUS_BEGINNING_OF_MEDIA ((NTSTATUS) 0x8000001FL)
+#endif
+
+#ifndef STATUS_MEDIA_CHECK
+# define STATUS_MEDIA_CHECK ((NTSTATUS) 0x80000020L)
+#endif
+
+#ifndef STATUS_SETMARK_DETECTED
+# define STATUS_SETMARK_DETECTED ((NTSTATUS) 0x80000021L)
+#endif
+
+#ifndef STATUS_NO_DATA_DETECTED
+# define STATUS_NO_DATA_DETECTED ((NTSTATUS) 0x80000022L)
+#endif
+
+#ifndef STATUS_REDIRECTOR_HAS_OPEN_HANDLES
+# define STATUS_REDIRECTOR_HAS_OPEN_HANDLES ((NTSTATUS) 0x80000023L)
+#endif
+
+#ifndef STATUS_SERVER_HAS_OPEN_HANDLES
+# define STATUS_SERVER_HAS_OPEN_HANDLES ((NTSTATUS) 0x80000024L)
+#endif
+
+#ifndef STATUS_ALREADY_DISCONNECTED
+# define STATUS_ALREADY_DISCONNECTED ((NTSTATUS) 0x80000025L)
+#endif
+
+#ifndef STATUS_LONGJUMP
+# define STATUS_LONGJUMP ((NTSTATUS) 0x80000026L)
+#endif
+
+#ifndef STATUS_CLEANER_CARTRIDGE_INSTALLED
+# define STATUS_CLEANER_CARTRIDGE_INSTALLED ((NTSTATUS) 0x80000027L)
+#endif
+
+#ifndef STATUS_PLUGPLAY_QUERY_VETOED
+# define STATUS_PLUGPLAY_QUERY_VETOED ((NTSTATUS) 0x80000028L)
+#endif
+
+#ifndef STATUS_UNWIND_CONSOLIDATE
+# define STATUS_UNWIND_CONSOLIDATE ((NTSTATUS) 0x80000029L)
+#endif
+
+#ifndef STATUS_REGISTRY_HIVE_RECOVERED
+# define STATUS_REGISTRY_HIVE_RECOVERED ((NTSTATUS) 0x8000002AL)
+#endif
+
+#ifndef STATUS_DLL_MIGHT_BE_INSECURE
+# define STATUS_DLL_MIGHT_BE_INSECURE ((NTSTATUS) 0x8000002BL)
+#endif
+
+#ifndef STATUS_DLL_MIGHT_BE_INCOMPATIBLE
+# define STATUS_DLL_MIGHT_BE_INCOMPATIBLE ((NTSTATUS) 0x8000002CL)
+#endif
+
+#ifndef STATUS_STOPPED_ON_SYMLINK
+# define STATUS_STOPPED_ON_SYMLINK ((NTSTATUS) 0x8000002DL)
+#endif
+
+#ifndef STATUS_CANNOT_GRANT_REQUESTED_OPLOCK
+# define STATUS_CANNOT_GRANT_REQUESTED_OPLOCK ((NTSTATUS) 0x8000002EL)
+#endif
+
+#ifndef STATUS_NO_ACE_CONDITION
+# define STATUS_NO_ACE_CONDITION ((NTSTATUS) 0x8000002FL)
+#endif
+
+#ifndef STATUS_UNSUCCESSFUL
+# define STATUS_UNSUCCESSFUL ((NTSTATUS) 0xC0000001L)
+#endif
+
+#ifndef STATUS_NOT_IMPLEMENTED
+# define STATUS_NOT_IMPLEMENTED ((NTSTATUS) 0xC0000002L)
+#endif
+
+#ifndef STATUS_INVALID_INFO_CLASS
+# define STATUS_INVALID_INFO_CLASS ((NTSTATUS) 0xC0000003L)
+#endif
+
+#ifndef STATUS_INFO_LENGTH_MISMATCH
+# define STATUS_INFO_LENGTH_MISMATCH ((NTSTATUS) 0xC0000004L)
+#endif
+
+#ifndef STATUS_ACCESS_VIOLATION
+# define STATUS_ACCESS_VIOLATION ((NTSTATUS) 0xC0000005L)
+#endif
+
+#ifndef STATUS_IN_PAGE_ERROR
+# define STATUS_IN_PAGE_ERROR ((NTSTATUS) 0xC0000006L)
+#endif
+
+#ifndef STATUS_PAGEFILE_QUOTA
+# define STATUS_PAGEFILE_QUOTA ((NTSTATUS) 0xC0000007L)
+#endif
+
+#ifndef STATUS_INVALID_HANDLE
+# define STATUS_INVALID_HANDLE ((NTSTATUS) 0xC0000008L)
+#endif
+
+#ifndef STATUS_BAD_INITIAL_STACK
+# define STATUS_BAD_INITIAL_STACK ((NTSTATUS) 0xC0000009L)
+#endif
+
+#ifndef STATUS_BAD_INITIAL_PC
+# define STATUS_BAD_INITIAL_PC ((NTSTATUS) 0xC000000AL)
+#endif
+
+#ifndef STATUS_INVALID_CID
+# define STATUS_INVALID_CID ((NTSTATUS) 0xC000000BL)
+#endif
+
+#ifndef STATUS_TIMER_NOT_CANCELED
+# define STATUS_TIMER_NOT_CANCELED ((NTSTATUS) 0xC000000CL)
+#endif
+
+#ifndef STATUS_INVALID_PARAMETER
+# define STATUS_INVALID_PARAMETER ((NTSTATUS) 0xC000000DL)
+#endif
+
+#ifndef STATUS_NO_SUCH_DEVICE
+# define STATUS_NO_SUCH_DEVICE ((NTSTATUS) 0xC000000EL)
+#endif
+
+#ifndef STATUS_NO_SUCH_FILE
+# define STATUS_NO_SUCH_FILE ((NTSTATUS) 0xC000000FL)
+#endif
+
+#ifndef STATUS_INVALID_DEVICE_REQUEST
+# define STATUS_INVALID_DEVICE_REQUEST ((NTSTATUS) 0xC0000010L)
+#endif
+
+#ifndef STATUS_END_OF_FILE
+# define STATUS_END_OF_FILE ((NTSTATUS) 0xC0000011L)
+#endif
+
+#ifndef STATUS_WRONG_VOLUME
+# define STATUS_WRONG_VOLUME ((NTSTATUS) 0xC0000012L)
+#endif
+
+#ifndef STATUS_NO_MEDIA_IN_DEVICE
+# define STATUS_NO_MEDIA_IN_DEVICE ((NTSTATUS) 0xC0000013L)
+#endif
+
+#ifndef STATUS_UNRECOGNIZED_MEDIA
+# define STATUS_UNRECOGNIZED_MEDIA ((NTSTATUS) 0xC0000014L)
+#endif
+
+#ifndef STATUS_NONEXISTENT_SECTOR
+# define STATUS_NONEXISTENT_SECTOR ((NTSTATUS) 0xC0000015L)
+#endif
+
+#ifndef STATUS_MORE_PROCESSING_REQUIRED
+# define STATUS_MORE_PROCESSING_REQUIRED ((NTSTATUS) 0xC0000016L)
+#endif
+
+#ifndef STATUS_NO_MEMORY
+# define STATUS_NO_MEMORY ((NTSTATUS) 0xC0000017L)
+#endif
+
+#ifndef STATUS_CONFLICTING_ADDRESSES
+# define STATUS_CONFLICTING_ADDRESSES ((NTSTATUS) 0xC0000018L)
+#endif
+
+#ifndef STATUS_NOT_MAPPED_VIEW
+# define STATUS_NOT_MAPPED_VIEW ((NTSTATUS) 0xC0000019L)
+#endif
+
+#ifndef STATUS_UNABLE_TO_FREE_VM
+# define STATUS_UNABLE_TO_FREE_VM ((NTSTATUS) 0xC000001AL)
+#endif
+
+#ifndef STATUS_UNABLE_TO_DELETE_SECTION
+# define STATUS_UNABLE_TO_DELETE_SECTION ((NTSTATUS) 0xC000001BL)
+#endif
+
+#ifndef STATUS_INVALID_SYSTEM_SERVICE
+# define STATUS_INVALID_SYSTEM_SERVICE ((NTSTATUS) 0xC000001CL)
+#endif
+
+#ifndef STATUS_ILLEGAL_INSTRUCTION
+# define STATUS_ILLEGAL_INSTRUCTION ((NTSTATUS) 0xC000001DL)
+#endif
+
+#ifndef STATUS_INVALID_LOCK_SEQUENCE
+# define STATUS_INVALID_LOCK_SEQUENCE ((NTSTATUS) 0xC000001EL)
+#endif
+
+#ifndef STATUS_INVALID_VIEW_SIZE
+# define STATUS_INVALID_VIEW_SIZE ((NTSTATUS) 0xC000001FL)
+#endif
+
+#ifndef STATUS_INVALID_FILE_FOR_SECTION
+# define STATUS_INVALID_FILE_FOR_SECTION ((NTSTATUS) 0xC0000020L)
+#endif
+
+#ifndef STATUS_ALREADY_COMMITTED
+# define STATUS_ALREADY_COMMITTED ((NTSTATUS) 0xC0000021L)
+#endif
+
+#ifndef STATUS_ACCESS_DENIED
+# define STATUS_ACCESS_DENIED ((NTSTATUS) 0xC0000022L)
+#endif
+
+#ifndef STATUS_BUFFER_TOO_SMALL
+# define STATUS_BUFFER_TOO_SMALL ((NTSTATUS) 0xC0000023L)
+#endif
+
+#ifndef STATUS_OBJECT_TYPE_MISMATCH
+# define STATUS_OBJECT_TYPE_MISMATCH ((NTSTATUS) 0xC0000024L)
+#endif
+
+#ifndef STATUS_NONCONTINUABLE_EXCEPTION
+# define STATUS_NONCONTINUABLE_EXCEPTION ((NTSTATUS) 0xC0000025L)
+#endif
+
+#ifndef STATUS_INVALID_DISPOSITION
+# define STATUS_INVALID_DISPOSITION ((NTSTATUS) 0xC0000026L)
+#endif
+
+#ifndef STATUS_UNWIND
+# define STATUS_UNWIND ((NTSTATUS) 0xC0000027L)
+#endif
+
+#ifndef STATUS_BAD_STACK
+# define STATUS_BAD_STACK ((NTSTATUS) 0xC0000028L)
+#endif
+
+#ifndef STATUS_INVALID_UNWIND_TARGET
+# define STATUS_INVALID_UNWIND_TARGET ((NTSTATUS) 0xC0000029L)
+#endif
+
+#ifndef STATUS_NOT_LOCKED
+# define STATUS_NOT_LOCKED ((NTSTATUS) 0xC000002AL)
+#endif
+
+#ifndef STATUS_PARITY_ERROR
+# define STATUS_PARITY_ERROR ((NTSTATUS) 0xC000002BL)
+#endif
+
+#ifndef STATUS_UNABLE_TO_DECOMMIT_VM
+# define STATUS_UNABLE_TO_DECOMMIT_VM ((NTSTATUS) 0xC000002CL)
+#endif
+
+#ifndef STATUS_NOT_COMMITTED
+# define STATUS_NOT_COMMITTED ((NTSTATUS) 0xC000002DL)
+#endif
+
+#ifndef STATUS_INVALID_PORT_ATTRIBUTES
+# define STATUS_INVALID_PORT_ATTRIBUTES ((NTSTATUS) 0xC000002EL)
+#endif
+
+#ifndef STATUS_PORT_MESSAGE_TOO_LONG
+# define STATUS_PORT_MESSAGE_TOO_LONG ((NTSTATUS) 0xC000002FL)
+#endif
+
+#ifndef STATUS_INVALID_PARAMETER_MIX
+# define STATUS_INVALID_PARAMETER_MIX ((NTSTATUS) 0xC0000030L)
+#endif
+
+#ifndef STATUS_INVALID_QUOTA_LOWER
+# define STATUS_INVALID_QUOTA_LOWER ((NTSTATUS) 0xC0000031L)
+#endif
+
+#ifndef STATUS_DISK_CORRUPT_ERROR
+# define STATUS_DISK_CORRUPT_ERROR ((NTSTATUS) 0xC0000032L)
+#endif
+
+#ifndef STATUS_OBJECT_NAME_INVALID
+# define STATUS_OBJECT_NAME_INVALID ((NTSTATUS) 0xC0000033L)
+#endif
+
+#ifndef STATUS_OBJECT_NAME_NOT_FOUND
+# define STATUS_OBJECT_NAME_NOT_FOUND ((NTSTATUS) 0xC0000034L)
+#endif
+
+#ifndef STATUS_OBJECT_NAME_COLLISION
+# define STATUS_OBJECT_NAME_COLLISION ((NTSTATUS) 0xC0000035L)
+#endif
+
+#ifndef STATUS_PORT_DISCONNECTED
+# define STATUS_PORT_DISCONNECTED ((NTSTATUS) 0xC0000037L)
+#endif
+
+#ifndef STATUS_DEVICE_ALREADY_ATTACHED
+# define STATUS_DEVICE_ALREADY_ATTACHED ((NTSTATUS) 0xC0000038L)
+#endif
+
+#ifndef STATUS_OBJECT_PATH_INVALID
+# define STATUS_OBJECT_PATH_INVALID ((NTSTATUS) 0xC0000039L)
+#endif
+
+#ifndef STATUS_OBJECT_PATH_NOT_FOUND
+# define STATUS_OBJECT_PATH_NOT_FOUND ((NTSTATUS) 0xC000003AL)
+#endif
+
+#ifndef STATUS_OBJECT_PATH_SYNTAX_BAD
+# define STATUS_OBJECT_PATH_SYNTAX_BAD ((NTSTATUS) 0xC000003BL)
+#endif
+
+#ifndef STATUS_DATA_OVERRUN
+# define STATUS_DATA_OVERRUN ((NTSTATUS) 0xC000003CL)
+#endif
+
+#ifndef STATUS_DATA_LATE_ERROR
+# define STATUS_DATA_LATE_ERROR ((NTSTATUS) 0xC000003DL)
+#endif
+
+#ifndef STATUS_DATA_ERROR
+# define STATUS_DATA_ERROR ((NTSTATUS) 0xC000003EL)
+#endif
+
+#ifndef STATUS_CRC_ERROR
+# define STATUS_CRC_ERROR ((NTSTATUS) 0xC000003FL)
+#endif
+
+#ifndef STATUS_SECTION_TOO_BIG
+# define STATUS_SECTION_TOO_BIG ((NTSTATUS) 0xC0000040L)
+#endif
+
+#ifndef STATUS_PORT_CONNECTION_REFUSED
+# define STATUS_PORT_CONNECTION_REFUSED ((NTSTATUS) 0xC0000041L)
+#endif
+
+#ifndef STATUS_INVALID_PORT_HANDLE
+# define STATUS_INVALID_PORT_HANDLE ((NTSTATUS) 0xC0000042L)
+#endif
+
+#ifndef STATUS_SHARING_VIOLATION
+# define STATUS_SHARING_VIOLATION ((NTSTATUS) 0xC0000043L)
+#endif
+
+#ifndef STATUS_QUOTA_EXCEEDED
+# define STATUS_QUOTA_EXCEEDED ((NTSTATUS) 0xC0000044L)
+#endif
+
+#ifndef STATUS_INVALID_PAGE_PROTECTION
+# define STATUS_INVALID_PAGE_PROTECTION ((NTSTATUS) 0xC0000045L)
+#endif
+
+#ifndef STATUS_MUTANT_NOT_OWNED
+# define STATUS_MUTANT_NOT_OWNED ((NTSTATUS) 0xC0000046L)
+#endif
+
+#ifndef STATUS_SEMAPHORE_LIMIT_EXCEEDED
+# define STATUS_SEMAPHORE_LIMIT_EXCEEDED ((NTSTATUS) 0xC0000047L)
+#endif
+
+#ifndef STATUS_PORT_ALREADY_SET
+# define STATUS_PORT_ALREADY_SET ((NTSTATUS) 0xC0000048L)
+#endif
+
+#ifndef STATUS_SECTION_NOT_IMAGE
+# define STATUS_SECTION_NOT_IMAGE ((NTSTATUS) 0xC0000049L)
+#endif
+
+#ifndef STATUS_SUSPEND_COUNT_EXCEEDED
+# define STATUS_SUSPEND_COUNT_EXCEEDED ((NTSTATUS) 0xC000004AL)
+#endif
+
+#ifndef STATUS_THREAD_IS_TERMINATING
+# define STATUS_THREAD_IS_TERMINATING ((NTSTATUS) 0xC000004BL)
+#endif
+
+#ifndef STATUS_BAD_WORKING_SET_LIMIT
+# define STATUS_BAD_WORKING_SET_LIMIT ((NTSTATUS) 0xC000004CL)
+#endif
+
+#ifndef STATUS_INCOMPATIBLE_FILE_MAP
+# define STATUS_INCOMPATIBLE_FILE_MAP ((NTSTATUS) 0xC000004DL)
+#endif
+
+#ifndef STATUS_SECTION_PROTECTION
+# define STATUS_SECTION_PROTECTION ((NTSTATUS) 0xC000004EL)
+#endif
+
+#ifndef STATUS_EAS_NOT_SUPPORTED
+# define STATUS_EAS_NOT_SUPPORTED ((NTSTATUS) 0xC000004FL)
+#endif
+
+#ifndef STATUS_EA_TOO_LARGE
+# define STATUS_EA_TOO_LARGE ((NTSTATUS) 0xC0000050L)
+#endif
+
+#ifndef STATUS_NONEXISTENT_EA_ENTRY
+# define STATUS_NONEXISTENT_EA_ENTRY ((NTSTATUS) 0xC0000051L)
+#endif
+
+#ifndef STATUS_NO_EAS_ON_FILE
+# define STATUS_NO_EAS_ON_FILE ((NTSTATUS) 0xC0000052L)
+#endif
+
+#ifndef STATUS_EA_CORRUPT_ERROR
+# define STATUS_EA_CORRUPT_ERROR ((NTSTATUS) 0xC0000053L)
+#endif
+
+#ifndef STATUS_FILE_LOCK_CONFLICT
+# define STATUS_FILE_LOCK_CONFLICT ((NTSTATUS) 0xC0000054L)
+#endif
+
+#ifndef STATUS_LOCK_NOT_GRANTED
+# define STATUS_LOCK_NOT_GRANTED ((NTSTATUS) 0xC0000055L)
+#endif
+
+#ifndef STATUS_DELETE_PENDING
+# define STATUS_DELETE_PENDING ((NTSTATUS) 0xC0000056L)
+#endif
+
+#ifndef STATUS_CTL_FILE_NOT_SUPPORTED
+# define STATUS_CTL_FILE_NOT_SUPPORTED ((NTSTATUS) 0xC0000057L)
+#endif
+
+#ifndef STATUS_UNKNOWN_REVISION
+# define STATUS_UNKNOWN_REVISION ((NTSTATUS) 0xC0000058L)
+#endif
+
+#ifndef STATUS_REVISION_MISMATCH
+# define STATUS_REVISION_MISMATCH ((NTSTATUS) 0xC0000059L)
+#endif
+
+#ifndef STATUS_INVALID_OWNER
+# define STATUS_INVALID_OWNER ((NTSTATUS) 0xC000005AL)
+#endif
+
+#ifndef STATUS_INVALID_PRIMARY_GROUP
+# define STATUS_INVALID_PRIMARY_GROUP ((NTSTATUS) 0xC000005BL)
+#endif
+
+#ifndef STATUS_NO_IMPERSONATION_TOKEN
+# define STATUS_NO_IMPERSONATION_TOKEN ((NTSTATUS) 0xC000005CL)
+#endif
+
+#ifndef STATUS_CANT_DISABLE_MANDATORY
+# define STATUS_CANT_DISABLE_MANDATORY ((NTSTATUS) 0xC000005DL)
+#endif
+
+#ifndef STATUS_NO_LOGON_SERVERS
+# define STATUS_NO_LOGON_SERVERS ((NTSTATUS) 0xC000005EL)
+#endif
+
+#ifndef STATUS_NO_SUCH_LOGON_SESSION
+# define STATUS_NO_SUCH_LOGON_SESSION ((NTSTATUS) 0xC000005FL)
+#endif
+
+#ifndef STATUS_NO_SUCH_PRIVILEGE
+# define STATUS_NO_SUCH_PRIVILEGE ((NTSTATUS) 0xC0000060L)
+#endif
+
+#ifndef STATUS_PRIVILEGE_NOT_HELD
+# define STATUS_PRIVILEGE_NOT_HELD ((NTSTATUS) 0xC0000061L)
+#endif
+
+#ifndef STATUS_INVALID_ACCOUNT_NAME
+# define STATUS_INVALID_ACCOUNT_NAME ((NTSTATUS) 0xC0000062L)
+#endif
+
+#ifndef STATUS_USER_EXISTS
+# define STATUS_USER_EXISTS ((NTSTATUS) 0xC0000063L)
+#endif
+
+#ifndef STATUS_NO_SUCH_USER
+# define STATUS_NO_SUCH_USER ((NTSTATUS) 0xC0000064L)
+#endif
+
+#ifndef STATUS_GROUP_EXISTS
+# define STATUS_GROUP_EXISTS ((NTSTATUS) 0xC0000065L)
+#endif
+
+#ifndef STATUS_NO_SUCH_GROUP
+# define STATUS_NO_SUCH_GROUP ((NTSTATUS) 0xC0000066L)
+#endif
+
+#ifndef STATUS_MEMBER_IN_GROUP
+# define STATUS_MEMBER_IN_GROUP ((NTSTATUS) 0xC0000067L)
+#endif
+
+#ifndef STATUS_MEMBER_NOT_IN_GROUP
+# define STATUS_MEMBER_NOT_IN_GROUP ((NTSTATUS) 0xC0000068L)
+#endif
+
+#ifndef STATUS_LAST_ADMIN
+# define STATUS_LAST_ADMIN ((NTSTATUS) 0xC0000069L)
+#endif
+
+#ifndef STATUS_WRONG_PASSWORD
+# define STATUS_WRONG_PASSWORD ((NTSTATUS) 0xC000006AL)
+#endif
+
+#ifndef STATUS_ILL_FORMED_PASSWORD
+# define STATUS_ILL_FORMED_PASSWORD ((NTSTATUS) 0xC000006BL)
+#endif
+
+#ifndef STATUS_PASSWORD_RESTRICTION
+# define STATUS_PASSWORD_RESTRICTION ((NTSTATUS) 0xC000006CL)
+#endif
+
+#ifndef STATUS_LOGON_FAILURE
+# define STATUS_LOGON_FAILURE ((NTSTATUS) 0xC000006DL)
+#endif
+
+#ifndef STATUS_ACCOUNT_RESTRICTION
+# define STATUS_ACCOUNT_RESTRICTION ((NTSTATUS) 0xC000006EL)
+#endif
+
+#ifndef STATUS_INVALID_LOGON_HOURS
+# define STATUS_INVALID_LOGON_HOURS ((NTSTATUS) 0xC000006FL)
+#endif
+
+#ifndef STATUS_INVALID_WORKSTATION
+# define STATUS_INVALID_WORKSTATION ((NTSTATUS) 0xC0000070L)
+#endif
+
+#ifndef STATUS_PASSWORD_EXPIRED
+# define STATUS_PASSWORD_EXPIRED ((NTSTATUS) 0xC0000071L)
+#endif
+
+#ifndef STATUS_ACCOUNT_DISABLED
+# define STATUS_ACCOUNT_DISABLED ((NTSTATUS) 0xC0000072L)
+#endif
+
+#ifndef STATUS_NONE_MAPPED
+# define STATUS_NONE_MAPPED ((NTSTATUS) 0xC0000073L)
+#endif
+
+#ifndef STATUS_TOO_MANY_LUIDS_REQUESTED
+# define STATUS_TOO_MANY_LUIDS_REQUESTED ((NTSTATUS) 0xC0000074L)
+#endif
+
+#ifndef STATUS_LUIDS_EXHAUSTED
+# define STATUS_LUIDS_EXHAUSTED ((NTSTATUS) 0xC0000075L)
+#endif
+
+#ifndef STATUS_INVALID_SUB_AUTHORITY
+# define STATUS_INVALID_SUB_AUTHORITY ((NTSTATUS) 0xC0000076L)
+#endif
+
+#ifndef STATUS_INVALID_ACL
+# define STATUS_INVALID_ACL ((NTSTATUS) 0xC0000077L)
+#endif
+
+#ifndef STATUS_INVALID_SID
+# define STATUS_INVALID_SID ((NTSTATUS) 0xC0000078L)
+#endif
+
+#ifndef STATUS_INVALID_SECURITY_DESCR
+# define STATUS_INVALID_SECURITY_DESCR ((NTSTATUS) 0xC0000079L)
+#endif
+
+#ifndef STATUS_PROCEDURE_NOT_FOUND
+# define STATUS_PROCEDURE_NOT_FOUND ((NTSTATUS) 0xC000007AL)
+#endif
+
+#ifndef STATUS_INVALID_IMAGE_FORMAT
+# define STATUS_INVALID_IMAGE_FORMAT ((NTSTATUS) 0xC000007BL)
+#endif
+
+#ifndef STATUS_NO_TOKEN
+# define STATUS_NO_TOKEN ((NTSTATUS) 0xC000007CL)
+#endif
+
+#ifndef STATUS_BAD_INHERITANCE_ACL
+# define STATUS_BAD_INHERITANCE_ACL ((NTSTATUS) 0xC000007DL)
+#endif
+
+#ifndef STATUS_RANGE_NOT_LOCKED
+# define STATUS_RANGE_NOT_LOCKED ((NTSTATUS) 0xC000007EL)
+#endif
+
+#ifndef STATUS_DISK_FULL
+# define STATUS_DISK_FULL ((NTSTATUS) 0xC000007FL)
+#endif
+
+#ifndef STATUS_SERVER_DISABLED
+# define STATUS_SERVER_DISABLED ((NTSTATUS) 0xC0000080L)
+#endif
+
+#ifndef STATUS_SERVER_NOT_DISABLED
+# define STATUS_SERVER_NOT_DISABLED ((NTSTATUS) 0xC0000081L)
+#endif
+
+#ifndef STATUS_TOO_MANY_GUIDS_REQUESTED
+# define STATUS_TOO_MANY_GUIDS_REQUESTED ((NTSTATUS) 0xC0000082L)
+#endif
+
+#ifndef STATUS_GUIDS_EXHAUSTED
+# define STATUS_GUIDS_EXHAUSTED ((NTSTATUS) 0xC0000083L)
+#endif
+
+#ifndef STATUS_INVALID_ID_AUTHORITY
+# define STATUS_INVALID_ID_AUTHORITY ((NTSTATUS) 0xC0000084L)
+#endif
+
+#ifndef STATUS_AGENTS_EXHAUSTED
+# define STATUS_AGENTS_EXHAUSTED ((NTSTATUS) 0xC0000085L)
+#endif
+
+#ifndef STATUS_INVALID_VOLUME_LABEL
+# define STATUS_INVALID_VOLUME_LABEL ((NTSTATUS) 0xC0000086L)
+#endif
+
+#ifndef STATUS_SECTION_NOT_EXTENDED
+# define STATUS_SECTION_NOT_EXTENDED ((NTSTATUS) 0xC0000087L)
+#endif
+
+#ifndef STATUS_NOT_MAPPED_DATA
+# define STATUS_NOT_MAPPED_DATA ((NTSTATUS) 0xC0000088L)
+#endif
+
+#ifndef STATUS_RESOURCE_DATA_NOT_FOUND
+# define STATUS_RESOURCE_DATA_NOT_FOUND ((NTSTATUS) 0xC0000089L)
+#endif
+
+#ifndef STATUS_RESOURCE_TYPE_NOT_FOUND
+# define STATUS_RESOURCE_TYPE_NOT_FOUND ((NTSTATUS) 0xC000008AL)
+#endif
+
+#ifndef STATUS_RESOURCE_NAME_NOT_FOUND
+# define STATUS_RESOURCE_NAME_NOT_FOUND ((NTSTATUS) 0xC000008BL)
+#endif
+
+#ifndef STATUS_ARRAY_BOUNDS_EXCEEDED
+# define STATUS_ARRAY_BOUNDS_EXCEEDED ((NTSTATUS) 0xC000008CL)
+#endif
+
+#ifndef STATUS_FLOAT_DENORMAL_OPERAND
+# define STATUS_FLOAT_DENORMAL_OPERAND ((NTSTATUS) 0xC000008DL)
+#endif
+
+#ifndef STATUS_FLOAT_DIVIDE_BY_ZERO
+# define STATUS_FLOAT_DIVIDE_BY_ZERO ((NTSTATUS) 0xC000008EL)
+#endif
+
+#ifndef STATUS_FLOAT_INEXACT_RESULT
+# define STATUS_FLOAT_INEXACT_RESULT ((NTSTATUS) 0xC000008FL)
+#endif
+
+#ifndef STATUS_FLOAT_INVALID_OPERATION
+# define STATUS_FLOAT_INVALID_OPERATION ((NTSTATUS) 0xC0000090L)
+#endif
+
+#ifndef STATUS_FLOAT_OVERFLOW
+# define STATUS_FLOAT_OVERFLOW ((NTSTATUS) 0xC0000091L)
+#endif
+
+#ifndef STATUS_FLOAT_STACK_CHECK
+# define STATUS_FLOAT_STACK_CHECK ((NTSTATUS) 0xC0000092L)
+#endif
+
+#ifndef STATUS_FLOAT_UNDERFLOW
+# define STATUS_FLOAT_UNDERFLOW ((NTSTATUS) 0xC0000093L)
+#endif
+
+#ifndef STATUS_INTEGER_DIVIDE_BY_ZERO
+# define STATUS_INTEGER_DIVIDE_BY_ZERO ((NTSTATUS) 0xC0000094L)
+#endif
+
+#ifndef STATUS_INTEGER_OVERFLOW
+# define STATUS_INTEGER_OVERFLOW ((NTSTATUS) 0xC0000095L)
+#endif
+
+#ifndef STATUS_PRIVILEGED_INSTRUCTION
+# define STATUS_PRIVILEGED_INSTRUCTION ((NTSTATUS) 0xC0000096L)
+#endif
+
+#ifndef STATUS_TOO_MANY_PAGING_FILES
+# define STATUS_TOO_MANY_PAGING_FILES ((NTSTATUS) 0xC0000097L)
+#endif
+
+#ifndef STATUS_FILE_INVALID
+# define STATUS_FILE_INVALID ((NTSTATUS) 0xC0000098L)
+#endif
+
+#ifndef STATUS_ALLOTTED_SPACE_EXCEEDED
+# define STATUS_ALLOTTED_SPACE_EXCEEDED ((NTSTATUS) 0xC0000099L)
+#endif
+
+#ifndef STATUS_INSUFFICIENT_RESOURCES
+# define STATUS_INSUFFICIENT_RESOURCES ((NTSTATUS) 0xC000009AL)
+#endif
+
+#ifndef STATUS_DFS_EXIT_PATH_FOUND
+# define STATUS_DFS_EXIT_PATH_FOUND ((NTSTATUS) 0xC000009BL)
+#endif
+
+#ifndef STATUS_DEVICE_DATA_ERROR
+# define STATUS_DEVICE_DATA_ERROR ((NTSTATUS) 0xC000009CL)
+#endif
+
+#ifndef STATUS_DEVICE_NOT_CONNECTED
+# define STATUS_DEVICE_NOT_CONNECTED ((NTSTATUS) 0xC000009DL)
+#endif
+
+#ifndef STATUS_DEVICE_POWER_FAILURE
+# define STATUS_DEVICE_POWER_FAILURE ((NTSTATUS) 0xC000009EL)
+#endif
+
+#ifndef STATUS_FREE_VM_NOT_AT_BASE
+# define STATUS_FREE_VM_NOT_AT_BASE ((NTSTATUS) 0xC000009FL)
+#endif
+
+#ifndef STATUS_MEMORY_NOT_ALLOCATED
+# define STATUS_MEMORY_NOT_ALLOCATED ((NTSTATUS) 0xC00000A0L)
+#endif
+
+#ifndef STATUS_WORKING_SET_QUOTA
+# define STATUS_WORKING_SET_QUOTA ((NTSTATUS) 0xC00000A1L)
+#endif
+
+#ifndef STATUS_MEDIA_WRITE_PROTECTED
+# define STATUS_MEDIA_WRITE_PROTECTED ((NTSTATUS) 0xC00000A2L)
+#endif
+
+#ifndef STATUS_DEVICE_NOT_READY
+# define STATUS_DEVICE_NOT_READY ((NTSTATUS) 0xC00000A3L)
+#endif
+
+#ifndef STATUS_INVALID_GROUP_ATTRIBUTES
+# define STATUS_INVALID_GROUP_ATTRIBUTES ((NTSTATUS) 0xC00000A4L)
+#endif
+
+#ifndef STATUS_BAD_IMPERSONATION_LEVEL
+# define STATUS_BAD_IMPERSONATION_LEVEL ((NTSTATUS) 0xC00000A5L)
+#endif
+
+#ifndef STATUS_CANT_OPEN_ANONYMOUS
+# define STATUS_CANT_OPEN_ANONYMOUS ((NTSTATUS) 0xC00000A6L)
+#endif
+
+#ifndef STATUS_BAD_VALIDATION_CLASS
+# define STATUS_BAD_VALIDATION_CLASS ((NTSTATUS) 0xC00000A7L)
+#endif
+
+#ifndef STATUS_BAD_TOKEN_TYPE
+# define STATUS_BAD_TOKEN_TYPE ((NTSTATUS) 0xC00000A8L)
+#endif
+
+#ifndef STATUS_BAD_MASTER_BOOT_RECORD
+# define STATUS_BAD_MASTER_BOOT_RECORD ((NTSTATUS) 0xC00000A9L)
+#endif
+
+#ifndef STATUS_INSTRUCTION_MISALIGNMENT
+# define STATUS_INSTRUCTION_MISALIGNMENT ((NTSTATUS) 0xC00000AAL)
+#endif
+
+#ifndef STATUS_INSTANCE_NOT_AVAILABLE
+# define STATUS_INSTANCE_NOT_AVAILABLE ((NTSTATUS) 0xC00000ABL)
+#endif
+
+#ifndef STATUS_PIPE_NOT_AVAILABLE
+# define STATUS_PIPE_NOT_AVAILABLE ((NTSTATUS) 0xC00000ACL)
+#endif
+
+#ifndef STATUS_INVALID_PIPE_STATE
+# define STATUS_INVALID_PIPE_STATE ((NTSTATUS) 0xC00000ADL)
+#endif
+
+#ifndef STATUS_PIPE_BUSY
+# define STATUS_PIPE_BUSY ((NTSTATUS) 0xC00000AEL)
+#endif
+
+#ifndef STATUS_ILLEGAL_FUNCTION
+# define STATUS_ILLEGAL_FUNCTION ((NTSTATUS) 0xC00000AFL)
+#endif
+
+#ifndef STATUS_PIPE_DISCONNECTED
+# define STATUS_PIPE_DISCONNECTED ((NTSTATUS) 0xC00000B0L)
+#endif
+
+#ifndef STATUS_PIPE_CLOSING
+# define STATUS_PIPE_CLOSING ((NTSTATUS) 0xC00000B1L)
+#endif
+
+#ifndef STATUS_PIPE_CONNECTED
+# define STATUS_PIPE_CONNECTED ((NTSTATUS) 0xC00000B2L)
+#endif
+
+#ifndef STATUS_PIPE_LISTENING
+# define STATUS_PIPE_LISTENING ((NTSTATUS) 0xC00000B3L)
+#endif
+
+#ifndef STATUS_INVALID_READ_MODE
+# define STATUS_INVALID_READ_MODE ((NTSTATUS) 0xC00000B4L)
+#endif
+
+#ifndef STATUS_IO_TIMEOUT
+# define STATUS_IO_TIMEOUT ((NTSTATUS) 0xC00000B5L)
+#endif
+
+#ifndef STATUS_FILE_FORCED_CLOSED
+# define STATUS_FILE_FORCED_CLOSED ((NTSTATUS) 0xC00000B6L)
+#endif
+
+#ifndef STATUS_PROFILING_NOT_STARTED
+# define STATUS_PROFILING_NOT_STARTED ((NTSTATUS) 0xC00000B7L)
+#endif
+
+#ifndef STATUS_PROFILING_NOT_STOPPED
+# define STATUS_PROFILING_NOT_STOPPED ((NTSTATUS) 0xC00000B8L)
+#endif
+
+#ifndef STATUS_COULD_NOT_INTERPRET
+# define STATUS_COULD_NOT_INTERPRET ((NTSTATUS) 0xC00000B9L)
+#endif
+
+#ifndef STATUS_FILE_IS_A_DIRECTORY
+# define STATUS_FILE_IS_A_DIRECTORY ((NTSTATUS) 0xC00000BAL)
+#endif
+
+#ifndef STATUS_NOT_SUPPORTED
+# define STATUS_NOT_SUPPORTED ((NTSTATUS) 0xC00000BBL)
+#endif
+
+#ifndef STATUS_REMOTE_NOT_LISTENING
+# define STATUS_REMOTE_NOT_LISTENING ((NTSTATUS) 0xC00000BCL)
+#endif
+
+#ifndef STATUS_DUPLICATE_NAME
+# define STATUS_DUPLICATE_NAME ((NTSTATUS) 0xC00000BDL)
+#endif
+
+#ifndef STATUS_BAD_NETWORK_PATH
+# define STATUS_BAD_NETWORK_PATH ((NTSTATUS) 0xC00000BEL)
+#endif
+
+#ifndef STATUS_NETWORK_BUSY
+# define STATUS_NETWORK_BUSY ((NTSTATUS) 0xC00000BFL)
+#endif
+
+#ifndef STATUS_DEVICE_DOES_NOT_EXIST
+# define STATUS_DEVICE_DOES_NOT_EXIST ((NTSTATUS) 0xC00000C0L)
+#endif
+
+#ifndef STATUS_TOO_MANY_COMMANDS
+# define STATUS_TOO_MANY_COMMANDS ((NTSTATUS) 0xC00000C1L)
+#endif
+
+#ifndef STATUS_ADAPTER_HARDWARE_ERROR
+# define STATUS_ADAPTER_HARDWARE_ERROR ((NTSTATUS) 0xC00000C2L)
+#endif
+
+#ifndef STATUS_INVALID_NETWORK_RESPONSE
+# define STATUS_INVALID_NETWORK_RESPONSE ((NTSTATUS) 0xC00000C3L)
+#endif
+
+#ifndef STATUS_UNEXPECTED_NETWORK_ERROR
+# define STATUS_UNEXPECTED_NETWORK_ERROR ((NTSTATUS) 0xC00000C4L)
+#endif
+
+#ifndef STATUS_BAD_REMOTE_ADAPTER
+# define STATUS_BAD_REMOTE_ADAPTER ((NTSTATUS) 0xC00000C5L)
+#endif
+
+#ifndef STATUS_PRINT_QUEUE_FULL
+# define STATUS_PRINT_QUEUE_FULL ((NTSTATUS) 0xC00000C6L)
+#endif
+
+#ifndef STATUS_NO_SPOOL_SPACE
+# define STATUS_NO_SPOOL_SPACE ((NTSTATUS) 0xC00000C7L)
+#endif
+
+#ifndef STATUS_PRINT_CANCELLED
+# define STATUS_PRINT_CANCELLED ((NTSTATUS) 0xC00000C8L)
+#endif
+
+#ifndef STATUS_NETWORK_NAME_DELETED
+# define STATUS_NETWORK_NAME_DELETED ((NTSTATUS) 0xC00000C9L)
+#endif
+
+#ifndef STATUS_NETWORK_ACCESS_DENIED
+# define STATUS_NETWORK_ACCESS_DENIED ((NTSTATUS) 0xC00000CAL)
+#endif
+
+#ifndef STATUS_BAD_DEVICE_TYPE
+# define STATUS_BAD_DEVICE_TYPE ((NTSTATUS) 0xC00000CBL)
+#endif
+
+#ifndef STATUS_BAD_NETWORK_NAME
+# define STATUS_BAD_NETWORK_NAME ((NTSTATUS) 0xC00000CCL)
+#endif
+
+#ifndef STATUS_TOO_MANY_NAMES
+# define STATUS_TOO_MANY_NAMES ((NTSTATUS) 0xC00000CDL)
+#endif
+
+#ifndef STATUS_TOO_MANY_SESSIONS
+# define STATUS_TOO_MANY_SESSIONS ((NTSTATUS) 0xC00000CEL)
+#endif
+
+#ifndef STATUS_SHARING_PAUSED
+# define STATUS_SHARING_PAUSED ((NTSTATUS) 0xC00000CFL)
+#endif
+
+#ifndef STATUS_REQUEST_NOT_ACCEPTED
+# define STATUS_REQUEST_NOT_ACCEPTED ((NTSTATUS) 0xC00000D0L)
+#endif
+
+#ifndef STATUS_REDIRECTOR_PAUSED
+# define STATUS_REDIRECTOR_PAUSED ((NTSTATUS) 0xC00000D1L)
+#endif
+
+#ifndef STATUS_NET_WRITE_FAULT
+# define STATUS_NET_WRITE_FAULT ((NTSTATUS) 0xC00000D2L)
+#endif
+
+#ifndef STATUS_PROFILING_AT_LIMIT
+# define STATUS_PROFILING_AT_LIMIT ((NTSTATUS) 0xC00000D3L)
+#endif
+
+#ifndef STATUS_NOT_SAME_DEVICE
+# define STATUS_NOT_SAME_DEVICE ((NTSTATUS) 0xC00000D4L)
+#endif
+
+#ifndef STATUS_FILE_RENAMED
+# define STATUS_FILE_RENAMED ((NTSTATUS) 0xC00000D5L)
+#endif
+
+#ifndef STATUS_VIRTUAL_CIRCUIT_CLOSED
+# define STATUS_VIRTUAL_CIRCUIT_CLOSED ((NTSTATUS) 0xC00000D6L)
+#endif
+
+#ifndef STATUS_NO_SECURITY_ON_OBJECT
+# define STATUS_NO_SECURITY_ON_OBJECT ((NTSTATUS) 0xC00000D7L)
+#endif
+
+#ifndef STATUS_CANT_WAIT
+# define STATUS_CANT_WAIT ((NTSTATUS) 0xC00000D8L)
+#endif
+
+#ifndef STATUS_PIPE_EMPTY
+# define STATUS_PIPE_EMPTY ((NTSTATUS) 0xC00000D9L)
+#endif
+
+#ifndef STATUS_CANT_ACCESS_DOMAIN_INFO
+# define STATUS_CANT_ACCESS_DOMAIN_INFO ((NTSTATUS) 0xC00000DAL)
+#endif
+
+#ifndef STATUS_CANT_TERMINATE_SELF
+# define STATUS_CANT_TERMINATE_SELF ((NTSTATUS) 0xC00000DBL)
+#endif
+
+#ifndef STATUS_INVALID_SERVER_STATE
+# define STATUS_INVALID_SERVER_STATE ((NTSTATUS) 0xC00000DCL)
+#endif
+
+#ifndef STATUS_INVALID_DOMAIN_STATE
+# define STATUS_INVALID_DOMAIN_STATE ((NTSTATUS) 0xC00000DDL)
+#endif
+
+#ifndef STATUS_INVALID_DOMAIN_ROLE
+# define STATUS_INVALID_DOMAIN_ROLE ((NTSTATUS) 0xC00000DEL)
+#endif
+
+#ifndef STATUS_NO_SUCH_DOMAIN
+# define STATUS_NO_SUCH_DOMAIN ((NTSTATUS) 0xC00000DFL)
+#endif
+
+#ifndef STATUS_DOMAIN_EXISTS
+# define STATUS_DOMAIN_EXISTS ((NTSTATUS) 0xC00000E0L)
+#endif
+
+#ifndef STATUS_DOMAIN_LIMIT_EXCEEDED
+# define STATUS_DOMAIN_LIMIT_EXCEEDED ((NTSTATUS) 0xC00000E1L)
+#endif
+
+#ifndef STATUS_OPLOCK_NOT_GRANTED
+# define STATUS_OPLOCK_NOT_GRANTED ((NTSTATUS) 0xC00000E2L)
+#endif
+
+#ifndef STATUS_INVALID_OPLOCK_PROTOCOL
+# define STATUS_INVALID_OPLOCK_PROTOCOL ((NTSTATUS) 0xC00000E3L)
+#endif
+
+#ifndef STATUS_INTERNAL_DB_CORRUPTION
+# define STATUS_INTERNAL_DB_CORRUPTION ((NTSTATUS) 0xC00000E4L)
+#endif
+
+#ifndef STATUS_INTERNAL_ERROR
+# define STATUS_INTERNAL_ERROR ((NTSTATUS) 0xC00000E5L)
+#endif
+
+#ifndef STATUS_GENERIC_NOT_MAPPED
+# define STATUS_GENERIC_NOT_MAPPED ((NTSTATUS) 0xC00000E6L)
+#endif
+
+#ifndef STATUS_BAD_DESCRIPTOR_FORMAT
+# define STATUS_BAD_DESCRIPTOR_FORMAT ((NTSTATUS) 0xC00000E7L)
+#endif
+
+#ifndef STATUS_INVALID_USER_BUFFER
+# define STATUS_INVALID_USER_BUFFER ((NTSTATUS) 0xC00000E8L)
+#endif
+
+#ifndef STATUS_UNEXPECTED_IO_ERROR
+# define STATUS_UNEXPECTED_IO_ERROR ((NTSTATUS) 0xC00000E9L)
+#endif
+
+#ifndef STATUS_UNEXPECTED_MM_CREATE_ERR
+# define STATUS_UNEXPECTED_MM_CREATE_ERR ((NTSTATUS) 0xC00000EAL)
+#endif
+
+#ifndef STATUS_UNEXPECTED_MM_MAP_ERROR
+# define STATUS_UNEXPECTED_MM_MAP_ERROR ((NTSTATUS) 0xC00000EBL)
+#endif
+
+#ifndef STATUS_UNEXPECTED_MM_EXTEND_ERR
+# define STATUS_UNEXPECTED_MM_EXTEND_ERR ((NTSTATUS) 0xC00000ECL)
+#endif
+
+#ifndef STATUS_NOT_LOGON_PROCESS
+# define STATUS_NOT_LOGON_PROCESS ((NTSTATUS) 0xC00000EDL)
+#endif
+
+#ifndef STATUS_LOGON_SESSION_EXISTS
+# define STATUS_LOGON_SESSION_EXISTS ((NTSTATUS) 0xC00000EEL)
+#endif
+
+#ifndef STATUS_INVALID_PARAMETER_1
+# define STATUS_INVALID_PARAMETER_1 ((NTSTATUS) 0xC00000EFL)
+#endif
+
+#ifndef STATUS_INVALID_PARAMETER_2
+# define STATUS_INVALID_PARAMETER_2 ((NTSTATUS) 0xC00000F0L)
+#endif
+
+#ifndef STATUS_INVALID_PARAMETER_3
+# define STATUS_INVALID_PARAMETER_3 ((NTSTATUS) 0xC00000F1L)
+#endif
+
+#ifndef STATUS_INVALID_PARAMETER_4
+# define STATUS_INVALID_PARAMETER_4 ((NTSTATUS) 0xC00000F2L)
+#endif
+
+#ifndef STATUS_INVALID_PARAMETER_5
+# define STATUS_INVALID_PARAMETER_5 ((NTSTATUS) 0xC00000F3L)
+#endif
+
+#ifndef STATUS_INVALID_PARAMETER_6
+# define STATUS_INVALID_PARAMETER_6 ((NTSTATUS) 0xC00000F4L)
+#endif
+
+#ifndef STATUS_INVALID_PARAMETER_7
+# define STATUS_INVALID_PARAMETER_7 ((NTSTATUS) 0xC00000F5L)
+#endif
+
+#ifndef STATUS_INVALID_PARAMETER_8
+# define STATUS_INVALID_PARAMETER_8 ((NTSTATUS) 0xC00000F6L)
+#endif
+
+#ifndef STATUS_INVALID_PARAMETER_9
+# define STATUS_INVALID_PARAMETER_9 ((NTSTATUS) 0xC00000F7L)
+#endif
+
+#ifndef STATUS_INVALID_PARAMETER_10
+# define STATUS_INVALID_PARAMETER_10 ((NTSTATUS) 0xC00000F8L)
+#endif
+
+#ifndef STATUS_INVALID_PARAMETER_11
+# define STATUS_INVALID_PARAMETER_11 ((NTSTATUS) 0xC00000F9L)
+#endif
+
+#ifndef STATUS_INVALID_PARAMETER_12
+# define STATUS_INVALID_PARAMETER_12 ((NTSTATUS) 0xC00000FAL)
+#endif
+
+#ifndef STATUS_REDIRECTOR_NOT_STARTED
+# define STATUS_REDIRECTOR_NOT_STARTED ((NTSTATUS) 0xC00000FBL)
+#endif
+
+#ifndef STATUS_REDIRECTOR_STARTED
+# define STATUS_REDIRECTOR_STARTED ((NTSTATUS) 0xC00000FCL)
+#endif
+
+#ifndef STATUS_STACK_OVERFLOW
+# define STATUS_STACK_OVERFLOW ((NTSTATUS) 0xC00000FDL)
+#endif
+
+#ifndef STATUS_NO_SUCH_PACKAGE
+# define STATUS_NO_SUCH_PACKAGE ((NTSTATUS) 0xC00000FEL)
+#endif
+
+#ifndef STATUS_BAD_FUNCTION_TABLE
+# define STATUS_BAD_FUNCTION_TABLE ((NTSTATUS) 0xC00000FFL)
+#endif
+
+#ifndef STATUS_VARIABLE_NOT_FOUND
+# define STATUS_VARIABLE_NOT_FOUND ((NTSTATUS) 0xC0000100L)
+#endif
+
+#ifndef STATUS_DIRECTORY_NOT_EMPTY
+# define STATUS_DIRECTORY_NOT_EMPTY ((NTSTATUS) 0xC0000101L)
+#endif
+
+#ifndef STATUS_FILE_CORRUPT_ERROR
+# define STATUS_FILE_CORRUPT_ERROR ((NTSTATUS) 0xC0000102L)
+#endif
+
+#ifndef STATUS_NOT_A_DIRECTORY
+# define STATUS_NOT_A_DIRECTORY ((NTSTATUS) 0xC0000103L)
+#endif
+
+#ifndef STATUS_BAD_LOGON_SESSION_STATE
+# define STATUS_BAD_LOGON_SESSION_STATE ((NTSTATUS) 0xC0000104L)
+#endif
+
+#ifndef STATUS_LOGON_SESSION_COLLISION
+# define STATUS_LOGON_SESSION_COLLISION ((NTSTATUS) 0xC0000105L)
+#endif
+
+#ifndef STATUS_NAME_TOO_LONG
+# define STATUS_NAME_TOO_LONG ((NTSTATUS) 0xC0000106L)
+#endif
+
+#ifndef STATUS_FILES_OPEN
+# define STATUS_FILES_OPEN ((NTSTATUS) 0xC0000107L)
+#endif
+
+#ifndef STATUS_CONNECTION_IN_USE
+# define STATUS_CONNECTION_IN_USE ((NTSTATUS) 0xC0000108L)
+#endif
+
+#ifndef STATUS_MESSAGE_NOT_FOUND
+# define STATUS_MESSAGE_NOT_FOUND ((NTSTATUS) 0xC0000109L)
+#endif
+
+#ifndef STATUS_PROCESS_IS_TERMINATING
+# define STATUS_PROCESS_IS_TERMINATING ((NTSTATUS) 0xC000010AL)
+#endif
+
+#ifndef STATUS_INVALID_LOGON_TYPE
+# define STATUS_INVALID_LOGON_TYPE ((NTSTATUS) 0xC000010BL)
+#endif
+
+#ifndef STATUS_NO_GUID_TRANSLATION
+# define STATUS_NO_GUID_TRANSLATION ((NTSTATUS) 0xC000010CL)
+#endif
+
+#ifndef STATUS_CANNOT_IMPERSONATE
+# define STATUS_CANNOT_IMPERSONATE ((NTSTATUS) 0xC000010DL)
+#endif
+
+#ifndef STATUS_IMAGE_ALREADY_LOADED
+# define STATUS_IMAGE_ALREADY_LOADED ((NTSTATUS) 0xC000010EL)
+#endif
+
+#ifndef STATUS_ABIOS_NOT_PRESENT
+# define STATUS_ABIOS_NOT_PRESENT ((NTSTATUS) 0xC000010FL)
+#endif
+
+#ifndef STATUS_ABIOS_LID_NOT_EXIST
+# define STATUS_ABIOS_LID_NOT_EXIST ((NTSTATUS) 0xC0000110L)
+#endif
+
+#ifndef STATUS_ABIOS_LID_ALREADY_OWNED
+# define STATUS_ABIOS_LID_ALREADY_OWNED ((NTSTATUS) 0xC0000111L)
+#endif
+
+#ifndef STATUS_ABIOS_NOT_LID_OWNER
+# define STATUS_ABIOS_NOT_LID_OWNER ((NTSTATUS) 0xC0000112L)
+#endif
+
+#ifndef STATUS_ABIOS_INVALID_COMMAND
+# define STATUS_ABIOS_INVALID_COMMAND ((NTSTATUS) 0xC0000113L)
+#endif
+
+#ifndef STATUS_ABIOS_INVALID_LID
+# define STATUS_ABIOS_INVALID_LID ((NTSTATUS) 0xC0000114L)
+#endif
+
+#ifndef STATUS_ABIOS_SELECTOR_NOT_AVAILABLE
+# define STATUS_ABIOS_SELECTOR_NOT_AVAILABLE ((NTSTATUS) 0xC0000115L)
+#endif
+
+#ifndef STATUS_ABIOS_INVALID_SELECTOR
+# define STATUS_ABIOS_INVALID_SELECTOR ((NTSTATUS) 0xC0000116L)
+#endif
+
+#ifndef STATUS_NO_LDT
+# define STATUS_NO_LDT ((NTSTATUS) 0xC0000117L)
+#endif
+
+#ifndef STATUS_INVALID_LDT_SIZE
+# define STATUS_INVALID_LDT_SIZE ((NTSTATUS) 0xC0000118L)
+#endif
+
+#ifndef STATUS_INVALID_LDT_OFFSET
+# define STATUS_INVALID_LDT_OFFSET ((NTSTATUS) 0xC0000119L)
+#endif
+
+#ifndef STATUS_INVALID_LDT_DESCRIPTOR
+# define STATUS_INVALID_LDT_DESCRIPTOR ((NTSTATUS) 0xC000011AL)
+#endif
+
+#ifndef STATUS_INVALID_IMAGE_NE_FORMAT
+# define STATUS_INVALID_IMAGE_NE_FORMAT ((NTSTATUS) 0xC000011BL)
+#endif
+
+#ifndef STATUS_RXACT_INVALID_STATE
+# define STATUS_RXACT_INVALID_STATE ((NTSTATUS) 0xC000011CL)
+#endif
+
+#ifndef STATUS_RXACT_COMMIT_FAILURE
+# define STATUS_RXACT_COMMIT_FAILURE ((NTSTATUS) 0xC000011DL)
+#endif
+
+#ifndef STATUS_MAPPED_FILE_SIZE_ZERO
+# define STATUS_MAPPED_FILE_SIZE_ZERO ((NTSTATUS) 0xC000011EL)
+#endif
+
+#ifndef STATUS_TOO_MANY_OPENED_FILES
+# define STATUS_TOO_MANY_OPENED_FILES ((NTSTATUS) 0xC000011FL)
+#endif
+
+#ifndef STATUS_CANCELLED
+# define STATUS_CANCELLED ((NTSTATUS) 0xC0000120L)
+#endif
+
+#ifndef STATUS_CANNOT_DELETE
+# define STATUS_CANNOT_DELETE ((NTSTATUS) 0xC0000121L)
+#endif
+
+#ifndef STATUS_INVALID_COMPUTER_NAME
+# define STATUS_INVALID_COMPUTER_NAME ((NTSTATUS) 0xC0000122L)
+#endif
+
+#ifndef STATUS_FILE_DELETED
+# define STATUS_FILE_DELETED ((NTSTATUS) 0xC0000123L)
+#endif
+
+#ifndef STATUS_SPECIAL_ACCOUNT
+# define STATUS_SPECIAL_ACCOUNT ((NTSTATUS) 0xC0000124L)
+#endif
+
+#ifndef STATUS_SPECIAL_GROUP
+# define STATUS_SPECIAL_GROUP ((NTSTATUS) 0xC0000125L)
+#endif
+
+#ifndef STATUS_SPECIAL_USER
+# define STATUS_SPECIAL_USER ((NTSTATUS) 0xC0000126L)
+#endif
+
+#ifndef STATUS_MEMBERS_PRIMARY_GROUP
+# define STATUS_MEMBERS_PRIMARY_GROUP ((NTSTATUS) 0xC0000127L)
+#endif
+
+#ifndef STATUS_FILE_CLOSED
+# define STATUS_FILE_CLOSED ((NTSTATUS) 0xC0000128L)
+#endif
+
+#ifndef STATUS_TOO_MANY_THREADS
+# define STATUS_TOO_MANY_THREADS ((NTSTATUS) 0xC0000129L)
+#endif
+
+#ifndef STATUS_THREAD_NOT_IN_PROCESS
+# define STATUS_THREAD_NOT_IN_PROCESS ((NTSTATUS) 0xC000012AL)
+#endif
+
+#ifndef STATUS_TOKEN_ALREADY_IN_USE
+# define STATUS_TOKEN_ALREADY_IN_USE ((NTSTATUS) 0xC000012BL)
+#endif
+
+#ifndef STATUS_PAGEFILE_QUOTA_EXCEEDED
+# define STATUS_PAGEFILE_QUOTA_EXCEEDED ((NTSTATUS) 0xC000012CL)
+#endif
+
+#ifndef STATUS_COMMITMENT_LIMIT
+# define STATUS_COMMITMENT_LIMIT ((NTSTATUS) 0xC000012DL)
+#endif
+
+#ifndef STATUS_INVALID_IMAGE_LE_FORMAT
+# define STATUS_INVALID_IMAGE_LE_FORMAT ((NTSTATUS) 0xC000012EL)
+#endif
+
+#ifndef STATUS_INVALID_IMAGE_NOT_MZ
+# define STATUS_INVALID_IMAGE_NOT_MZ ((NTSTATUS) 0xC000012FL)
+#endif
+
+#ifndef STATUS_INVALID_IMAGE_PROTECT
+# define STATUS_INVALID_IMAGE_PROTECT ((NTSTATUS) 0xC0000130L)
+#endif
+
+#ifndef STATUS_INVALID_IMAGE_WIN_16
+# define STATUS_INVALID_IMAGE_WIN_16 ((NTSTATUS) 0xC0000131L)
+#endif
+
+#ifndef STATUS_LOGON_SERVER_CONFLICT
+# define STATUS_LOGON_SERVER_CONFLICT ((NTSTATUS) 0xC0000132L)
+#endif
+
+#ifndef STATUS_TIME_DIFFERENCE_AT_DC
+# define STATUS_TIME_DIFFERENCE_AT_DC ((NTSTATUS) 0xC0000133L)
+#endif
+
+#ifndef STATUS_SYNCHRONIZATION_REQUIRED
+# define STATUS_SYNCHRONIZATION_REQUIRED ((NTSTATUS) 0xC0000134L)
+#endif
+
+#ifndef STATUS_DLL_NOT_FOUND
+# define STATUS_DLL_NOT_FOUND ((NTSTATUS) 0xC0000135L)
+#endif
+
+#ifndef STATUS_OPEN_FAILED
+# define STATUS_OPEN_FAILED ((NTSTATUS) 0xC0000136L)
+#endif
+
+#ifndef STATUS_IO_PRIVILEGE_FAILED
+# define STATUS_IO_PRIVILEGE_FAILED ((NTSTATUS) 0xC0000137L)
+#endif
+
+#ifndef STATUS_ORDINAL_NOT_FOUND
+# define STATUS_ORDINAL_NOT_FOUND ((NTSTATUS) 0xC0000138L)
+#endif
+
+#ifndef STATUS_ENTRYPOINT_NOT_FOUND
+# define STATUS_ENTRYPOINT_NOT_FOUND ((NTSTATUS) 0xC0000139L)
+#endif
+
+#ifndef STATUS_CONTROL_C_EXIT
+# define STATUS_CONTROL_C_EXIT ((NTSTATUS) 0xC000013AL)
+#endif
+
+#ifndef STATUS_LOCAL_DISCONNECT
+# define STATUS_LOCAL_DISCONNECT ((NTSTATUS) 0xC000013BL)
+#endif
+
+#ifndef STATUS_REMOTE_DISCONNECT
+# define STATUS_REMOTE_DISCONNECT ((NTSTATUS) 0xC000013CL)
+#endif
+
+#ifndef STATUS_REMOTE_RESOURCES
+# define STATUS_REMOTE_RESOURCES ((NTSTATUS) 0xC000013DL)
+#endif
+
+#ifndef STATUS_LINK_FAILED
+# define STATUS_LINK_FAILED ((NTSTATUS) 0xC000013EL)
+#endif
+
+#ifndef STATUS_LINK_TIMEOUT
+# define STATUS_LINK_TIMEOUT ((NTSTATUS) 0xC000013FL)
+#endif
+
+#ifndef STATUS_INVALID_CONNECTION
+# define STATUS_INVALID_CONNECTION ((NTSTATUS) 0xC0000140L)
+#endif
+
+#ifndef STATUS_INVALID_ADDRESS
+# define STATUS_INVALID_ADDRESS ((NTSTATUS) 0xC0000141L)
+#endif
+
+#ifndef STATUS_DLL_INIT_FAILED
+# define STATUS_DLL_INIT_FAILED ((NTSTATUS) 0xC0000142L)
+#endif
+
+#ifndef STATUS_MISSING_SYSTEMFILE
+# define STATUS_MISSING_SYSTEMFILE ((NTSTATUS) 0xC0000143L)
+#endif
+
+#ifndef STATUS_UNHANDLED_EXCEPTION
+# define STATUS_UNHANDLED_EXCEPTION ((NTSTATUS) 0xC0000144L)
+#endif
+
+#ifndef STATUS_APP_INIT_FAILURE
+# define STATUS_APP_INIT_FAILURE ((NTSTATUS) 0xC0000145L)
+#endif
+
+#ifndef STATUS_PAGEFILE_CREATE_FAILED
+# define STATUS_PAGEFILE_CREATE_FAILED ((NTSTATUS) 0xC0000146L)
+#endif
+
+#ifndef STATUS_NO_PAGEFILE
+# define STATUS_NO_PAGEFILE ((NTSTATUS) 0xC0000147L)
+#endif
+
+#ifndef STATUS_INVALID_LEVEL
+# define STATUS_INVALID_LEVEL ((NTSTATUS) 0xC0000148L)
+#endif
+
+#ifndef STATUS_WRONG_PASSWORD_CORE
+# define STATUS_WRONG_PASSWORD_CORE ((NTSTATUS) 0xC0000149L)
+#endif
+
+#ifndef STATUS_ILLEGAL_FLOAT_CONTEXT
+# define STATUS_ILLEGAL_FLOAT_CONTEXT ((NTSTATUS) 0xC000014AL)
+#endif
+
+#ifndef STATUS_PIPE_BROKEN
+# define STATUS_PIPE_BROKEN ((NTSTATUS) 0xC000014BL)
+#endif
+
+#ifndef STATUS_REGISTRY_CORRUPT
+# define STATUS_REGISTRY_CORRUPT ((NTSTATUS) 0xC000014CL)
+#endif
+
+#ifndef STATUS_REGISTRY_IO_FAILED
+# define STATUS_REGISTRY_IO_FAILED ((NTSTATUS) 0xC000014DL)
+#endif
+
+#ifndef STATUS_NO_EVENT_PAIR
+# define STATUS_NO_EVENT_PAIR ((NTSTATUS) 0xC000014EL)
+#endif
+
+#ifndef STATUS_UNRECOGNIZED_VOLUME
+# define STATUS_UNRECOGNIZED_VOLUME ((NTSTATUS) 0xC000014FL)
+#endif
+
+#ifndef STATUS_SERIAL_NO_DEVICE_INITED
+# define STATUS_SERIAL_NO_DEVICE_INITED ((NTSTATUS) 0xC0000150L)
+#endif
+
+#ifndef STATUS_NO_SUCH_ALIAS
+# define STATUS_NO_SUCH_ALIAS ((NTSTATUS) 0xC0000151L)
+#endif
+
+#ifndef STATUS_MEMBER_NOT_IN_ALIAS
+# define STATUS_MEMBER_NOT_IN_ALIAS ((NTSTATUS) 0xC0000152L)
+#endif
+
+#ifndef STATUS_MEMBER_IN_ALIAS
+# define STATUS_MEMBER_IN_ALIAS ((NTSTATUS) 0xC0000153L)
+#endif
+
+#ifndef STATUS_ALIAS_EXISTS
+# define STATUS_ALIAS_EXISTS ((NTSTATUS) 0xC0000154L)
+#endif
+
+#ifndef STATUS_LOGON_NOT_GRANTED
+# define STATUS_LOGON_NOT_GRANTED ((NTSTATUS) 0xC0000155L)
+#endif
+
+#ifndef STATUS_TOO_MANY_SECRETS
+# define STATUS_TOO_MANY_SECRETS ((NTSTATUS) 0xC0000156L)
+#endif
+
+#ifndef STATUS_SECRET_TOO_LONG
+# define STATUS_SECRET_TOO_LONG ((NTSTATUS) 0xC0000157L)
+#endif
+
+#ifndef STATUS_INTERNAL_DB_ERROR
+# define STATUS_INTERNAL_DB_ERROR ((NTSTATUS) 0xC0000158L)
+#endif
+
+#ifndef STATUS_FULLSCREEN_MODE
+# define STATUS_FULLSCREEN_MODE ((NTSTATUS) 0xC0000159L)
+#endif
+
+#ifndef STATUS_TOO_MANY_CONTEXT_IDS
+# define STATUS_TOO_MANY_CONTEXT_IDS ((NTSTATUS) 0xC000015AL)
+#endif
+
+#ifndef STATUS_LOGON_TYPE_NOT_GRANTED
+# define STATUS_LOGON_TYPE_NOT_GRANTED ((NTSTATUS) 0xC000015BL)
+#endif
+
+#ifndef STATUS_NOT_REGISTRY_FILE
+# define STATUS_NOT_REGISTRY_FILE ((NTSTATUS) 0xC000015CL)
+#endif
+
+#ifndef STATUS_NT_CROSS_ENCRYPTION_REQUIRED
+# define STATUS_NT_CROSS_ENCRYPTION_REQUIRED ((NTSTATUS) 0xC000015DL)
+#endif
+
+#ifndef STATUS_DOMAIN_CTRLR_CONFIG_ERROR
+# define STATUS_DOMAIN_CTRLR_CONFIG_ERROR ((NTSTATUS) 0xC000015EL)
+#endif
+
+#ifndef STATUS_FT_MISSING_MEMBER
+# define STATUS_FT_MISSING_MEMBER ((NTSTATUS) 0xC000015FL)
+#endif
+
+#ifndef STATUS_ILL_FORMED_SERVICE_ENTRY
+# define STATUS_ILL_FORMED_SERVICE_ENTRY ((NTSTATUS) 0xC0000160L)
+#endif
+
+#ifndef STATUS_ILLEGAL_CHARACTER
+# define STATUS_ILLEGAL_CHARACTER ((NTSTATUS) 0xC0000161L)
+#endif
+
+#ifndef STATUS_UNMAPPABLE_CHARACTER
+# define STATUS_UNMAPPABLE_CHARACTER ((NTSTATUS) 0xC0000162L)
+#endif
+
+#ifndef STATUS_UNDEFINED_CHARACTER
+# define STATUS_UNDEFINED_CHARACTER ((NTSTATUS) 0xC0000163L)
+#endif
+
+#ifndef STATUS_FLOPPY_VOLUME
+# define STATUS_FLOPPY_VOLUME ((NTSTATUS) 0xC0000164L)
+#endif
+
+#ifndef STATUS_FLOPPY_ID_MARK_NOT_FOUND
+# define STATUS_FLOPPY_ID_MARK_NOT_FOUND ((NTSTATUS) 0xC0000165L)
+#endif
+
+#ifndef STATUS_FLOPPY_WRONG_CYLINDER
+# define STATUS_FLOPPY_WRONG_CYLINDER ((NTSTATUS) 0xC0000166L)
+#endif
+
+#ifndef STATUS_FLOPPY_UNKNOWN_ERROR
+# define STATUS_FLOPPY_UNKNOWN_ERROR ((NTSTATUS) 0xC0000167L)
+#endif
+
+#ifndef STATUS_FLOPPY_BAD_REGISTERS
+# define STATUS_FLOPPY_BAD_REGISTERS ((NTSTATUS) 0xC0000168L)
+#endif
+
+#ifndef STATUS_DISK_RECALIBRATE_FAILED
+# define STATUS_DISK_RECALIBRATE_FAILED ((NTSTATUS) 0xC0000169L)
+#endif
+
+#ifndef STATUS_DISK_OPERATION_FAILED
+# define STATUS_DISK_OPERATION_FAILED ((NTSTATUS) 0xC000016AL)
+#endif
+
+#ifndef STATUS_DISK_RESET_FAILED
+# define STATUS_DISK_RESET_FAILED ((NTSTATUS) 0xC000016BL)
+#endif
+
+#ifndef STATUS_SHARED_IRQ_BUSY
+# define STATUS_SHARED_IRQ_BUSY ((NTSTATUS) 0xC000016CL)
+#endif
+
+#ifndef STATUS_FT_ORPHANING
+# define STATUS_FT_ORPHANING ((NTSTATUS) 0xC000016DL)
+#endif
+
+#ifndef STATUS_BIOS_FAILED_TO_CONNECT_INTERRUPT
+# define STATUS_BIOS_FAILED_TO_CONNECT_INTERRUPT ((NTSTATUS) 0xC000016EL)
+#endif
+
+#ifndef STATUS_PARTITION_FAILURE
+# define STATUS_PARTITION_FAILURE ((NTSTATUS) 0xC0000172L)
+#endif
+
+#ifndef STATUS_INVALID_BLOCK_LENGTH
+# define STATUS_INVALID_BLOCK_LENGTH ((NTSTATUS) 0xC0000173L)
+#endif
+
+#ifndef STATUS_DEVICE_NOT_PARTITIONED
+# define STATUS_DEVICE_NOT_PARTITIONED ((NTSTATUS) 0xC0000174L)
+#endif
+
+#ifndef STATUS_UNABLE_TO_LOCK_MEDIA
+# define STATUS_UNABLE_TO_LOCK_MEDIA ((NTSTATUS) 0xC0000175L)
+#endif
+
+#ifndef STATUS_UNABLE_TO_UNLOAD_MEDIA
+# define STATUS_UNABLE_TO_UNLOAD_MEDIA ((NTSTATUS) 0xC0000176L)
+#endif
+
+#ifndef STATUS_EOM_OVERFLOW
+# define STATUS_EOM_OVERFLOW ((NTSTATUS) 0xC0000177L)
+#endif
+
+#ifndef STATUS_NO_MEDIA
+# define STATUS_NO_MEDIA ((NTSTATUS) 0xC0000178L)
+#endif
+
+#ifndef STATUS_NO_SUCH_MEMBER
+# define STATUS_NO_SUCH_MEMBER ((NTSTATUS) 0xC000017AL)
+#endif
+
+#ifndef STATUS_INVALID_MEMBER
+# define STATUS_INVALID_MEMBER ((NTSTATUS) 0xC000017BL)
+#endif
+
+#ifndef STATUS_KEY_DELETED
+# define STATUS_KEY_DELETED ((NTSTATUS) 0xC000017CL)
+#endif
+
+#ifndef STATUS_NO_LOG_SPACE
+# define STATUS_NO_LOG_SPACE ((NTSTATUS) 0xC000017DL)
+#endif
+
+#ifndef STATUS_TOO_MANY_SIDS
+# define STATUS_TOO_MANY_SIDS ((NTSTATUS) 0xC000017EL)
+#endif
+
+#ifndef STATUS_LM_CROSS_ENCRYPTION_REQUIRED
+# define STATUS_LM_CROSS_ENCRYPTION_REQUIRED ((NTSTATUS) 0xC000017FL)
+#endif
+
+#ifndef STATUS_KEY_HAS_CHILDREN
+# define STATUS_KEY_HAS_CHILDREN ((NTSTATUS) 0xC0000180L)
+#endif
+
+#ifndef STATUS_CHILD_MUST_BE_VOLATILE
+# define STATUS_CHILD_MUST_BE_VOLATILE ((NTSTATUS) 0xC0000181L)
+#endif
+
+#ifndef STATUS_DEVICE_CONFIGURATION_ERROR
+# define STATUS_DEVICE_CONFIGURATION_ERROR ((NTSTATUS) 0xC0000182L)
+#endif
+
+#ifndef STATUS_DRIVER_INTERNAL_ERROR
+# define STATUS_DRIVER_INTERNAL_ERROR ((NTSTATUS) 0xC0000183L)
+#endif
+
+#ifndef STATUS_INVALID_DEVICE_STATE
+# define STATUS_INVALID_DEVICE_STATE ((NTSTATUS) 0xC0000184L)
+#endif
+
+#ifndef STATUS_IO_DEVICE_ERROR
+# define STATUS_IO_DEVICE_ERROR ((NTSTATUS) 0xC0000185L)
+#endif
+
+#ifndef STATUS_DEVICE_PROTOCOL_ERROR
+# define STATUS_DEVICE_PROTOCOL_ERROR ((NTSTATUS) 0xC0000186L)
+#endif
+
+#ifndef STATUS_BACKUP_CONTROLLER
+# define STATUS_BACKUP_CONTROLLER ((NTSTATUS) 0xC0000187L)
+#endif
+
+#ifndef STATUS_LOG_FILE_FULL
+# define STATUS_LOG_FILE_FULL ((NTSTATUS) 0xC0000188L)
+#endif
+
+#ifndef STATUS_TOO_LATE
+# define STATUS_TOO_LATE ((NTSTATUS) 0xC0000189L)
+#endif
+
+#ifndef STATUS_NO_TRUST_LSA_SECRET
+# define STATUS_NO_TRUST_LSA_SECRET ((NTSTATUS) 0xC000018AL)
+#endif
+
+#ifndef STATUS_NO_TRUST_SAM_ACCOUNT
+# define STATUS_NO_TRUST_SAM_ACCOUNT ((NTSTATUS) 0xC000018BL)
+#endif
+
+#ifndef STATUS_TRUSTED_DOMAIN_FAILURE
+# define STATUS_TRUSTED_DOMAIN_FAILURE ((NTSTATUS) 0xC000018CL)
+#endif
+
+#ifndef STATUS_TRUSTED_RELATIONSHIP_FAILURE
+# define STATUS_TRUSTED_RELATIONSHIP_FAILURE ((NTSTATUS) 0xC000018DL)
+#endif
+
+#ifndef STATUS_EVENTLOG_FILE_CORRUPT
+# define STATUS_EVENTLOG_FILE_CORRUPT ((NTSTATUS) 0xC000018EL)
+#endif
+
+#ifndef STATUS_EVENTLOG_CANT_START
+# define STATUS_EVENTLOG_CANT_START ((NTSTATUS) 0xC000018FL)
+#endif
+
+#ifndef STATUS_TRUST_FAILURE
+# define STATUS_TRUST_FAILURE ((NTSTATUS) 0xC0000190L)
+#endif
+
+#ifndef STATUS_MUTANT_LIMIT_EXCEEDED
+# define STATUS_MUTANT_LIMIT_EXCEEDED ((NTSTATUS) 0xC0000191L)
+#endif
+
+#ifndef STATUS_NETLOGON_NOT_STARTED
+# define STATUS_NETLOGON_NOT_STARTED ((NTSTATUS) 0xC0000192L)
+#endif
+
+#ifndef STATUS_ACCOUNT_EXPIRED
+# define STATUS_ACCOUNT_EXPIRED ((NTSTATUS) 0xC0000193L)
+#endif
+
+#ifndef STATUS_POSSIBLE_DEADLOCK
+# define STATUS_POSSIBLE_DEADLOCK ((NTSTATUS) 0xC0000194L)
+#endif
+
+#ifndef STATUS_NETWORK_CREDENTIAL_CONFLICT
+# define STATUS_NETWORK_CREDENTIAL_CONFLICT ((NTSTATUS) 0xC0000195L)
+#endif
+
+#ifndef STATUS_REMOTE_SESSION_LIMIT
+# define STATUS_REMOTE_SESSION_LIMIT ((NTSTATUS) 0xC0000196L)
+#endif
+
+#ifndef STATUS_EVENTLOG_FILE_CHANGED
+# define STATUS_EVENTLOG_FILE_CHANGED ((NTSTATUS) 0xC0000197L)
+#endif
+
+#ifndef STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT
+# define STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT ((NTSTATUS) 0xC0000198L)
+#endif
+
+#ifndef STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT
+# define STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT ((NTSTATUS) 0xC0000199L)
+#endif
+
+#ifndef STATUS_NOLOGON_SERVER_TRUST_ACCOUNT
+# define STATUS_NOLOGON_SERVER_TRUST_ACCOUNT ((NTSTATUS) 0xC000019AL)
+#endif
+
+#ifndef STATUS_DOMAIN_TRUST_INCONSISTENT
+# define STATUS_DOMAIN_TRUST_INCONSISTENT ((NTSTATUS) 0xC000019BL)
+#endif
+
+#ifndef STATUS_FS_DRIVER_REQUIRED
+# define STATUS_FS_DRIVER_REQUIRED ((NTSTATUS) 0xC000019CL)
+#endif
+
+#ifndef STATUS_IMAGE_ALREADY_LOADED_AS_DLL
+# define STATUS_IMAGE_ALREADY_LOADED_AS_DLL ((NTSTATUS) 0xC000019DL)
+#endif
+
+#ifndef STATUS_INCOMPATIBLE_WITH_GLOBAL_SHORT_NAME_REGISTRY_SETTING
+# define STATUS_INCOMPATIBLE_WITH_GLOBAL_SHORT_NAME_REGISTRY_SETTING ((NTSTATUS) 0xC000019EL)
+#endif
+
+#ifndef STATUS_SHORT_NAMES_NOT_ENABLED_ON_VOLUME
+# define STATUS_SHORT_NAMES_NOT_ENABLED_ON_VOLUME ((NTSTATUS) 0xC000019FL)
+#endif
+
+#ifndef STATUS_SECURITY_STREAM_IS_INCONSISTENT
+# define STATUS_SECURITY_STREAM_IS_INCONSISTENT ((NTSTATUS) 0xC00001A0L)
+#endif
+
+#ifndef STATUS_INVALID_LOCK_RANGE
+# define STATUS_INVALID_LOCK_RANGE ((NTSTATUS) 0xC00001A1L)
+#endif
+
+#ifndef STATUS_INVALID_ACE_CONDITION
+# define STATUS_INVALID_ACE_CONDITION ((NTSTATUS) 0xC00001A2L)
+#endif
+
+#ifndef STATUS_IMAGE_SUBSYSTEM_NOT_PRESENT
+# define STATUS_IMAGE_SUBSYSTEM_NOT_PRESENT ((NTSTATUS) 0xC00001A3L)
+#endif
+
+#ifndef STATUS_NOTIFICATION_GUID_ALREADY_DEFINED
+# define STATUS_NOTIFICATION_GUID_ALREADY_DEFINED ((NTSTATUS) 0xC00001A4L)
+#endif
+
+#ifndef STATUS_NETWORK_OPEN_RESTRICTION
+# define STATUS_NETWORK_OPEN_RESTRICTION ((NTSTATUS) 0xC0000201L)
+#endif
+
+#ifndef STATUS_NO_USER_SESSION_KEY
+# define STATUS_NO_USER_SESSION_KEY ((NTSTATUS) 0xC0000202L)
+#endif
+
+#ifndef STATUS_USER_SESSION_DELETED
+# define STATUS_USER_SESSION_DELETED ((NTSTATUS) 0xC0000203L)
+#endif
+
+#ifndef STATUS_RESOURCE_LANG_NOT_FOUND
+# define STATUS_RESOURCE_LANG_NOT_FOUND ((NTSTATUS) 0xC0000204L)
+#endif
+
+#ifndef STATUS_INSUFF_SERVER_RESOURCES
+# define STATUS_INSUFF_SERVER_RESOURCES ((NTSTATUS) 0xC0000205L)
+#endif
+
+#ifndef STATUS_INVALID_BUFFER_SIZE
+# define STATUS_INVALID_BUFFER_SIZE ((NTSTATUS) 0xC0000206L)
+#endif
+
+#ifndef STATUS_INVALID_ADDRESS_COMPONENT
+# define STATUS_INVALID_ADDRESS_COMPONENT ((NTSTATUS) 0xC0000207L)
+#endif
+
+#ifndef STATUS_INVALID_ADDRESS_WILDCARD
+# define STATUS_INVALID_ADDRESS_WILDCARD ((NTSTATUS) 0xC0000208L)
+#endif
+
+#ifndef STATUS_TOO_MANY_ADDRESSES
+# define STATUS_TOO_MANY_ADDRESSES ((NTSTATUS) 0xC0000209L)
+#endif
+
+#ifndef STATUS_ADDRESS_ALREADY_EXISTS
+# define STATUS_ADDRESS_ALREADY_EXISTS ((NTSTATUS) 0xC000020AL)
+#endif
+
+#ifndef STATUS_ADDRESS_CLOSED
+# define STATUS_ADDRESS_CLOSED ((NTSTATUS) 0xC000020BL)
+#endif
+
+#ifndef STATUS_CONNECTION_DISCONNECTED
+# define STATUS_CONNECTION_DISCONNECTED ((NTSTATUS) 0xC000020CL)
+#endif
+
+#ifndef STATUS_CONNECTION_RESET
+# define STATUS_CONNECTION_RESET ((NTSTATUS) 0xC000020DL)
+#endif
+
+#ifndef STATUS_TOO_MANY_NODES
+# define STATUS_TOO_MANY_NODES ((NTSTATUS) 0xC000020EL)
+#endif
+
+#ifndef STATUS_TRANSACTION_ABORTED
+# define STATUS_TRANSACTION_ABORTED ((NTSTATUS) 0xC000020FL)
+#endif
+
+#ifndef STATUS_TRANSACTION_TIMED_OUT
+# define STATUS_TRANSACTION_TIMED_OUT ((NTSTATUS) 0xC0000210L)
+#endif
+
+#ifndef STATUS_TRANSACTION_NO_RELEASE
+# define STATUS_TRANSACTION_NO_RELEASE ((NTSTATUS) 0xC0000211L)
+#endif
+
+#ifndef STATUS_TRANSACTION_NO_MATCH
+# define STATUS_TRANSACTION_NO_MATCH ((NTSTATUS) 0xC0000212L)
+#endif
+
+#ifndef STATUS_TRANSACTION_RESPONDED
+# define STATUS_TRANSACTION_RESPONDED ((NTSTATUS) 0xC0000213L)
+#endif
+
+#ifndef STATUS_TRANSACTION_INVALID_ID
+# define STATUS_TRANSACTION_INVALID_ID ((NTSTATUS) 0xC0000214L)
+#endif
+
+#ifndef STATUS_TRANSACTION_INVALID_TYPE
+# define STATUS_TRANSACTION_INVALID_TYPE ((NTSTATUS) 0xC0000215L)
+#endif
+
+#ifndef STATUS_NOT_SERVER_SESSION
+# define STATUS_NOT_SERVER_SESSION ((NTSTATUS) 0xC0000216L)
+#endif
+
+#ifndef STATUS_NOT_CLIENT_SESSION
+# define STATUS_NOT_CLIENT_SESSION ((NTSTATUS) 0xC0000217L)
+#endif
+
+#ifndef STATUS_CANNOT_LOAD_REGISTRY_FILE
+# define STATUS_CANNOT_LOAD_REGISTRY_FILE ((NTSTATUS) 0xC0000218L)
+#endif
+
+#ifndef STATUS_DEBUG_ATTACH_FAILED
+# define STATUS_DEBUG_ATTACH_FAILED ((NTSTATUS) 0xC0000219L)
+#endif
+
+#ifndef STATUS_SYSTEM_PROCESS_TERMINATED
+# define STATUS_SYSTEM_PROCESS_TERMINATED ((NTSTATUS) 0xC000021AL)
+#endif
+
+#ifndef STATUS_DATA_NOT_ACCEPTED
+# define STATUS_DATA_NOT_ACCEPTED ((NTSTATUS) 0xC000021BL)
+#endif
+
+#ifndef STATUS_NO_BROWSER_SERVERS_FOUND
+# define STATUS_NO_BROWSER_SERVERS_FOUND ((NTSTATUS) 0xC000021CL)
+#endif
+
+#ifndef STATUS_VDM_HARD_ERROR
+# define STATUS_VDM_HARD_ERROR ((NTSTATUS) 0xC000021DL)
+#endif
+
+#ifndef STATUS_DRIVER_CANCEL_TIMEOUT
+# define STATUS_DRIVER_CANCEL_TIMEOUT ((NTSTATUS) 0xC000021EL)
+#endif
+
+#ifndef STATUS_REPLY_MESSAGE_MISMATCH
+# define STATUS_REPLY_MESSAGE_MISMATCH ((NTSTATUS) 0xC000021FL)
+#endif
+
+#ifndef STATUS_MAPPED_ALIGNMENT
+# define STATUS_MAPPED_ALIGNMENT ((NTSTATUS) 0xC0000220L)
+#endif
+
+#ifndef STATUS_IMAGE_CHECKSUM_MISMATCH
+# define STATUS_IMAGE_CHECKSUM_MISMATCH ((NTSTATUS) 0xC0000221L)
+#endif
+
+#ifndef STATUS_LOST_WRITEBEHIND_DATA
+# define STATUS_LOST_WRITEBEHIND_DATA ((NTSTATUS) 0xC0000222L)
+#endif
+
+#ifndef STATUS_CLIENT_SERVER_PARAMETERS_INVALID
+# define STATUS_CLIENT_SERVER_PARAMETERS_INVALID ((NTSTATUS) 0xC0000223L)
+#endif
+
+#ifndef STATUS_PASSWORD_MUST_CHANGE
+# define STATUS_PASSWORD_MUST_CHANGE ((NTSTATUS) 0xC0000224L)
+#endif
+
+#ifndef STATUS_NOT_FOUND
+# define STATUS_NOT_FOUND ((NTSTATUS) 0xC0000225L)
+#endif
+
+#ifndef STATUS_NOT_TINY_STREAM
+# define STATUS_NOT_TINY_STREAM ((NTSTATUS) 0xC0000226L)
+#endif
+
+#ifndef STATUS_RECOVERY_FAILURE
+# define STATUS_RECOVERY_FAILURE ((NTSTATUS) 0xC0000227L)
+#endif
+
+#ifndef STATUS_STACK_OVERFLOW_READ
+# define STATUS_STACK_OVERFLOW_READ ((NTSTATUS) 0xC0000228L)
+#endif
+
+#ifndef STATUS_FAIL_CHECK
+# define STATUS_FAIL_CHECK ((NTSTATUS) 0xC0000229L)
+#endif
+
+#ifndef STATUS_DUPLICATE_OBJECTID
+# define STATUS_DUPLICATE_OBJECTID ((NTSTATUS) 0xC000022AL)
+#endif
+
+#ifndef STATUS_OBJECTID_EXISTS
+# define STATUS_OBJECTID_EXISTS ((NTSTATUS) 0xC000022BL)
+#endif
+
+#ifndef STATUS_CONVERT_TO_LARGE
+# define STATUS_CONVERT_TO_LARGE ((NTSTATUS) 0xC000022CL)
+#endif
+
+#ifndef STATUS_RETRY
+# define STATUS_RETRY ((NTSTATUS) 0xC000022DL)
+#endif
+
+#ifndef STATUS_FOUND_OUT_OF_SCOPE
+# define STATUS_FOUND_OUT_OF_SCOPE ((NTSTATUS) 0xC000022EL)
+#endif
+
+#ifndef STATUS_ALLOCATE_BUCKET
+# define STATUS_ALLOCATE_BUCKET ((NTSTATUS) 0xC000022FL)
+#endif
+
+#ifndef STATUS_PROPSET_NOT_FOUND
+# define STATUS_PROPSET_NOT_FOUND ((NTSTATUS) 0xC0000230L)
+#endif
+
+#ifndef STATUS_MARSHALL_OVERFLOW
+# define STATUS_MARSHALL_OVERFLOW ((NTSTATUS) 0xC0000231L)
+#endif
+
+#ifndef STATUS_INVALID_VARIANT
+# define STATUS_INVALID_VARIANT ((NTSTATUS) 0xC0000232L)
+#endif
+
+#ifndef STATUS_DOMAIN_CONTROLLER_NOT_FOUND
+# define STATUS_DOMAIN_CONTROLLER_NOT_FOUND ((NTSTATUS) 0xC0000233L)
+#endif
+
+#ifndef STATUS_ACCOUNT_LOCKED_OUT
+# define STATUS_ACCOUNT_LOCKED_OUT ((NTSTATUS) 0xC0000234L)
+#endif
+
+#ifndef STATUS_HANDLE_NOT_CLOSABLE
+# define STATUS_HANDLE_NOT_CLOSABLE ((NTSTATUS) 0xC0000235L)
+#endif
+
+#ifndef STATUS_CONNECTION_REFUSED
+# define STATUS_CONNECTION_REFUSED ((NTSTATUS) 0xC0000236L)
+#endif
+
+#ifndef STATUS_GRACEFUL_DISCONNECT
+# define STATUS_GRACEFUL_DISCONNECT ((NTSTATUS) 0xC0000237L)
+#endif
+
+#ifndef STATUS_ADDRESS_ALREADY_ASSOCIATED
+# define STATUS_ADDRESS_ALREADY_ASSOCIATED ((NTSTATUS) 0xC0000238L)
+#endif
+
+#ifndef STATUS_ADDRESS_NOT_ASSOCIATED
+# define STATUS_ADDRESS_NOT_ASSOCIATED ((NTSTATUS) 0xC0000239L)
+#endif
+
+#ifndef STATUS_CONNECTION_INVALID
+# define STATUS_CONNECTION_INVALID ((NTSTATUS) 0xC000023AL)
+#endif
+
+#ifndef STATUS_CONNECTION_ACTIVE
+# define STATUS_CONNECTION_ACTIVE ((NTSTATUS) 0xC000023BL)
+#endif
+
+#ifndef STATUS_NETWORK_UNREACHABLE
+# define STATUS_NETWORK_UNREACHABLE ((NTSTATUS) 0xC000023CL)
+#endif
+
+#ifndef STATUS_HOST_UNREACHABLE
+# define STATUS_HOST_UNREACHABLE ((NTSTATUS) 0xC000023DL)
+#endif
+
+#ifndef STATUS_PROTOCOL_UNREACHABLE
+# define STATUS_PROTOCOL_UNREACHABLE ((NTSTATUS) 0xC000023EL)
+#endif
+
+#ifndef STATUS_PORT_UNREACHABLE
+# define STATUS_PORT_UNREACHABLE ((NTSTATUS) 0xC000023FL)
+#endif
+
+#ifndef STATUS_REQUEST_ABORTED
+# define STATUS_REQUEST_ABORTED ((NTSTATUS) 0xC0000240L)
+#endif
+
+#ifndef STATUS_CONNECTION_ABORTED
+# define STATUS_CONNECTION_ABORTED ((NTSTATUS) 0xC0000241L)
+#endif
+
+#ifndef STATUS_BAD_COMPRESSION_BUFFER
+# define STATUS_BAD_COMPRESSION_BUFFER ((NTSTATUS) 0xC0000242L)
+#endif
+
+#ifndef STATUS_USER_MAPPED_FILE
+# define STATUS_USER_MAPPED_FILE ((NTSTATUS) 0xC0000243L)
+#endif
+
+#ifndef STATUS_AUDIT_FAILED
+# define STATUS_AUDIT_FAILED ((NTSTATUS) 0xC0000244L)
+#endif
+
+#ifndef STATUS_TIMER_RESOLUTION_NOT_SET
+# define STATUS_TIMER_RESOLUTION_NOT_SET ((NTSTATUS) 0xC0000245L)
+#endif
+
+#ifndef STATUS_CONNECTION_COUNT_LIMIT
+# define STATUS_CONNECTION_COUNT_LIMIT ((NTSTATUS) 0xC0000246L)
+#endif
+
+#ifndef STATUS_LOGIN_TIME_RESTRICTION
+# define STATUS_LOGIN_TIME_RESTRICTION ((NTSTATUS) 0xC0000247L)
+#endif
+
+#ifndef STATUS_LOGIN_WKSTA_RESTRICTION
+# define STATUS_LOGIN_WKSTA_RESTRICTION ((NTSTATUS) 0xC0000248L)
+#endif
+
+#ifndef STATUS_IMAGE_MP_UP_MISMATCH
+# define STATUS_IMAGE_MP_UP_MISMATCH ((NTSTATUS) 0xC0000249L)
+#endif
+
+#ifndef STATUS_INSUFFICIENT_LOGON_INFO
+# define STATUS_INSUFFICIENT_LOGON_INFO ((NTSTATUS) 0xC0000250L)
+#endif
+
+#ifndef STATUS_BAD_DLL_ENTRYPOINT
+# define STATUS_BAD_DLL_ENTRYPOINT ((NTSTATUS) 0xC0000251L)
+#endif
+
+#ifndef STATUS_BAD_SERVICE_ENTRYPOINT
+# define STATUS_BAD_SERVICE_ENTRYPOINT ((NTSTATUS) 0xC0000252L)
+#endif
+
+#ifndef STATUS_LPC_REPLY_LOST
+# define STATUS_LPC_REPLY_LOST ((NTSTATUS) 0xC0000253L)
+#endif
+
+#ifndef STATUS_IP_ADDRESS_CONFLICT1
+# define STATUS_IP_ADDRESS_CONFLICT1 ((NTSTATUS) 0xC0000254L)
+#endif
+
+#ifndef STATUS_IP_ADDRESS_CONFLICT2
+# define STATUS_IP_ADDRESS_CONFLICT2 ((NTSTATUS) 0xC0000255L)
+#endif
+
+#ifndef STATUS_REGISTRY_QUOTA_LIMIT
+# define STATUS_REGISTRY_QUOTA_LIMIT ((NTSTATUS) 0xC0000256L)
+#endif
+
+#ifndef STATUS_PATH_NOT_COVERED
+# define STATUS_PATH_NOT_COVERED ((NTSTATUS) 0xC0000257L)
+#endif
+
+#ifndef STATUS_NO_CALLBACK_ACTIVE
+# define STATUS_NO_CALLBACK_ACTIVE ((NTSTATUS) 0xC0000258L)
+#endif
+
+#ifndef STATUS_LICENSE_QUOTA_EXCEEDED
+# define STATUS_LICENSE_QUOTA_EXCEEDED ((NTSTATUS) 0xC0000259L)
+#endif
+
+#ifndef STATUS_PWD_TOO_SHORT
+# define STATUS_PWD_TOO_SHORT ((NTSTATUS) 0xC000025AL)
+#endif
+
+#ifndef STATUS_PWD_TOO_RECENT
+# define STATUS_PWD_TOO_RECENT ((NTSTATUS) 0xC000025BL)
+#endif
+
+#ifndef STATUS_PWD_HISTORY_CONFLICT
+# define STATUS_PWD_HISTORY_CONFLICT ((NTSTATUS) 0xC000025CL)
+#endif
+
+#ifndef STATUS_PLUGPLAY_NO_DEVICE
+# define STATUS_PLUGPLAY_NO_DEVICE ((NTSTATUS) 0xC000025EL)
+#endif
+
+#ifndef STATUS_UNSUPPORTED_COMPRESSION
+# define STATUS_UNSUPPORTED_COMPRESSION ((NTSTATUS) 0xC000025FL)
+#endif
+
+#ifndef STATUS_INVALID_HW_PROFILE
+# define STATUS_INVALID_HW_PROFILE ((NTSTATUS) 0xC0000260L)
+#endif
+
+#ifndef STATUS_INVALID_PLUGPLAY_DEVICE_PATH
+# define STATUS_INVALID_PLUGPLAY_DEVICE_PATH ((NTSTATUS) 0xC0000261L)
+#endif
+
+#ifndef STATUS_DRIVER_ORDINAL_NOT_FOUND
+# define STATUS_DRIVER_ORDINAL_NOT_FOUND ((NTSTATUS) 0xC0000262L)
+#endif
+
+#ifndef STATUS_DRIVER_ENTRYPOINT_NOT_FOUND
+# define STATUS_DRIVER_ENTRYPOINT_NOT_FOUND ((NTSTATUS) 0xC0000263L)
+#endif
+
+#ifndef STATUS_RESOURCE_NOT_OWNED
+# define STATUS_RESOURCE_NOT_OWNED ((NTSTATUS) 0xC0000264L)
+#endif
+
+#ifndef STATUS_TOO_MANY_LINKS
+# define STATUS_TOO_MANY_LINKS ((NTSTATUS) 0xC0000265L)
+#endif
+
+#ifndef STATUS_QUOTA_LIST_INCONSISTENT
+# define STATUS_QUOTA_LIST_INCONSISTENT ((NTSTATUS) 0xC0000266L)
+#endif
+
+#ifndef STATUS_FILE_IS_OFFLINE
+# define STATUS_FILE_IS_OFFLINE ((NTSTATUS) 0xC0000267L)
+#endif
+
+#ifndef STATUS_EVALUATION_EXPIRATION
+# define STATUS_EVALUATION_EXPIRATION ((NTSTATUS) 0xC0000268L)
+#endif
+
+#ifndef STATUS_ILLEGAL_DLL_RELOCATION
+# define STATUS_ILLEGAL_DLL_RELOCATION ((NTSTATUS) 0xC0000269L)
+#endif
+
+#ifndef STATUS_LICENSE_VIOLATION
+# define STATUS_LICENSE_VIOLATION ((NTSTATUS) 0xC000026AL)
+#endif
+
+#ifndef STATUS_DLL_INIT_FAILED_LOGOFF
+# define STATUS_DLL_INIT_FAILED_LOGOFF ((NTSTATUS) 0xC000026BL)
+#endif
+
+#ifndef STATUS_DRIVER_UNABLE_TO_LOAD
+# define STATUS_DRIVER_UNABLE_TO_LOAD ((NTSTATUS) 0xC000026CL)
+#endif
+
+#ifndef STATUS_DFS_UNAVAILABLE
+# define STATUS_DFS_UNAVAILABLE ((NTSTATUS) 0xC000026DL)
+#endif
+
+#ifndef STATUS_VOLUME_DISMOUNTED
+# define STATUS_VOLUME_DISMOUNTED ((NTSTATUS) 0xC000026EL)
+#endif
+
+#ifndef STATUS_WX86_INTERNAL_ERROR
+# define STATUS_WX86_INTERNAL_ERROR ((NTSTATUS) 0xC000026FL)
+#endif
+
+#ifndef STATUS_WX86_FLOAT_STACK_CHECK
+# define STATUS_WX86_FLOAT_STACK_CHECK ((NTSTATUS) 0xC0000270L)
+#endif
+
+#ifndef STATUS_VALIDATE_CONTINUE
+# define STATUS_VALIDATE_CONTINUE ((NTSTATUS) 0xC0000271L)
+#endif
+
+#ifndef STATUS_NO_MATCH
+# define STATUS_NO_MATCH ((NTSTATUS) 0xC0000272L)
+#endif
+
+#ifndef STATUS_NO_MORE_MATCHES
+# define STATUS_NO_MORE_MATCHES ((NTSTATUS) 0xC0000273L)
+#endif
+
+#ifndef STATUS_NOT_A_REPARSE_POINT
+# define STATUS_NOT_A_REPARSE_POINT ((NTSTATUS) 0xC0000275L)
+#endif
+
+#ifndef STATUS_IO_REPARSE_TAG_INVALID
+# define STATUS_IO_REPARSE_TAG_INVALID ((NTSTATUS) 0xC0000276L)
+#endif
+
+#ifndef STATUS_IO_REPARSE_TAG_MISMATCH
+# define STATUS_IO_REPARSE_TAG_MISMATCH ((NTSTATUS) 0xC0000277L)
+#endif
+
+#ifndef STATUS_IO_REPARSE_DATA_INVALID
+# define STATUS_IO_REPARSE_DATA_INVALID ((NTSTATUS) 0xC0000278L)
+#endif
+
+#ifndef STATUS_IO_REPARSE_TAG_NOT_HANDLED
+# define STATUS_IO_REPARSE_TAG_NOT_HANDLED ((NTSTATUS) 0xC0000279L)
+#endif
+
+#ifndef STATUS_REPARSE_POINT_NOT_RESOLVED
+# define STATUS_REPARSE_POINT_NOT_RESOLVED ((NTSTATUS) 0xC0000280L)
+#endif
+
+#ifndef STATUS_DIRECTORY_IS_A_REPARSE_POINT
+# define STATUS_DIRECTORY_IS_A_REPARSE_POINT ((NTSTATUS) 0xC0000281L)
+#endif
+
+#ifndef STATUS_RANGE_LIST_CONFLICT
+# define STATUS_RANGE_LIST_CONFLICT ((NTSTATUS) 0xC0000282L)
+#endif
+
+#ifndef STATUS_SOURCE_ELEMENT_EMPTY
+# define STATUS_SOURCE_ELEMENT_EMPTY ((NTSTATUS) 0xC0000283L)
+#endif
+
+#ifndef STATUS_DESTINATION_ELEMENT_FULL
+# define STATUS_DESTINATION_ELEMENT_FULL ((NTSTATUS) 0xC0000284L)
+#endif
+
+#ifndef STATUS_ILLEGAL_ELEMENT_ADDRESS
+# define STATUS_ILLEGAL_ELEMENT_ADDRESS ((NTSTATUS) 0xC0000285L)
+#endif
+
+#ifndef STATUS_MAGAZINE_NOT_PRESENT
+# define STATUS_MAGAZINE_NOT_PRESENT ((NTSTATUS) 0xC0000286L)
+#endif
+
+#ifndef STATUS_REINITIALIZATION_NEEDED
+# define STATUS_REINITIALIZATION_NEEDED ((NTSTATUS) 0xC0000287L)
+#endif
+
+#ifndef STATUS_DEVICE_REQUIRES_CLEANING
+# define STATUS_DEVICE_REQUIRES_CLEANING ((NTSTATUS) 0x80000288L)
+#endif
+
+#ifndef STATUS_DEVICE_DOOR_OPEN
+# define STATUS_DEVICE_DOOR_OPEN ((NTSTATUS) 0x80000289L)
+#endif
+
+#ifndef STATUS_ENCRYPTION_FAILED
+# define STATUS_ENCRYPTION_FAILED ((NTSTATUS) 0xC000028AL)
+#endif
+
+#ifndef STATUS_DECRYPTION_FAILED
+# define STATUS_DECRYPTION_FAILED ((NTSTATUS) 0xC000028BL)
+#endif
+
+#ifndef STATUS_RANGE_NOT_FOUND
+# define STATUS_RANGE_NOT_FOUND ((NTSTATUS) 0xC000028CL)
+#endif
+
+#ifndef STATUS_NO_RECOVERY_POLICY
+# define STATUS_NO_RECOVERY_POLICY ((NTSTATUS) 0xC000028DL)
+#endif
+
+#ifndef STATUS_NO_EFS
+# define STATUS_NO_EFS ((NTSTATUS) 0xC000028EL)
+#endif
+
+#ifndef STATUS_WRONG_EFS
+# define STATUS_WRONG_EFS ((NTSTATUS) 0xC000028FL)
+#endif
+
+#ifndef STATUS_NO_USER_KEYS
+# define STATUS_NO_USER_KEYS ((NTSTATUS) 0xC0000290L)
+#endif
+
+#ifndef STATUS_FILE_NOT_ENCRYPTED
+# define STATUS_FILE_NOT_ENCRYPTED ((NTSTATUS) 0xC0000291L)
+#endif
+
+#ifndef STATUS_NOT_EXPORT_FORMAT
+# define STATUS_NOT_EXPORT_FORMAT ((NTSTATUS) 0xC0000292L)
+#endif
+
+#ifndef STATUS_FILE_ENCRYPTED
+# define STATUS_FILE_ENCRYPTED ((NTSTATUS) 0xC0000293L)
+#endif
+
+#ifndef STATUS_WAKE_SYSTEM
+# define STATUS_WAKE_SYSTEM ((NTSTATUS) 0x40000294L)
+#endif
+
+#ifndef STATUS_WMI_GUID_NOT_FOUND
+# define STATUS_WMI_GUID_NOT_FOUND ((NTSTATUS) 0xC0000295L)
+#endif
+
+#ifndef STATUS_WMI_INSTANCE_NOT_FOUND
+# define STATUS_WMI_INSTANCE_NOT_FOUND ((NTSTATUS) 0xC0000296L)
+#endif
+
+#ifndef STATUS_WMI_ITEMID_NOT_FOUND
+# define STATUS_WMI_ITEMID_NOT_FOUND ((NTSTATUS) 0xC0000297L)
+#endif
+
+#ifndef STATUS_WMI_TRY_AGAIN
+# define STATUS_WMI_TRY_AGAIN ((NTSTATUS) 0xC0000298L)
+#endif
+
+#ifndef STATUS_SHARED_POLICY
+# define STATUS_SHARED_POLICY ((NTSTATUS) 0xC0000299L)
+#endif
+
+#ifndef STATUS_POLICY_OBJECT_NOT_FOUND
+# define STATUS_POLICY_OBJECT_NOT_FOUND ((NTSTATUS) 0xC000029AL)
+#endif
+
+#ifndef STATUS_POLICY_ONLY_IN_DS
+# define STATUS_POLICY_ONLY_IN_DS ((NTSTATUS) 0xC000029BL)
+#endif
+
+#ifndef STATUS_VOLUME_NOT_UPGRADED
+# define STATUS_VOLUME_NOT_UPGRADED ((NTSTATUS) 0xC000029CL)
+#endif
+
+#ifndef STATUS_REMOTE_STORAGE_NOT_ACTIVE
+# define STATUS_REMOTE_STORAGE_NOT_ACTIVE ((NTSTATUS) 0xC000029DL)
+#endif
+
+#ifndef STATUS_REMOTE_STORAGE_MEDIA_ERROR
+# define STATUS_REMOTE_STORAGE_MEDIA_ERROR ((NTSTATUS) 0xC000029EL)
+#endif
+
+#ifndef STATUS_NO_TRACKING_SERVICE
+# define STATUS_NO_TRACKING_SERVICE ((NTSTATUS) 0xC000029FL)
+#endif
+
+#ifndef STATUS_SERVER_SID_MISMATCH
+# define STATUS_SERVER_SID_MISMATCH ((NTSTATUS) 0xC00002A0L)
+#endif
+
+#ifndef STATUS_DS_NO_ATTRIBUTE_OR_VALUE
+# define STATUS_DS_NO_ATTRIBUTE_OR_VALUE ((NTSTATUS) 0xC00002A1L)
+#endif
+
+#ifndef STATUS_DS_INVALID_ATTRIBUTE_SYNTAX
+# define STATUS_DS_INVALID_ATTRIBUTE_SYNTAX ((NTSTATUS) 0xC00002A2L)
+#endif
+
+#ifndef STATUS_DS_ATTRIBUTE_TYPE_UNDEFINED
+# define STATUS_DS_ATTRIBUTE_TYPE_UNDEFINED ((NTSTATUS) 0xC00002A3L)
+#endif
+
+#ifndef STATUS_DS_ATTRIBUTE_OR_VALUE_EXISTS
+# define STATUS_DS_ATTRIBUTE_OR_VALUE_EXISTS ((NTSTATUS) 0xC00002A4L)
+#endif
+
+#ifndef STATUS_DS_BUSY
+# define STATUS_DS_BUSY ((NTSTATUS) 0xC00002A5L)
+#endif
+
+#ifndef STATUS_DS_UNAVAILABLE
+# define STATUS_DS_UNAVAILABLE ((NTSTATUS) 0xC00002A6L)
+#endif
+
+#ifndef STATUS_DS_NO_RIDS_ALLOCATED
+# define STATUS_DS_NO_RIDS_ALLOCATED ((NTSTATUS) 0xC00002A7L)
+#endif
+
+#ifndef STATUS_DS_NO_MORE_RIDS
+# define STATUS_DS_NO_MORE_RIDS ((NTSTATUS) 0xC00002A8L)
+#endif
+
+#ifndef STATUS_DS_INCORRECT_ROLE_OWNER
+# define STATUS_DS_INCORRECT_ROLE_OWNER ((NTSTATUS) 0xC00002A9L)
+#endif
+
+#ifndef STATUS_DS_RIDMGR_INIT_ERROR
+# define STATUS_DS_RIDMGR_INIT_ERROR ((NTSTATUS) 0xC00002AAL)
+#endif
+
+#ifndef STATUS_DS_OBJ_CLASS_VIOLATION
+# define STATUS_DS_OBJ_CLASS_VIOLATION ((NTSTATUS) 0xC00002ABL)
+#endif
+
+#ifndef STATUS_DS_CANT_ON_NON_LEAF
+# define STATUS_DS_CANT_ON_NON_LEAF ((NTSTATUS) 0xC00002ACL)
+#endif
+
+#ifndef STATUS_DS_CANT_ON_RDN
+# define STATUS_DS_CANT_ON_RDN ((NTSTATUS) 0xC00002ADL)
+#endif
+
+#ifndef STATUS_DS_CANT_MOD_OBJ_CLASS
+# define STATUS_DS_CANT_MOD_OBJ_CLASS ((NTSTATUS) 0xC00002AEL)
+#endif
+
+#ifndef STATUS_DS_CROSS_DOM_MOVE_FAILED
+# define STATUS_DS_CROSS_DOM_MOVE_FAILED ((NTSTATUS) 0xC00002AFL)
+#endif
+
+#ifndef STATUS_DS_GC_NOT_AVAILABLE
+# define STATUS_DS_GC_NOT_AVAILABLE ((NTSTATUS) 0xC00002B0L)
+#endif
+
+#ifndef STATUS_DIRECTORY_SERVICE_REQUIRED
+# define STATUS_DIRECTORY_SERVICE_REQUIRED ((NTSTATUS) 0xC00002B1L)
+#endif
+
+#ifndef STATUS_REPARSE_ATTRIBUTE_CONFLICT
+# define STATUS_REPARSE_ATTRIBUTE_CONFLICT ((NTSTATUS) 0xC00002B2L)
+#endif
+
+#ifndef STATUS_CANT_ENABLE_DENY_ONLY
+# define STATUS_CANT_ENABLE_DENY_ONLY ((NTSTATUS) 0xC00002B3L)
+#endif
+
+#ifndef STATUS_FLOAT_MULTIPLE_FAULTS
+# define STATUS_FLOAT_MULTIPLE_FAULTS ((NTSTATUS) 0xC00002B4L)
+#endif
+
+#ifndef STATUS_FLOAT_MULTIPLE_TRAPS
+# define STATUS_FLOAT_MULTIPLE_TRAPS ((NTSTATUS) 0xC00002B5L)
+#endif
+
+#ifndef STATUS_DEVICE_REMOVED
+# define STATUS_DEVICE_REMOVED ((NTSTATUS) 0xC00002B6L)
+#endif
+
+#ifndef STATUS_JOURNAL_DELETE_IN_PROGRESS
+# define STATUS_JOURNAL_DELETE_IN_PROGRESS ((NTSTATUS) 0xC00002B7L)
+#endif
+
+#ifndef STATUS_JOURNAL_NOT_ACTIVE
+# define STATUS_JOURNAL_NOT_ACTIVE ((NTSTATUS) 0xC00002B8L)
+#endif
+
+#ifndef STATUS_NOINTERFACE
+# define STATUS_NOINTERFACE ((NTSTATUS) 0xC00002B9L)
+#endif
+
+#ifndef STATUS_DS_ADMIN_LIMIT_EXCEEDED
+# define STATUS_DS_ADMIN_LIMIT_EXCEEDED ((NTSTATUS) 0xC00002C1L)
+#endif
+
+#ifndef STATUS_DRIVER_FAILED_SLEEP
+# define STATUS_DRIVER_FAILED_SLEEP ((NTSTATUS) 0xC00002C2L)
+#endif
+
+#ifndef STATUS_MUTUAL_AUTHENTICATION_FAILED
+# define STATUS_MUTUAL_AUTHENTICATION_FAILED ((NTSTATUS) 0xC00002C3L)
+#endif
+
+#ifndef STATUS_CORRUPT_SYSTEM_FILE
+# define STATUS_CORRUPT_SYSTEM_FILE ((NTSTATUS) 0xC00002C4L)
+#endif
+
+#ifndef STATUS_DATATYPE_MISALIGNMENT_ERROR
+# define STATUS_DATATYPE_MISALIGNMENT_ERROR ((NTSTATUS) 0xC00002C5L)
+#endif
+
+#ifndef STATUS_WMI_READ_ONLY
+# define STATUS_WMI_READ_ONLY ((NTSTATUS) 0xC00002C6L)
+#endif
+
+#ifndef STATUS_WMI_SET_FAILURE
+# define STATUS_WMI_SET_FAILURE ((NTSTATUS) 0xC00002C7L)
+#endif
+
+#ifndef STATUS_COMMITMENT_MINIMUM
+# define STATUS_COMMITMENT_MINIMUM ((NTSTATUS) 0xC00002C8L)
+#endif
+
+#ifndef STATUS_REG_NAT_CONSUMPTION
+# define STATUS_REG_NAT_CONSUMPTION ((NTSTATUS) 0xC00002C9L)
+#endif
+
+#ifndef STATUS_TRANSPORT_FULL
+# define STATUS_TRANSPORT_FULL ((NTSTATUS) 0xC00002CAL)
+#endif
+
+#ifndef STATUS_DS_SAM_INIT_FAILURE
+# define STATUS_DS_SAM_INIT_FAILURE ((NTSTATUS) 0xC00002CBL)
+#endif
+
+#ifndef STATUS_ONLY_IF_CONNECTED
+# define STATUS_ONLY_IF_CONNECTED ((NTSTATUS) 0xC00002CCL)
+#endif
+
+#ifndef STATUS_DS_SENSITIVE_GROUP_VIOLATION
+# define STATUS_DS_SENSITIVE_GROUP_VIOLATION ((NTSTATUS) 0xC00002CDL)
+#endif
+
+#ifndef STATUS_PNP_RESTART_ENUMERATION
+# define STATUS_PNP_RESTART_ENUMERATION ((NTSTATUS) 0xC00002CEL)
+#endif
+
+#ifndef STATUS_JOURNAL_ENTRY_DELETED
+# define STATUS_JOURNAL_ENTRY_DELETED ((NTSTATUS) 0xC00002CFL)
+#endif
+
+#ifndef STATUS_DS_CANT_MOD_PRIMARYGROUPID
+# define STATUS_DS_CANT_MOD_PRIMARYGROUPID ((NTSTATUS) 0xC00002D0L)
+#endif
+
+#ifndef STATUS_SYSTEM_IMAGE_BAD_SIGNATURE
+# define STATUS_SYSTEM_IMAGE_BAD_SIGNATURE ((NTSTATUS) 0xC00002D1L)
+#endif
+
+#ifndef STATUS_PNP_REBOOT_REQUIRED
+# define STATUS_PNP_REBOOT_REQUIRED ((NTSTATUS) 0xC00002D2L)
+#endif
+
+#ifndef STATUS_POWER_STATE_INVALID
+# define STATUS_POWER_STATE_INVALID ((NTSTATUS) 0xC00002D3L)
+#endif
+
+#ifndef STATUS_DS_INVALID_GROUP_TYPE
+# define STATUS_DS_INVALID_GROUP_TYPE ((NTSTATUS) 0xC00002D4L)
+#endif
+
+#ifndef STATUS_DS_NO_NEST_GLOBALGROUP_IN_MIXEDDOMAIN
+# define STATUS_DS_NO_NEST_GLOBALGROUP_IN_MIXEDDOMAIN ((NTSTATUS) 0xC00002D5L)
+#endif
+
+#ifndef STATUS_DS_NO_NEST_LOCALGROUP_IN_MIXEDDOMAIN
+# define STATUS_DS_NO_NEST_LOCALGROUP_IN_MIXEDDOMAIN ((NTSTATUS) 0xC00002D6L)
+#endif
+
+#ifndef STATUS_DS_GLOBAL_CANT_HAVE_LOCAL_MEMBER
+# define STATUS_DS_GLOBAL_CANT_HAVE_LOCAL_MEMBER ((NTSTATUS) 0xC00002D7L)
+#endif
+
+#ifndef STATUS_DS_GLOBAL_CANT_HAVE_UNIVERSAL_MEMBER
+# define STATUS_DS_GLOBAL_CANT_HAVE_UNIVERSAL_MEMBER ((NTSTATUS) 0xC00002D8L)
+#endif
+
+#ifndef STATUS_DS_UNIVERSAL_CANT_HAVE_LOCAL_MEMBER
+# define STATUS_DS_UNIVERSAL_CANT_HAVE_LOCAL_MEMBER ((NTSTATUS) 0xC00002D9L)
+#endif
+
+#ifndef STATUS_DS_GLOBAL_CANT_HAVE_CROSSDOMAIN_MEMBER
+# define STATUS_DS_GLOBAL_CANT_HAVE_CROSSDOMAIN_MEMBER ((NTSTATUS) 0xC00002DAL)
+#endif
+
+#ifndef STATUS_DS_LOCAL_CANT_HAVE_CROSSDOMAIN_LOCAL_MEMBER
+# define STATUS_DS_LOCAL_CANT_HAVE_CROSSDOMAIN_LOCAL_MEMBER ((NTSTATUS) 0xC00002DBL)
+#endif
+
+#ifndef STATUS_DS_HAVE_PRIMARY_MEMBERS
+# define STATUS_DS_HAVE_PRIMARY_MEMBERS ((NTSTATUS) 0xC00002DCL)
+#endif
+
+#ifndef STATUS_WMI_NOT_SUPPORTED
+# define STATUS_WMI_NOT_SUPPORTED ((NTSTATUS) 0xC00002DDL)
+#endif
+
+#ifndef STATUS_INSUFFICIENT_POWER
+# define STATUS_INSUFFICIENT_POWER ((NTSTATUS) 0xC00002DEL)
+#endif
+
+#ifndef STATUS_SAM_NEED_BOOTKEY_PASSWORD
+# define STATUS_SAM_NEED_BOOTKEY_PASSWORD ((NTSTATUS) 0xC00002DFL)
+#endif
+
+#ifndef STATUS_SAM_NEED_BOOTKEY_FLOPPY
+# define STATUS_SAM_NEED_BOOTKEY_FLOPPY ((NTSTATUS) 0xC00002E0L)
+#endif
+
+#ifndef STATUS_DS_CANT_START
+# define STATUS_DS_CANT_START ((NTSTATUS) 0xC00002E1L)
+#endif
+
+#ifndef STATUS_DS_INIT_FAILURE
+# define STATUS_DS_INIT_FAILURE ((NTSTATUS) 0xC00002E2L)
+#endif
+
+#ifndef STATUS_SAM_INIT_FAILURE
+# define STATUS_SAM_INIT_FAILURE ((NTSTATUS) 0xC00002E3L)
+#endif
+
+#ifndef STATUS_DS_GC_REQUIRED
+# define STATUS_DS_GC_REQUIRED ((NTSTATUS) 0xC00002E4L)
+#endif
+
+#ifndef STATUS_DS_LOCAL_MEMBER_OF_LOCAL_ONLY
+# define STATUS_DS_LOCAL_MEMBER_OF_LOCAL_ONLY ((NTSTATUS) 0xC00002E5L)
+#endif
+
+#ifndef STATUS_DS_NO_FPO_IN_UNIVERSAL_GROUPS
+# define STATUS_DS_NO_FPO_IN_UNIVERSAL_GROUPS ((NTSTATUS) 0xC00002E6L)
+#endif
+
+#ifndef STATUS_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED
+# define STATUS_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED ((NTSTATUS) 0xC00002E7L)
+#endif
+
+#ifndef STATUS_MULTIPLE_FAULT_VIOLATION
+# define STATUS_MULTIPLE_FAULT_VIOLATION ((NTSTATUS) 0xC00002E8L)
+#endif
+
+#ifndef STATUS_CURRENT_DOMAIN_NOT_ALLOWED
+# define STATUS_CURRENT_DOMAIN_NOT_ALLOWED ((NTSTATUS) 0xC00002E9L)
+#endif
+
+#ifndef STATUS_CANNOT_MAKE
+# define STATUS_CANNOT_MAKE ((NTSTATUS) 0xC00002EAL)
+#endif
+
+#ifndef STATUS_SYSTEM_SHUTDOWN
+# define STATUS_SYSTEM_SHUTDOWN ((NTSTATUS) 0xC00002EBL)
+#endif
+
+#ifndef STATUS_DS_INIT_FAILURE_CONSOLE
+# define STATUS_DS_INIT_FAILURE_CONSOLE ((NTSTATUS) 0xC00002ECL)
+#endif
+
+#ifndef STATUS_DS_SAM_INIT_FAILURE_CONSOLE
+# define STATUS_DS_SAM_INIT_FAILURE_CONSOLE ((NTSTATUS) 0xC00002EDL)
+#endif
+
+#ifndef STATUS_UNFINISHED_CONTEXT_DELETED
+# define STATUS_UNFINISHED_CONTEXT_DELETED ((NTSTATUS) 0xC00002EEL)
+#endif
+
+#ifndef STATUS_NO_TGT_REPLY
+# define STATUS_NO_TGT_REPLY ((NTSTATUS) 0xC00002EFL)
+#endif
+
+#ifndef STATUS_OBJECTID_NOT_FOUND
+# define STATUS_OBJECTID_NOT_FOUND ((NTSTATUS) 0xC00002F0L)
+#endif
+
+#ifndef STATUS_NO_IP_ADDRESSES
+# define STATUS_NO_IP_ADDRESSES ((NTSTATUS) 0xC00002F1L)
+#endif
+
+#ifndef STATUS_WRONG_CREDENTIAL_HANDLE
+# define STATUS_WRONG_CREDENTIAL_HANDLE ((NTSTATUS) 0xC00002F2L)
+#endif
+
+#ifndef STATUS_CRYPTO_SYSTEM_INVALID
+# define STATUS_CRYPTO_SYSTEM_INVALID ((NTSTATUS) 0xC00002F3L)
+#endif
+
+#ifndef STATUS_MAX_REFERRALS_EXCEEDED
+# define STATUS_MAX_REFERRALS_EXCEEDED ((NTSTATUS) 0xC00002F4L)
+#endif
+
+#ifndef STATUS_MUST_BE_KDC
+# define STATUS_MUST_BE_KDC ((NTSTATUS) 0xC00002F5L)
+#endif
+
+#ifndef STATUS_STRONG_CRYPTO_NOT_SUPPORTED
+# define STATUS_STRONG_CRYPTO_NOT_SUPPORTED ((NTSTATUS) 0xC00002F6L)
+#endif
+
+#ifndef STATUS_TOO_MANY_PRINCIPALS
+# define STATUS_TOO_MANY_PRINCIPALS ((NTSTATUS) 0xC00002F7L)
+#endif
+
+#ifndef STATUS_NO_PA_DATA
+# define STATUS_NO_PA_DATA ((NTSTATUS) 0xC00002F8L)
+#endif
+
+#ifndef STATUS_PKINIT_NAME_MISMATCH
+# define STATUS_PKINIT_NAME_MISMATCH ((NTSTATUS) 0xC00002F9L)
+#endif
+
+#ifndef STATUS_SMARTCARD_LOGON_REQUIRED
+# define STATUS_SMARTCARD_LOGON_REQUIRED ((NTSTATUS) 0xC00002FAL)
+#endif
+
+#ifndef STATUS_KDC_INVALID_REQUEST
+# define STATUS_KDC_INVALID_REQUEST ((NTSTATUS) 0xC00002FBL)
+#endif
+
+#ifndef STATUS_KDC_UNABLE_TO_REFER
+# define STATUS_KDC_UNABLE_TO_REFER ((NTSTATUS) 0xC00002FCL)
+#endif
+
+#ifndef STATUS_KDC_UNKNOWN_ETYPE
+# define STATUS_KDC_UNKNOWN_ETYPE ((NTSTATUS) 0xC00002FDL)
+#endif
+
+#ifndef STATUS_SHUTDOWN_IN_PROGRESS
+# define STATUS_SHUTDOWN_IN_PROGRESS ((NTSTATUS) 0xC00002FEL)
+#endif
+
+#ifndef STATUS_SERVER_SHUTDOWN_IN_PROGRESS
+# define STATUS_SERVER_SHUTDOWN_IN_PROGRESS ((NTSTATUS) 0xC00002FFL)
+#endif
+
+#ifndef STATUS_NOT_SUPPORTED_ON_SBS
+# define STATUS_NOT_SUPPORTED_ON_SBS ((NTSTATUS) 0xC0000300L)
+#endif
+
+#ifndef STATUS_WMI_GUID_DISCONNECTED
+# define STATUS_WMI_GUID_DISCONNECTED ((NTSTATUS) 0xC0000301L)
+#endif
+
+#ifndef STATUS_WMI_ALREADY_DISABLED
+# define STATUS_WMI_ALREADY_DISABLED ((NTSTATUS) 0xC0000302L)
+#endif
+
+#ifndef STATUS_WMI_ALREADY_ENABLED
+# define STATUS_WMI_ALREADY_ENABLED ((NTSTATUS) 0xC0000303L)
+#endif
+
+#ifndef STATUS_MFT_TOO_FRAGMENTED
+# define STATUS_MFT_TOO_FRAGMENTED ((NTSTATUS) 0xC0000304L)
+#endif
+
+#ifndef STATUS_COPY_PROTECTION_FAILURE
+# define STATUS_COPY_PROTECTION_FAILURE ((NTSTATUS) 0xC0000305L)
+#endif
+
+#ifndef STATUS_CSS_AUTHENTICATION_FAILURE
+# define STATUS_CSS_AUTHENTICATION_FAILURE ((NTSTATUS) 0xC0000306L)
+#endif
+
+#ifndef STATUS_CSS_KEY_NOT_PRESENT
+# define STATUS_CSS_KEY_NOT_PRESENT ((NTSTATUS) 0xC0000307L)
+#endif
+
+#ifndef STATUS_CSS_KEY_NOT_ESTABLISHED
+# define STATUS_CSS_KEY_NOT_ESTABLISHED ((NTSTATUS) 0xC0000308L)
+#endif
+
+#ifndef STATUS_CSS_SCRAMBLED_SECTOR
+# define STATUS_CSS_SCRAMBLED_SECTOR ((NTSTATUS) 0xC0000309L)
+#endif
+
+#ifndef STATUS_CSS_REGION_MISMATCH
+# define STATUS_CSS_REGION_MISMATCH ((NTSTATUS) 0xC000030AL)
+#endif
+
+#ifndef STATUS_CSS_RESETS_EXHAUSTED
+# define STATUS_CSS_RESETS_EXHAUSTED ((NTSTATUS) 0xC000030BL)
+#endif
+
+#ifndef STATUS_PKINIT_FAILURE
+# define STATUS_PKINIT_FAILURE ((NTSTATUS) 0xC0000320L)
+#endif
+
+#ifndef STATUS_SMARTCARD_SUBSYSTEM_FAILURE
+# define STATUS_SMARTCARD_SUBSYSTEM_FAILURE ((NTSTATUS) 0xC0000321L)
+#endif
+
+#ifndef STATUS_NO_KERB_KEY
+# define STATUS_NO_KERB_KEY ((NTSTATUS) 0xC0000322L)
+#endif
+
+#ifndef STATUS_HOST_DOWN
+# define STATUS_HOST_DOWN ((NTSTATUS) 0xC0000350L)
+#endif
+
+#ifndef STATUS_UNSUPPORTED_PREAUTH
+# define STATUS_UNSUPPORTED_PREAUTH ((NTSTATUS) 0xC0000351L)
+#endif
+
+#ifndef STATUS_EFS_ALG_BLOB_TOO_BIG
+# define STATUS_EFS_ALG_BLOB_TOO_BIG ((NTSTATUS) 0xC0000352L)
+#endif
+
+#ifndef STATUS_PORT_NOT_SET
+# define STATUS_PORT_NOT_SET ((NTSTATUS) 0xC0000353L)
+#endif
+
+#ifndef STATUS_DEBUGGER_INACTIVE
+# define STATUS_DEBUGGER_INACTIVE ((NTSTATUS) 0xC0000354L)
+#endif
+
+#ifndef STATUS_DS_VERSION_CHECK_FAILURE
+# define STATUS_DS_VERSION_CHECK_FAILURE ((NTSTATUS) 0xC0000355L)
+#endif
+
+#ifndef STATUS_AUDITING_DISABLED
+# define STATUS_AUDITING_DISABLED ((NTSTATUS) 0xC0000356L)
+#endif
+
+#ifndef STATUS_PRENT4_MACHINE_ACCOUNT
+# define STATUS_PRENT4_MACHINE_ACCOUNT ((NTSTATUS) 0xC0000357L)
+#endif
+
+#ifndef STATUS_DS_AG_CANT_HAVE_UNIVERSAL_MEMBER
+# define STATUS_DS_AG_CANT_HAVE_UNIVERSAL_MEMBER ((NTSTATUS) 0xC0000358L)
+#endif
+
+#ifndef STATUS_INVALID_IMAGE_WIN_32
+# define STATUS_INVALID_IMAGE_WIN_32 ((NTSTATUS) 0xC0000359L)
+#endif
+
+#ifndef STATUS_INVALID_IMAGE_WIN_64
+# define STATUS_INVALID_IMAGE_WIN_64 ((NTSTATUS) 0xC000035AL)
+#endif
+
+#ifndef STATUS_BAD_BINDINGS
+# define STATUS_BAD_BINDINGS ((NTSTATUS) 0xC000035BL)
+#endif
+
+#ifndef STATUS_NETWORK_SESSION_EXPIRED
+# define STATUS_NETWORK_SESSION_EXPIRED ((NTSTATUS) 0xC000035CL)
+#endif
+
+#ifndef STATUS_APPHELP_BLOCK
+# define STATUS_APPHELP_BLOCK ((NTSTATUS) 0xC000035DL)
+#endif
+
+#ifndef STATUS_ALL_SIDS_FILTERED
+# define STATUS_ALL_SIDS_FILTERED ((NTSTATUS) 0xC000035EL)
+#endif
+
+#ifndef STATUS_NOT_SAFE_MODE_DRIVER
+# define STATUS_NOT_SAFE_MODE_DRIVER ((NTSTATUS) 0xC000035FL)
+#endif
+
+#ifndef STATUS_ACCESS_DISABLED_BY_POLICY_DEFAULT
+# define STATUS_ACCESS_DISABLED_BY_POLICY_DEFAULT ((NTSTATUS) 0xC0000361L)
+#endif
+
+#ifndef STATUS_ACCESS_DISABLED_BY_POLICY_PATH
+# define STATUS_ACCESS_DISABLED_BY_POLICY_PATH ((NTSTATUS) 0xC0000362L)
+#endif
+
+#ifndef STATUS_ACCESS_DISABLED_BY_POLICY_PUBLISHER
+# define STATUS_ACCESS_DISABLED_BY_POLICY_PUBLISHER ((NTSTATUS) 0xC0000363L)
+#endif
+
+#ifndef STATUS_ACCESS_DISABLED_BY_POLICY_OTHER
+# define STATUS_ACCESS_DISABLED_BY_POLICY_OTHER ((NTSTATUS) 0xC0000364L)
+#endif
+
+#ifndef STATUS_FAILED_DRIVER_ENTRY
+# define STATUS_FAILED_DRIVER_ENTRY ((NTSTATUS) 0xC0000365L)
+#endif
+
+#ifndef STATUS_DEVICE_ENUMERATION_ERROR
+# define STATUS_DEVICE_ENUMERATION_ERROR ((NTSTATUS) 0xC0000366L)
+#endif
+
+#ifndef STATUS_MOUNT_POINT_NOT_RESOLVED
+# define STATUS_MOUNT_POINT_NOT_RESOLVED ((NTSTATUS) 0xC0000368L)
+#endif
+
+#ifndef STATUS_INVALID_DEVICE_OBJECT_PARAMETER
+# define STATUS_INVALID_DEVICE_OBJECT_PARAMETER ((NTSTATUS) 0xC0000369L)
+#endif
+
+#ifndef STATUS_MCA_OCCURED
+# define STATUS_MCA_OCCURED ((NTSTATUS) 0xC000036AL)
+#endif
+
+#ifndef STATUS_DRIVER_BLOCKED_CRITICAL
+# define STATUS_DRIVER_BLOCKED_CRITICAL ((NTSTATUS) 0xC000036BL)
+#endif
+
+#ifndef STATUS_DRIVER_BLOCKED
+# define STATUS_DRIVER_BLOCKED ((NTSTATUS) 0xC000036CL)
+#endif
+
+#ifndef STATUS_DRIVER_DATABASE_ERROR
+# define STATUS_DRIVER_DATABASE_ERROR ((NTSTATUS) 0xC000036DL)
+#endif
+
+#ifndef STATUS_SYSTEM_HIVE_TOO_LARGE
+# define STATUS_SYSTEM_HIVE_TOO_LARGE ((NTSTATUS) 0xC000036EL)
+#endif
+
+#ifndef STATUS_INVALID_IMPORT_OF_NON_DLL
+# define STATUS_INVALID_IMPORT_OF_NON_DLL ((NTSTATUS) 0xC000036FL)
+#endif
+
+#ifndef STATUS_DS_SHUTTING_DOWN
+# define STATUS_DS_SHUTTING_DOWN ((NTSTATUS) 0x40000370L)
+#endif
+
+#ifndef STATUS_NO_SECRETS
+# define STATUS_NO_SECRETS ((NTSTATUS) 0xC0000371L)
+#endif
+
+#ifndef STATUS_ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY
+# define STATUS_ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY ((NTSTATUS) 0xC0000372L)
+#endif
+
+#ifndef STATUS_FAILED_STACK_SWITCH
+# define STATUS_FAILED_STACK_SWITCH ((NTSTATUS) 0xC0000373L)
+#endif
+
+#ifndef STATUS_HEAP_CORRUPTION
+# define STATUS_HEAP_CORRUPTION ((NTSTATUS) 0xC0000374L)
+#endif
+
+#ifndef STATUS_SMARTCARD_WRONG_PIN
+# define STATUS_SMARTCARD_WRONG_PIN ((NTSTATUS) 0xC0000380L)
+#endif
+
+#ifndef STATUS_SMARTCARD_CARD_BLOCKED
+# define STATUS_SMARTCARD_CARD_BLOCKED ((NTSTATUS) 0xC0000381L)
+#endif
+
+#ifndef STATUS_SMARTCARD_CARD_NOT_AUTHENTICATED
+# define STATUS_SMARTCARD_CARD_NOT_AUTHENTICATED ((NTSTATUS) 0xC0000382L)
+#endif
+
+#ifndef STATUS_SMARTCARD_NO_CARD
+# define STATUS_SMARTCARD_NO_CARD ((NTSTATUS) 0xC0000383L)
+#endif
+
+#ifndef STATUS_SMARTCARD_NO_KEY_CONTAINER
+# define STATUS_SMARTCARD_NO_KEY_CONTAINER ((NTSTATUS) 0xC0000384L)
+#endif
+
+#ifndef STATUS_SMARTCARD_NO_CERTIFICATE
+# define STATUS_SMARTCARD_NO_CERTIFICATE ((NTSTATUS) 0xC0000385L)
+#endif
+
+#ifndef STATUS_SMARTCARD_NO_KEYSET
+# define STATUS_SMARTCARD_NO_KEYSET ((NTSTATUS) 0xC0000386L)
+#endif
+
+#ifndef STATUS_SMARTCARD_IO_ERROR
+# define STATUS_SMARTCARD_IO_ERROR ((NTSTATUS) 0xC0000387L)
+#endif
+
+#ifndef STATUS_DOWNGRADE_DETECTED
+# define STATUS_DOWNGRADE_DETECTED ((NTSTATUS) 0xC0000388L)
+#endif
+
+#ifndef STATUS_SMARTCARD_CERT_REVOKED
+# define STATUS_SMARTCARD_CERT_REVOKED ((NTSTATUS) 0xC0000389L)
+#endif
+
+#ifndef STATUS_ISSUING_CA_UNTRUSTED
+# define STATUS_ISSUING_CA_UNTRUSTED ((NTSTATUS) 0xC000038AL)
+#endif
+
+#ifndef STATUS_REVOCATION_OFFLINE_C
+# define STATUS_REVOCATION_OFFLINE_C ((NTSTATUS) 0xC000038BL)
+#endif
+
+#ifndef STATUS_PKINIT_CLIENT_FAILURE
+# define STATUS_PKINIT_CLIENT_FAILURE ((NTSTATUS) 0xC000038CL)
+#endif
+
+#ifndef STATUS_SMARTCARD_CERT_EXPIRED
+# define STATUS_SMARTCARD_CERT_EXPIRED ((NTSTATUS) 0xC000038DL)
+#endif
+
+#ifndef STATUS_DRIVER_FAILED_PRIOR_UNLOAD
+# define STATUS_DRIVER_FAILED_PRIOR_UNLOAD ((NTSTATUS) 0xC000038EL)
+#endif
+
+#ifndef STATUS_SMARTCARD_SILENT_CONTEXT
+# define STATUS_SMARTCARD_SILENT_CONTEXT ((NTSTATUS) 0xC000038FL)
+#endif
+
+#ifndef STATUS_PER_USER_TRUST_QUOTA_EXCEEDED
+# define STATUS_PER_USER_TRUST_QUOTA_EXCEEDED ((NTSTATUS) 0xC0000401L)
+#endif
+
+#ifndef STATUS_ALL_USER_TRUST_QUOTA_EXCEEDED
+# define STATUS_ALL_USER_TRUST_QUOTA_EXCEEDED ((NTSTATUS) 0xC0000402L)
+#endif
+
+#ifndef STATUS_USER_DELETE_TRUST_QUOTA_EXCEEDED
+# define STATUS_USER_DELETE_TRUST_QUOTA_EXCEEDED ((NTSTATUS) 0xC0000403L)
+#endif
+
+#ifndef STATUS_DS_NAME_NOT_UNIQUE
+# define STATUS_DS_NAME_NOT_UNIQUE ((NTSTATUS) 0xC0000404L)
+#endif
+
+#ifndef STATUS_DS_DUPLICATE_ID_FOUND
+# define STATUS_DS_DUPLICATE_ID_FOUND ((NTSTATUS) 0xC0000405L)
+#endif
+
+#ifndef STATUS_DS_GROUP_CONVERSION_ERROR
+# define STATUS_DS_GROUP_CONVERSION_ERROR ((NTSTATUS) 0xC0000406L)
+#endif
+
+#ifndef STATUS_VOLSNAP_PREPARE_HIBERNATE
+# define STATUS_VOLSNAP_PREPARE_HIBERNATE ((NTSTATUS) 0xC0000407L)
+#endif
+
+#ifndef STATUS_USER2USER_REQUIRED
+# define STATUS_USER2USER_REQUIRED ((NTSTATUS) 0xC0000408L)
+#endif
+
+#ifndef STATUS_STACK_BUFFER_OVERRUN
+# define STATUS_STACK_BUFFER_OVERRUN ((NTSTATUS) 0xC0000409L)
+#endif
+
+#ifndef STATUS_NO_S4U_PROT_SUPPORT
+# define STATUS_NO_S4U_PROT_SUPPORT ((NTSTATUS) 0xC000040AL)
+#endif
+
+#ifndef STATUS_CROSSREALM_DELEGATION_FAILURE
+# define STATUS_CROSSREALM_DELEGATION_FAILURE ((NTSTATUS) 0xC000040BL)
+#endif
+
+#ifndef STATUS_REVOCATION_OFFLINE_KDC
+# define STATUS_REVOCATION_OFFLINE_KDC ((NTSTATUS) 0xC000040CL)
+#endif
+
+#ifndef STATUS_ISSUING_CA_UNTRUSTED_KDC
+# define STATUS_ISSUING_CA_UNTRUSTED_KDC ((NTSTATUS) 0xC000040DL)
+#endif
+
+#ifndef STATUS_KDC_CERT_EXPIRED
+# define STATUS_KDC_CERT_EXPIRED ((NTSTATUS) 0xC000040EL)
+#endif
+
+#ifndef STATUS_KDC_CERT_REVOKED
+# define STATUS_KDC_CERT_REVOKED ((NTSTATUS) 0xC000040FL)
+#endif
+
+#ifndef STATUS_PARAMETER_QUOTA_EXCEEDED
+# define STATUS_PARAMETER_QUOTA_EXCEEDED ((NTSTATUS) 0xC0000410L)
+#endif
+
+#ifndef STATUS_HIBERNATION_FAILURE
+# define STATUS_HIBERNATION_FAILURE ((NTSTATUS) 0xC0000411L)
+#endif
+
+#ifndef STATUS_DELAY_LOAD_FAILED
+# define STATUS_DELAY_LOAD_FAILED ((NTSTATUS) 0xC0000412L)
+#endif
+
+#ifndef STATUS_AUTHENTICATION_FIREWALL_FAILED
+# define STATUS_AUTHENTICATION_FIREWALL_FAILED ((NTSTATUS) 0xC0000413L)
+#endif
+
+#ifndef STATUS_VDM_DISALLOWED
+# define STATUS_VDM_DISALLOWED ((NTSTATUS) 0xC0000414L)
+#endif
+
+#ifndef STATUS_HUNG_DISPLAY_DRIVER_THREAD
+# define STATUS_HUNG_DISPLAY_DRIVER_THREAD ((NTSTATUS) 0xC0000415L)
+#endif
+
+#ifndef STATUS_INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE
+# define STATUS_INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE ((NTSTATUS) 0xC0000416L)
+#endif
+
+#ifndef STATUS_INVALID_CRUNTIME_PARAMETER
+# define STATUS_INVALID_CRUNTIME_PARAMETER ((NTSTATUS) 0xC0000417L)
+#endif
+
+#ifndef STATUS_NTLM_BLOCKED
+# define STATUS_NTLM_BLOCKED ((NTSTATUS) 0xC0000418L)
+#endif
+
+#ifndef STATUS_DS_SRC_SID_EXISTS_IN_FOREST
+# define STATUS_DS_SRC_SID_EXISTS_IN_FOREST ((NTSTATUS) 0xC0000419L)
+#endif
+
+#ifndef STATUS_DS_DOMAIN_NAME_EXISTS_IN_FOREST
+# define STATUS_DS_DOMAIN_NAME_EXISTS_IN_FOREST ((NTSTATUS) 0xC000041AL)
+#endif
+
+#ifndef STATUS_DS_FLAT_NAME_EXISTS_IN_FOREST
+# define STATUS_DS_FLAT_NAME_EXISTS_IN_FOREST ((NTSTATUS) 0xC000041BL)
+#endif
+
+#ifndef STATUS_INVALID_USER_PRINCIPAL_NAME
+# define STATUS_INVALID_USER_PRINCIPAL_NAME ((NTSTATUS) 0xC000041CL)
+#endif
+
+#ifndef STATUS_FATAL_USER_CALLBACK_EXCEPTION
+# define STATUS_FATAL_USER_CALLBACK_EXCEPTION ((NTSTATUS) 0xC000041DL)
+#endif
+
+#ifndef STATUS_ASSERTION_FAILURE
+# define STATUS_ASSERTION_FAILURE ((NTSTATUS) 0xC0000420L)
+#endif
+
+#ifndef STATUS_VERIFIER_STOP
+# define STATUS_VERIFIER_STOP ((NTSTATUS) 0xC0000421L)
+#endif
+
+#ifndef STATUS_CALLBACK_POP_STACK
+# define STATUS_CALLBACK_POP_STACK ((NTSTATUS) 0xC0000423L)
+#endif
+
+#ifndef STATUS_INCOMPATIBLE_DRIVER_BLOCKED
+# define STATUS_INCOMPATIBLE_DRIVER_BLOCKED ((NTSTATUS) 0xC0000424L)
+#endif
+
+#ifndef STATUS_HIVE_UNLOADED
+# define STATUS_HIVE_UNLOADED ((NTSTATUS) 0xC0000425L)
+#endif
+
+#ifndef STATUS_COMPRESSION_DISABLED
+# define STATUS_COMPRESSION_DISABLED ((NTSTATUS) 0xC0000426L)
+#endif
+
+#ifndef STATUS_FILE_SYSTEM_LIMITATION
+# define STATUS_FILE_SYSTEM_LIMITATION ((NTSTATUS) 0xC0000427L)
+#endif
+
+#ifndef STATUS_INVALID_IMAGE_HASH
+# define STATUS_INVALID_IMAGE_HASH ((NTSTATUS) 0xC0000428L)
+#endif
+
+#ifndef STATUS_NOT_CAPABLE
+# define STATUS_NOT_CAPABLE ((NTSTATUS) 0xC0000429L)
+#endif
+
+#ifndef STATUS_REQUEST_OUT_OF_SEQUENCE
+# define STATUS_REQUEST_OUT_OF_SEQUENCE ((NTSTATUS) 0xC000042AL)
+#endif
+
+#ifndef STATUS_IMPLEMENTATION_LIMIT
+# define STATUS_IMPLEMENTATION_LIMIT ((NTSTATUS) 0xC000042BL)
+#endif
+
+#ifndef STATUS_ELEVATION_REQUIRED
+# define STATUS_ELEVATION_REQUIRED ((NTSTATUS) 0xC000042CL)
+#endif
+
+#ifndef STATUS_NO_SECURITY_CONTEXT
+# define STATUS_NO_SECURITY_CONTEXT ((NTSTATUS) 0xC000042DL)
+#endif
+
+#ifndef STATUS_PKU2U_CERT_FAILURE
+# define STATUS_PKU2U_CERT_FAILURE ((NTSTATUS) 0xC000042FL)
+#endif
+
+#ifndef STATUS_BEYOND_VDL
+# define STATUS_BEYOND_VDL ((NTSTATUS) 0xC0000432L)
+#endif
+
+#ifndef STATUS_ENCOUNTERED_WRITE_IN_PROGRESS
+# define STATUS_ENCOUNTERED_WRITE_IN_PROGRESS ((NTSTATUS) 0xC0000433L)
+#endif
+
+#ifndef STATUS_PTE_CHANGED
+# define STATUS_PTE_CHANGED ((NTSTATUS) 0xC0000434L)
+#endif
+
+#ifndef STATUS_PURGE_FAILED
+# define STATUS_PURGE_FAILED ((NTSTATUS) 0xC0000435L)
+#endif
+
+#ifndef STATUS_CRED_REQUIRES_CONFIRMATION
+# define STATUS_CRED_REQUIRES_CONFIRMATION ((NTSTATUS) 0xC0000440L)
+#endif
+
+#ifndef STATUS_CS_ENCRYPTION_INVALID_SERVER_RESPONSE
+# define STATUS_CS_ENCRYPTION_INVALID_SERVER_RESPONSE ((NTSTATUS) 0xC0000441L)
+#endif
+
+#ifndef STATUS_CS_ENCRYPTION_UNSUPPORTED_SERVER
+# define STATUS_CS_ENCRYPTION_UNSUPPORTED_SERVER ((NTSTATUS) 0xC0000442L)
+#endif
+
+#ifndef STATUS_CS_ENCRYPTION_EXISTING_ENCRYPTED_FILE
+# define STATUS_CS_ENCRYPTION_EXISTING_ENCRYPTED_FILE ((NTSTATUS) 0xC0000443L)
+#endif
+
+#ifndef STATUS_CS_ENCRYPTION_NEW_ENCRYPTED_FILE
+# define STATUS_CS_ENCRYPTION_NEW_ENCRYPTED_FILE ((NTSTATUS) 0xC0000444L)
+#endif
+
+#ifndef STATUS_CS_ENCRYPTION_FILE_NOT_CSE
+# define STATUS_CS_ENCRYPTION_FILE_NOT_CSE ((NTSTATUS) 0xC0000445L)
+#endif
+
+#ifndef STATUS_INVALID_LABEL
+# define STATUS_INVALID_LABEL ((NTSTATUS) 0xC0000446L)
+#endif
+
+#ifndef STATUS_DRIVER_PROCESS_TERMINATED
+# define STATUS_DRIVER_PROCESS_TERMINATED ((NTSTATUS) 0xC0000450L)
+#endif
+
+#ifndef STATUS_AMBIGUOUS_SYSTEM_DEVICE
+# define STATUS_AMBIGUOUS_SYSTEM_DEVICE ((NTSTATUS) 0xC0000451L)
+#endif
+
+#ifndef STATUS_SYSTEM_DEVICE_NOT_FOUND
+# define STATUS_SYSTEM_DEVICE_NOT_FOUND ((NTSTATUS) 0xC0000452L)
+#endif
+
+#ifndef STATUS_RESTART_BOOT_APPLICATION
+# define STATUS_RESTART_BOOT_APPLICATION ((NTSTATUS) 0xC0000453L)
+#endif
+
+#ifndef STATUS_INSUFFICIENT_NVRAM_RESOURCES
+# define STATUS_INSUFFICIENT_NVRAM_RESOURCES ((NTSTATUS) 0xC0000454L)
+#endif
+
+#ifndef STATUS_INVALID_TASK_NAME
+# define STATUS_INVALID_TASK_NAME ((NTSTATUS) 0xC0000500L)
+#endif
+
+#ifndef STATUS_INVALID_TASK_INDEX
+# define STATUS_INVALID_TASK_INDEX ((NTSTATUS) 0xC0000501L)
+#endif
+
+#ifndef STATUS_THREAD_ALREADY_IN_TASK
+# define STATUS_THREAD_ALREADY_IN_TASK ((NTSTATUS) 0xC0000502L)
+#endif
+
+#ifndef STATUS_CALLBACK_BYPASS
+# define STATUS_CALLBACK_BYPASS ((NTSTATUS) 0xC0000503L)
+#endif
+
+#ifndef STATUS_FAIL_FAST_EXCEPTION
+# define STATUS_FAIL_FAST_EXCEPTION ((NTSTATUS) 0xC0000602L)
+#endif
+
+#ifndef STATUS_IMAGE_CERT_REVOKED
+# define STATUS_IMAGE_CERT_REVOKED ((NTSTATUS) 0xC0000603L)
+#endif
+
+#ifndef STATUS_PORT_CLOSED
+# define STATUS_PORT_CLOSED ((NTSTATUS) 0xC0000700L)
+#endif
+
+#ifndef STATUS_MESSAGE_LOST
+# define STATUS_MESSAGE_LOST ((NTSTATUS) 0xC0000701L)
+#endif
+
+#ifndef STATUS_INVALID_MESSAGE
+# define STATUS_INVALID_MESSAGE ((NTSTATUS) 0xC0000702L)
+#endif
+
+#ifndef STATUS_REQUEST_CANCELED
+# define STATUS_REQUEST_CANCELED ((NTSTATUS) 0xC0000703L)
+#endif
+
+#ifndef STATUS_RECURSIVE_DISPATCH
+# define STATUS_RECURSIVE_DISPATCH ((NTSTATUS) 0xC0000704L)
+#endif
+
+#ifndef STATUS_LPC_RECEIVE_BUFFER_EXPECTED
+# define STATUS_LPC_RECEIVE_BUFFER_EXPECTED ((NTSTATUS) 0xC0000705L)
+#endif
+
+#ifndef STATUS_LPC_INVALID_CONNECTION_USAGE
+# define STATUS_LPC_INVALID_CONNECTION_USAGE ((NTSTATUS) 0xC0000706L)
+#endif
+
+#ifndef STATUS_LPC_REQUESTS_NOT_ALLOWED
+# define STATUS_LPC_REQUESTS_NOT_ALLOWED ((NTSTATUS) 0xC0000707L)
+#endif
+
+#ifndef STATUS_RESOURCE_IN_USE
+# define STATUS_RESOURCE_IN_USE ((NTSTATUS) 0xC0000708L)
+#endif
+
+#ifndef STATUS_HARDWARE_MEMORY_ERROR
+# define STATUS_HARDWARE_MEMORY_ERROR ((NTSTATUS) 0xC0000709L)
+#endif
+
+#ifndef STATUS_THREADPOOL_HANDLE_EXCEPTION
+# define STATUS_THREADPOOL_HANDLE_EXCEPTION ((NTSTATUS) 0xC000070AL)
+#endif
+
+#ifndef STATUS_THREADPOOL_SET_EVENT_ON_COMPLETION_FAILED
+# define STATUS_THREADPOOL_SET_EVENT_ON_COMPLETION_FAILED ((NTSTATUS) 0xC000070BL)
+#endif
+
+#ifndef STATUS_THREADPOOL_RELEASE_SEMAPHORE_ON_COMPLETION_FAILED
+# define STATUS_THREADPOOL_RELEASE_SEMAPHORE_ON_COMPLETION_FAILED ((NTSTATUS) 0xC000070CL)
+#endif
+
+#ifndef STATUS_THREADPOOL_RELEASE_MUTEX_ON_COMPLETION_FAILED
+# define STATUS_THREADPOOL_RELEASE_MUTEX_ON_COMPLETION_FAILED ((NTSTATUS) 0xC000070DL)
+#endif
+
+#ifndef STATUS_THREADPOOL_FREE_LIBRARY_ON_COMPLETION_FAILED
+# define STATUS_THREADPOOL_FREE_LIBRARY_ON_COMPLETION_FAILED ((NTSTATUS) 0xC000070EL)
+#endif
+
+#ifndef STATUS_THREADPOOL_RELEASED_DURING_OPERATION
+# define STATUS_THREADPOOL_RELEASED_DURING_OPERATION ((NTSTATUS) 0xC000070FL)
+#endif
+
+#ifndef STATUS_CALLBACK_RETURNED_WHILE_IMPERSONATING
+# define STATUS_CALLBACK_RETURNED_WHILE_IMPERSONATING ((NTSTATUS) 0xC0000710L)
+#endif
+
+#ifndef STATUS_APC_RETURNED_WHILE_IMPERSONATING
+# define STATUS_APC_RETURNED_WHILE_IMPERSONATING ((NTSTATUS) 0xC0000711L)
+#endif
+
+#ifndef STATUS_PROCESS_IS_PROTECTED
+# define STATUS_PROCESS_IS_PROTECTED ((NTSTATUS) 0xC0000712L)
+#endif
+
+#ifndef STATUS_MCA_EXCEPTION
+# define STATUS_MCA_EXCEPTION ((NTSTATUS) 0xC0000713L)
+#endif
+
+#ifndef STATUS_CERTIFICATE_MAPPING_NOT_UNIQUE
+# define STATUS_CERTIFICATE_MAPPING_NOT_UNIQUE ((NTSTATUS) 0xC0000714L)
+#endif
+
+#ifndef STATUS_SYMLINK_CLASS_DISABLED
+# define STATUS_SYMLINK_CLASS_DISABLED ((NTSTATUS) 0xC0000715L)
+#endif
+
+#ifndef STATUS_INVALID_IDN_NORMALIZATION
+# define STATUS_INVALID_IDN_NORMALIZATION ((NTSTATUS) 0xC0000716L)
+#endif
+
+#ifndef STATUS_NO_UNICODE_TRANSLATION
+# define STATUS_NO_UNICODE_TRANSLATION ((NTSTATUS) 0xC0000717L)
+#endif
+
+#ifndef STATUS_ALREADY_REGISTERED
+# define STATUS_ALREADY_REGISTERED ((NTSTATUS) 0xC0000718L)
+#endif
+
+#ifndef STATUS_CONTEXT_MISMATCH
+# define STATUS_CONTEXT_MISMATCH ((NTSTATUS) 0xC0000719L)
+#endif
+
+#ifndef STATUS_PORT_ALREADY_HAS_COMPLETION_LIST
+# define STATUS_PORT_ALREADY_HAS_COMPLETION_LIST ((NTSTATUS) 0xC000071AL)
+#endif
+
+#ifndef STATUS_CALLBACK_RETURNED_THREAD_PRIORITY
+# define STATUS_CALLBACK_RETURNED_THREAD_PRIORITY ((NTSTATUS) 0xC000071BL)
+#endif
+
+#ifndef STATUS_INVALID_THREAD
+# define STATUS_INVALID_THREAD ((NTSTATUS) 0xC000071CL)
+#endif
+
+#ifndef STATUS_CALLBACK_RETURNED_TRANSACTION
+# define STATUS_CALLBACK_RETURNED_TRANSACTION ((NTSTATUS) 0xC000071DL)
+#endif
+
+#ifndef STATUS_CALLBACK_RETURNED_LDR_LOCK
+# define STATUS_CALLBACK_RETURNED_LDR_LOCK ((NTSTATUS) 0xC000071EL)
+#endif
+
+#ifndef STATUS_CALLBACK_RETURNED_LANG
+# define STATUS_CALLBACK_RETURNED_LANG ((NTSTATUS) 0xC000071FL)
+#endif
+
+#ifndef STATUS_CALLBACK_RETURNED_PRI_BACK
+# define STATUS_CALLBACK_RETURNED_PRI_BACK ((NTSTATUS) 0xC0000720L)
+#endif
+
+#ifndef STATUS_CALLBACK_RETURNED_THREAD_AFFINITY
+# define STATUS_CALLBACK_RETURNED_THREAD_AFFINITY ((NTSTATUS) 0xC0000721L)
+#endif
+
+#ifndef STATUS_DISK_REPAIR_DISABLED
+# define STATUS_DISK_REPAIR_DISABLED ((NTSTATUS) 0xC0000800L)
+#endif
+
+#ifndef STATUS_DS_DOMAIN_RENAME_IN_PROGRESS
+# define STATUS_DS_DOMAIN_RENAME_IN_PROGRESS ((NTSTATUS) 0xC0000801L)
+#endif
+
+#ifndef STATUS_DISK_QUOTA_EXCEEDED
+# define STATUS_DISK_QUOTA_EXCEEDED ((NTSTATUS) 0xC0000802L)
+#endif
+
+#ifndef STATUS_DATA_LOST_REPAIR
+# define STATUS_DATA_LOST_REPAIR ((NTSTATUS) 0x80000803L)
+#endif
+
+#ifndef STATUS_CONTENT_BLOCKED
+# define STATUS_CONTENT_BLOCKED ((NTSTATUS) 0xC0000804L)
+#endif
+
+#ifndef STATUS_BAD_CLUSTERS
+# define STATUS_BAD_CLUSTERS ((NTSTATUS) 0xC0000805L)
+#endif
+
+#ifndef STATUS_VOLUME_DIRTY
+# define STATUS_VOLUME_DIRTY ((NTSTATUS) 0xC0000806L)
+#endif
+
+#ifndef STATUS_FILE_CHECKED_OUT
+# define STATUS_FILE_CHECKED_OUT ((NTSTATUS) 0xC0000901L)
+#endif
+
+#ifndef STATUS_CHECKOUT_REQUIRED
+# define STATUS_CHECKOUT_REQUIRED ((NTSTATUS) 0xC0000902L)
+#endif
+
+#ifndef STATUS_BAD_FILE_TYPE
+# define STATUS_BAD_FILE_TYPE ((NTSTATUS) 0xC0000903L)
+#endif
+
+#ifndef STATUS_FILE_TOO_LARGE
+# define STATUS_FILE_TOO_LARGE ((NTSTATUS) 0xC0000904L)
+#endif
+
+#ifndef STATUS_FORMS_AUTH_REQUIRED
+# define STATUS_FORMS_AUTH_REQUIRED ((NTSTATUS) 0xC0000905L)
+#endif
+
+#ifndef STATUS_VIRUS_INFECTED
+# define STATUS_VIRUS_INFECTED ((NTSTATUS) 0xC0000906L)
+#endif
+
+#ifndef STATUS_VIRUS_DELETED
+# define STATUS_VIRUS_DELETED ((NTSTATUS) 0xC0000907L)
+#endif
+
+#ifndef STATUS_BAD_MCFG_TABLE
+# define STATUS_BAD_MCFG_TABLE ((NTSTATUS) 0xC0000908L)
+#endif
+
+#ifndef STATUS_CANNOT_BREAK_OPLOCK
+# define STATUS_CANNOT_BREAK_OPLOCK ((NTSTATUS) 0xC0000909L)
+#endif
+
+#ifndef STATUS_WOW_ASSERTION
+# define STATUS_WOW_ASSERTION ((NTSTATUS) 0xC0009898L)
+#endif
+
+#ifndef STATUS_INVALID_SIGNATURE
+# define STATUS_INVALID_SIGNATURE ((NTSTATUS) 0xC000A000L)
+#endif
+
+#ifndef STATUS_HMAC_NOT_SUPPORTED
+# define STATUS_HMAC_NOT_SUPPORTED ((NTSTATUS) 0xC000A001L)
+#endif
+
+#ifndef STATUS_AUTH_TAG_MISMATCH
+# define STATUS_AUTH_TAG_MISMATCH ((NTSTATUS) 0xC000A002L)
+#endif
+
+#ifndef STATUS_IPSEC_QUEUE_OVERFLOW
+# define STATUS_IPSEC_QUEUE_OVERFLOW ((NTSTATUS) 0xC000A010L)
+#endif
+
+#ifndef STATUS_ND_QUEUE_OVERFLOW
+# define STATUS_ND_QUEUE_OVERFLOW ((NTSTATUS) 0xC000A011L)
+#endif
+
+#ifndef STATUS_HOPLIMIT_EXCEEDED
+# define STATUS_HOPLIMIT_EXCEEDED ((NTSTATUS) 0xC000A012L)
+#endif
+
+#ifndef STATUS_PROTOCOL_NOT_SUPPORTED
+# define STATUS_PROTOCOL_NOT_SUPPORTED ((NTSTATUS) 0xC000A013L)
+#endif
+
+#ifndef STATUS_FASTPATH_REJECTED
+# define STATUS_FASTPATH_REJECTED ((NTSTATUS) 0xC000A014L)
+#endif
+
+#ifndef STATUS_LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED
+# define STATUS_LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED ((NTSTATUS) 0xC000A080L)
+#endif
+
+#ifndef STATUS_LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR
+# define STATUS_LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR ((NTSTATUS) 0xC000A081L)
+#endif
+
+#ifndef STATUS_LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR
+# define STATUS_LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR ((NTSTATUS) 0xC000A082L)
+#endif
+
+#ifndef STATUS_XML_PARSE_ERROR
+# define STATUS_XML_PARSE_ERROR ((NTSTATUS) 0xC000A083L)
+#endif
+
+#ifndef STATUS_XMLDSIG_ERROR
+# define STATUS_XMLDSIG_ERROR ((NTSTATUS) 0xC000A084L)
+#endif
+
+#ifndef STATUS_WRONG_COMPARTMENT
+# define STATUS_WRONG_COMPARTMENT ((NTSTATUS) 0xC000A085L)
+#endif
+
+#ifndef STATUS_AUTHIP_FAILURE
+# define STATUS_AUTHIP_FAILURE ((NTSTATUS) 0xC000A086L)
+#endif
+
+#ifndef STATUS_DS_OID_MAPPED_GROUP_CANT_HAVE_MEMBERS
+# define STATUS_DS_OID_MAPPED_GROUP_CANT_HAVE_MEMBERS ((NTSTATUS) 0xC000A087L)
+#endif
+
+#ifndef STATUS_DS_OID_NOT_FOUND
+# define STATUS_DS_OID_NOT_FOUND ((NTSTATUS) 0xC000A088L)
+#endif
+
+#ifndef STATUS_HASH_NOT_SUPPORTED
+# define STATUS_HASH_NOT_SUPPORTED ((NTSTATUS) 0xC000A100L)
+#endif
+
+#ifndef STATUS_HASH_NOT_PRESENT
+# define STATUS_HASH_NOT_PRESENT ((NTSTATUS) 0xC000A101L)
+#endif
+
+/* This is not the NTSTATUS_FROM_WIN32 that the DDK provides, because the DDK
+ * got it wrong! */
+#ifdef NTSTATUS_FROM_WIN32
+# undef NTSTATUS_FROM_WIN32
+#endif
+#define NTSTATUS_FROM_WIN32(error) ((NTSTATUS) (error) <= 0 ? \
+ ((NTSTATUS) (error)) : ((NTSTATUS) (((error) & 0x0000FFFF) | \
+ (FACILITY_NTWIN32 << 16) | ERROR_SEVERITY_WARNING)))
+
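+/* Worked example (illustrative, not part of the original source): for a plain
+ * Win32 code such as ERROR_ACCESS_DENIED (5), the macro above yields
+ *   (5 & 0x0000FFFF) | (FACILITY_NTWIN32 << 16) | ERROR_SEVERITY_WARNING
+ *   = 0x00000005 | 0x00070000 | 0x80000000 = 0x80070005,
+ * i.e. a warning-severity NTSTATUS in the NTWIN32 facility. Values that are
+ * already zero or negative (already carrying a warning/error severity) pass
+ * through unchanged.
+ */
+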
+#ifndef JOB_OBJECT_LIMIT_PROCESS_MEMORY
+# define JOB_OBJECT_LIMIT_PROCESS_MEMORY 0x00000100
+#endif
+#ifndef JOB_OBJECT_LIMIT_JOB_MEMORY
+# define JOB_OBJECT_LIMIT_JOB_MEMORY 0x00000200
+#endif
+#ifndef JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION
+# define JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION 0x00000400
+#endif
+#ifndef JOB_OBJECT_LIMIT_BREAKAWAY_OK
+# define JOB_OBJECT_LIMIT_BREAKAWAY_OK 0x00000800
+#endif
+#ifndef JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK
+# define JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK 0x00001000
+#endif
+#ifndef JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
+# define JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE 0x00002000
+#endif
+
+#ifndef SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE
+# define SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE 0x00000002
+#endif
+
+/* from winternl.h */
+#if !defined(__UNICODE_STRING_DEFINED) && defined(__MINGW32__)
+#define __UNICODE_STRING_DEFINED
+#endif
+typedef struct _UNICODE_STRING {
+ USHORT Length;
+ USHORT MaximumLength;
+ PWSTR Buffer;
+} UNICODE_STRING, *PUNICODE_STRING;
+
+typedef const UNICODE_STRING *PCUNICODE_STRING;
+
+/* from ntifs.h */
+#ifndef DEVICE_TYPE
+# define DEVICE_TYPE DWORD
+#endif
+
+#ifndef VOLUME_NAME_DOS
+# define VOLUME_NAME_DOS 0x0
+#endif
+
+#ifndef MAPVK_VK_TO_VSC
+# define MAPVK_VK_TO_VSC (0)
+#endif
+
+/* MinGW already has a definition for REPARSE_DATA_BUFFER, but mingw-w64 does
+ * not.
+ */
+#if defined(_MSC_VER) || defined(__MINGW64_VERSION_MAJOR)
+ typedef struct _REPARSE_DATA_BUFFER {
+ ULONG ReparseTag;
+ USHORT ReparseDataLength;
+ USHORT Reserved;
+ union {
+ struct {
+ USHORT SubstituteNameOffset;
+ USHORT SubstituteNameLength;
+ USHORT PrintNameOffset;
+ USHORT PrintNameLength;
+ ULONG Flags;
+ WCHAR PathBuffer[1];
+ } SymbolicLinkReparseBuffer;
+ struct {
+ USHORT SubstituteNameOffset;
+ USHORT SubstituteNameLength;
+ USHORT PrintNameOffset;
+ USHORT PrintNameLength;
+ WCHAR PathBuffer[1];
+ } MountPointReparseBuffer;
+ struct {
+ UCHAR DataBuffer[1];
+ } GenericReparseBuffer;
+ struct {
+ ULONG StringCount;
+ WCHAR StringList[1];
+ } AppExecLinkReparseBuffer;
+ };
+ } REPARSE_DATA_BUFFER, *PREPARSE_DATA_BUFFER;
+#endif
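+
+/* Illustrative note (not part of the original header): the Offset and Length
+ * fields above are byte counts into PathBuffer, so the substitute name of a
+ * symlink reparse point would typically be read as
+ *
+ *   WCHAR* name = rdb->SymbolicLinkReparseBuffer.PathBuffer +
+ *       rdb->SymbolicLinkReparseBuffer.SubstituteNameOffset / sizeof(WCHAR);
+ *   USHORT name_len = (USHORT)
+ *       (rdb->SymbolicLinkReparseBuffer.SubstituteNameLength / sizeof(WCHAR));
+ *
+ * where `rdb` is a hypothetical pointer to a buffer filled in by
+ * FSCTL_GET_REPARSE_POINT.
+ */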
+
+typedef struct _IO_STATUS_BLOCK {
+ union {
+ NTSTATUS Status;
+ PVOID Pointer;
+ };
+ ULONG_PTR Information;
+} IO_STATUS_BLOCK, *PIO_STATUS_BLOCK;
+
+typedef enum _FILE_INFORMATION_CLASS {
+ FileDirectoryInformation = 1,
+ FileFullDirectoryInformation,
+ FileBothDirectoryInformation,
+ FileBasicInformation,
+ FileStandardInformation,
+ FileInternalInformation,
+ FileEaInformation,
+ FileAccessInformation,
+ FileNameInformation,
+ FileRenameInformation,
+ FileLinkInformation,
+ FileNamesInformation,
+ FileDispositionInformation,
+ FilePositionInformation,
+ FileFullEaInformation,
+ FileModeInformation,
+ FileAlignmentInformation,
+ FileAllInformation,
+ FileAllocationInformation,
+ FileEndOfFileInformation,
+ FileAlternateNameInformation,
+ FileStreamInformation,
+ FilePipeInformation,
+ FilePipeLocalInformation,
+ FilePipeRemoteInformation,
+ FileMailslotQueryInformation,
+ FileMailslotSetInformation,
+ FileCompressionInformation,
+ FileObjectIdInformation,
+ FileCompletionInformation,
+ FileMoveClusterInformation,
+ FileQuotaInformation,
+ FileReparsePointInformation,
+ FileNetworkOpenInformation,
+ FileAttributeTagInformation,
+ FileTrackingInformation,
+ FileIdBothDirectoryInformation,
+ FileIdFullDirectoryInformation,
+ FileValidDataLengthInformation,
+ FileShortNameInformation,
+ FileIoCompletionNotificationInformation,
+ FileIoStatusBlockRangeInformation,
+ FileIoPriorityHintInformation,
+ FileSfioReserveInformation,
+ FileSfioVolumeInformation,
+ FileHardLinkInformation,
+ FileProcessIdsUsingFileInformation,
+ FileNormalizedNameInformation,
+ FileNetworkPhysicalNameInformation,
+ FileIdGlobalTxDirectoryInformation,
+ FileIsRemoteDeviceInformation,
+ FileAttributeCacheInformation,
+ FileNumaNodeInformation,
+ FileStandardLinkInformation,
+ FileRemoteProtocolInformation,
+ FileMaximumInformation
+} FILE_INFORMATION_CLASS, *PFILE_INFORMATION_CLASS;
+
+typedef struct _FILE_DIRECTORY_INFORMATION {
+ ULONG NextEntryOffset;
+ ULONG FileIndex;
+ LARGE_INTEGER CreationTime;
+ LARGE_INTEGER LastAccessTime;
+ LARGE_INTEGER LastWriteTime;
+ LARGE_INTEGER ChangeTime;
+ LARGE_INTEGER EndOfFile;
+ LARGE_INTEGER AllocationSize;
+ ULONG FileAttributes;
+ ULONG FileNameLength;
+ WCHAR FileName[1];
+} FILE_DIRECTORY_INFORMATION, *PFILE_DIRECTORY_INFORMATION;
+
+typedef struct _FILE_BOTH_DIR_INFORMATION {
+ ULONG NextEntryOffset;
+ ULONG FileIndex;
+ LARGE_INTEGER CreationTime;
+ LARGE_INTEGER LastAccessTime;
+ LARGE_INTEGER LastWriteTime;
+ LARGE_INTEGER ChangeTime;
+ LARGE_INTEGER EndOfFile;
+ LARGE_INTEGER AllocationSize;
+ ULONG FileAttributes;
+ ULONG FileNameLength;
+ ULONG EaSize;
+ CCHAR ShortNameLength;
+ WCHAR ShortName[12];
+ WCHAR FileName[1];
+} FILE_BOTH_DIR_INFORMATION, *PFILE_BOTH_DIR_INFORMATION;
+
+typedef struct _FILE_BASIC_INFORMATION {
+ LARGE_INTEGER CreationTime;
+ LARGE_INTEGER LastAccessTime;
+ LARGE_INTEGER LastWriteTime;
+ LARGE_INTEGER ChangeTime;
+ DWORD FileAttributes;
+} FILE_BASIC_INFORMATION, *PFILE_BASIC_INFORMATION;
+
+typedef struct _FILE_STANDARD_INFORMATION {
+ LARGE_INTEGER AllocationSize;
+ LARGE_INTEGER EndOfFile;
+ ULONG NumberOfLinks;
+ BOOLEAN DeletePending;
+ BOOLEAN Directory;
+} FILE_STANDARD_INFORMATION, *PFILE_STANDARD_INFORMATION;
+
+typedef struct _FILE_INTERNAL_INFORMATION {
+ LARGE_INTEGER IndexNumber;
+} FILE_INTERNAL_INFORMATION, *PFILE_INTERNAL_INFORMATION;
+
+typedef struct _FILE_EA_INFORMATION {
+ ULONG EaSize;
+} FILE_EA_INFORMATION, *PFILE_EA_INFORMATION;
+
+typedef struct _FILE_ACCESS_INFORMATION {
+ ACCESS_MASK AccessFlags;
+} FILE_ACCESS_INFORMATION, *PFILE_ACCESS_INFORMATION;
+
+typedef struct _FILE_POSITION_INFORMATION {
+ LARGE_INTEGER CurrentByteOffset;
+} FILE_POSITION_INFORMATION, *PFILE_POSITION_INFORMATION;
+
+typedef struct _FILE_MODE_INFORMATION {
+ ULONG Mode;
+} FILE_MODE_INFORMATION, *PFILE_MODE_INFORMATION;
+
+typedef struct _FILE_ALIGNMENT_INFORMATION {
+ ULONG AlignmentRequirement;
+} FILE_ALIGNMENT_INFORMATION, *PFILE_ALIGNMENT_INFORMATION;
+
+typedef struct _FILE_NAME_INFORMATION {
+ ULONG FileNameLength;
+ WCHAR FileName[1];
+} FILE_NAME_INFORMATION, *PFILE_NAME_INFORMATION;
+
+typedef struct _FILE_END_OF_FILE_INFORMATION {
+ LARGE_INTEGER EndOfFile;
+} FILE_END_OF_FILE_INFORMATION, *PFILE_END_OF_FILE_INFORMATION;
+
+typedef struct _FILE_ALL_INFORMATION {
+ FILE_BASIC_INFORMATION BasicInformation;
+ FILE_STANDARD_INFORMATION StandardInformation;
+ FILE_INTERNAL_INFORMATION InternalInformation;
+ FILE_EA_INFORMATION EaInformation;
+ FILE_ACCESS_INFORMATION AccessInformation;
+ FILE_POSITION_INFORMATION PositionInformation;
+ FILE_MODE_INFORMATION ModeInformation;
+ FILE_ALIGNMENT_INFORMATION AlignmentInformation;
+ FILE_NAME_INFORMATION NameInformation;
+} FILE_ALL_INFORMATION, *PFILE_ALL_INFORMATION;
+
+typedef struct _FILE_DISPOSITION_INFORMATION {
+ BOOLEAN DeleteFile;
+} FILE_DISPOSITION_INFORMATION, *PFILE_DISPOSITION_INFORMATION;
+
+typedef struct _FILE_PIPE_LOCAL_INFORMATION {
+ ULONG NamedPipeType;
+ ULONG NamedPipeConfiguration;
+ ULONG MaximumInstances;
+ ULONG CurrentInstances;
+ ULONG InboundQuota;
+ ULONG ReadDataAvailable;
+ ULONG OutboundQuota;
+ ULONG WriteQuotaAvailable;
+ ULONG NamedPipeState;
+ ULONG NamedPipeEnd;
+} FILE_PIPE_LOCAL_INFORMATION, *PFILE_PIPE_LOCAL_INFORMATION;
+
+#define FILE_SYNCHRONOUS_IO_ALERT 0x00000010
+#define FILE_SYNCHRONOUS_IO_NONALERT 0x00000020
+
+typedef enum _FS_INFORMATION_CLASS {
+ FileFsVolumeInformation = 1,
+ FileFsLabelInformation = 2,
+ FileFsSizeInformation = 3,
+ FileFsDeviceInformation = 4,
+ FileFsAttributeInformation = 5,
+ FileFsControlInformation = 6,
+ FileFsFullSizeInformation = 7,
+ FileFsObjectIdInformation = 8,
+ FileFsDriverPathInformation = 9,
+ FileFsVolumeFlagsInformation = 10,
+ FileFsSectorSizeInformation = 11
+} FS_INFORMATION_CLASS, *PFS_INFORMATION_CLASS;
+
+typedef struct _FILE_FS_VOLUME_INFORMATION {
+ LARGE_INTEGER VolumeCreationTime;
+ ULONG VolumeSerialNumber;
+ ULONG VolumeLabelLength;
+ BOOLEAN SupportsObjects;
+ WCHAR VolumeLabel[1];
+} FILE_FS_VOLUME_INFORMATION, *PFILE_FS_VOLUME_INFORMATION;
+
+typedef struct _FILE_FS_LABEL_INFORMATION {
+ ULONG VolumeLabelLength;
+ WCHAR VolumeLabel[1];
+} FILE_FS_LABEL_INFORMATION, *PFILE_FS_LABEL_INFORMATION;
+
+typedef struct _FILE_FS_SIZE_INFORMATION {
+ LARGE_INTEGER TotalAllocationUnits;
+ LARGE_INTEGER AvailableAllocationUnits;
+ ULONG SectorsPerAllocationUnit;
+ ULONG BytesPerSector;
+} FILE_FS_SIZE_INFORMATION, *PFILE_FS_SIZE_INFORMATION;
+
+typedef struct _FILE_FS_DEVICE_INFORMATION {
+ DEVICE_TYPE DeviceType;
+ ULONG Characteristics;
+} FILE_FS_DEVICE_INFORMATION, *PFILE_FS_DEVICE_INFORMATION;
+
+typedef struct _FILE_FS_ATTRIBUTE_INFORMATION {
+ ULONG FileSystemAttributes;
+ LONG MaximumComponentNameLength;
+ ULONG FileSystemNameLength;
+ WCHAR FileSystemName[1];
+} FILE_FS_ATTRIBUTE_INFORMATION, *PFILE_FS_ATTRIBUTE_INFORMATION;
+
+typedef struct _FILE_FS_CONTROL_INFORMATION {
+ LARGE_INTEGER FreeSpaceStartFiltering;
+ LARGE_INTEGER FreeSpaceThreshold;
+ LARGE_INTEGER FreeSpaceStopFiltering;
+ LARGE_INTEGER DefaultQuotaThreshold;
+ LARGE_INTEGER DefaultQuotaLimit;
+ ULONG FileSystemControlFlags;
+} FILE_FS_CONTROL_INFORMATION, *PFILE_FS_CONTROL_INFORMATION;
+
+typedef struct _FILE_FS_FULL_SIZE_INFORMATION {
+ LARGE_INTEGER TotalAllocationUnits;
+ LARGE_INTEGER CallerAvailableAllocationUnits;
+ LARGE_INTEGER ActualAvailableAllocationUnits;
+ ULONG SectorsPerAllocationUnit;
+ ULONG BytesPerSector;
+} FILE_FS_FULL_SIZE_INFORMATION, *PFILE_FS_FULL_SIZE_INFORMATION;
+
+typedef struct _FILE_FS_OBJECTID_INFORMATION {
+ UCHAR ObjectId[16];
+ UCHAR ExtendedInfo[48];
+} FILE_FS_OBJECTID_INFORMATION, *PFILE_FS_OBJECTID_INFORMATION;
+
+typedef struct _FILE_FS_DRIVER_PATH_INFORMATION {
+ BOOLEAN DriverInPath;
+ ULONG DriverNameLength;
+ WCHAR DriverName[1];
+} FILE_FS_DRIVER_PATH_INFORMATION, *PFILE_FS_DRIVER_PATH_INFORMATION;
+
+typedef struct _FILE_FS_VOLUME_FLAGS_INFORMATION {
+ ULONG Flags;
+} FILE_FS_VOLUME_FLAGS_INFORMATION, *PFILE_FS_VOLUME_FLAGS_INFORMATION;
+
+typedef struct _FILE_FS_SECTOR_SIZE_INFORMATION {
+ ULONG LogicalBytesPerSector;
+ ULONG PhysicalBytesPerSectorForAtomicity;
+ ULONG PhysicalBytesPerSectorForPerformance;
+ ULONG FileSystemEffectivePhysicalBytesPerSectorForAtomicity;
+ ULONG Flags;
+ ULONG ByteOffsetForSectorAlignment;
+ ULONG ByteOffsetForPartitionAlignment;
+} FILE_FS_SECTOR_SIZE_INFORMATION, *PFILE_FS_SECTOR_SIZE_INFORMATION;
+
+typedef struct _SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION {
+ LARGE_INTEGER IdleTime;
+ LARGE_INTEGER KernelTime;
+ LARGE_INTEGER UserTime;
+ LARGE_INTEGER DpcTime;
+ LARGE_INTEGER InterruptTime;
+ ULONG InterruptCount;
+} SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION, *PSYSTEM_PROCESSOR_PERFORMANCE_INFORMATION;
+
+#ifndef SystemProcessorPerformanceInformation
+# define SystemProcessorPerformanceInformation 8
+#endif
+
+#ifndef ProcessConsoleHostProcess
+# define ProcessConsoleHostProcess 49
+#endif
+
+#ifndef FILE_DEVICE_FILE_SYSTEM
+# define FILE_DEVICE_FILE_SYSTEM 0x00000009
+#endif
+
+#ifndef FILE_DEVICE_NETWORK
+# define FILE_DEVICE_NETWORK 0x00000012
+#endif
+
+#ifndef METHOD_BUFFERED
+# define METHOD_BUFFERED 0
+#endif
+
+#ifndef METHOD_IN_DIRECT
+# define METHOD_IN_DIRECT 1
+#endif
+
+#ifndef METHOD_OUT_DIRECT
+# define METHOD_OUT_DIRECT 2
+#endif
+
+#ifndef METHOD_NEITHER
+# define METHOD_NEITHER 3
+#endif
+
+#ifndef METHOD_DIRECT_TO_HARDWARE
+# define METHOD_DIRECT_TO_HARDWARE METHOD_IN_DIRECT
+#endif
+
+#ifndef METHOD_DIRECT_FROM_HARDWARE
+# define METHOD_DIRECT_FROM_HARDWARE METHOD_OUT_DIRECT
+#endif
+
+#ifndef FILE_ANY_ACCESS
+# define FILE_ANY_ACCESS 0
+#endif
+
+#ifndef FILE_SPECIAL_ACCESS
+# define FILE_SPECIAL_ACCESS (FILE_ANY_ACCESS)
+#endif
+
+#ifndef FILE_READ_ACCESS
+# define FILE_READ_ACCESS 0x0001
+#endif
+
+#ifndef FILE_WRITE_ACCESS
+# define FILE_WRITE_ACCESS 0x0002
+#endif
+
+#ifndef CTL_CODE
+# define CTL_CODE(device_type, function, method, access) \
+ (((device_type) << 16) | ((access) << 14) | ((function) << 2) | (method))
+#endif
+
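+/* Worked example (illustrative, not from the original source): with the
+ * fallback values above, FSCTL_GET_REPARSE_POINT defined below expands to
+ *   (FILE_DEVICE_FILE_SYSTEM << 16) | (FILE_ANY_ACCESS << 14) |
+ *       (42 << 2) | METHOD_BUFFERED
+ *   = 0x00090000 | 0 | 0x000000A8 | 0 = 0x000900A8,
+ * the conventional value for this control code.
+ */
+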
+#ifndef FSCTL_SET_REPARSE_POINT
+# define FSCTL_SET_REPARSE_POINT CTL_CODE(FILE_DEVICE_FILE_SYSTEM, \
+ 41, \
+ METHOD_BUFFERED, \
+ FILE_SPECIAL_ACCESS)
+#endif
+
+#ifndef FSCTL_GET_REPARSE_POINT
+# define FSCTL_GET_REPARSE_POINT CTL_CODE(FILE_DEVICE_FILE_SYSTEM, \
+ 42, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+#endif
+
+#ifndef FSCTL_DELETE_REPARSE_POINT
+# define FSCTL_DELETE_REPARSE_POINT CTL_CODE(FILE_DEVICE_FILE_SYSTEM, \
+ 43, \
+ METHOD_BUFFERED, \
+ FILE_SPECIAL_ACCESS)
+#endif
+
+#ifndef IO_REPARSE_TAG_SYMLINK
+# define IO_REPARSE_TAG_SYMLINK (0xA000000CL)
+#endif
+#ifndef IO_REPARSE_TAG_APPEXECLINK
+# define IO_REPARSE_TAG_APPEXECLINK (0x8000001BL)
+#endif
+
+typedef VOID (NTAPI *PIO_APC_ROUTINE)
+ (PVOID ApcContext,
+ PIO_STATUS_BLOCK IoStatusBlock,
+ ULONG Reserved);
+
+typedef NTSTATUS (NTAPI *sRtlGetVersion)
+ (PRTL_OSVERSIONINFOW lpVersionInformation);
+
+typedef ULONG (NTAPI *sRtlNtStatusToDosError)
+ (NTSTATUS Status);
+
+typedef NTSTATUS (NTAPI *sNtDeviceIoControlFile)
+ (HANDLE FileHandle,
+ HANDLE Event,
+ PIO_APC_ROUTINE ApcRoutine,
+ PVOID ApcContext,
+ PIO_STATUS_BLOCK IoStatusBlock,
+ ULONG IoControlCode,
+ PVOID InputBuffer,
+ ULONG InputBufferLength,
+ PVOID OutputBuffer,
+ ULONG OutputBufferLength);
+
+typedef NTSTATUS (NTAPI *sNtQueryInformationFile)
+ (HANDLE FileHandle,
+ PIO_STATUS_BLOCK IoStatusBlock,
+ PVOID FileInformation,
+ ULONG Length,
+ FILE_INFORMATION_CLASS FileInformationClass);
+
+typedef NTSTATUS (NTAPI *sNtSetInformationFile)
+ (HANDLE FileHandle,
+ PIO_STATUS_BLOCK IoStatusBlock,
+ PVOID FileInformation,
+ ULONG Length,
+ FILE_INFORMATION_CLASS FileInformationClass);
+
+typedef NTSTATUS (NTAPI *sNtQueryVolumeInformationFile)
+ (HANDLE FileHandle,
+ PIO_STATUS_BLOCK IoStatusBlock,
+ PVOID FsInformation,
+ ULONG Length,
+ FS_INFORMATION_CLASS FsInformationClass);
+
+typedef NTSTATUS (NTAPI *sNtQuerySystemInformation)
+ (UINT SystemInformationClass,
+ PVOID SystemInformation,
+ ULONG SystemInformationLength,
+ PULONG ReturnLength);
+
+typedef NTSTATUS (NTAPI *sNtQueryDirectoryFile)
+ (HANDLE FileHandle,
+ HANDLE Event,
+ PIO_APC_ROUTINE ApcRoutine,
+ PVOID ApcContext,
+ PIO_STATUS_BLOCK IoStatusBlock,
+ PVOID FileInformation,
+ ULONG Length,
+ FILE_INFORMATION_CLASS FileInformationClass,
+ BOOLEAN ReturnSingleEntry,
+ PUNICODE_STRING FileName,
+ BOOLEAN RestartScan
+ );
+
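+/* Illustrative only (not part of the original header): a minimal synchronous
+ * directory scan through a resolved pNtQueryDirectoryFile pointer might look
+ * roughly like this, assuming `dir` is a HANDLE opened for FILE_LIST_DIRECTORY
+ * with synchronous I/O (names and buffer size here are hypothetical):
+ *
+ *   IO_STATUS_BLOCK iosb;
+ *   char buf[8192];
+ *   NTSTATUS status = pNtQueryDirectoryFile(dir, NULL, NULL, NULL, &iosb,
+ *                                           buf, sizeof(buf),
+ *                                           FileDirectoryInformation,
+ *                                           FALSE, NULL, TRUE);
+ *
+ * On success, `buf` holds FILE_DIRECTORY_INFORMATION records chained by
+ * NextEntryOffset (0 marks the last record).
+ */
+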
+typedef NTSTATUS (NTAPI *sNtQueryInformationProcess)
+ (HANDLE ProcessHandle,
+ UINT ProcessInformationClass,
+ PVOID ProcessInformation,
+ ULONG Length,
+ PULONG ReturnLength);
+
+/*
+ * Kernel32 headers
+ */
+#ifndef FILE_SKIP_COMPLETION_PORT_ON_SUCCESS
+# define FILE_SKIP_COMPLETION_PORT_ON_SUCCESS 0x1
+#endif
+
+#ifndef FILE_SKIP_SET_EVENT_ON_HANDLE
+# define FILE_SKIP_SET_EVENT_ON_HANDLE 0x2
+#endif
+
+#ifndef SYMBOLIC_LINK_FLAG_DIRECTORY
+# define SYMBOLIC_LINK_FLAG_DIRECTORY 0x1
+#endif
+
+#if (defined(__MINGW32__) && !defined(__MINGW64_VERSION_MAJOR)) \
+ || (defined(_MSC_VER) && _MSC_VER < 1500)
+ typedef struct _OVERLAPPED_ENTRY {
+ ULONG_PTR lpCompletionKey;
+ LPOVERLAPPED lpOverlapped;
+ ULONG_PTR Internal;
+ DWORD dwNumberOfBytesTransferred;
+ } OVERLAPPED_ENTRY, *LPOVERLAPPED_ENTRY;
+#endif
+
+/* from wincon.h */
+#ifndef ENABLE_INSERT_MODE
+# define ENABLE_INSERT_MODE 0x20
+#endif
+
+#ifndef ENABLE_QUICK_EDIT_MODE
+# define ENABLE_QUICK_EDIT_MODE 0x40
+#endif
+
+#ifndef ENABLE_EXTENDED_FLAGS
+# define ENABLE_EXTENDED_FLAGS 0x80
+#endif
+
+/* from winerror.h */
+#ifndef ERROR_ELEVATION_REQUIRED
+# define ERROR_ELEVATION_REQUIRED 740
+#endif
+
+#ifndef ERROR_SYMLINK_NOT_SUPPORTED
+# define ERROR_SYMLINK_NOT_SUPPORTED 1464
+#endif
+
+#ifndef ERROR_MUI_FILE_NOT_FOUND
+# define ERROR_MUI_FILE_NOT_FOUND 15100
+#endif
+
+#ifndef ERROR_MUI_INVALID_FILE
+# define ERROR_MUI_INVALID_FILE 15101
+#endif
+
+#ifndef ERROR_MUI_INVALID_RC_CONFIG
+# define ERROR_MUI_INVALID_RC_CONFIG 15102
+#endif
+
+#ifndef ERROR_MUI_INVALID_LOCALE_NAME
+# define ERROR_MUI_INVALID_LOCALE_NAME 15103
+#endif
+
+#ifndef ERROR_MUI_INVALID_ULTIMATEFALLBACK_NAME
+# define ERROR_MUI_INVALID_ULTIMATEFALLBACK_NAME 15104
+#endif
+
+#ifndef ERROR_MUI_FILE_NOT_LOADED
+# define ERROR_MUI_FILE_NOT_LOADED 15105
+#endif
+
+typedef BOOL (WINAPI *sGetQueuedCompletionStatusEx)
+ (HANDLE CompletionPort,
+ LPOVERLAPPED_ENTRY lpCompletionPortEntries,
+ ULONG ulCount,
+ PULONG ulNumEntriesRemoved,
+ DWORD dwMilliseconds,
+ BOOL fAlertable);
+
+/* from powerbase.h */
+#ifndef DEVICE_NOTIFY_CALLBACK
+# define DEVICE_NOTIFY_CALLBACK 2
+#endif
+
+#ifndef PBT_APMRESUMEAUTOMATIC
+# define PBT_APMRESUMEAUTOMATIC 18
+#endif
+
+#ifndef PBT_APMRESUMESUSPEND
+# define PBT_APMRESUMESUSPEND 7
+#endif
+
+typedef ULONG CALLBACK _DEVICE_NOTIFY_CALLBACK_ROUTINE(
+ PVOID Context,
+ ULONG Type,
+ PVOID Setting
+);
+typedef _DEVICE_NOTIFY_CALLBACK_ROUTINE* _PDEVICE_NOTIFY_CALLBACK_ROUTINE;
+
+typedef struct _DEVICE_NOTIFY_SUBSCRIBE_PARAMETERS {
+ _PDEVICE_NOTIFY_CALLBACK_ROUTINE Callback;
+ PVOID Context;
+} _DEVICE_NOTIFY_SUBSCRIBE_PARAMETERS, *_PDEVICE_NOTIFY_SUBSCRIBE_PARAMETERS;
+
+typedef PVOID _HPOWERNOTIFY;
+typedef _HPOWERNOTIFY *_PHPOWERNOTIFY;
+
+typedef DWORD (WINAPI *sPowerRegisterSuspendResumeNotification)
+ (DWORD Flags,
+ HANDLE Recipient,
+ _PHPOWERNOTIFY RegistrationHandle);
+
+/* from Winuser.h */
+typedef VOID (CALLBACK* WINEVENTPROC)
+ (HWINEVENTHOOK hWinEventHook,
+ DWORD event,
+ HWND hwnd,
+ LONG idObject,
+ LONG idChild,
+ DWORD idEventThread,
+ DWORD dwmsEventTime);
+
+typedef HWINEVENTHOOK (WINAPI *sSetWinEventHook)
+ (UINT eventMin,
+ UINT eventMax,
+ HMODULE hmodWinEventProc,
+ WINEVENTPROC lpfnWinEventProc,
+ DWORD idProcess,
+ DWORD idThread,
+ UINT dwflags);
+
+/* From mstcpip.h */
+typedef struct _TCP_INITIAL_RTO_PARAMETERS {
+ USHORT Rtt;
+ UCHAR MaxSynRetransmissions;
+} TCP_INITIAL_RTO_PARAMETERS, *PTCP_INITIAL_RTO_PARAMETERS;
+
+#ifndef TCP_INITIAL_RTO_NO_SYN_RETRANSMISSIONS
+# define TCP_INITIAL_RTO_NO_SYN_RETRANSMISSIONS ((UCHAR) -2)
+#endif
+#ifndef SIO_TCP_INITIAL_RTO
+# define SIO_TCP_INITIAL_RTO _WSAIOW(IOC_VENDOR,17)
+#endif
+
+/* Ntdll function pointers */
+extern sRtlGetVersion pRtlGetVersion;
+extern sRtlNtStatusToDosError pRtlNtStatusToDosError;
+extern sNtDeviceIoControlFile pNtDeviceIoControlFile;
+extern sNtQueryInformationFile pNtQueryInformationFile;
+extern sNtSetInformationFile pNtSetInformationFile;
+extern sNtQueryVolumeInformationFile pNtQueryVolumeInformationFile;
+extern sNtQueryDirectoryFile pNtQueryDirectoryFile;
+extern sNtQuerySystemInformation pNtQuerySystemInformation;
+extern sNtQueryInformationProcess pNtQueryInformationProcess;
+
+/* Kernel32 function pointers */
+extern sGetQueuedCompletionStatusEx pGetQueuedCompletionStatusEx;
+
+/* Powrprof.dll function pointer */
+extern sPowerRegisterSuspendResumeNotification pPowerRegisterSuspendResumeNotification;
+
+/* User32.dll function pointer */
+extern sSetWinEventHook pSetWinEventHook;
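+
+/* Illustrative only (not part of the original header): these pointers are
+ * expected to be resolved once at startup with GetProcAddress, roughly along
+ * these lines (error handling omitted; the actual loader lives elsewhere):
+ *
+ *   HMODULE ntdll = GetModuleHandleA("ntdll.dll");
+ *   pRtlNtStatusToDosError = (sRtlNtStatusToDosError)
+ *       GetProcAddress(ntdll, "RtlNtStatusToDosError");
+ *   pNtDeviceIoControlFile = (sNtDeviceIoControlFile)
+ *       GetProcAddress(ntdll, "NtDeviceIoControlFile");
+ */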
+
+#endif /* UV_WIN_WINAPI_H_ */
diff --git a/Utilities/cmlibuv/src/win/winsock.c b/Utilities/cmlibuv/src/win/winsock.c
new file mode 100644
index 0000000..4cf6e6b
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/winsock.c
@@ -0,0 +1,575 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+
+#include "uv.h"
+#include "internal.h"
+
+
+/* Whether there are any non-IFS LSPs stacked on TCP */
+int uv_tcp_non_ifs_lsp_ipv4;
+int uv_tcp_non_ifs_lsp_ipv6;
+
+/* IP address used to bind to any port on any interface */
+struct sockaddr_in uv_addr_ip4_any_;
+struct sockaddr_in6 uv_addr_ip6_any_;
+
+
+/*
+ * Retrieves the pointer to a winsock extension function.
+ */
+static BOOL uv_get_extension_function(SOCKET socket, GUID guid,
+ void **target) {
+ int result;
+ DWORD bytes;
+
+ result = WSAIoctl(socket,
+ SIO_GET_EXTENSION_FUNCTION_POINTER,
+ &guid,
+ sizeof(guid),
+ (void*)target,
+ sizeof(*target),
+ &bytes,
+ NULL,
+ NULL);
+
+ if (result == SOCKET_ERROR) {
+ *target = NULL;
+ return FALSE;
+ } else {
+ return TRUE;
+ }
+}
+
+
+BOOL uv_get_acceptex_function(SOCKET socket, LPFN_ACCEPTEX* target) {
+ const GUID wsaid_acceptex = WSAID_ACCEPTEX;
+ return uv_get_extension_function(socket, wsaid_acceptex, (void**)target);
+}
+
+
+BOOL uv_get_connectex_function(SOCKET socket, LPFN_CONNECTEX* target) {
+ const GUID wsaid_connectex = WSAID_CONNECTEX;
+ return uv_get_extension_function(socket, wsaid_connectex, (void**)target);
+}
+
+
+
+void uv_winsock_init(void) {
+ WSADATA wsa_data;
+ int errorno;
+ SOCKET dummy;
+ WSAPROTOCOL_INFOW protocol_info;
+ int opt_len;
+
+  /* Set the implicit binding address used by ConnectEx */
+ if (uv_ip4_addr("0.0.0.0", 0, &uv_addr_ip4_any_)) {
+ abort();
+ }
+
+ if (uv_ip6_addr("::", 0, &uv_addr_ip6_any_)) {
+ abort();
+ }
+
+ /* Skip initialization in safe mode without network support */
+ if (1 == GetSystemMetrics(SM_CLEANBOOT)) return;
+
+ /* Initialize winsock */
+ errorno = WSAStartup(MAKEWORD(2, 2), &wsa_data);
+ if (errorno != 0) {
+ uv_fatal_error(errorno, "WSAStartup");
+ }
+
+ /* Try to detect non-IFS LSPs */
+ uv_tcp_non_ifs_lsp_ipv4 = 1;
+ dummy = socket(AF_INET, SOCK_STREAM, IPPROTO_IP);
+ if (dummy != INVALID_SOCKET) {
+ opt_len = (int) sizeof protocol_info;
+ if (getsockopt(dummy,
+ SOL_SOCKET,
+ SO_PROTOCOL_INFOW,
+ (char*) &protocol_info,
+ &opt_len) == 0) {
+ if (protocol_info.dwServiceFlags1 & XP1_IFS_HANDLES)
+ uv_tcp_non_ifs_lsp_ipv4 = 0;
+ }
+ closesocket(dummy);
+ }
+
+  /* Try to detect IPv6 support and non-IFS LSPs */
+ uv_tcp_non_ifs_lsp_ipv6 = 1;
+ dummy = socket(AF_INET6, SOCK_STREAM, IPPROTO_IP);
+ if (dummy != INVALID_SOCKET) {
+ opt_len = (int) sizeof protocol_info;
+ if (getsockopt(dummy,
+ SOL_SOCKET,
+ SO_PROTOCOL_INFOW,
+ (char*) &protocol_info,
+ &opt_len) == 0) {
+ if (protocol_info.dwServiceFlags1 & XP1_IFS_HANDLES)
+ uv_tcp_non_ifs_lsp_ipv6 = 0;
+ }
+ closesocket(dummy);
+ }
+}
+
+
+int uv_ntstatus_to_winsock_error(NTSTATUS status) {
+ switch (status) {
+ case STATUS_SUCCESS:
+ return ERROR_SUCCESS;
+
+ case STATUS_PENDING:
+ return ERROR_IO_PENDING;
+
+ case STATUS_INVALID_HANDLE:
+ case STATUS_OBJECT_TYPE_MISMATCH:
+ return WSAENOTSOCK;
+
+ case STATUS_INSUFFICIENT_RESOURCES:
+ case STATUS_PAGEFILE_QUOTA:
+ case STATUS_COMMITMENT_LIMIT:
+ case STATUS_WORKING_SET_QUOTA:
+ case STATUS_NO_MEMORY:
+ case STATUS_QUOTA_EXCEEDED:
+ case STATUS_TOO_MANY_PAGING_FILES:
+ case STATUS_REMOTE_RESOURCES:
+ return WSAENOBUFS;
+
+ case STATUS_TOO_MANY_ADDRESSES:
+ case STATUS_SHARING_VIOLATION:
+ case STATUS_ADDRESS_ALREADY_EXISTS:
+ return WSAEADDRINUSE;
+
+ case STATUS_LINK_TIMEOUT:
+ case STATUS_IO_TIMEOUT:
+ case STATUS_TIMEOUT:
+ return WSAETIMEDOUT;
+
+ case STATUS_GRACEFUL_DISCONNECT:
+ return WSAEDISCON;
+
+ case STATUS_REMOTE_DISCONNECT:
+ case STATUS_CONNECTION_RESET:
+ case STATUS_LINK_FAILED:
+ case STATUS_CONNECTION_DISCONNECTED:
+ case STATUS_PORT_UNREACHABLE:
+ case STATUS_HOPLIMIT_EXCEEDED:
+ return WSAECONNRESET;
+
+ case STATUS_LOCAL_DISCONNECT:
+ case STATUS_TRANSACTION_ABORTED:
+ case STATUS_CONNECTION_ABORTED:
+ return WSAECONNABORTED;
+
+ case STATUS_BAD_NETWORK_PATH:
+ case STATUS_NETWORK_UNREACHABLE:
+ case STATUS_PROTOCOL_UNREACHABLE:
+ return WSAENETUNREACH;
+
+ case STATUS_HOST_UNREACHABLE:
+ return WSAEHOSTUNREACH;
+
+ case STATUS_CANCELLED:
+ case STATUS_REQUEST_ABORTED:
+ return WSAEINTR;
+
+ case STATUS_BUFFER_OVERFLOW:
+ case STATUS_INVALID_BUFFER_SIZE:
+ return WSAEMSGSIZE;
+
+ case STATUS_BUFFER_TOO_SMALL:
+ case STATUS_ACCESS_VIOLATION:
+ return WSAEFAULT;
+
+ case STATUS_DEVICE_NOT_READY:
+ case STATUS_REQUEST_NOT_ACCEPTED:
+ return WSAEWOULDBLOCK;
+
+ case STATUS_INVALID_NETWORK_RESPONSE:
+ case STATUS_NETWORK_BUSY:
+ case STATUS_NO_SUCH_DEVICE:
+ case STATUS_NO_SUCH_FILE:
+ case STATUS_OBJECT_PATH_NOT_FOUND:
+ case STATUS_OBJECT_NAME_NOT_FOUND:
+ case STATUS_UNEXPECTED_NETWORK_ERROR:
+ return WSAENETDOWN;
+
+ case STATUS_INVALID_CONNECTION:
+ return WSAENOTCONN;
+
+ case STATUS_REMOTE_NOT_LISTENING:
+ case STATUS_CONNECTION_REFUSED:
+ return WSAECONNREFUSED;
+
+ case STATUS_PIPE_DISCONNECTED:
+ return WSAESHUTDOWN;
+
+ case STATUS_CONFLICTING_ADDRESSES:
+ case STATUS_INVALID_ADDRESS:
+ case STATUS_INVALID_ADDRESS_COMPONENT:
+ return WSAEADDRNOTAVAIL;
+
+ case STATUS_NOT_SUPPORTED:
+ case STATUS_NOT_IMPLEMENTED:
+ return WSAEOPNOTSUPP;
+
+ case STATUS_ACCESS_DENIED:
+ return WSAEACCES;
+
+ default:
+ if ((status & (FACILITY_NTWIN32 << 16)) == (FACILITY_NTWIN32 << 16) &&
+ (status & (ERROR_SEVERITY_ERROR | ERROR_SEVERITY_WARNING))) {
+ /* It's a windows error that has been previously mapped to an ntstatus
+ * code. */
+ return (DWORD) (status & 0xffff);
+ } else {
+ /* The default fallback for unmappable ntstatus codes. */
+ return WSAEINVAL;
+ }
+ }
+}
+
+
+/*
+ * This function provides a workaround for a bug in the winsock implementation
+ * of WSARecv. The problem is that when SetFileCompletionNotificationModes is
+ * used to avoid IOCP notifications of completed reads, WSARecv does not
+ * reliably indicate whether we can expect a completion packet to be posted
+ * when the receive buffer is smaller than the received datagram.
+ *
+ * However, it is desirable to use SetFileCompletionNotificationModes because
+ * it yields a massive performance increase.
+ *
+ * This function provides a workaround for that bug, but it only works for the
+ * specific case that we need it for: it assumes that the "avoid iocp" bit
+ * has been set and supports only overlapped operation. It also requires the
+ * user to use the default msafd driver; it does not work when other LSPs are
+ * stacked on top of it.
+ */
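+/* Illustrative note (not from the original source): the "tagged hEvent"
+ * convention assumed below is that a caller which does not want an IOCP
+ * completion for this request sets the low bit of the event handle, e.g.
+ *
+ *   overlapped->hEvent = (HANDLE) ((uintptr_t) event | 1);
+ *
+ * When that bit is set, the function below passes a NULL ApcContext;
+ * otherwise the OVERLAPPED pointer itself is passed as the ApcContext.
+ */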
+int WSAAPI uv_wsarecv_workaround(SOCKET socket, WSABUF* buffers,
+ DWORD buffer_count, DWORD* bytes, DWORD* flags, WSAOVERLAPPED *overlapped,
+ LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine) {
+ NTSTATUS status;
+ void* apc_context;
+ IO_STATUS_BLOCK* iosb = (IO_STATUS_BLOCK*) &overlapped->Internal;
+ AFD_RECV_INFO info;
+ DWORD error;
+
+ if (overlapped == NULL || completion_routine != NULL) {
+ WSASetLastError(WSAEINVAL);
+ return SOCKET_ERROR;
+ }
+
+ info.BufferArray = buffers;
+ info.BufferCount = buffer_count;
+ info.AfdFlags = AFD_OVERLAPPED;
+ info.TdiFlags = TDI_RECEIVE_NORMAL;
+
+ if (*flags & MSG_PEEK) {
+ info.TdiFlags |= TDI_RECEIVE_PEEK;
+ }
+
+ if (*flags & MSG_PARTIAL) {
+ info.TdiFlags |= TDI_RECEIVE_PARTIAL;
+ }
+
+ if (!((intptr_t) overlapped->hEvent & 1)) {
+ apc_context = (void*) overlapped;
+ } else {
+ apc_context = NULL;
+ }
+
+ iosb->Status = STATUS_PENDING;
+ iosb->Pointer = 0;
+
+ status = pNtDeviceIoControlFile((HANDLE) socket,
+ overlapped->hEvent,
+ NULL,
+ apc_context,
+ iosb,
+ IOCTL_AFD_RECEIVE,
+ &info,
+ sizeof(info),
+ NULL,
+ 0);
+
+ *flags = 0;
+ *bytes = (DWORD) iosb->Information;
+
+ switch (status) {
+ case STATUS_SUCCESS:
+ error = ERROR_SUCCESS;
+ break;
+
+ case STATUS_PENDING:
+ error = WSA_IO_PENDING;
+ break;
+
+ case STATUS_BUFFER_OVERFLOW:
+ error = WSAEMSGSIZE;
+ break;
+
+ case STATUS_RECEIVE_EXPEDITED:
+ error = ERROR_SUCCESS;
+ *flags = MSG_OOB;
+ break;
+
+ case STATUS_RECEIVE_PARTIAL_EXPEDITED:
+ error = ERROR_SUCCESS;
+ *flags = MSG_PARTIAL | MSG_OOB;
+ break;
+
+ case STATUS_RECEIVE_PARTIAL:
+ error = ERROR_SUCCESS;
+ *flags = MSG_PARTIAL;
+ break;
+
+ default:
+ error = uv_ntstatus_to_winsock_error(status);
+ break;
+ }
+
+ WSASetLastError(error);
+
+ if (error == ERROR_SUCCESS) {
+ return 0;
+ } else {
+ return SOCKET_ERROR;
+ }
+}
+
+
+/* See description of uv_wsarecv_workaround. */
+int WSAAPI uv_wsarecvfrom_workaround(SOCKET socket, WSABUF* buffers,
+ DWORD buffer_count, DWORD* bytes, DWORD* flags, struct sockaddr* addr,
+ int* addr_len, WSAOVERLAPPED *overlapped,
+ LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine) {
+ NTSTATUS status;
+ void* apc_context;
+ IO_STATUS_BLOCK* iosb = (IO_STATUS_BLOCK*) &overlapped->Internal;
+ AFD_RECV_DATAGRAM_INFO info;
+ DWORD error;
+
+ if (overlapped == NULL || addr == NULL || addr_len == NULL ||
+ completion_routine != NULL) {
+ WSASetLastError(WSAEINVAL);
+ return SOCKET_ERROR;
+ }
+
+ info.BufferArray = buffers;
+ info.BufferCount = buffer_count;
+ info.AfdFlags = AFD_OVERLAPPED;
+ info.TdiFlags = TDI_RECEIVE_NORMAL;
+ info.Address = addr;
+ info.AddressLength = addr_len;
+
+ if (*flags & MSG_PEEK) {
+ info.TdiFlags |= TDI_RECEIVE_PEEK;
+ }
+
+ if (*flags & MSG_PARTIAL) {
+ info.TdiFlags |= TDI_RECEIVE_PARTIAL;
+ }
+
+ if (!((intptr_t) overlapped->hEvent & 1)) {
+ apc_context = (void*) overlapped;
+ } else {
+ apc_context = NULL;
+ }
+
+ iosb->Status = STATUS_PENDING;
+ iosb->Pointer = 0;
+
+ status = pNtDeviceIoControlFile((HANDLE) socket,
+ overlapped->hEvent,
+ NULL,
+ apc_context,
+ iosb,
+ IOCTL_AFD_RECEIVE_DATAGRAM,
+ &info,
+ sizeof(info),
+ NULL,
+ 0);
+
+ *flags = 0;
+ *bytes = (DWORD) iosb->Information;
+
+ switch (status) {
+ case STATUS_SUCCESS:
+ error = ERROR_SUCCESS;
+ break;
+
+ case STATUS_PENDING:
+ error = WSA_IO_PENDING;
+ break;
+
+ case STATUS_BUFFER_OVERFLOW:
+ error = WSAEMSGSIZE;
+ break;
+
+ case STATUS_RECEIVE_EXPEDITED:
+ error = ERROR_SUCCESS;
+ *flags = MSG_OOB;
+ break;
+
+ case STATUS_RECEIVE_PARTIAL_EXPEDITED:
+ error = ERROR_SUCCESS;
+ *flags = MSG_PARTIAL | MSG_OOB;
+ break;
+
+ case STATUS_RECEIVE_PARTIAL:
+ error = ERROR_SUCCESS;
+ *flags = MSG_PARTIAL;
+ break;
+
+ default:
+ error = uv_ntstatus_to_winsock_error(status);
+ break;
+ }
+
+ WSASetLastError(error);
+
+ if (error == ERROR_SUCCESS) {
+ return 0;
+ } else {
+ return SOCKET_ERROR;
+ }
+}
+
+
+int WSAAPI uv_msafd_poll(SOCKET socket, AFD_POLL_INFO* info_in,
+ AFD_POLL_INFO* info_out, OVERLAPPED* overlapped) {
+ IO_STATUS_BLOCK iosb;
+ IO_STATUS_BLOCK* iosb_ptr;
+ HANDLE event = NULL;
+ void* apc_context;
+ NTSTATUS status;
+ DWORD error;
+
+ if (overlapped != NULL) {
+ /* Overlapped operation. */
+ iosb_ptr = (IO_STATUS_BLOCK*) &overlapped->Internal;
+ event = overlapped->hEvent;
+
+ /* Do not report iocp completion if hEvent is tagged. */
+ if ((uintptr_t) event & 1) {
+ event = (HANDLE)((uintptr_t) event & ~(uintptr_t) 1);
+ apc_context = NULL;
+ } else {
+ apc_context = overlapped;
+ }
+
+ } else {
+ /* Blocking operation. */
+ iosb_ptr = &iosb;
+ event = CreateEvent(NULL, FALSE, FALSE, NULL);
+ if (event == NULL) {
+ return SOCKET_ERROR;
+ }
+ apc_context = NULL;
+ }
+
+ iosb_ptr->Status = STATUS_PENDING;
+ status = pNtDeviceIoControlFile((HANDLE) socket,
+ event,
+ NULL,
+ apc_context,
+ iosb_ptr,
+ IOCTL_AFD_POLL,
+ info_in,
+ sizeof *info_in,
+ info_out,
+ sizeof *info_out);
+
+ if (overlapped == NULL) {
+ /* If this is a blocking operation, wait for the event to become signaled,
+ * and then grab the real status from the io status block. */
+ if (status == STATUS_PENDING) {
+ DWORD r = WaitForSingleObject(event, INFINITE);
+
+ if (r == WAIT_FAILED) {
+ DWORD saved_error = GetLastError();
+ CloseHandle(event);
+ WSASetLastError(saved_error);
+ return SOCKET_ERROR;
+ }
+
+ status = iosb.Status;
+ }
+
+ CloseHandle(event);
+ }
+
+ switch (status) {
+ case STATUS_SUCCESS:
+ error = ERROR_SUCCESS;
+ break;
+
+ case STATUS_PENDING:
+ error = WSA_IO_PENDING;
+ break;
+
+ default:
+ error = uv_ntstatus_to_winsock_error(status);
+ break;
+ }
+
+ WSASetLastError(error);
+
+ if (error == ERROR_SUCCESS) {
+ return 0;
+ } else {
+ return SOCKET_ERROR;
+ }
+}
+
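+/*
+ * Illustrative sketch (not part of the upstream sources): a blocking poll for
+ * readability via uv_msafd_poll(), assuming the AFD_POLL_INFO and
+ * AFD_POLL_HANDLE_INFO layouts declared in include/uv/win.h. Passing a NULL
+ * overlapped pointer makes uv_msafd_poll() create and wait on a temporary
+ * event itself.
+ */
+#if 0
+static int example_wait_readable(SOCKET sock) {
+  AFD_POLL_INFO info;
+
+  info.Exclusive = FALSE;
+  info.NumberOfHandles = 1;
+  info.Timeout.QuadPart = INT64_MAX;  /* Effectively wait forever. */
+  info.Handles[0].Handle = (HANDLE) sock;
+  info.Handles[0].Status = 0;
+  info.Handles[0].Events = AFD_POLL_RECEIVE | AFD_POLL_DISCONNECT;
+
+  if (uv_msafd_poll(sock, &info, &info, NULL) == SOCKET_ERROR)
+    return WSAGetLastError();
+
+  /* info.Handles[0].Events now holds the events that were signaled. */
+  return 0;
+}
+#endif
+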
+int uv__convert_to_localhost_if_unspecified(const struct sockaddr* addr,
+ struct sockaddr_storage* storage) {
+ struct sockaddr_in* dest4;
+ struct sockaddr_in6* dest6;
+
+ if (addr == NULL)
+ return UV_EINVAL;
+
+ switch (addr->sa_family) {
+ case AF_INET:
+ dest4 = (struct sockaddr_in*) storage;
+ memcpy(dest4, addr, sizeof(*dest4));
+ if (dest4->sin_addr.s_addr == 0)
+ dest4->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ return 0;
+ case AF_INET6:
+ dest6 = (struct sockaddr_in6*) storage;
+ memcpy(dest6, addr, sizeof(*dest6));
+ if (memcmp(&dest6->sin6_addr,
+ &uv_addr_ip6_any_.sin6_addr,
+ sizeof(uv_addr_ip6_any_.sin6_addr)) == 0) {
+ struct in6_addr init_sin6_addr = IN6ADDR_LOOPBACK_INIT;
+ dest6->sin6_addr = init_sin6_addr;
+ }
+ return 0;
+ default:
+ return UV_EINVAL;
+ }
+}
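+
+/*
+ * Illustrative sketch (not part of the upstream sources): converting the
+ * unspecified address before connecting. The port and names used here are
+ * hypothetical.
+ */
+#if 0
+static int example_convert(void) {
+  struct sockaddr_in unspec;
+  struct sockaddr_storage converted;
+
+  memset(&unspec, 0, sizeof(unspec));
+  unspec.sin_family = AF_INET;
+  unspec.sin_port = htons(8080);
+  unspec.sin_addr.s_addr = htonl(INADDR_ANY);  /* 0.0.0.0 */
+
+  /* On success `converted` holds 127.0.0.1:8080. */
+  return uv__convert_to_localhost_if_unspecified(
+      (const struct sockaddr*) &unspec, &converted);
+}
+#endif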
diff --git a/Utilities/cmlibuv/src/win/winsock.h b/Utilities/cmlibuv/src/win/winsock.h
new file mode 100644
index 0000000..153632c
--- /dev/null
+++ b/Utilities/cmlibuv/src/win/winsock.h
@@ -0,0 +1,202 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_WIN_WINSOCK_H_
+#define UV_WIN_WINSOCK_H_
+
+#include <winsock2.h>
+#include <iptypes.h>
+#include <mswsock.h>
+#include <ws2tcpip.h>
+#include <windows.h>
+
+#include "winapi.h"
+
+
+/*
+ * Winsock definitions that MinGW headers are missing.
+ */
+#ifndef SO_UPDATE_CONNECT_CONTEXT
+# define SO_UPDATE_CONNECT_CONTEXT 0x7010
+#endif
+
+#ifndef TCP_KEEPALIVE
+# define TCP_KEEPALIVE 3
+#endif
+
+#ifndef IPV6_V6ONLY
+# define IPV6_V6ONLY 27
+#endif
+
+#ifndef IPV6_HOPLIMIT
+# define IPV6_HOPLIMIT 21
+#endif
+
+#ifndef SIO_BASE_HANDLE
+# define SIO_BASE_HANDLE 0x48000022
+#endif
+
+#ifndef MCAST_JOIN_SOURCE_GROUP
+# define MCAST_JOIN_SOURCE_GROUP 45
+#endif
+
+#ifndef MCAST_LEAVE_SOURCE_GROUP
+# define MCAST_LEAVE_SOURCE_GROUP 46
+#endif
+
+/*
+ * TDI defines that are only in the DDK.
+ * We only need receive flags so far.
+ */
+#ifndef TDI_RECEIVE_NORMAL
+ #define TDI_RECEIVE_BROADCAST 0x00000004
+ #define TDI_RECEIVE_MULTICAST 0x00000008
+ #define TDI_RECEIVE_PARTIAL 0x00000010
+ #define TDI_RECEIVE_NORMAL 0x00000020
+ #define TDI_RECEIVE_EXPEDITED 0x00000040
+ #define TDI_RECEIVE_PEEK 0x00000080
+ #define TDI_RECEIVE_NO_RESPONSE_EXP 0x00000100
+ #define TDI_RECEIVE_COPY_LOOKAHEAD 0x00000200
+ #define TDI_RECEIVE_ENTIRE_MESSAGE 0x00000400
+ #define TDI_RECEIVE_AT_DISPATCH_LEVEL 0x00000800
+ #define TDI_RECEIVE_CONTROL_INFO 0x00001000
+ #define TDI_RECEIVE_FORCE_INDICATION 0x00002000
+ #define TDI_RECEIVE_NO_PUSH 0x00004000
+#endif
+
+/*
+ * The "Auxiliary Function Driver" is the windows kernel-mode driver that does
+ * TCP, UDP etc. Winsock is just a layer that dispatches requests to it.
+ * Having these definitions allows us to bypass winsock and make an AFD kernel
+ * call directly, avoiding a bug in winsock's recvfrom implementation.
+ */
+
+#define AFD_NO_FAST_IO 0x00000001
+#define AFD_OVERLAPPED 0x00000002
+#define AFD_IMMEDIATE 0x00000004
+
+#define AFD_POLL_RECEIVE_BIT 0
+#define AFD_POLL_RECEIVE (1 << AFD_POLL_RECEIVE_BIT)
+#define AFD_POLL_RECEIVE_EXPEDITED_BIT 1
+#define AFD_POLL_RECEIVE_EXPEDITED (1 << AFD_POLL_RECEIVE_EXPEDITED_BIT)
+#define AFD_POLL_SEND_BIT 2
+#define AFD_POLL_SEND (1 << AFD_POLL_SEND_BIT)
+#define AFD_POLL_DISCONNECT_BIT 3
+#define AFD_POLL_DISCONNECT (1 << AFD_POLL_DISCONNECT_BIT)
+#define AFD_POLL_ABORT_BIT 4
+#define AFD_POLL_ABORT (1 << AFD_POLL_ABORT_BIT)
+#define AFD_POLL_LOCAL_CLOSE_BIT 5
+#define AFD_POLL_LOCAL_CLOSE (1 << AFD_POLL_LOCAL_CLOSE_BIT)
+#define AFD_POLL_CONNECT_BIT 6
+#define AFD_POLL_CONNECT (1 << AFD_POLL_CONNECT_BIT)
+#define AFD_POLL_ACCEPT_BIT 7
+#define AFD_POLL_ACCEPT (1 << AFD_POLL_ACCEPT_BIT)
+#define AFD_POLL_CONNECT_FAIL_BIT 8
+#define AFD_POLL_CONNECT_FAIL (1 << AFD_POLL_CONNECT_FAIL_BIT)
+#define AFD_POLL_QOS_BIT 9
+#define AFD_POLL_QOS (1 << AFD_POLL_QOS_BIT)
+#define AFD_POLL_GROUP_QOS_BIT 10
+#define AFD_POLL_GROUP_QOS (1 << AFD_POLL_GROUP_QOS_BIT)
+
+#define AFD_NUM_POLL_EVENTS 11
+#define AFD_POLL_ALL ((1 << AFD_NUM_POLL_EVENTS) - 1)
+
+typedef struct _AFD_RECV_DATAGRAM_INFO {
+ LPWSABUF BufferArray;
+ ULONG BufferCount;
+ ULONG AfdFlags;
+ ULONG TdiFlags;
+ struct sockaddr* Address;
+ int* AddressLength;
+} AFD_RECV_DATAGRAM_INFO, *PAFD_RECV_DATAGRAM_INFO;
+
+typedef struct _AFD_RECV_INFO {
+ LPWSABUF BufferArray;
+ ULONG BufferCount;
+ ULONG AfdFlags;
+ ULONG TdiFlags;
+} AFD_RECV_INFO, *PAFD_RECV_INFO;
+
+
+#define _AFD_CONTROL_CODE(operation, method) \
+ ((FSCTL_AFD_BASE) << 12 | (operation << 2) | method)
+
+#define FSCTL_AFD_BASE FILE_DEVICE_NETWORK
+
+#define AFD_RECEIVE 5
+#define AFD_RECEIVE_DATAGRAM 6
+#define AFD_POLL 9
+
+#define IOCTL_AFD_RECEIVE \
+ _AFD_CONTROL_CODE(AFD_RECEIVE, METHOD_NEITHER)
+
+#define IOCTL_AFD_RECEIVE_DATAGRAM \
+ _AFD_CONTROL_CODE(AFD_RECEIVE_DATAGRAM, METHOD_NEITHER)
+
+#define IOCTL_AFD_POLL \
+ _AFD_CONTROL_CODE(AFD_POLL, METHOD_BUFFERED)
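+
+/*
+ * For reference, assuming the standard FILE_DEVICE_NETWORK (0x12),
+ * METHOD_BUFFERED (0) and METHOD_NEITHER (3) values, these expand to the
+ * well-known AFD control codes:
+ *
+ *   IOCTL_AFD_RECEIVE          = (0x12 << 12) | (5 << 2) | 3 = 0x00012017
+ *   IOCTL_AFD_RECEIVE_DATAGRAM = (0x12 << 12) | (6 << 2) | 3 = 0x0001201B
+ *   IOCTL_AFD_POLL             = (0x12 << 12) | (9 << 2) | 0 = 0x00012024
+ */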
+
+#if (defined(__MINGW32__) && !defined(__MINGW64_VERSION_MAJOR)) \
+ || (defined(_MSC_VER) && _MSC_VER < 1500)
+typedef struct _IP_ADAPTER_UNICAST_ADDRESS_XP {
+ /* FIXME: __C89_NAMELESS was removed */
+ /* __C89_NAMELESS */ union {
+ ULONGLONG Alignment;
+ /* __C89_NAMELESS */ struct {
+ ULONG Length;
+ DWORD Flags;
+ };
+ };
+ struct _IP_ADAPTER_UNICAST_ADDRESS_XP *Next;
+ SOCKET_ADDRESS Address;
+ IP_PREFIX_ORIGIN PrefixOrigin;
+ IP_SUFFIX_ORIGIN SuffixOrigin;
+ IP_DAD_STATE DadState;
+ ULONG ValidLifetime;
+ ULONG PreferredLifetime;
+ ULONG LeaseLifetime;
+} IP_ADAPTER_UNICAST_ADDRESS_XP,*PIP_ADAPTER_UNICAST_ADDRESS_XP;
+
+typedef struct _IP_ADAPTER_UNICAST_ADDRESS_LH {
+ union {
+ ULONGLONG Alignment;
+ struct {
+ ULONG Length;
+ DWORD Flags;
+ };
+ };
+ struct _IP_ADAPTER_UNICAST_ADDRESS_LH *Next;
+ SOCKET_ADDRESS Address;
+ IP_PREFIX_ORIGIN PrefixOrigin;
+ IP_SUFFIX_ORIGIN SuffixOrigin;
+ IP_DAD_STATE DadState;
+ ULONG ValidLifetime;
+ ULONG PreferredLifetime;
+ ULONG LeaseLifetime;
+ UINT8 OnLinkPrefixLength;
+} IP_ADAPTER_UNICAST_ADDRESS_LH,*PIP_ADAPTER_UNICAST_ADDRESS_LH;
+
+#endif
+
+int uv__convert_to_localhost_if_unspecified(const struct sockaddr* addr,
+ struct sockaddr_storage* storage);
+
+#endif /* UV_WIN_WINSOCK_H_ */