
Merge topic 'import-libuv'

39ac889d cmake: Add trivial usage of libuv
7cf369fe Do not build libuv on HP-UX
075cae51 Do not build libuv on SPARC
9a53af40 Do not build libuv on Cygwin
219f7411 Do not build libuv on Mac OS X 10.4 and lower
8a5beef3 Add option to build CMake against a system libuv
e56aa462 FindLibUV: Add module to find libuv package
551d5aed libuv: Fix unused variable warning in uv_loop_close
f4f8074b libuv: Avoid including macOS CoreServices header globally
a63aaaed libuv: Always include our own header first
9130b53a libuv: Conditionally declare Windows APIs for VS 2008 and below
b52afa46 libuv: Fix anonymous union syntax
05dbc204 libuv: Fix Windows API function typedef syntax
75139374 libuv: Install LICENSE file with CMake documentation
95dcc4e4 libuv: Disable warnings to avoid changing 3rd party code
13b7e758 libuv: Build the library within CMake
...
Brad King committed 9 years ago (commit 6f8b93983a)
100 changed files with 35315 additions and 1 deletion
  1. CMakeLists.txt (+51, -1)
  2. Source/CMakeLists.txt (+1, -0)
  3. Source/Modules/FindLibUV.cmake (+131, -0)
  4. Source/cmConfigure.cmake.h.in (+1, -0)
  5. Source/cmakemain.cxx (+7, -0)
  6. Tests/CMakeLists.txt (+4, -0)
  7. Tests/FindLibUV/CMakeLists.txt (+10, -0)
  8. Tests/FindLibUV/Test/CMakeLists.txt (+17, -0)
  9. Tests/FindLibUV/Test/main.c (+7, -0)
  10. Utilities/Scripts/update-libuv.bash (+26, -0)
  11. Utilities/cmThirdParty.h.in (+1, -0)
  12. Utilities/cm_uv.h (+23, -0)
  13. Utilities/cmlibuv/.gitattributes (+1, -0)
  14. Utilities/cmlibuv/CMakeLists.txt (+227, -0)
  15. Utilities/cmlibuv/LICENSE (+70, -0)
  16. Utilities/cmlibuv/include/android-ifaddrs.h (+54, -0)
  17. Utilities/cmlibuv/include/pthread-barrier.h (+66, -0)
  18. Utilities/cmlibuv/include/stdint-msvc2008.h (+247, -0)
  19. Utilities/cmlibuv/include/tree.h (+768, -0)
  20. Utilities/cmlibuv/include/uv-aix.h (+32, -0)
  21. Utilities/cmlibuv/include/uv-bsd.h (+34, -0)
  22. Utilities/cmlibuv/include/uv-darwin.h (+61, -0)
  23. Utilities/cmlibuv/include/uv-errno.h (+419, -0)
  24. Utilities/cmlibuv/include/uv-linux.h (+34, -0)
  25. Utilities/cmlibuv/include/uv-os390.h (+27, -0)
  26. Utilities/cmlibuv/include/uv-sunos.h (+44, -0)
  27. Utilities/cmlibuv/include/uv-threadpool.h (+37, -0)
  28. Utilities/cmlibuv/include/uv-unix.h (+371, -0)
  29. Utilities/cmlibuv/include/uv-version.h (+43, -0)
  30. Utilities/cmlibuv/include/uv-win.h (+660, -0)
  31. Utilities/cmlibuv/include/uv.h (+1499, -0)
  32. Utilities/cmlibuv/src/fs-poll.c (+256, -0)
  33. Utilities/cmlibuv/src/heap-inl.h (+245, -0)
  34. Utilities/cmlibuv/src/inet.c (+309, -0)
  35. Utilities/cmlibuv/src/queue.h (+108, -0)
  36. Utilities/cmlibuv/src/threadpool.c (+303, -0)
  37. Utilities/cmlibuv/src/unix/aix.c (+1154, -0)
  38. Utilities/cmlibuv/src/unix/android-ifaddrs.c (+703, -0)
  39. Utilities/cmlibuv/src/unix/async.c (+290, -0)
  40. Utilities/cmlibuv/src/unix/atomic-ops.h (+88, -0)
  41. Utilities/cmlibuv/src/unix/core.c (+1238, -0)
  42. Utilities/cmlibuv/src/unix/darwin-proctitle.c (+206, -0)
  43. Utilities/cmlibuv/src/unix/darwin.c (+335, -0)
  44. Utilities/cmlibuv/src/unix/dl.c (+80, -0)
  45. Utilities/cmlibuv/src/unix/freebsd.c (+460, -0)
  46. Utilities/cmlibuv/src/unix/fs.c (+1355, -0)
  47. Utilities/cmlibuv/src/unix/fsevents.c (+904, -0)
  48. Utilities/cmlibuv/src/unix/getaddrinfo.c (+202, -0)
  49. Utilities/cmlibuv/src/unix/getnameinfo.c (+120, -0)
  50. Utilities/cmlibuv/src/unix/internal.h (+322, -0)
  51. Utilities/cmlibuv/src/unix/kqueue.c (+463, -0)
  52. Utilities/cmlibuv/src/unix/linux-core.c (+985, -0)
  53. Utilities/cmlibuv/src/unix/linux-inotify.c (+285, -0)
  54. Utilities/cmlibuv/src/unix/linux-syscalls.c (+471, -0)
  55. Utilities/cmlibuv/src/unix/linux-syscalls.h (+151, -0)
  56. Utilities/cmlibuv/src/unix/loop-watcher.c (+68, -0)
  57. Utilities/cmlibuv/src/unix/loop.c (+159, -0)
  58. Utilities/cmlibuv/src/unix/netbsd.c (+380, -0)
  59. Utilities/cmlibuv/src/unix/openbsd.c (+396, -0)
  60. Utilities/cmlibuv/src/unix/os390.c (+42, -0)
  61. Utilities/cmlibuv/src/unix/pipe.c (+298, -0)
  62. Utilities/cmlibuv/src/unix/poll.c (+130, -0)
  63. Utilities/cmlibuv/src/unix/process.c (+563, -0)
  64. Utilities/cmlibuv/src/unix/proctitle.c (+105, -0)
  65. Utilities/cmlibuv/src/unix/pthread-barrier.c (+120, -0)
  66. Utilities/cmlibuv/src/unix/pthread-fixes.c (+56, -0)
  67. Utilities/cmlibuv/src/unix/signal.c (+467, -0)
  68. Utilities/cmlibuv/src/unix/spinlock.h (+53, -0)
  69. Utilities/cmlibuv/src/unix/stream.c (+1638, -0)
  70. Utilities/cmlibuv/src/unix/sunos.c (+821, -0)
  71. Utilities/cmlibuv/src/unix/tcp.c (+395, -0)
  72. Utilities/cmlibuv/src/unix/thread.c (+605, -0)
  73. Utilities/cmlibuv/src/unix/timer.c (+172, -0)
  74. Utilities/cmlibuv/src/unix/tty.c (+336, -0)
  75. Utilities/cmlibuv/src/unix/udp.c (+895, -0)
  76. Utilities/cmlibuv/src/uv-common.c (+654, -0)
  77. Utilities/cmlibuv/src/uv-common.h (+227, -0)
  78. Utilities/cmlibuv/src/version.c (+45, -0)
  79. Utilities/cmlibuv/src/win/async.c (+99, -0)
  80. Utilities/cmlibuv/src/win/atomicops-inl.h (+56, -0)
  81. Utilities/cmlibuv/src/win/core.c (+602, -0)
  82. Utilities/cmlibuv/src/win/detect-wakeup.c (+35, -0)
  83. Utilities/cmlibuv/src/win/dl.c (+118, -0)
  84. Utilities/cmlibuv/src/win/error.c (+170, -0)
  85. Utilities/cmlibuv/src/win/fs-event.c (+545, -0)
  86. Utilities/cmlibuv/src/win/fs.c (+2491, -0)
  87. Utilities/cmlibuv/src/win/getaddrinfo.c (+385, -0)
  88. Utilities/cmlibuv/src/win/getnameinfo.c (+150, -0)
  89. Utilities/cmlibuv/src/win/handle-inl.h (+179, -0)
  90. Utilities/cmlibuv/src/win/handle.c (+154, -0)
  91. Utilities/cmlibuv/src/win/internal.h (+398, -0)
  92. Utilities/cmlibuv/src/win/loop-watcher.c (+122, -0)
  93. Utilities/cmlibuv/src/win/pipe.c (+2130, -0)
  94. Utilities/cmlibuv/src/win/poll.c (+646, -0)
  95. Utilities/cmlibuv/src/win/process-stdio.c (+510, -0)
  96. Utilities/cmlibuv/src/win/process.c (+1247, -0)
  97. Utilities/cmlibuv/src/win/req-inl.h (+224, -0)
  98. Utilities/cmlibuv/src/win/req.c (+25, -0)
  99. Utilities/cmlibuv/src/win/signal.c (+356, -0)
  100. Utilities/cmlibuv/src/win/snprintf.c (+42, -0)

+ 51 - 1
CMakeLists.txt

@@ -112,7 +112,7 @@ macro(CMAKE_HANDLE_SYSTEM_LIBRARIES)
 
   # Allow the user to enable/disable all system utility library options by
   # defining CMAKE_USE_SYSTEM_LIBRARIES or CMAKE_USE_SYSTEM_LIBRARY_${util}.
-  set(UTILITIES BZIP2 CURL EXPAT FORM JSONCPP LIBARCHIVE LIBLZMA ZLIB)
+  set(UTILITIES BZIP2 CURL EXPAT FORM JSONCPP LIBARCHIVE LIBLZMA LIBUV ZLIB)
   foreach(util ${UTILITIES})
     if(NOT DEFINED CMAKE_USE_SYSTEM_LIBRARY_${util}
         AND DEFINED CMAKE_USE_SYSTEM_LIBRARIES)
@@ -152,6 +152,7 @@ macro(CMAKE_HANDLE_SYSTEM_LIBRARIES)
     "${CMAKE_USE_SYSTEM_LIBRARY_LIBLZMA}" "NOT CMAKE_USE_SYSTEM_LIBARCHIVE" ON)
   option(CMAKE_USE_SYSTEM_FORM "Use system-installed libform" "${CMAKE_USE_SYSTEM_LIBRARY_FORM}")
   option(CMAKE_USE_SYSTEM_JSONCPP "Use system-installed jsoncpp" "${CMAKE_USE_SYSTEM_LIBRARY_JSONCPP}")
+  option(CMAKE_USE_SYSTEM_LIBUV "Use system-installed libuv" "${CMAKE_USE_SYSTEM_LIBRARY_LIBUV}")
 
   # For now use system KWIML only if explicitly requested rather
   # than activating via the general system libs options.
@@ -463,6 +464,55 @@ macro (CMAKE_BUILD_UTILITIES)
     CMAKE_SET_TARGET_FOLDER(cmjsoncpp "Utilities/3rdParty")
   endif()
 
+  #---------------------------------------------------------------------
+  # Build libuv library.
+  if(NOT DEFINED CMAKE_USE_LIBUV)
+    set(CMAKE_USE_LIBUV 1)
+    if(APPLE)
+      include(CheckCSourceCompiles)
+      check_c_source_compiles("
+#include <CoreServices/CoreServices.h>
+#include <AvailabilityMacros.h>
+#ifndef MAC_OS_X_VERSION_10_5
+#error \"MAC_OS_X_VERSION_10_5 is not defined\"
+#endif
+int main(void) { return 0; }
+" HAVE_CoreServices_OS_X_10_5)
+      if(NOT HAVE_CoreServices_OS_X_10_5)
+        set(CMAKE_USE_LIBUV 0)
+      endif()
+    elseif(CYGWIN)
+      # libuv does not support Cygwin
+      set(CMAKE_USE_LIBUV 0)
+    elseif(CMAKE_SYSTEM_NAME STREQUAL "HP-UX")
+      # Disable until it can be ported.
+      set(CMAKE_USE_LIBUV 0)
+    elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "sparc")
+      # Disable until it can be ported.
+      set(CMAKE_USE_LIBUV 0)
+    endif()
+  endif()
+  if(CMAKE_USE_LIBUV)
+    if(CMAKE_USE_SYSTEM_LIBUV)
+      if(NOT CMAKE_VERSION VERSION_LESS 3.0)
+        include(${CMake_SOURCE_DIR}/Source/Modules/FindLibUV.cmake)
+      else()
+        message(FATAL_ERROR "CMAKE_USE_SYSTEM_LIBUV requires CMake >= 3.0")
+      endif()
+      if(NOT LIBUV_FOUND)
+        message(FATAL_ERROR
+          "CMAKE_USE_SYSTEM_LIBUV is ON but a libuv is not found!")
+      endif()
+      set(CMAKE_LIBUV_LIBRARIES LibUV::LibUV)
+    else()
+      set(CMAKE_LIBUV_LIBRARIES cmlibuv)
+      add_subdirectory(Utilities/cmlibuv)
+      CMAKE_SET_TARGET_FOLDER(cmlibuv "Utilities/3rdParty")
+    endif()
+  else()
+    set(CMAKE_LIBUV_LIBRARIES)
+  endif()
+
   #---------------------------------------------------------------------
   # Build XMLRPC library for CMake and CTest.
   if(CTEST_USE_XMLRPC)
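
The hunk above adds LIBUV to CMake's existing system-utilities machinery: CMAKE_USE_SYSTEM_LIBUV (or the blanket CMAKE_USE_SYSTEM_LIBRARIES) selects a system libuv located through FindLibUV, the default builds the bundled Utilities/cmlibuv copy, and Cygwin, HP-UX, SPARC, and pre-10.5 macOS opt out entirely. A minimal sketch of how a packager might request the system copy, assuming an initial-cache script loaded with cmake -C (the file name is illustrative):

  # use-system-libuv.cmake (hypothetical initial-cache script)
  # Usage: cmake -C use-system-libuv.cmake <path-to-cmake-source>
  # Selecting the system libuv requires a host CMake >= 3.0 so that FindLibUV works.
  set(CMAKE_USE_SYSTEM_LIBUV ON CACHE BOOL "Use system-installed libuv")
  # Alternatively, flip every third-party dependency at once; LIBUV now
  # participates in the UTILITIES list handled above:
  # set(CMAKE_USE_SYSTEM_LIBRARIES ON CACHE BOOL "Use system-installed third-party libraries")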

+ 1 - 0
Source/CMakeLists.txt

@@ -582,6 +582,7 @@ target_link_libraries(CMakeLib cmsys
   ${CMAKE_TAR_LIBRARIES} ${CMAKE_COMPRESS_LIBRARIES}
   ${CMAKE_CURL_LIBRARIES}
   ${CMAKE_JSONCPP_LIBRARIES}
+  ${CMAKE_LIBUV_LIBRARIES}
   ${CMake_KWIML_LIBRARIES}
   )
 

+ 131 - 0
Source/Modules/FindLibUV.cmake

@@ -0,0 +1,131 @@
+#[=======================================================================[.rst:
+FindLibUV
+---------
+
+Find libuv includes and library.
+
+Imported Targets
+^^^^^^^^^^^^^^^^
+
+An :ref:`imported target <Imported targets>` named
+``LibUV::LibUV`` is provided if libuv has been found.
+
+Result Variables
+^^^^^^^^^^^^^^^^
+
+This module defines the following variables:
+
+``LibUV_FOUND``
+  True if libuv was found, false otherwise.
+``LibUV_INCLUDE_DIRS``
+  Include directories needed to include libuv headers.
+``LibUV_LIBRARIES``
+  Libraries needed to link to libuv.
+``LibUV_VERSION``
+  The version of libuv found.
+``LibUV_VERSION_MAJOR``
+  The major version of libuv.
+``LibUV_VERSION_MINOR``
+  The minor version of libuv.
+``LibUV_VERSION_PATCH``
+  The patch version of libuv.
+
+Cache Variables
+^^^^^^^^^^^^^^^
+
+This module uses the following cache variables:
+
+``LibUV_LIBRARY``
+  The location of the libuv library file.
+``LibUV_INCLUDE_DIR``
+  The location of the libuv include directory containing ``uv.h``.
+
+The cache variables should not be used by project code.
+They may be set by end users to point at libuv components.
+#]=======================================================================]
+
+#=============================================================================
+# Copyright 2014-2016 Kitware, Inc.
+#
+# Distributed under the OSI-approved BSD License (the "License");
+# see accompanying file Copyright.txt for details.
+#
+# This software is distributed WITHOUT ANY WARRANTY; without even the
+# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the License for more information.
+#=============================================================================
+# (To distribute this file outside of CMake, substitute the full
+#  License text for the above reference.)
+
+#-----------------------------------------------------------------------------
+find_library(LibUV_LIBRARY
+  NAMES uv
+  )
+mark_as_advanced(LibUV_LIBRARY)
+
+find_path(LibUV_INCLUDE_DIR
+  NAMES uv.h
+  )
+mark_as_advanced(LibUV_INCLUDE_DIR)
+
+#-----------------------------------------------------------------------------
+# Extract version number if possible.
+set(_LibUV_H_REGEX "#[ \t]*define[ \t]+UV_VERSION_(MAJOR|MINOR|PATCH)[ \t]+[0-9]+")
+if(LibUV_INCLUDE_DIR AND EXISTS "${LibUV_INCLUDE_DIR}/uv-version.h")
+  file(STRINGS "${LibUV_INCLUDE_DIR}/uv-version.h" _LibUV_H REGEX "${_LibUV_H_REGEX}")
+elseif(LibUV_INCLUDE_DIR AND EXISTS "${LibUV_INCLUDE_DIR}/uv.h")
+  file(STRINGS "${LibUV_INCLUDE_DIR}/uv.h" _LibUV_H REGEX "${_LibUV_H_REGEX}")
+else()
+  set(_LibUV_H "")
+endif()
+foreach(c MAJOR MINOR PATCH)
+  if(_LibUV_H MATCHES "#[ \t]*define[ \t]+UV_VERSION_${c}[ \t]+([0-9]+)")
+    set(_LibUV_VERSION_${c} "${CMAKE_MATCH_1}")
+  else()
+    unset(_LibUV_VERSION_${c})
+  endif()
+endforeach()
+if(DEFINED _LibUV_VERSION_MAJOR AND DEFINED _LibUV_VERSION_MINOR)
+  set(LibUV_VERSION_MAJOR "${_LibUV_VERSION_MAJOR}")
+  set(LibUV_VERSION_MINOR "${_LibUV_VERSION_MINOR}")
+  set(LibUV_VERSION "${LibUV_VERSION_MAJOR}.${LibUV_VERSION_MINOR}")
+  if(DEFINED _LibUV_VERSION_PATCH)
+    set(LibUV_VERSION_PATCH "${_LibUV_VERSION_PATCH}")
+    set(LibUV_VERSION "${LibUV_VERSION}.${LibUV_VERSION_PATCH}")
+  else()
+    unset(LibUV_VERSION_PATCH)
+  endif()
+else()
+  set(LibUV_VERSION_MAJOR "")
+  set(LibUV_VERSION_MINOR "")
+  set(LibUV_VERSION_PATCH "")
+  set(LibUV_VERSION "")
+endif()
+unset(_LibUV_VERSION_MAJOR)
+unset(_LibUV_VERSION_MINOR)
+unset(_LibUV_VERSION_PATCH)
+unset(_LibUV_H_REGEX)
+unset(_LibUV_H)
+
+#-----------------------------------------------------------------------------
+include(${CMAKE_CURRENT_LIST_DIR}/../../Modules/FindPackageHandleStandardArgs.cmake)
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(LibUV
+  FOUND_VAR LibUV_FOUND
+  REQUIRED_VARS LibUV_LIBRARY LibUV_INCLUDE_DIR
+  VERSION_VAR LibUV_VERSION
+  )
+set(LIBUV_FOUND ${LibUV_FOUND})
+
+#-----------------------------------------------------------------------------
+# Provide documented result variables and targets.
+if(LibUV_FOUND)
+  set(LibUV_INCLUDE_DIRS ${LibUV_INCLUDE_DIR})
+  set(LibUV_LIBRARIES ${LibUV_LIBRARY})
+  if(NOT TARGET LibUV::LibUV)
+    add_library(LibUV::LibUV UNKNOWN IMPORTED)
+    set_target_properties(LibUV::LibUV PROPERTIES
+      IMPORTED_LOCATION "${LibUV_LIBRARY}"
+      INTERFACE_INCLUDE_DIRECTORIES "${LibUV_INCLUDE_DIRS}"
+      )
+  endif()
+endif()
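
FindLibUV.cmake is not installed with CMake's public Modules directory, so an out-of-tree consumer has to carry its own copy and put it on CMAKE_MODULE_PATH, as the test project further below does. A minimal sketch of such a consumer, with placeholder project, target, and path names:

  cmake_minimum_required(VERSION 3.0)
  project(uv_demo C)  # placeholder project name
  # Directory holding a local copy of FindLibUV.cmake (path is an assumption).
  list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
  find_package(LibUV REQUIRED)
  add_executable(uv_demo main.c)
  # Link via the imported target, or equivalently use the documented
  # LibUV_INCLUDE_DIRS / LibUV_LIBRARIES result variables.
  target_link_libraries(uv_demo LibUV::LibUV)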

+ 1 - 0
Source/cmConfigure.cmake.h.in

@@ -28,6 +28,7 @@
 #cmakedefine HAVE_UNSETENV
 #cmakedefine CMAKE_USE_ELF_PARSER
 #cmakedefine CMAKE_USE_MACH_PARSER
+#cmakedefine CMAKE_USE_LIBUV
 #cmakedefine CMAKE_ENCODING_UTF8
 #cmakedefine CMake_HAVE_CXX_NULLPTR
 #cmakedefine CMake_HAVE_CXX_OVERRIDE

+ 7 - 0
Source/cmakemain.cxx

@@ -27,6 +27,10 @@
 #include "cmcmd.h"
 #include <cmsys/Encoding.hxx>
 
+#ifdef CMAKE_USE_LIBUV
+#include "cm_uv.h"
+#endif
+
 #ifdef CMAKE_BUILD_WITH_CMAKE
 static const char* cmDocumentationName[][2] = {
   { CM_NULLPTR, "  cmake - Cross-Platform Makefile Generator." },
@@ -171,6 +175,9 @@ int main(int ac, char const* const* av)
   int ret = do_cmake(ac, av);
 #ifdef CMAKE_BUILD_WITH_CMAKE
   cmDynamicLoader::FlushCache();
+#endif
+#ifdef CMAKE_USE_LIBUV
+  uv_loop_close(uv_default_loop());
 #endif
   return ret;
 }

+ 4 - 0
Tests/CMakeLists.txt

@@ -1373,6 +1373,10 @@ ${CMake_BINARY_DIR}/bin/cmake -DDIR=dev -P ${CMake_SOURCE_DIR}/Utilities/Release
     add_subdirectory(FindJsonCpp)
   endif()
 
+  if(CMake_TEST_FindLibUV)
+    add_subdirectory(FindLibUV)
+  endif()
+
   if(CMake_TEST_FindLTTngUST)
     add_subdirectory(FindLTTngUST)
   endif()

+ 10 - 0
Tests/FindLibUV/CMakeLists.txt

@@ -0,0 +1,10 @@
+add_test(NAME FindLibUV.Test COMMAND
+  ${CMAKE_CTEST_COMMAND} -C $<CONFIGURATION>
+  --build-and-test
+  "${CMake_SOURCE_DIR}/Tests/FindLibUV/Test"
+  "${CMake_BINARY_DIR}/Tests/FindLibUV/Test"
+  ${build_generator_args}
+  --build-project TestFindLibUV
+  --build-options ${build_options}
+  --test-command ${CMAKE_CTEST_COMMAND} -V -C $<CONFIGURATION>
+  )

+ 17 - 0
Tests/FindLibUV/Test/CMakeLists.txt

@@ -0,0 +1,17 @@
+cmake_minimum_required(VERSION 3.6)
+project(TestFindLibUV C)
+include(CTest)
+
+# CMake does not actually provide FindLibUV publicly.
+set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/../../../Source/Modules)
+
+find_package(LibUV REQUIRED)
+
+add_executable(test_libuv_tgt main.c)
+target_link_libraries(test_libuv_tgt LibUV::LibUV)
+add_test(NAME test_libuv_tgt COMMAND test_libuv_tgt)
+
+add_executable(test_libuv_var main.c)
+target_include_directories(test_libuv_var PRIVATE ${LibUV_INCLUDE_DIRS})
+target_link_libraries(test_libuv_var PRIVATE ${LibUV_LIBRARIES})
+add_test(NAME test_libuv_var COMMAND test_libuv_var)

+ 7 - 0
Tests/FindLibUV/Test/main.c

@@ -0,0 +1,7 @@
+#include <uv.h>
+
+int main()
+{
+  uv_loop_close(uv_default_loop());
+  return 0;
+}

+ 26 - 0
Utilities/Scripts/update-libuv.bash

@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+
+set -e
+set -x
+shopt -s dotglob
+
+readonly name="libuv"
+readonly ownership="libuv upstream <[email protected]>"
+readonly subtree="Utilities/cmlibuv"
+readonly repo="https://github.com/libuv/libuv.git"
+readonly tag="v1.x"
+readonly shortlog=false
+readonly paths="
+  LICENSE
+  include
+  src
+"
+
+extract_source () {
+    git_archive
+    pushd "${extractdir}/${name}-reduced"
+    echo "* -whitespace" > .gitattributes
+    popd
+}
+
+. "${BASH_SOURCE%/*}/update-third-party.bash"

+ 1 - 0
Utilities/cmThirdParty.h.in

@@ -22,6 +22,7 @@
 #cmakedefine CMAKE_USE_SYSTEM_LIBLZMA
 #cmakedefine CMAKE_USE_SYSTEM_FORM
 #cmakedefine CMAKE_USE_SYSTEM_JSONCPP
+#cmakedefine CMAKE_USE_SYSTEM_LIBUV
 #cmakedefine CTEST_USE_XMLRPC
 
 #endif

+ 23 - 0
Utilities/cm_uv.h

@@ -0,0 +1,23 @@
+/*============================================================================
+  CMake - Cross Platform Makefile Generator
+  Copyright 2000-2016 Kitware, Inc., Insight Software Consortium
+
+  Distributed under the OSI-approved BSD License (the "License");
+  see accompanying file Copyright.txt for details.
+
+  This software is distributed WITHOUT ANY WARRANTY; without even the
+  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+  See the License for more information.
+============================================================================*/
+#ifndef cm_uv_h
+#define cm_uv_h
+
+/* Use the libuv library configured for CMake.  */
+#include "cmThirdParty.h"
+#ifdef CMAKE_USE_SYSTEM_LIBUV
+#include <uv.h>
+#else
+#include <cmlibuv/include/uv.h>
+#endif
+
+#endif

+ 1 - 0
Utilities/cmlibuv/.gitattributes

@@ -0,0 +1 @@
+* -whitespace

+ 227 - 0
Utilities/cmlibuv/CMakeLists.txt

@@ -0,0 +1,227 @@
+project(libuv C)
+
+# Disable warnings to avoid changing 3rd party code.
+if(CMAKE_C_COMPILER_ID MATCHES
+    "^(GNU|Clang|AppleClang|XL|VisualAge|SunPro|MIPSpro|HP|Intel)$")
+  set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w")
+elseif(CMAKE_C_COMPILER_ID STREQUAL "PathScale")
+  set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -woffall")
+endif()
+
+find_package(Threads)
+
+set(uv_libraries ${CMAKE_THREAD_LIBS_INIT})
+set(uv_includes include src)
+set(uv_headers
+  include/uv.h
+  include/uv-errno.h
+  include/uv-threadpool.h
+  include/uv-version.h
+  )
+set(uv_sources
+  src/fs-poll.c
+  src/heap-inl.h
+  src/inet.c
+  src/queue.h
+  src/threadpool.c
+  src/uv-common.c
+  src/uv-common.h
+  src/version.c
+  )
+if(WIN32)
+  list(APPEND uv_libraries
+    ws2_32
+    psapi
+    iphlpapi
+    shell32
+    userenv
+    )
+  list(APPEND uv_includes
+    src/win
+    )
+  list(APPEND uv_defines
+    WIN32_LEAN_AND_MEAN
+    _WIN32_WINNT=0x0600
+    )
+  list(APPEND uv_headers
+    include/uv-win.h
+    include/tree.h
+    )
+  list(APPEND uv_sources
+    src/win/async.c
+    src/win/atomicops-inl.h
+    src/win/core.c
+    src/win/detect-wakeup.c
+    src/win/dl.c
+    src/win/error.c
+    src/win/fs-event.c
+    src/win/fs.c
+    src/win/getaddrinfo.c
+    src/win/getnameinfo.c
+    src/win/handle.c
+    src/win/handle-inl.h
+    src/win/internal.h
+    src/win/loop-watcher.c
+    src/win/pipe.c
+    src/win/poll.c
+    src/win/process-stdio.c
+    src/win/process.c
+    src/win/req.c
+    src/win/req-inl.h
+    src/win/signal.c
+    src/win/snprintf.c
+    src/win/stream.c
+    src/win/stream-inl.h
+    src/win/tcp.c
+    src/win/thread.c
+    src/win/timer.c
+    src/win/tty.c
+    src/win/udp.c
+    src/win/util.c
+    src/win/winapi.c
+    src/win/winapi.h
+    src/win/winsock.c
+    src/win/winsock.h
+    )
+else()
+  list(APPEND uv_includes
+    src/unix
+    )
+  list(APPEND uv_headers
+    include/uv-unix.h
+    )
+  list(APPEND uv_sources
+    src/unix/async.c
+    src/unix/atomic-ops.h
+    src/unix/core.c
+    src/unix/dl.c
+    src/unix/fs.c
+    src/unix/getaddrinfo.c
+    src/unix/getnameinfo.c
+    src/unix/internal.h
+    src/unix/loop-watcher.c
+    src/unix/loop.c
+    src/unix/pipe.c
+    src/unix/poll.c
+    src/unix/process.c
+    src/unix/signal.c
+    src/unix/spinlock.h
+    src/unix/stream.c
+    src/unix/tcp.c
+    src/unix/thread.c
+    src/unix/timer.c
+    src/unix/tty.c
+    src/unix/udp.c
+    )
+endif()
+
+if(CMAKE_SYSTEM_NAME STREQUAL "AIX")
+  list(APPEND uv_libraries
+    perfstat
+    )
+  list(APPEND uv_headers
+    include/uv-aix.h
+    )
+  list(APPEND uv_defines
+    _ALL_SOURCE
+    _XOPEN_SOURCE=500
+    _LINUX_SOURCE_COMPAT
+    _THREAD_SAFE
+    )
+  list(APPEND uv_sources
+    src/unix/aix.c
+    )
+endif()
+
+if(CMAKE_SYSTEM_NAME STREQUAL "Darwin")
+  list(APPEND uv_headers
+    include/uv-darwin.h
+    include/pthread-barrier.h
+    )
+  list(APPEND uv_defines
+    _DARWIN_USE_64_BIT_INODE=1
+    _DARWIN_UNLIMITED_SELECT=1
+    )
+  list(APPEND uv_sources
+    src/unix/darwin.c
+    src/unix/darwin-proctitle.c
+    src/unix/fsevents.c
+    src/unix/kqueue.c
+    src/unix/proctitle.c
+    src/unix/pthread-barrier.c
+    )
+endif()
+
+if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
+  list(APPEND uv_libraries dl rt)
+  list(APPEND uv_headers
+    include/uv-linux.h
+    )
+  list(APPEND uv_defines _GNU_SOURCE)
+  list(APPEND uv_sources
+    src/unix/linux-core.c
+    src/unix/linux-inotify.c
+    src/unix/linux-syscalls.c
+    src/unix/linux-syscalls.h
+    src/unix/proctitle.c
+    )
+endif()
+
+if(CMAKE_SYSTEM_NAME STREQUAL "FreeBSD")
+  list(APPEND uv_headers
+    include/uv-bsd.h
+    )
+  list(APPEND uv_sources
+    src/unix/freebsd.c
+    src/unix/kqueue.c
+    )
+endif()
+
+if(CMAKE_SYSTEM_NAME STREQUAL "NetBSD")
+  list(APPEND uv_headers
+    include/uv-bsd.h
+    )
+  list(APPEND uv_sources
+    src/unix/netbsd.c
+    src/unix/kqueue.c
+    )
+endif()
+
+if(CMAKE_SYSTEM_NAME STREQUAL "OpenBSD")
+  list(APPEND uv_headers
+    include/uv-bsd.h
+    )
+  list(APPEND uv_sources
+    src/unix/openbsd.c
+    src/unix/kqueue.c
+    )
+endif()
+
+if(CMAKE_SYSTEM_NAME STREQUAL "SunOS")
+  list(APPEND uv_libraries
+    kstat
+    nsl
+    sendfile
+    socket
+    )
+  list(APPEND uv_headers
+    include/uv-sunos.h
+    )
+  list(APPEND uv_defines
+    __EXTENSIONS__
+    _XOPEN_SOURCE=500
+    )
+  list(APPEND uv_sources
+    src/unix/sunos.c
+    )
+endif()
+
+include_directories(
+  ${uv_includes}
+  ${KWSYS_HEADER_ROOT}
+  )
+add_library(cmlibuv STATIC ${uv_sources})
+target_link_libraries(cmlibuv ${uv_libraries})
+set_property(TARGET cmlibuv PROPERTY COMPILE_DEFINITIONS ${uv_defines})
+
+install(FILES LICENSE DESTINATION ${CMAKE_DOC_DIR}/cmlibuv)

+ 70 - 0
Utilities/cmlibuv/LICENSE

@@ -0,0 +1,70 @@
+libuv is licensed for use as follows:
+
+====
+Copyright (c) 2015-present libuv project contributors.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to
+deal in the Software without restriction, including without limitation the
+rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+sell copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+IN THE SOFTWARE.
+====
+
+This license applies to parts of libuv originating from the
+https://github.com/joyent/libuv repository:
+
+====
+
+Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to
+deal in the Software without restriction, including without limitation the
+rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+sell copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+IN THE SOFTWARE.
+
+====
+
+This license applies to all parts of libuv that are not externally
+maintained libraries.
+
+The externally maintained libraries used by libuv are:
+
+  - tree.h (from FreeBSD), copyright Niels Provos. Two clause BSD license.
+
+  - inet_pton and inet_ntop implementations, contained in src/inet.c, are
+    copyright the Internet Systems Consortium, Inc., and licensed under the ISC
+    license.
+
+  - stdint-msvc2008.h (from msinttypes), copyright Alexander Chemeris. Three
+    clause BSD license.
+
+  - pthread-fixes.h, pthread-fixes.c, copyright Google Inc. and Sony Mobile
+    Communications AB. Three clause BSD license.
+
+  - android-ifaddrs.h, android-ifaddrs.c, copyright Berkeley Software Design
+    Inc, Kenneth MacKay and Emergya (Cloud4all, FP7/2007-2013, grant agreement
+    n° 289016). Three clause BSD license.

+ 54 - 0
Utilities/cmlibuv/include/android-ifaddrs.h

@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 1995, 1999
+ *	Berkeley Software Design, Inc.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Berkeley Software Design, Inc. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL Berkeley Software Design, Inc. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	BSDI ifaddrs.h,v 2.5 2000/02/23 14:51:59 dab Exp
+ */
+
+#ifndef	_IFADDRS_H_
+#define	_IFADDRS_H_
+
+struct ifaddrs {
+	struct ifaddrs  *ifa_next;
+	char		*ifa_name;
+	unsigned int	 ifa_flags;
+	struct sockaddr	*ifa_addr;
+	struct sockaddr	*ifa_netmask;
+	struct sockaddr	*ifa_dstaddr;
+	void		*ifa_data;
+};
+
+/*
+ * This may have been defined in <net/if.h>.  Note that if <net/if.h> is
+ * to be included it must be included before this header file.
+ */
+#ifndef	ifa_broadaddr
+#define	ifa_broadaddr	ifa_dstaddr	/* broadcast address interface */
+#endif
+
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+extern int getifaddrs(struct ifaddrs **ifap);
+extern void freeifaddrs(struct ifaddrs *ifa);
+__END_DECLS
+
+#endif

+ 66 - 0
Utilities/cmlibuv/include/pthread-barrier.h

@@ -0,0 +1,66 @@
+/*
+Copyright (c) 2016, Kari Tristan Helgason <[email protected]>
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+#ifndef _UV_PTHREAD_BARRIER_
+#define _UV_PTHREAD_BARRIER_
+#include <errno.h>
+#include <pthread.h>
+#include <semaphore.h> /* sem_t */
+
+#define PTHREAD_BARRIER_SERIAL_THREAD  0x12345
+
+/*
+ * To maintain ABI compatibility with
+ * libuv v1.x struct is padded according
+ * to target platform
+ */
+#if defined(__ANDROID__)
+# define UV_BARRIER_STRUCT_PADDING \
+  sizeof(pthread_mutex_t) + \
+  sizeof(pthread_cond_t) + \
+  sizeof(unsigned int) - \
+  sizeof(void *)
+#elif defined(__APPLE__)
+# define UV_BARRIER_STRUCT_PADDING \
+  sizeof(pthread_mutex_t) + \
+  2 * sizeof(sem_t) + \
+  2 * sizeof(unsigned int) - \
+  sizeof(void *)
+#else
+# define UV_BARRIER_STRUCT_PADDING 0
+#endif
+
+typedef struct {
+  pthread_mutex_t  mutex;
+  pthread_cond_t   cond;
+  unsigned         threshold;
+  unsigned         in;
+  unsigned         out;
+} _uv_barrier;
+
+typedef struct {
+  _uv_barrier* b;
+  char _pad[UV_BARRIER_STRUCT_PADDING];
+} pthread_barrier_t;
+
+int pthread_barrier_init(pthread_barrier_t* barrier,
+                         const void* barrier_attr,
+                         unsigned count);
+
+int pthread_barrier_wait(pthread_barrier_t* barrier);
+int pthread_barrier_destroy(pthread_barrier_t *barrier);
+
+#endif /* _UV_PTHREAD_BARRIER_ */

+ 247 - 0
Utilities/cmlibuv/include/stdint-msvc2008.h

@@ -0,0 +1,247 @@
+// ISO C9x  compliant stdint.h for Microsoft Visual Studio
+// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 
+// 
+//  Copyright (c) 2006-2008 Alexander Chemeris
+// 
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+// 
+//   1. Redistributions of source code must retain the above copyright notice,
+//      this list of conditions and the following disclaimer.
+// 
+//   2. Redistributions in binary form must reproduce the above copyright
+//      notice, this list of conditions and the following disclaimer in the
+//      documentation and/or other materials provided with the distribution.
+// 
+//   3. The name of the author may be used to endorse or promote products
+//      derived from this software without specific prior written permission.
+// 
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// 
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef _MSC_VER // [
+#error "Use this header only with Microsoft Visual C++ compilers!"
+#endif // _MSC_VER ]
+
+#ifndef _MSC_STDINT_H_ // [
+#define _MSC_STDINT_H_
+
+#if _MSC_VER > 1000
+#pragma once
+#endif
+
+#include <limits.h>
+
+// For Visual Studio 6 in C++ mode and for many Visual Studio versions when
+// compiling for ARM we should wrap <wchar.h> include with 'extern "C++" {}'
+// or compiler give many errors like this:
+//   error C2733: second C linkage of overloaded function 'wmemchr' not allowed
+#ifdef __cplusplus
+extern "C" {
+#endif
+#  include <wchar.h>
+#ifdef __cplusplus
+}
+#endif
+
+// Define _W64 macros to mark types changing their size, like intptr_t.
+#ifndef _W64
+#  if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
+#     define _W64 __w64
+#  else
+#     define _W64
+#  endif
+#endif
+
+
+// 7.18.1 Integer types
+
+// 7.18.1.1 Exact-width integer types
+
+// Visual Studio 6 and Embedded Visual C++ 4 doesn't
+// realize that, e.g. char has the same size as __int8
+// so we give up on __intX for them.
+#if (_MSC_VER < 1300)
+   typedef signed char       int8_t;
+   typedef signed short      int16_t;
+   typedef signed int        int32_t;
+   typedef unsigned char     uint8_t;
+   typedef unsigned short    uint16_t;
+   typedef unsigned int      uint32_t;
+#else
+   typedef signed __int8     int8_t;
+   typedef signed __int16    int16_t;
+   typedef signed __int32    int32_t;
+   typedef unsigned __int8   uint8_t;
+   typedef unsigned __int16  uint16_t;
+   typedef unsigned __int32  uint32_t;
+#endif
+typedef signed __int64       int64_t;
+typedef unsigned __int64     uint64_t;
+
+
+// 7.18.1.2 Minimum-width integer types
+typedef int8_t    int_least8_t;
+typedef int16_t   int_least16_t;
+typedef int32_t   int_least32_t;
+typedef int64_t   int_least64_t;
+typedef uint8_t   uint_least8_t;
+typedef uint16_t  uint_least16_t;
+typedef uint32_t  uint_least32_t;
+typedef uint64_t  uint_least64_t;
+
+// 7.18.1.3 Fastest minimum-width integer types
+typedef int8_t    int_fast8_t;
+typedef int16_t   int_fast16_t;
+typedef int32_t   int_fast32_t;
+typedef int64_t   int_fast64_t;
+typedef uint8_t   uint_fast8_t;
+typedef uint16_t  uint_fast16_t;
+typedef uint32_t  uint_fast32_t;
+typedef uint64_t  uint_fast64_t;
+
+// 7.18.1.4 Integer types capable of holding object pointers
+#ifdef _WIN64 // [
+   typedef signed __int64    intptr_t;
+   typedef unsigned __int64  uintptr_t;
+#else // _WIN64 ][
+   typedef _W64 signed int   intptr_t;
+   typedef _W64 unsigned int uintptr_t;
+#endif // _WIN64 ]
+
+// 7.18.1.5 Greatest-width integer types
+typedef int64_t   intmax_t;
+typedef uint64_t  uintmax_t;
+
+
+// 7.18.2 Limits of specified-width integer types
+
+#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [   See footnote 220 at page 257 and footnote 221 at page 259
+
+// 7.18.2.1 Limits of exact-width integer types
+#define INT8_MIN     ((int8_t)_I8_MIN)
+#define INT8_MAX     _I8_MAX
+#define INT16_MIN    ((int16_t)_I16_MIN)
+#define INT16_MAX    _I16_MAX
+#define INT32_MIN    ((int32_t)_I32_MIN)
+#define INT32_MAX    _I32_MAX
+#define INT64_MIN    ((int64_t)_I64_MIN)
+#define INT64_MAX    _I64_MAX
+#define UINT8_MAX    _UI8_MAX
+#define UINT16_MAX   _UI16_MAX
+#define UINT32_MAX   _UI32_MAX
+#define UINT64_MAX   _UI64_MAX
+
+// 7.18.2.2 Limits of minimum-width integer types
+#define INT_LEAST8_MIN    INT8_MIN
+#define INT_LEAST8_MAX    INT8_MAX
+#define INT_LEAST16_MIN   INT16_MIN
+#define INT_LEAST16_MAX   INT16_MAX
+#define INT_LEAST32_MIN   INT32_MIN
+#define INT_LEAST32_MAX   INT32_MAX
+#define INT_LEAST64_MIN   INT64_MIN
+#define INT_LEAST64_MAX   INT64_MAX
+#define UINT_LEAST8_MAX   UINT8_MAX
+#define UINT_LEAST16_MAX  UINT16_MAX
+#define UINT_LEAST32_MAX  UINT32_MAX
+#define UINT_LEAST64_MAX  UINT64_MAX
+
+// 7.18.2.3 Limits of fastest minimum-width integer types
+#define INT_FAST8_MIN    INT8_MIN
+#define INT_FAST8_MAX    INT8_MAX
+#define INT_FAST16_MIN   INT16_MIN
+#define INT_FAST16_MAX   INT16_MAX
+#define INT_FAST32_MIN   INT32_MIN
+#define INT_FAST32_MAX   INT32_MAX
+#define INT_FAST64_MIN   INT64_MIN
+#define INT_FAST64_MAX   INT64_MAX
+#define UINT_FAST8_MAX   UINT8_MAX
+#define UINT_FAST16_MAX  UINT16_MAX
+#define UINT_FAST32_MAX  UINT32_MAX
+#define UINT_FAST64_MAX  UINT64_MAX
+
+// 7.18.2.4 Limits of integer types capable of holding object pointers
+#ifdef _WIN64 // [
+#  define INTPTR_MIN   INT64_MIN
+#  define INTPTR_MAX   INT64_MAX
+#  define UINTPTR_MAX  UINT64_MAX
+#else // _WIN64 ][
+#  define INTPTR_MIN   INT32_MIN
+#  define INTPTR_MAX   INT32_MAX
+#  define UINTPTR_MAX  UINT32_MAX
+#endif // _WIN64 ]
+
+// 7.18.2.5 Limits of greatest-width integer types
+#define INTMAX_MIN   INT64_MIN
+#define INTMAX_MAX   INT64_MAX
+#define UINTMAX_MAX  UINT64_MAX
+
+// 7.18.3 Limits of other integer types
+
+#ifdef _WIN64 // [
+#  define PTRDIFF_MIN  _I64_MIN
+#  define PTRDIFF_MAX  _I64_MAX
+#else  // _WIN64 ][
+#  define PTRDIFF_MIN  _I32_MIN
+#  define PTRDIFF_MAX  _I32_MAX
+#endif  // _WIN64 ]
+
+#define SIG_ATOMIC_MIN  INT_MIN
+#define SIG_ATOMIC_MAX  INT_MAX
+
+#ifndef SIZE_MAX // [
+#  ifdef _WIN64 // [
+#     define SIZE_MAX  _UI64_MAX
+#  else // _WIN64 ][
+#     define SIZE_MAX  _UI32_MAX
+#  endif // _WIN64 ]
+#endif // SIZE_MAX ]
+
+// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
+#ifndef WCHAR_MIN // [
+#  define WCHAR_MIN  0
+#endif  // WCHAR_MIN ]
+#ifndef WCHAR_MAX // [
+#  define WCHAR_MAX  _UI16_MAX
+#endif  // WCHAR_MAX ]
+
+#define WINT_MIN  0
+#define WINT_MAX  _UI16_MAX
+
+#endif // __STDC_LIMIT_MACROS ]
+
+
+// 7.18.4 Limits of other integer types
+
+#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [   See footnote 224 at page 260
+
+// 7.18.4.1 Macros for minimum-width integer constants
+
+#define INT8_C(val)  val##i8
+#define INT16_C(val) val##i16
+#define INT32_C(val) val##i32
+#define INT64_C(val) val##i64
+
+#define UINT8_C(val)  val##ui8
+#define UINT16_C(val) val##ui16
+#define UINT32_C(val) val##ui32
+#define UINT64_C(val) val##ui64
+
+// 7.18.4.2 Macros for greatest-width integer constants
+#define INTMAX_C   INT64_C
+#define UINTMAX_C  UINT64_C
+
+#endif // __STDC_CONSTANT_MACROS ]
+
+
+#endif // _MSC_STDINT_H_ ]

+ 768 - 0
Utilities/cmlibuv/include/tree.h

@@ -0,0 +1,768 @@
+/*-
+ * Copyright 2002 Niels Provos <[email protected]>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef  UV_TREE_H_
+#define  UV_TREE_H_
+
+#ifndef UV__UNUSED
+# if __GNUC__
+#  define UV__UNUSED __attribute__((unused))
+# else
+#  define UV__UNUSED
+# endif
+#endif
+
+/*
+ * This file defines data structures for different types of trees:
+ * splay trees and red-black trees.
+ *
+ * A splay tree is a self-organizing data structure.  Every operation
+ * on the tree causes a splay to happen.  The splay moves the requested
+ * node to the root of the tree and partly rebalances it.
+ *
+ * This has the benefit that request locality causes faster lookups as
+ * the requested nodes move to the top of the tree.  On the other hand,
+ * every lookup causes memory writes.
+ *
+ * The Balance Theorem bounds the total access time for m operations
+ * and n inserts on an initially empty tree as O((m + n)lg n).  The
+ * amortized cost for a sequence of m accesses to a splay tree is O(lg n);
+ *
+ * A red-black tree is a binary search tree with the node color as an
+ * extra attribute.  It fulfills a set of conditions:
+ *  - every search path from the root to a leaf consists of the
+ *    same number of black nodes,
+ *  - each red node (except for the root) has a black parent,
+ *  - each leaf node is black.
+ *
+ * Every operation on a red-black tree is bounded as O(lg n).
+ * The maximum height of a red-black tree is 2lg (n+1).
+ */
+
+#define SPLAY_HEAD(name, type)                                                \
+struct name {                                                                 \
+  struct type *sph_root; /* root of the tree */                               \
+}
+
+#define SPLAY_INITIALIZER(root)                                               \
+  { NULL }
+
+#define SPLAY_INIT(root) do {                                                 \
+  (root)->sph_root = NULL;                                                    \
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_ENTRY(type)                                                     \
+struct {                                                                      \
+  struct type *spe_left;          /* left element */                          \
+  struct type *spe_right;         /* right element */                         \
+}
+
+#define SPLAY_LEFT(elm, field)    (elm)->field.spe_left
+#define SPLAY_RIGHT(elm, field)   (elm)->field.spe_right
+#define SPLAY_ROOT(head)          (head)->sph_root
+#define SPLAY_EMPTY(head)         (SPLAY_ROOT(head) == NULL)
+
+/* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */
+#define SPLAY_ROTATE_RIGHT(head, tmp, field) do {                             \
+  SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field);              \
+  SPLAY_RIGHT(tmp, field) = (head)->sph_root;                                 \
+  (head)->sph_root = tmp;                                                     \
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_ROTATE_LEFT(head, tmp, field) do {                              \
+  SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field);              \
+  SPLAY_LEFT(tmp, field) = (head)->sph_root;                                  \
+  (head)->sph_root = tmp;                                                     \
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_LINKLEFT(head, tmp, field) do {                                 \
+  SPLAY_LEFT(tmp, field) = (head)->sph_root;                                  \
+  tmp = (head)->sph_root;                                                     \
+  (head)->sph_root = SPLAY_LEFT((head)->sph_root, field);                     \
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_LINKRIGHT(head, tmp, field) do {                                \
+  SPLAY_RIGHT(tmp, field) = (head)->sph_root;                                 \
+  tmp = (head)->sph_root;                                                     \
+  (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field);                    \
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_ASSEMBLE(head, node, left, right, field) do {                   \
+  SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field);             \
+  SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field);            \
+  SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field);             \
+  SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field);             \
+} while (/*CONSTCOND*/ 0)
+
+/* Generates prototypes and inline functions */
+
+#define SPLAY_PROTOTYPE(name, type, field, cmp)                               \
+void name##_SPLAY(struct name *, struct type *);                              \
+void name##_SPLAY_MINMAX(struct name *, int);                                 \
+struct type *name##_SPLAY_INSERT(struct name *, struct type *);               \
+struct type *name##_SPLAY_REMOVE(struct name *, struct type *);               \
+                                                                              \
+/* Finds the node with the same key as elm */                                 \
+static __inline struct type *                                                 \
+name##_SPLAY_FIND(struct name *head, struct type *elm)                        \
+{                                                                             \
+  if (SPLAY_EMPTY(head))                                                      \
+    return(NULL);                                                             \
+  name##_SPLAY(head, elm);                                                    \
+  if ((cmp)(elm, (head)->sph_root) == 0)                                      \
+    return (head->sph_root);                                                  \
+  return (NULL);                                                              \
+}                                                                             \
+                                                                              \
+static __inline struct type *                                                 \
+name##_SPLAY_NEXT(struct name *head, struct type *elm)                        \
+{                                                                             \
+  name##_SPLAY(head, elm);                                                    \
+  if (SPLAY_RIGHT(elm, field) != NULL) {                                      \
+    elm = SPLAY_RIGHT(elm, field);                                            \
+    while (SPLAY_LEFT(elm, field) != NULL) {                                  \
+      elm = SPLAY_LEFT(elm, field);                                           \
+    }                                                                         \
+  } else                                                                      \
+    elm = NULL;                                                               \
+  return (elm);                                                               \
+}                                                                             \
+                                                                              \
+static __inline struct type *                                                 \
+name##_SPLAY_MIN_MAX(struct name *head, int val)                              \
+{                                                                             \
+  name##_SPLAY_MINMAX(head, val);                                             \
+  return (SPLAY_ROOT(head));                                                  \
+}
+
+/* Main splay operation.
+ * Moves node close to the key of elm to top
+ */
+#define SPLAY_GENERATE(name, type, field, cmp)                                \
+struct type *                                                                 \
+name##_SPLAY_INSERT(struct name *head, struct type *elm)                      \
+{                                                                             \
+    if (SPLAY_EMPTY(head)) {                                                  \
+      SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL;                \
+    } else {                                                                  \
+      int __comp;                                                             \
+      name##_SPLAY(head, elm);                                                \
+      __comp = (cmp)(elm, (head)->sph_root);                                  \
+      if(__comp < 0) {                                                        \
+        SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field);         \
+        SPLAY_RIGHT(elm, field) = (head)->sph_root;                           \
+        SPLAY_LEFT((head)->sph_root, field) = NULL;                           \
+      } else if (__comp > 0) {                                                \
+        SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field);       \
+        SPLAY_LEFT(elm, field) = (head)->sph_root;                            \
+        SPLAY_RIGHT((head)->sph_root, field) = NULL;                          \
+      } else                                                                  \
+        return ((head)->sph_root);                                            \
+    }                                                                         \
+    (head)->sph_root = (elm);                                                 \
+    return (NULL);                                                            \
+}                                                                             \
+                                                                              \
+struct type *                                                                 \
+name##_SPLAY_REMOVE(struct name *head, struct type *elm)                      \
+{                                                                             \
+  struct type *__tmp;                                                         \
+  if (SPLAY_EMPTY(head))                                                      \
+    return (NULL);                                                            \
+  name##_SPLAY(head, elm);                                                    \
+  if ((cmp)(elm, (head)->sph_root) == 0) {                                    \
+    if (SPLAY_LEFT((head)->sph_root, field) == NULL) {                        \
+      (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field);                \
+    } else {                                                                  \
+      __tmp = SPLAY_RIGHT((head)->sph_root, field);                           \
+      (head)->sph_root = SPLAY_LEFT((head)->sph_root, field);                 \
+      name##_SPLAY(head, elm);                                                \
+      SPLAY_RIGHT((head)->sph_root, field) = __tmp;                           \
+    }                                                                         \
+    return (elm);                                                             \
+  }                                                                           \
+  return (NULL);                                                              \
+}                                                                             \
+                                                                              \
+void                                                                          \
+name##_SPLAY(struct name *head, struct type *elm)                             \
+{                                                                             \
+  struct type __node, *__left, *__right, *__tmp;                              \
+  int __comp;                                                                 \
+                                                                              \
+  SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;            \
+  __left = __right = &__node;                                                 \
+                                                                              \
+  while ((__comp = (cmp)(elm, (head)->sph_root)) != 0) {                      \
+    if (__comp < 0) {                                                         \
+      __tmp = SPLAY_LEFT((head)->sph_root, field);                            \
+      if (__tmp == NULL)                                                      \
+        break;                                                                \
+      if ((cmp)(elm, __tmp) < 0){                                             \
+        SPLAY_ROTATE_RIGHT(head, __tmp, field);                               \
+        if (SPLAY_LEFT((head)->sph_root, field) == NULL)                      \
+          break;                                                              \
+      }                                                                       \
+      SPLAY_LINKLEFT(head, __right, field);                                   \
+    } else if (__comp > 0) {                                                  \
+      __tmp = SPLAY_RIGHT((head)->sph_root, field);                           \
+      if (__tmp == NULL)                                                      \
+        break;                                                                \
+      if ((cmp)(elm, __tmp) > 0){                                             \
+        SPLAY_ROTATE_LEFT(head, __tmp, field);                                \
+        if (SPLAY_RIGHT((head)->sph_root, field) == NULL)                     \
+          break;                                                              \
+      }                                                                       \
+      SPLAY_LINKRIGHT(head, __left, field);                                   \
+    }                                                                         \
+  }                                                                           \
+  SPLAY_ASSEMBLE(head, &__node, __left, __right, field);                      \
+}                                                                             \
+                                                                              \
+/* Splay with either the minimum or the maximum element                       \
+ * Used to find minimum or maximum element in tree.                           \
+ */                                                                           \
+void name##_SPLAY_MINMAX(struct name *head, int __comp)                       \
+{                                                                             \
+  struct type __node, *__left, *__right, *__tmp;                              \
+                                                                              \
+  SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;            \
+  __left = __right = &__node;                                                 \
+                                                                              \
+  while (1) {                                                                 \
+    if (__comp < 0) {                                                         \
+      __tmp = SPLAY_LEFT((head)->sph_root, field);                            \
+      if (__tmp == NULL)                                                      \
+        break;                                                                \
+      if (__comp < 0){                                                        \
+        SPLAY_ROTATE_RIGHT(head, __tmp, field);                               \
+        if (SPLAY_LEFT((head)->sph_root, field) == NULL)                      \
+          break;                                                              \
+      }                                                                       \
+      SPLAY_LINKLEFT(head, __right, field);                                   \
+    } else if (__comp > 0) {                                                  \
+      __tmp = SPLAY_RIGHT((head)->sph_root, field);                           \
+      if (__tmp == NULL)                                                      \
+        break;                                                                \
+      if (__comp > 0) {                                                       \
+        SPLAY_ROTATE_LEFT(head, __tmp, field);                                \
+        if (SPLAY_RIGHT((head)->sph_root, field) == NULL)                     \
+          break;                                                              \
+      }                                                                       \
+      SPLAY_LINKRIGHT(head, __left, field);                                   \
+    }                                                                         \
+  }                                                                           \
+  SPLAY_ASSEMBLE(head, &__node, __left, __right, field);                      \
+}
+
+#define SPLAY_NEGINF  -1
+#define SPLAY_INF     1
+
+#define SPLAY_INSERT(name, x, y)  name##_SPLAY_INSERT(x, y)
+#define SPLAY_REMOVE(name, x, y)  name##_SPLAY_REMOVE(x, y)
+#define SPLAY_FIND(name, x, y)    name##_SPLAY_FIND(x, y)
+#define SPLAY_NEXT(name, x, y)    name##_SPLAY_NEXT(x, y)
+#define SPLAY_MIN(name, x)        (SPLAY_EMPTY(x) ? NULL                      \
+                                  : name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF))
+#define SPLAY_MAX(name, x)        (SPLAY_EMPTY(x) ? NULL                      \
+                                  : name##_SPLAY_MIN_MAX(x, SPLAY_INF))
+
+#define SPLAY_FOREACH(x, name, head)                                          \
+  for ((x) = SPLAY_MIN(name, head);                                           \
+       (x) != NULL;                                                           \
+       (x) = SPLAY_NEXT(name, head, x))
+
+/* Macros that define a red-black tree */
+#define RB_HEAD(name, type)                                                   \
+struct name {                                                                 \
+  struct type *rbh_root; /* root of the tree */                               \
+}
+
+#define RB_INITIALIZER(root)                                                  \
+  { NULL }
+
+#define RB_INIT(root) do {                                                    \
+  (root)->rbh_root = NULL;                                                    \
+} while (/*CONSTCOND*/ 0)
+
+#define RB_BLACK  0
+#define RB_RED    1
+#define RB_ENTRY(type)                                                        \
+struct {                                                                      \
+  struct type *rbe_left;        /* left element */                            \
+  struct type *rbe_right;       /* right element */                           \
+  struct type *rbe_parent;      /* parent element */                          \
+  int rbe_color;                /* node color */                              \
+}
+
+#define RB_LEFT(elm, field)     (elm)->field.rbe_left
+#define RB_RIGHT(elm, field)    (elm)->field.rbe_right
+#define RB_PARENT(elm, field)   (elm)->field.rbe_parent
+#define RB_COLOR(elm, field)    (elm)->field.rbe_color
+#define RB_ROOT(head)           (head)->rbh_root
+#define RB_EMPTY(head)          (RB_ROOT(head) == NULL)
+
+#define RB_SET(elm, parent, field) do {                                       \
+  RB_PARENT(elm, field) = parent;                                             \
+  RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL;                          \
+  RB_COLOR(elm, field) = RB_RED;                                              \
+} while (/*CONSTCOND*/ 0)
+
+#define RB_SET_BLACKRED(black, red, field) do {                               \
+  RB_COLOR(black, field) = RB_BLACK;                                          \
+  RB_COLOR(red, field) = RB_RED;                                              \
+} while (/*CONSTCOND*/ 0)
+
+#ifndef RB_AUGMENT
+#define RB_AUGMENT(x)  do {} while (0)
+#endif
+
+#define RB_ROTATE_LEFT(head, elm, tmp, field) do {                            \
+  (tmp) = RB_RIGHT(elm, field);                                               \
+  if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field)) != NULL) {                 \
+    RB_PARENT(RB_LEFT(tmp, field), field) = (elm);                            \
+  }                                                                           \
+  RB_AUGMENT(elm);                                                            \
+  if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) {              \
+    if ((elm) == RB_LEFT(RB_PARENT(elm, field), field))                       \
+      RB_LEFT(RB_PARENT(elm, field), field) = (tmp);                          \
+    else                                                                      \
+      RB_RIGHT(RB_PARENT(elm, field), field) = (tmp);                         \
+  } else                                                                      \
+    (head)->rbh_root = (tmp);                                                 \
+  RB_LEFT(tmp, field) = (elm);                                                \
+  RB_PARENT(elm, field) = (tmp);                                              \
+  RB_AUGMENT(tmp);                                                            \
+  if ((RB_PARENT(tmp, field)))                                                \
+    RB_AUGMENT(RB_PARENT(tmp, field));                                        \
+} while (/*CONSTCOND*/ 0)
+
+#define RB_ROTATE_RIGHT(head, elm, tmp, field) do {                           \
+  (tmp) = RB_LEFT(elm, field);                                                \
+  if ((RB_LEFT(elm, field) = RB_RIGHT(tmp, field)) != NULL) {                 \
+    RB_PARENT(RB_RIGHT(tmp, field), field) = (elm);                           \
+  }                                                                           \
+  RB_AUGMENT(elm);                                                            \
+  if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) {              \
+    if ((elm) == RB_LEFT(RB_PARENT(elm, field), field))                       \
+      RB_LEFT(RB_PARENT(elm, field), field) = (tmp);                          \
+    else                                                                      \
+      RB_RIGHT(RB_PARENT(elm, field), field) = (tmp);                         \
+  } else                                                                      \
+    (head)->rbh_root = (tmp);                                                 \
+  RB_RIGHT(tmp, field) = (elm);                                               \
+  RB_PARENT(elm, field) = (tmp);                                              \
+  RB_AUGMENT(tmp);                                                            \
+  if ((RB_PARENT(tmp, field)))                                                \
+    RB_AUGMENT(RB_PARENT(tmp, field));                                        \
+} while (/*CONSTCOND*/ 0)
+
+/* Generates prototypes and inline functions */
+#define  RB_PROTOTYPE(name, type, field, cmp)                                 \
+  RB_PROTOTYPE_INTERNAL(name, type, field, cmp,)
+#define  RB_PROTOTYPE_STATIC(name, type, field, cmp)                          \
+  RB_PROTOTYPE_INTERNAL(name, type, field, cmp, UV__UNUSED static)
+#define RB_PROTOTYPE_INTERNAL(name, type, field, cmp, attr)                   \
+attr void name##_RB_INSERT_COLOR(struct name *, struct type *);               \
+attr void name##_RB_REMOVE_COLOR(struct name *, struct type *, struct type *);\
+attr struct type *name##_RB_REMOVE(struct name *, struct type *);             \
+attr struct type *name##_RB_INSERT(struct name *, struct type *);             \
+attr struct type *name##_RB_FIND(struct name *, struct type *);               \
+attr struct type *name##_RB_NFIND(struct name *, struct type *);              \
+attr struct type *name##_RB_NEXT(struct type *);                              \
+attr struct type *name##_RB_PREV(struct type *);                              \
+attr struct type *name##_RB_MINMAX(struct name *, int);                       \
+                                                                              \
+
+/* Main rb operation.
+ * Moves node close to the key of elm to top
+ */
+#define  RB_GENERATE(name, type, field, cmp)                                  \
+  RB_GENERATE_INTERNAL(name, type, field, cmp,)
+#define  RB_GENERATE_STATIC(name, type, field, cmp)                           \
+  RB_GENERATE_INTERNAL(name, type, field, cmp, UV__UNUSED static)
+#define RB_GENERATE_INTERNAL(name, type, field, cmp, attr)                    \
+attr void                                                                     \
+name##_RB_INSERT_COLOR(struct name *head, struct type *elm)                   \
+{                                                                             \
+  struct type *parent, *gparent, *tmp;                                        \
+  while ((parent = RB_PARENT(elm, field)) != NULL &&                          \
+      RB_COLOR(parent, field) == RB_RED) {                                    \
+    gparent = RB_PARENT(parent, field);                                       \
+    if (parent == RB_LEFT(gparent, field)) {                                  \
+      tmp = RB_RIGHT(gparent, field);                                         \
+      if (tmp && RB_COLOR(tmp, field) == RB_RED) {                            \
+        RB_COLOR(tmp, field) = RB_BLACK;                                      \
+        RB_SET_BLACKRED(parent, gparent, field);                              \
+        elm = gparent;                                                        \
+        continue;                                                             \
+      }                                                                       \
+      if (RB_RIGHT(parent, field) == elm) {                                   \
+        RB_ROTATE_LEFT(head, parent, tmp, field);                             \
+        tmp = parent;                                                         \
+        parent = elm;                                                         \
+        elm = tmp;                                                            \
+      }                                                                       \
+      RB_SET_BLACKRED(parent, gparent, field);                                \
+      RB_ROTATE_RIGHT(head, gparent, tmp, field);                             \
+    } else {                                                                  \
+      tmp = RB_LEFT(gparent, field);                                          \
+      if (tmp && RB_COLOR(tmp, field) == RB_RED) {                            \
+        RB_COLOR(tmp, field) = RB_BLACK;                                      \
+        RB_SET_BLACKRED(parent, gparent, field);                              \
+        elm = gparent;                                                        \
+        continue;                                                             \
+      }                                                                       \
+      if (RB_LEFT(parent, field) == elm) {                                    \
+        RB_ROTATE_RIGHT(head, parent, tmp, field);                            \
+        tmp = parent;                                                         \
+        parent = elm;                                                         \
+        elm = tmp;                                                            \
+      }                                                                       \
+      RB_SET_BLACKRED(parent, gparent, field);                                \
+      RB_ROTATE_LEFT(head, gparent, tmp, field);                              \
+    }                                                                         \
+  }                                                                           \
+  RB_COLOR(head->rbh_root, field) = RB_BLACK;                                 \
+}                                                                             \
+                                                                              \
+attr void                                                                     \
+name##_RB_REMOVE_COLOR(struct name *head, struct type *parent,                \
+    struct type *elm)                                                         \
+{                                                                             \
+  struct type *tmp;                                                           \
+  while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) &&                 \
+      elm != RB_ROOT(head)) {                                                 \
+    if (RB_LEFT(parent, field) == elm) {                                      \
+      tmp = RB_RIGHT(parent, field);                                          \
+      if (RB_COLOR(tmp, field) == RB_RED) {                                   \
+        RB_SET_BLACKRED(tmp, parent, field);                                  \
+        RB_ROTATE_LEFT(head, parent, tmp, field);                             \
+        tmp = RB_RIGHT(parent, field);                                        \
+      }                                                                       \
+      if ((RB_LEFT(tmp, field) == NULL ||                                     \
+          RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&                \
+          (RB_RIGHT(tmp, field) == NULL ||                                    \
+          RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {               \
+        RB_COLOR(tmp, field) = RB_RED;                                        \
+        elm = parent;                                                         \
+        parent = RB_PARENT(elm, field);                                       \
+      } else {                                                                \
+        if (RB_RIGHT(tmp, field) == NULL ||                                   \
+            RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK) {              \
+          struct type *oleft;                                                 \
+          if ((oleft = RB_LEFT(tmp, field))                                   \
+              != NULL)                                                        \
+            RB_COLOR(oleft, field) = RB_BLACK;                                \
+          RB_COLOR(tmp, field) = RB_RED;                                      \
+          RB_ROTATE_RIGHT(head, tmp, oleft, field);                           \
+          tmp = RB_RIGHT(parent, field);                                      \
+        }                                                                     \
+        RB_COLOR(tmp, field) = RB_COLOR(parent, field);                       \
+        RB_COLOR(parent, field) = RB_BLACK;                                   \
+        if (RB_RIGHT(tmp, field))                                             \
+          RB_COLOR(RB_RIGHT(tmp, field), field) = RB_BLACK;                   \
+        RB_ROTATE_LEFT(head, parent, tmp, field);                             \
+        elm = RB_ROOT(head);                                                  \
+        break;                                                                \
+      }                                                                       \
+    } else {                                                                  \
+      tmp = RB_LEFT(parent, field);                                           \
+      if (RB_COLOR(tmp, field) == RB_RED) {                                   \
+        RB_SET_BLACKRED(tmp, parent, field);                                  \
+        RB_ROTATE_RIGHT(head, parent, tmp, field);                            \
+        tmp = RB_LEFT(parent, field);                                         \
+      }                                                                       \
+      if ((RB_LEFT(tmp, field) == NULL ||                                     \
+          RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&                \
+          (RB_RIGHT(tmp, field) == NULL ||                                    \
+          RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {               \
+        RB_COLOR(tmp, field) = RB_RED;                                        \
+        elm = parent;                                                         \
+        parent = RB_PARENT(elm, field);                                       \
+      } else {                                                                \
+        if (RB_LEFT(tmp, field) == NULL ||                                    \
+            RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) {               \
+          struct type *oright;                                                \
+          if ((oright = RB_RIGHT(tmp, field))                                 \
+              != NULL)                                                        \
+            RB_COLOR(oright, field) = RB_BLACK;                               \
+          RB_COLOR(tmp, field) = RB_RED;                                      \
+          RB_ROTATE_LEFT(head, tmp, oright, field);                           \
+          tmp = RB_LEFT(parent, field);                                       \
+        }                                                                     \
+        RB_COLOR(tmp, field) = RB_COLOR(parent, field);                       \
+        RB_COLOR(parent, field) = RB_BLACK;                                   \
+        if (RB_LEFT(tmp, field))                                              \
+          RB_COLOR(RB_LEFT(tmp, field), field) = RB_BLACK;                    \
+        RB_ROTATE_RIGHT(head, parent, tmp, field);                            \
+        elm = RB_ROOT(head);                                                  \
+        break;                                                                \
+      }                                                                       \
+    }                                                                         \
+  }                                                                           \
+  if (elm)                                                                    \
+    RB_COLOR(elm, field) = RB_BLACK;                                          \
+}                                                                             \
+                                                                              \
+attr struct type *                                                            \
+name##_RB_REMOVE(struct name *head, struct type *elm)                         \
+{                                                                             \
+  struct type *child, *parent, *old = elm;                                    \
+  int color;                                                                  \
+  if (RB_LEFT(elm, field) == NULL)                                            \
+    child = RB_RIGHT(elm, field);                                             \
+  else if (RB_RIGHT(elm, field) == NULL)                                      \
+    child = RB_LEFT(elm, field);                                              \
+  else {                                                                      \
+    struct type *left;                                                        \
+    elm = RB_RIGHT(elm, field);                                               \
+    while ((left = RB_LEFT(elm, field)) != NULL)                              \
+      elm = left;                                                             \
+    child = RB_RIGHT(elm, field);                                             \
+    parent = RB_PARENT(elm, field);                                           \
+    color = RB_COLOR(elm, field);                                             \
+    if (child)                                                                \
+      RB_PARENT(child, field) = parent;                                       \
+    if (parent) {                                                             \
+      if (RB_LEFT(parent, field) == elm)                                      \
+        RB_LEFT(parent, field) = child;                                       \
+      else                                                                    \
+        RB_RIGHT(parent, field) = child;                                      \
+      RB_AUGMENT(parent);                                                     \
+    } else                                                                    \
+      RB_ROOT(head) = child;                                                  \
+    if (RB_PARENT(elm, field) == old)                                         \
+      parent = elm;                                                           \
+    (elm)->field = (old)->field;                                              \
+    if (RB_PARENT(old, field)) {                                              \
+      if (RB_LEFT(RB_PARENT(old, field), field) == old)                       \
+        RB_LEFT(RB_PARENT(old, field), field) = elm;                          \
+      else                                                                    \
+        RB_RIGHT(RB_PARENT(old, field), field) = elm;                         \
+      RB_AUGMENT(RB_PARENT(old, field));                                      \
+    } else                                                                    \
+      RB_ROOT(head) = elm;                                                    \
+    RB_PARENT(RB_LEFT(old, field), field) = elm;                              \
+    if (RB_RIGHT(old, field))                                                 \
+      RB_PARENT(RB_RIGHT(old, field), field) = elm;                           \
+    if (parent) {                                                             \
+      left = parent;                                                          \
+      do {                                                                    \
+        RB_AUGMENT(left);                                                     \
+      } while ((left = RB_PARENT(left, field)) != NULL);                      \
+    }                                                                         \
+    goto color;                                                               \
+  }                                                                           \
+  parent = RB_PARENT(elm, field);                                             \
+  color = RB_COLOR(elm, field);                                               \
+  if (child)                                                                  \
+    RB_PARENT(child, field) = parent;                                         \
+  if (parent) {                                                               \
+    if (RB_LEFT(parent, field) == elm)                                        \
+      RB_LEFT(parent, field) = child;                                         \
+    else                                                                      \
+      RB_RIGHT(parent, field) = child;                                        \
+    RB_AUGMENT(parent);                                                       \
+  } else                                                                      \
+    RB_ROOT(head) = child;                                                    \
+color:                                                                        \
+  if (color == RB_BLACK)                                                      \
+    name##_RB_REMOVE_COLOR(head, parent, child);                              \
+  return (old);                                                               \
+}                                                                             \
+                                                                              \
+/* Inserts a node into the RB tree */                                         \
+attr struct type *                                                            \
+name##_RB_INSERT(struct name *head, struct type *elm)                         \
+{                                                                             \
+  struct type *tmp;                                                           \
+  struct type *parent = NULL;                                                 \
+  int comp = 0;                                                               \
+  tmp = RB_ROOT(head);                                                        \
+  while (tmp) {                                                               \
+    parent = tmp;                                                             \
+    comp = (cmp)(elm, parent);                                                \
+    if (comp < 0)                                                             \
+      tmp = RB_LEFT(tmp, field);                                              \
+    else if (comp > 0)                                                        \
+      tmp = RB_RIGHT(tmp, field);                                             \
+    else                                                                      \
+      return (tmp);                                                           \
+  }                                                                           \
+  RB_SET(elm, parent, field);                                                 \
+  if (parent != NULL) {                                                       \
+    if (comp < 0)                                                             \
+      RB_LEFT(parent, field) = elm;                                           \
+    else                                                                      \
+      RB_RIGHT(parent, field) = elm;                                          \
+    RB_AUGMENT(parent);                                                       \
+  } else                                                                      \
+    RB_ROOT(head) = elm;                                                      \
+  name##_RB_INSERT_COLOR(head, elm);                                          \
+  return (NULL);                                                              \
+}                                                                             \
+                                                                              \
+/* Finds the node with the same key as elm */                                 \
+attr struct type *                                                            \
+name##_RB_FIND(struct name *head, struct type *elm)                           \
+{                                                                             \
+  struct type *tmp = RB_ROOT(head);                                           \
+  int comp;                                                                   \
+  while (tmp) {                                                               \
+    comp = cmp(elm, tmp);                                                     \
+    if (comp < 0)                                                             \
+      tmp = RB_LEFT(tmp, field);                                              \
+    else if (comp > 0)                                                        \
+      tmp = RB_RIGHT(tmp, field);                                             \
+    else                                                                      \
+      return (tmp);                                                           \
+  }                                                                           \
+  return (NULL);                                                              \
+}                                                                             \
+                                                                              \
+/* Finds the first node greater than or equal to the search key */            \
+attr struct type *                                                            \
+name##_RB_NFIND(struct name *head, struct type *elm)                          \
+{                                                                             \
+  struct type *tmp = RB_ROOT(head);                                           \
+  struct type *res = NULL;                                                    \
+  int comp;                                                                   \
+  while (tmp) {                                                               \
+    comp = cmp(elm, tmp);                                                     \
+    if (comp < 0) {                                                           \
+      res = tmp;                                                              \
+      tmp = RB_LEFT(tmp, field);                                              \
+    }                                                                         \
+    else if (comp > 0)                                                        \
+      tmp = RB_RIGHT(tmp, field);                                             \
+    else                                                                      \
+      return (tmp);                                                           \
+  }                                                                           \
+  return (res);                                                               \
+}                                                                             \
+                                                                              \
+/* ARGSUSED */                                                                \
+attr struct type *                                                            \
+name##_RB_NEXT(struct type *elm)                                              \
+{                                                                             \
+  if (RB_RIGHT(elm, field)) {                                                 \
+    elm = RB_RIGHT(elm, field);                                               \
+    while (RB_LEFT(elm, field))                                               \
+      elm = RB_LEFT(elm, field);                                              \
+  } else {                                                                    \
+    if (RB_PARENT(elm, field) &&                                              \
+        (elm == RB_LEFT(RB_PARENT(elm, field), field)))                       \
+      elm = RB_PARENT(elm, field);                                            \
+    else {                                                                    \
+      while (RB_PARENT(elm, field) &&                                         \
+          (elm == RB_RIGHT(RB_PARENT(elm, field), field)))                    \
+        elm = RB_PARENT(elm, field);                                          \
+      elm = RB_PARENT(elm, field);                                            \
+    }                                                                         \
+  }                                                                           \
+  return (elm);                                                               \
+}                                                                             \
+                                                                              \
+/* ARGSUSED */                                                                \
+attr struct type *                                                            \
+name##_RB_PREV(struct type *elm)                                              \
+{                                                                             \
+  if (RB_LEFT(elm, field)) {                                                  \
+    elm = RB_LEFT(elm, field);                                                \
+    while (RB_RIGHT(elm, field))                                              \
+      elm = RB_RIGHT(elm, field);                                             \
+  } else {                                                                    \
+    if (RB_PARENT(elm, field) &&                                              \
+        (elm == RB_RIGHT(RB_PARENT(elm, field), field)))                      \
+      elm = RB_PARENT(elm, field);                                            \
+    else {                                                                    \
+      while (RB_PARENT(elm, field) &&                                         \
+          (elm == RB_LEFT(RB_PARENT(elm, field), field)))                     \
+        elm = RB_PARENT(elm, field);                                          \
+      elm = RB_PARENT(elm, field);                                            \
+    }                                                                         \
+  }                                                                           \
+  return (elm);                                                               \
+}                                                                             \
+                                                                              \
+attr struct type *                                                            \
+name##_RB_MINMAX(struct name *head, int val)                                  \
+{                                                                             \
+  struct type *tmp = RB_ROOT(head);                                           \
+  struct type *parent = NULL;                                                 \
+  while (tmp) {                                                               \
+    parent = tmp;                                                             \
+    if (val < 0)                                                              \
+      tmp = RB_LEFT(tmp, field);                                              \
+    else                                                                      \
+      tmp = RB_RIGHT(tmp, field);                                             \
+  }                                                                           \
+  return (parent);                                                            \
+}
+
+#define RB_NEGINF   -1
+#define RB_INF      1
+
+#define RB_INSERT(name, x, y)   name##_RB_INSERT(x, y)
+#define RB_REMOVE(name, x, y)   name##_RB_REMOVE(x, y)
+#define RB_FIND(name, x, y)     name##_RB_FIND(x, y)
+#define RB_NFIND(name, x, y)    name##_RB_NFIND(x, y)
+#define RB_NEXT(name, x, y)     name##_RB_NEXT(y)
+#define RB_PREV(name, x, y)     name##_RB_PREV(y)
+#define RB_MIN(name, x)         name##_RB_MINMAX(x, RB_NEGINF)
+#define RB_MAX(name, x)         name##_RB_MINMAX(x, RB_INF)
+
+#define RB_FOREACH(x, name, head)                                             \
+  for ((x) = RB_MIN(name, head);                                              \
+       (x) != NULL;                                                           \
+       (x) = name##_RB_NEXT(x))
+
+#define RB_FOREACH_FROM(x, name, y)                                           \
+  for ((x) = (y);                                                             \
+      ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL);                \
+       (x) = (y))
+
+#define RB_FOREACH_SAFE(x, name, head, y)                                     \
+  for ((x) = RB_MIN(name, head);                                              \
+      ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL);                \
+       (x) = (y))
+
+#define RB_FOREACH_REVERSE(x, name, head)                                     \
+  for ((x) = RB_MAX(name, head);                                              \
+       (x) != NULL;                                                           \
+       (x) = name##_RB_PREV(x))
+
+#define RB_FOREACH_REVERSE_FROM(x, name, y)                                   \
+  for ((x) = (y);                                                             \
+      ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL);                \
+       (x) = (y))
+
+#define RB_FOREACH_REVERSE_SAFE(x, name, head, y)                             \
+  for ((x) = RB_MAX(name, head);                                              \
+      ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL);                \
+       (x) = (y))
+
+#endif  /* UV_TREE_H_ */
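
For orientation: the macros above are the stock BSD splay/red-black tree templates; libuv's Unix code instantiates the red-black half (for example for its signal watchers). Below is a minimal, self-contained sketch of how the RB_* API is meant to be used. The type and function names (my_node, my_tree, my_cmp) and the include path are hypothetical, not taken from this import.

  /* Minimal sketch of the RB_* API above.  Names are hypothetical; the
   * include path assumes this header is on the compiler's search path. */
  #include <stdio.h>
  #include "tree.h"

  struct my_node {
    int key;
    RB_ENTRY(my_node) link;       /* embeds left/right/parent/color fields */
  };

  static int my_cmp(struct my_node* a, struct my_node* b) {
    return a->key < b->key ? -1 : a->key > b->key;
  }

  RB_HEAD(my_tree, my_node);                  /* declares struct my_tree */
  RB_GENERATE(my_tree, my_node, link, my_cmp) /* emits my_tree_RB_INSERT, ... */

  int main(void) {
    struct my_tree head = RB_INITIALIZER(&head);
    struct my_node a, b, *it;

    a.key = 2;
    b.key = 1;
    RB_INSERT(my_tree, &head, &a);            /* returns NULL on success */
    RB_INSERT(my_tree, &head, &b);

    RB_FOREACH(it, my_tree, &head)            /* in-order: key 1, then key 2 */
      printf("%d\n", it->key);

    return RB_FIND(my_tree, &head, &a) == &a ? 0 : 1;
  }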

+ 32 - 0
Utilities/cmlibuv/include/uv-aix.h

@@ -0,0 +1,32 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_AIX_H
+#define UV_AIX_H
+
+#define UV_PLATFORM_LOOP_FIELDS                                               \
+  int fs_fd;                                                                  \
+
+#define UV_PLATFORM_FS_EVENT_FIELDS                                           \
+  uv__io_t event_watcher;                                                     \
+  char *dir_filename;                                                         \
+
+#endif /* UV_AIX_H */
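
uv-aix.h and the sibling platform headers below all follow one pattern: each defines *_FIELDS macros that the core uv headers expand inside shared structures, so every platform can append its own members without #ifdefs at each use site. A tiny illustrative sketch of that pattern, with hypothetical names rather than anything copied from uv.h:

  /* Illustrative only: a platform header contributes extra members by
   * defining a fields macro that a common header expands in place. */
  #define EXAMPLE_PLATFORM_LOOP_FIELDS \
    int fs_fd;

  struct example_loop {
    void* data;                   /* portable members come first */
    EXAMPLE_PLATFORM_LOOP_FIELDS  /* platform-specific tail */
  };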

+ 34 - 0
Utilities/cmlibuv/include/uv-bsd.h

@@ -0,0 +1,34 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_BSD_H
+#define UV_BSD_H
+
+#define UV_PLATFORM_FS_EVENT_FIELDS                                           \
+  uv__io_t event_watcher;                                                     \
+
+#define UV_IO_PRIVATE_PLATFORM_FIELDS                                         \
+  int rcount;                                                                 \
+  int wcount;                                                                 \
+
+#define UV_HAVE_KQUEUE 1
+
+#endif /* UV_BSD_H */

+ 61 - 0
Utilities/cmlibuv/include/uv-darwin.h

@@ -0,0 +1,61 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_DARWIN_H
+#define UV_DARWIN_H
+
+#if defined(__APPLE__) && defined(__MACH__)
+# include <mach/mach.h>
+# include <mach/task.h>
+# include <mach/semaphore.h>
+# include <TargetConditionals.h>
+# define UV_PLATFORM_SEM_T semaphore_t
+#endif
+
+#define UV_IO_PRIVATE_PLATFORM_FIELDS                                         \
+  int rcount;                                                                 \
+  int wcount;                                                                 \
+
+#define UV_PLATFORM_LOOP_FIELDS                                               \
+  uv_thread_t cf_thread;                                                      \
+  void* _cf_reserved;                                                         \
+  void* cf_state;                                                             \
+  uv_mutex_t cf_mutex;                                                        \
+  uv_sem_t cf_sem;                                                            \
+  void* cf_signals[2];                                                        \
+
+#define UV_PLATFORM_FS_EVENT_FIELDS                                           \
+  uv__io_t event_watcher;                                                     \
+  char* realpath;                                                             \
+  int realpath_len;                                                           \
+  int cf_flags;                                                               \
+  uv_async_t* cf_cb;                                                          \
+  void* cf_events[2];                                                         \
+  void* cf_member[2];                                                         \
+  int cf_error;                                                               \
+  uv_mutex_t cf_mutex;                                                        \
+
+#define UV_STREAM_PRIVATE_PLATFORM_FIELDS                                     \
+  void* select;                                                               \
+
+#define UV_HAVE_KQUEUE 1
+
+#endif /* UV_DARWIN_H */

+ 419 - 0
Utilities/cmlibuv/include/uv-errno.h

@@ -0,0 +1,419 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_ERRNO_H_
+#define UV_ERRNO_H_
+
+#include <errno.h>
+
+#define UV__EOF     (-4095)
+#define UV__UNKNOWN (-4094)
+
+#define UV__EAI_ADDRFAMILY  (-3000)
+#define UV__EAI_AGAIN       (-3001)
+#define UV__EAI_BADFLAGS    (-3002)
+#define UV__EAI_CANCELED    (-3003)
+#define UV__EAI_FAIL        (-3004)
+#define UV__EAI_FAMILY      (-3005)
+#define UV__EAI_MEMORY      (-3006)
+#define UV__EAI_NODATA      (-3007)
+#define UV__EAI_NONAME      (-3008)
+#define UV__EAI_OVERFLOW    (-3009)
+#define UV__EAI_SERVICE     (-3010)
+#define UV__EAI_SOCKTYPE    (-3011)
+#define UV__EAI_BADHINTS    (-3013)
+#define UV__EAI_PROTOCOL    (-3014)
+
+/* Only map to the system errno on non-Windows platforms. It's apparently
+ * a fairly common practice for Windows programmers to redefine errno codes.
+ */
+#if defined(E2BIG) && !defined(_WIN32)
+# define UV__E2BIG (-E2BIG)
+#else
+# define UV__E2BIG (-4093)
+#endif
+
+#if defined(EACCES) && !defined(_WIN32)
+# define UV__EACCES (-EACCES)
+#else
+# define UV__EACCES (-4092)
+#endif
+
+#if defined(EADDRINUSE) && !defined(_WIN32)
+# define UV__EADDRINUSE (-EADDRINUSE)
+#else
+# define UV__EADDRINUSE (-4091)
+#endif
+
+#if defined(EADDRNOTAVAIL) && !defined(_WIN32)
+# define UV__EADDRNOTAVAIL (-EADDRNOTAVAIL)
+#else
+# define UV__EADDRNOTAVAIL (-4090)
+#endif
+
+#if defined(EAFNOSUPPORT) && !defined(_WIN32)
+# define UV__EAFNOSUPPORT (-EAFNOSUPPORT)
+#else
+# define UV__EAFNOSUPPORT (-4089)
+#endif
+
+#if defined(EAGAIN) && !defined(_WIN32)
+# define UV__EAGAIN (-EAGAIN)
+#else
+# define UV__EAGAIN (-4088)
+#endif
+
+#if defined(EALREADY) && !defined(_WIN32)
+# define UV__EALREADY (-EALREADY)
+#else
+# define UV__EALREADY (-4084)
+#endif
+
+#if defined(EBADF) && !defined(_WIN32)
+# define UV__EBADF (-EBADF)
+#else
+# define UV__EBADF (-4083)
+#endif
+
+#if defined(EBUSY) && !defined(_WIN32)
+# define UV__EBUSY (-EBUSY)
+#else
+# define UV__EBUSY (-4082)
+#endif
+
+#if defined(ECANCELED) && !defined(_WIN32)
+# define UV__ECANCELED (-ECANCELED)
+#else
+# define UV__ECANCELED (-4081)
+#endif
+
+#if defined(ECHARSET) && !defined(_WIN32)
+# define UV__ECHARSET (-ECHARSET)
+#else
+# define UV__ECHARSET (-4080)
+#endif
+
+#if defined(ECONNABORTED) && !defined(_WIN32)
+# define UV__ECONNABORTED (-ECONNABORTED)
+#else
+# define UV__ECONNABORTED (-4079)
+#endif
+
+#if defined(ECONNREFUSED) && !defined(_WIN32)
+# define UV__ECONNREFUSED (-ECONNREFUSED)
+#else
+# define UV__ECONNREFUSED (-4078)
+#endif
+
+#if defined(ECONNRESET) && !defined(_WIN32)
+# define UV__ECONNRESET (-ECONNRESET)
+#else
+# define UV__ECONNRESET (-4077)
+#endif
+
+#if defined(EDESTADDRREQ) && !defined(_WIN32)
+# define UV__EDESTADDRREQ (-EDESTADDRREQ)
+#else
+# define UV__EDESTADDRREQ (-4076)
+#endif
+
+#if defined(EEXIST) && !defined(_WIN32)
+# define UV__EEXIST (-EEXIST)
+#else
+# define UV__EEXIST (-4075)
+#endif
+
+#if defined(EFAULT) && !defined(_WIN32)
+# define UV__EFAULT (-EFAULT)
+#else
+# define UV__EFAULT (-4074)
+#endif
+
+#if defined(EHOSTUNREACH) && !defined(_WIN32)
+# define UV__EHOSTUNREACH (-EHOSTUNREACH)
+#else
+# define UV__EHOSTUNREACH (-4073)
+#endif
+
+#if defined(EINTR) && !defined(_WIN32)
+# define UV__EINTR (-EINTR)
+#else
+# define UV__EINTR (-4072)
+#endif
+
+#if defined(EINVAL) && !defined(_WIN32)
+# define UV__EINVAL (-EINVAL)
+#else
+# define UV__EINVAL (-4071)
+#endif
+
+#if defined(EIO) && !defined(_WIN32)
+# define UV__EIO (-EIO)
+#else
+# define UV__EIO (-4070)
+#endif
+
+#if defined(EISCONN) && !defined(_WIN32)
+# define UV__EISCONN (-EISCONN)
+#else
+# define UV__EISCONN (-4069)
+#endif
+
+#if defined(EISDIR) && !defined(_WIN32)
+# define UV__EISDIR (-EISDIR)
+#else
+# define UV__EISDIR (-4068)
+#endif
+
+#if defined(ELOOP) && !defined(_WIN32)
+# define UV__ELOOP (-ELOOP)
+#else
+# define UV__ELOOP (-4067)
+#endif
+
+#if defined(EMFILE) && !defined(_WIN32)
+# define UV__EMFILE (-EMFILE)
+#else
+# define UV__EMFILE (-4066)
+#endif
+
+#if defined(EMSGSIZE) && !defined(_WIN32)
+# define UV__EMSGSIZE (-EMSGSIZE)
+#else
+# define UV__EMSGSIZE (-4065)
+#endif
+
+#if defined(ENAMETOOLONG) && !defined(_WIN32)
+# define UV__ENAMETOOLONG (-ENAMETOOLONG)
+#else
+# define UV__ENAMETOOLONG (-4064)
+#endif
+
+#if defined(ENETDOWN) && !defined(_WIN32)
+# define UV__ENETDOWN (-ENETDOWN)
+#else
+# define UV__ENETDOWN (-4063)
+#endif
+
+#if defined(ENETUNREACH) && !defined(_WIN32)
+# define UV__ENETUNREACH (-ENETUNREACH)
+#else
+# define UV__ENETUNREACH (-4062)
+#endif
+
+#if defined(ENFILE) && !defined(_WIN32)
+# define UV__ENFILE (-ENFILE)
+#else
+# define UV__ENFILE (-4061)
+#endif
+
+#if defined(ENOBUFS) && !defined(_WIN32)
+# define UV__ENOBUFS (-ENOBUFS)
+#else
+# define UV__ENOBUFS (-4060)
+#endif
+
+#if defined(ENODEV) && !defined(_WIN32)
+# define UV__ENODEV (-ENODEV)
+#else
+# define UV__ENODEV (-4059)
+#endif
+
+#if defined(ENOENT) && !defined(_WIN32)
+# define UV__ENOENT (-ENOENT)
+#else
+# define UV__ENOENT (-4058)
+#endif
+
+#if defined(ENOMEM) && !defined(_WIN32)
+# define UV__ENOMEM (-ENOMEM)
+#else
+# define UV__ENOMEM (-4057)
+#endif
+
+#if defined(ENONET) && !defined(_WIN32)
+# define UV__ENONET (-ENONET)
+#else
+# define UV__ENONET (-4056)
+#endif
+
+#if defined(ENOSPC) && !defined(_WIN32)
+# define UV__ENOSPC (-ENOSPC)
+#else
+# define UV__ENOSPC (-4055)
+#endif
+
+#if defined(ENOSYS) && !defined(_WIN32)
+# define UV__ENOSYS (-ENOSYS)
+#else
+# define UV__ENOSYS (-4054)
+#endif
+
+#if defined(ENOTCONN) && !defined(_WIN32)
+# define UV__ENOTCONN (-ENOTCONN)
+#else
+# define UV__ENOTCONN (-4053)
+#endif
+
+#if defined(ENOTDIR) && !defined(_WIN32)
+# define UV__ENOTDIR (-ENOTDIR)
+#else
+# define UV__ENOTDIR (-4052)
+#endif
+
+#if defined(ENOTEMPTY) && !defined(_WIN32)
+# define UV__ENOTEMPTY (-ENOTEMPTY)
+#else
+# define UV__ENOTEMPTY (-4051)
+#endif
+
+#if defined(ENOTSOCK) && !defined(_WIN32)
+# define UV__ENOTSOCK (-ENOTSOCK)
+#else
+# define UV__ENOTSOCK (-4050)
+#endif
+
+#if defined(ENOTSUP) && !defined(_WIN32)
+# define UV__ENOTSUP (-ENOTSUP)
+#else
+# define UV__ENOTSUP (-4049)
+#endif
+
+#if defined(EPERM) && !defined(_WIN32)
+# define UV__EPERM (-EPERM)
+#else
+# define UV__EPERM (-4048)
+#endif
+
+#if defined(EPIPE) && !defined(_WIN32)
+# define UV__EPIPE (-EPIPE)
+#else
+# define UV__EPIPE (-4047)
+#endif
+
+#if defined(EPROTO) && !defined(_WIN32)
+# define UV__EPROTO (-EPROTO)
+#else
+# define UV__EPROTO (-4046)
+#endif
+
+#if defined(EPROTONOSUPPORT) && !defined(_WIN32)
+# define UV__EPROTONOSUPPORT (-EPROTONOSUPPORT)
+#else
+# define UV__EPROTONOSUPPORT (-4045)
+#endif
+
+#if defined(EPROTOTYPE) && !defined(_WIN32)
+# define UV__EPROTOTYPE (-EPROTOTYPE)
+#else
+# define UV__EPROTOTYPE (-4044)
+#endif
+
+#if defined(EROFS) && !defined(_WIN32)
+# define UV__EROFS (-EROFS)
+#else
+# define UV__EROFS (-4043)
+#endif
+
+#if defined(ESHUTDOWN) && !defined(_WIN32)
+# define UV__ESHUTDOWN (-ESHUTDOWN)
+#else
+# define UV__ESHUTDOWN (-4042)
+#endif
+
+#if defined(ESPIPE) && !defined(_WIN32)
+# define UV__ESPIPE (-ESPIPE)
+#else
+# define UV__ESPIPE (-4041)
+#endif
+
+#if defined(ESRCH) && !defined(_WIN32)
+# define UV__ESRCH (-ESRCH)
+#else
+# define UV__ESRCH (-4040)
+#endif
+
+#if defined(ETIMEDOUT) && !defined(_WIN32)
+# define UV__ETIMEDOUT (-ETIMEDOUT)
+#else
+# define UV__ETIMEDOUT (-4039)
+#endif
+
+#if defined(ETXTBSY) && !defined(_WIN32)
+# define UV__ETXTBSY (-ETXTBSY)
+#else
+# define UV__ETXTBSY (-4038)
+#endif
+
+#if defined(EXDEV) && !defined(_WIN32)
+# define UV__EXDEV (-EXDEV)
+#else
+# define UV__EXDEV (-4037)
+#endif
+
+#if defined(EFBIG) && !defined(_WIN32)
+# define UV__EFBIG (-EFBIG)
+#else
+# define UV__EFBIG (-4036)
+#endif
+
+#if defined(ENOPROTOOPT) && !defined(_WIN32)
+# define UV__ENOPROTOOPT (-ENOPROTOOPT)
+#else
+# define UV__ENOPROTOOPT (-4035)
+#endif
+
+#if defined(ERANGE) && !defined(_WIN32)
+# define UV__ERANGE (-ERANGE)
+#else
+# define UV__ERANGE (-4034)
+#endif
+
+#if defined(ENXIO) && !defined(_WIN32)
+# define UV__ENXIO (-ENXIO)
+#else
+# define UV__ENXIO (-4033)
+#endif
+
+#if defined(EMLINK) && !defined(_WIN32)
+# define UV__EMLINK (-EMLINK)
+#else
+# define UV__EMLINK (-4032)
+#endif
+
+/* EHOSTDOWN is not visible on BSD-like systems when _POSIX_C_SOURCE is
+ * defined. Fortunately, its value is always 64 so it's possible albeit
+ * icky to hard-code it.
+ */
+#if defined(EHOSTDOWN) && !defined(_WIN32)
+# define UV__EHOSTDOWN (-EHOSTDOWN)
+#elif defined(__APPLE__) || \
+      defined(__DragonFly__) || \
+      defined(__FreeBSD__) || \
+      defined(__FreeBSD_kernel__) || \
+      defined(__NetBSD__) || \
+      defined(__OpenBSD__)
+# define UV__EHOSTDOWN (-64)
+#else
+# define UV__EHOSTDOWN (-4031)
+#endif
+
+#endif /* UV_ERRNO_H_ */
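
The mapping scheme described in the comments above is straightforward to sanity-check: on POSIX systems each UV__E* constant is simply the negated system errno, while the fallback branch supplies a fixed libuv-private code. A small sketch follows; the include path is an assumption, and in normal use these constants arrive indirectly via uv.h.

  #include <errno.h>
  #include <stdio.h>
  #include "uv-errno.h"

  int main(void) {
  #if !defined(_WIN32)
    /* On POSIX platforms UV__EACCES is just the negated system errno ... */
    printf("UV__EACCES = %d, -EACCES = %d\n", UV__EACCES, -EACCES);
  #else
    /* ... while on Windows it falls back to a fixed libuv-private code. */
    printf("UV__EACCES = %d\n", UV__EACCES);
  #endif
    return 0;
  }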

+ 34 - 0
Utilities/cmlibuv/include/uv-linux.h

@@ -0,0 +1,34 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_LINUX_H
+#define UV_LINUX_H
+
+#define UV_PLATFORM_LOOP_FIELDS                                               \
+  uv__io_t inotify_read_watcher;                                              \
+  void* inotify_watchers;                                                     \
+  int inotify_fd;                                                             \
+
+#define UV_PLATFORM_FS_EVENT_FIELDS                                           \
+  void* watchers[2];                                                          \
+  int wd;                                                                     \
+
+#endif /* UV_LINUX_H */

+ 27 - 0
Utilities/cmlibuv/include/uv-os390.h

@@ -0,0 +1,27 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_MVS_H
+#define UV_MVS_H
+
+#define UV_PLATFORM_SEM_T int
+
+#endif /* UV_MVS_H */

+ 44 - 0
Utilities/cmlibuv/include/uv-sunos.h

@@ -0,0 +1,44 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_SUNOS_H
+#define UV_SUNOS_H
+
+#include <sys/port.h>
+#include <port.h>
+
+/* For the sake of convenience and reduced #ifdef-ery in src/unix/sunos.c,
+ * add the fs_event fields even when this version of SunOS doesn't support
+ * file watching.
+ */
+#define UV_PLATFORM_LOOP_FIELDS                                               \
+  uv__io_t fs_event_watcher;                                                  \
+  int fs_fd;                                                                  \
+
+#if defined(PORT_SOURCE_FILE)
+
+# define UV_PLATFORM_FS_EVENT_FIELDS                                          \
+  file_obj_t fo;                                                              \
+  int fd;                                                                     \
+
+#endif /* defined(PORT_SOURCE_FILE) */
+
+#endif /* UV_SUNOS_H */

+ 37 - 0
Utilities/cmlibuv/include/uv-threadpool.h

@@ -0,0 +1,37 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/*
+ * This file is private to libuv. It provides common functionality to both
+ * Windows and Unix backends.
+ */
+
+#ifndef UV_THREADPOOL_H_
+#define UV_THREADPOOL_H_
+
+struct uv__work {
+  void (*work)(struct uv__work *w);
+  void (*done)(struct uv__work *w, int status);
+  struct uv_loop_s* loop;
+  void* wq[2];
+};
+
+#endif /* UV_THREADPOOL_H_ */
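
(Editorial note: struct uv__work above is libuv's private bridge to its worker-thread pool; the public uv_queue_work() request type embeds one of these via UV_WORK_PRIVATE_FIELDS, defined later in this diff. A minimal sketch of driving that pool through the public wrapper, assuming the uv_queue_work()/uv_run() declarations from the uv.h added by this import:

#include <stdio.h>
#include "uv.h"

/* work_cb runs on a pool thread; after_cb runs back on the loop thread. */
static void work_cb(uv_work_t* req) {
  int* n = (int*)req->data;
  *n *= 2; /* stand-in for an expensive, blocking computation */
}

static void after_cb(uv_work_t* req, int status) {
  printf("work done (status=%d), result=%d\n", status, *(int*)req->data);
}

int main(void) {
  uv_work_t req;
  int value = 21;
  req.data = &value;
  uv_queue_work(uv_default_loop(), &req, work_cb, after_cb);
  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}
)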

+ 371 - 0
Utilities/cmlibuv/include/uv-unix.h

@@ -0,0 +1,371 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_UNIX_H
+#define UV_UNIX_H
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <dirent.h>
+
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <arpa/inet.h>
+#include <netdb.h>
+
+#include <termios.h>
+#include <pwd.h>
+
+#include <semaphore.h>
+#include <pthread.h>
+#include <signal.h>
+
+#include "uv-threadpool.h"
+
+#if defined(__linux__)
+# include "uv-linux.h"
+#elif defined(_AIX)
+# include "uv-aix.h"
+#elif defined(__sun)
+# include "uv-sunos.h"
+#elif defined(__APPLE__)
+# include "uv-darwin.h"
+#elif defined(__DragonFly__)       || \
+      defined(__FreeBSD__)         || \
+      defined(__FreeBSD_kernel__)  || \
+      defined(__OpenBSD__)         || \
+      defined(__NetBSD__)
+# include "uv-bsd.h"
+#endif
+
+#ifndef PTHREAD_BARRIER_SERIAL_THREAD
+# include "pthread-barrier.h"
+#endif
+
+#ifndef NI_MAXHOST
+# define NI_MAXHOST 1025
+#endif
+
+#ifndef NI_MAXSERV
+# define NI_MAXSERV 32
+#endif
+
+#ifndef UV_IO_PRIVATE_PLATFORM_FIELDS
+# define UV_IO_PRIVATE_PLATFORM_FIELDS /* empty */
+#endif
+
+struct uv__io_s;
+struct uv__async;
+struct uv_loop_s;
+
+typedef void (*uv__io_cb)(struct uv_loop_s* loop,
+                          struct uv__io_s* w,
+                          unsigned int events);
+typedef struct uv__io_s uv__io_t;
+
+struct uv__io_s {
+  uv__io_cb cb;
+  void* pending_queue[2];
+  void* watcher_queue[2];
+  unsigned int pevents; /* Pending event mask i.e. mask at next tick. */
+  unsigned int events;  /* Current event mask. */
+  int fd;
+  UV_IO_PRIVATE_PLATFORM_FIELDS
+};
+
+typedef void (*uv__async_cb)(struct uv_loop_s* loop,
+                             struct uv__async* w,
+                             unsigned int nevents);
+
+struct uv__async {
+  uv__async_cb cb;
+  uv__io_t io_watcher;
+  int wfd;
+};
+
+#ifndef UV_PLATFORM_SEM_T
+# define UV_PLATFORM_SEM_T sem_t
+#endif
+
+#ifndef UV_PLATFORM_LOOP_FIELDS
+# define UV_PLATFORM_LOOP_FIELDS /* empty */
+#endif
+
+#ifndef UV_PLATFORM_FS_EVENT_FIELDS
+# define UV_PLATFORM_FS_EVENT_FIELDS /* empty */
+#endif
+
+#ifndef UV_STREAM_PRIVATE_PLATFORM_FIELDS
+# define UV_STREAM_PRIVATE_PLATFORM_FIELDS /* empty */
+#endif
+
+/* Note: May be cast to struct iovec. See writev(2). */
+typedef struct uv_buf_t {
+  char* base;
+  size_t len;
+} uv_buf_t;
+
+typedef int uv_file;
+typedef int uv_os_sock_t;
+typedef int uv_os_fd_t;
+
+#define UV_ONCE_INIT PTHREAD_ONCE_INIT
+
+typedef pthread_once_t uv_once_t;
+typedef pthread_t uv_thread_t;
+typedef pthread_mutex_t uv_mutex_t;
+typedef pthread_rwlock_t uv_rwlock_t;
+typedef UV_PLATFORM_SEM_T uv_sem_t;
+typedef pthread_cond_t uv_cond_t;
+typedef pthread_key_t uv_key_t;
+typedef pthread_barrier_t uv_barrier_t;
+
+
+/* Platform-specific definitions for uv_spawn support. */
+typedef gid_t uv_gid_t;
+typedef uid_t uv_uid_t;
+
+typedef struct dirent uv__dirent_t;
+
+#if defined(DT_UNKNOWN)
+# define HAVE_DIRENT_TYPES
+# if defined(DT_REG)
+#  define UV__DT_FILE DT_REG
+# else
+#  define UV__DT_FILE -1
+# endif
+# if defined(DT_DIR)
+#  define UV__DT_DIR DT_DIR
+# else
+#  define UV__DT_DIR -2
+# endif
+# if defined(DT_LNK)
+#  define UV__DT_LINK DT_LNK
+# else
+#  define UV__DT_LINK -3
+# endif
+# if defined(DT_FIFO)
+#  define UV__DT_FIFO DT_FIFO
+# else
+#  define UV__DT_FIFO -4
+# endif
+# if defined(DT_SOCK)
+#  define UV__DT_SOCKET DT_SOCK
+# else
+#  define UV__DT_SOCKET -5
+# endif
+# if defined(DT_CHR)
+#  define UV__DT_CHAR DT_CHR
+# else
+#  define UV__DT_CHAR -6
+# endif
+# if defined(DT_BLK)
+#  define UV__DT_BLOCK DT_BLK
+# else
+#  define UV__DT_BLOCK -7
+# endif
+#endif
+
+/* Platform-specific definitions for uv_dlopen support. */
+#define UV_DYNAMIC /* empty */
+
+typedef struct {
+  void* handle;
+  char* errmsg;
+} uv_lib_t;
+
+#define UV_LOOP_PRIVATE_FIELDS                                                \
+  unsigned long flags;                                                        \
+  int backend_fd;                                                             \
+  void* pending_queue[2];                                                     \
+  void* watcher_queue[2];                                                     \
+  uv__io_t** watchers;                                                        \
+  unsigned int nwatchers;                                                     \
+  unsigned int nfds;                                                          \
+  void* wq[2];                                                                \
+  uv_mutex_t wq_mutex;                                                        \
+  uv_async_t wq_async;                                                        \
+  uv_rwlock_t cloexec_lock;                                                   \
+  uv_handle_t* closing_handles;                                               \
+  void* process_handles[2];                                                   \
+  void* prepare_handles[2];                                                   \
+  void* check_handles[2];                                                     \
+  void* idle_handles[2];                                                      \
+  void* async_handles[2];                                                     \
+  struct uv__async async_watcher;                                             \
+  struct {                                                                    \
+    void* min;                                                                \
+    unsigned int nelts;                                                       \
+  } timer_heap;                                                               \
+  uint64_t timer_counter;                                                     \
+  uint64_t time;                                                              \
+  int signal_pipefd[2];                                                       \
+  uv__io_t signal_io_watcher;                                                 \
+  uv_signal_t child_watcher;                                                  \
+  int emfile_fd;                                                              \
+  UV_PLATFORM_LOOP_FIELDS                                                     \
+
+#define UV_REQ_TYPE_PRIVATE /* empty */
+
+#define UV_REQ_PRIVATE_FIELDS  /* empty */
+
+#define UV_PRIVATE_REQ_TYPES /* empty */
+
+#define UV_WRITE_PRIVATE_FIELDS                                               \
+  void* queue[2];                                                             \
+  unsigned int write_index;                                                   \
+  uv_buf_t* bufs;                                                             \
+  unsigned int nbufs;                                                         \
+  int error;                                                                  \
+  uv_buf_t bufsml[4];                                                         \
+
+#define UV_CONNECT_PRIVATE_FIELDS                                             \
+  void* queue[2];                                                             \
+
+#define UV_SHUTDOWN_PRIVATE_FIELDS /* empty */
+
+#define UV_UDP_SEND_PRIVATE_FIELDS                                            \
+  void* queue[2];                                                             \
+  struct sockaddr_storage addr;                                               \
+  unsigned int nbufs;                                                         \
+  uv_buf_t* bufs;                                                             \
+  ssize_t status;                                                             \
+  uv_udp_send_cb send_cb;                                                     \
+  uv_buf_t bufsml[4];                                                         \
+
+#define UV_HANDLE_PRIVATE_FIELDS                                              \
+  uv_handle_t* next_closing;                                                  \
+  unsigned int flags;                                                         \
+
+#define UV_STREAM_PRIVATE_FIELDS                                              \
+  uv_connect_t *connect_req;                                                  \
+  uv_shutdown_t *shutdown_req;                                                \
+  uv__io_t io_watcher;                                                        \
+  void* write_queue[2];                                                       \
+  void* write_completed_queue[2];                                             \
+  uv_connection_cb connection_cb;                                             \
+  int delayed_error;                                                          \
+  int accepted_fd;                                                            \
+  void* queued_fds;                                                           \
+  UV_STREAM_PRIVATE_PLATFORM_FIELDS                                           \
+
+#define UV_TCP_PRIVATE_FIELDS /* empty */
+
+#define UV_UDP_PRIVATE_FIELDS                                                 \
+  uv_alloc_cb alloc_cb;                                                       \
+  uv_udp_recv_cb recv_cb;                                                     \
+  uv__io_t io_watcher;                                                        \
+  void* write_queue[2];                                                       \
+  void* write_completed_queue[2];                                             \
+
+#define UV_PIPE_PRIVATE_FIELDS                                                \
+  const char* pipe_fname; /* strdup'ed */
+
+#define UV_POLL_PRIVATE_FIELDS                                                \
+  uv__io_t io_watcher;
+
+#define UV_PREPARE_PRIVATE_FIELDS                                             \
+  uv_prepare_cb prepare_cb;                                                   \
+  void* queue[2];                                                             \
+
+#define UV_CHECK_PRIVATE_FIELDS                                               \
+  uv_check_cb check_cb;                                                       \
+  void* queue[2];                                                             \
+
+#define UV_IDLE_PRIVATE_FIELDS                                                \
+  uv_idle_cb idle_cb;                                                         \
+  void* queue[2];                                                             \
+
+#define UV_ASYNC_PRIVATE_FIELDS                                               \
+  uv_async_cb async_cb;                                                       \
+  void* queue[2];                                                             \
+  int pending;                                                                \
+
+#define UV_TIMER_PRIVATE_FIELDS                                               \
+  uv_timer_cb timer_cb;                                                       \
+  void* heap_node[3];                                                         \
+  uint64_t timeout;                                                           \
+  uint64_t repeat;                                                            \
+  uint64_t start_id;
+
+#define UV_GETADDRINFO_PRIVATE_FIELDS                                         \
+  struct uv__work work_req;                                                   \
+  uv_getaddrinfo_cb cb;                                                       \
+  struct addrinfo* hints;                                                     \
+  char* hostname;                                                             \
+  char* service;                                                              \
+  struct addrinfo* addrinfo;                                                  \
+  int retcode;
+
+#define UV_GETNAMEINFO_PRIVATE_FIELDS                                         \
+  struct uv__work work_req;                                                   \
+  uv_getnameinfo_cb getnameinfo_cb;                                           \
+  struct sockaddr_storage storage;                                            \
+  int flags;                                                                  \
+  char host[NI_MAXHOST];                                                      \
+  char service[NI_MAXSERV];                                                   \
+  int retcode;
+
+#define UV_PROCESS_PRIVATE_FIELDS                                             \
+  void* queue[2];                                                             \
+  int status;                                                                 \
+
+#define UV_FS_PRIVATE_FIELDS                                                  \
+  const char *new_path;                                                       \
+  uv_file file;                                                               \
+  int flags;                                                                  \
+  mode_t mode;                                                                \
+  unsigned int nbufs;                                                         \
+  uv_buf_t* bufs;                                                             \
+  off_t off;                                                                  \
+  uv_uid_t uid;                                                               \
+  uv_gid_t gid;                                                               \
+  double atime;                                                               \
+  double mtime;                                                               \
+  struct uv__work work_req;                                                   \
+  uv_buf_t bufsml[4];                                                         \
+
+#define UV_WORK_PRIVATE_FIELDS                                                \
+  struct uv__work work_req;
+
+#define UV_TTY_PRIVATE_FIELDS                                                 \
+  struct termios orig_termios;                                                \
+  int mode;
+
+#define UV_SIGNAL_PRIVATE_FIELDS                                              \
+  /* RB_ENTRY(uv_signal_s) tree_entry; */                                     \
+  struct {                                                                    \
+    struct uv_signal_s* rbe_left;                                             \
+    struct uv_signal_s* rbe_right;                                            \
+    struct uv_signal_s* rbe_parent;                                           \
+    int rbe_color;                                                            \
+  } tree_entry;                                                               \
+  /* Use two counters here so we don't have to fiddle with atomics. */        \
+  unsigned int caught_signals;                                                \
+  unsigned int dispatched_signals;
+
+#define UV_FS_EVENT_PRIVATE_FIELDS                                            \
+  uv_fs_event_cb cb;                                                          \
+  UV_PLATFORM_FS_EVENT_FIELDS                                                 \
+
+#endif /* UV_UNIX_H */
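
(Editorial note: the uv_buf_t defined above is deliberately laid out like struct iovec on Unix, which is what the "May be cast to struct iovec. See writev(2)." comment relies on. A small sketch of that equivalence, assuming POSIX writev() and the uv_buf_init() helper declared in the uv.h added later in this diff:

#include <unistd.h>
#include <sys/uio.h>
#include "uv.h"

/* Write two uv_buf_t buffers to stdout with one writev() call by
 * reinterpreting the array as struct iovec[], as the header comment permits. */
int write_bufs(void) {
  uv_buf_t bufs[2];
  bufs[0] = uv_buf_init("hello, ", 7);
  bufs[1] = uv_buf_init("world\n", 6);
  return (int)writev(STDOUT_FILENO, (struct iovec*)bufs, 2);
}
)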

+ 43 - 0
Utilities/cmlibuv/include/uv-version.h

@@ -0,0 +1,43 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_VERSION_H
+#define UV_VERSION_H
+
+ /*
+ * Versions with the same major number are ABI stable. API is allowed to
+ * evolve between minor releases, but only in a backwards compatible way.
+ * Make sure you update the -soname directives in configure.ac
+ * and uv.gyp whenever you bump UV_VERSION_MAJOR or UV_VERSION_MINOR (but
+ * not UV_VERSION_PATCH.)
+ */
+
+#define UV_VERSION_MAJOR 1
+#define UV_VERSION_MINOR 9
+#define UV_VERSION_PATCH 2
+#define UV_VERSION_IS_RELEASE 0
+#define UV_VERSION_SUFFIX "dev"
+
+#define UV_VERSION_HEX  ((UV_VERSION_MAJOR << 16) | \
+                         (UV_VERSION_MINOR <<  8) | \
+                         (UV_VERSION_PATCH))
+
+#endif /* UV_VERSION_H */
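
(Editorial note: UV_VERSION_HEX packs the three components into one integer, major in bits 16-23, minor in bits 8-15, patch in bits 0-7, so 1.9.2 becomes 0x010902 and compile-time and run-time versions compare numerically. A sketch of such a check, assuming the uv_version()/uv_version_string() declarations from uv.h:

#include <stdio.h>
#include "uv.h"

int main(void) {
  printf("compiled against libuv %d.%d.%d (0x%06x)\n",
         UV_VERSION_MAJOR, UV_VERSION_MINOR, UV_VERSION_PATCH,
         (unsigned)UV_VERSION_HEX);
  if (uv_version() < (unsigned)UV_VERSION_HEX)
    printf("runtime libuv (%s) is older than these headers\n",
           uv_version_string());
  return 0;
}
)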

+ 660 - 0
Utilities/cmlibuv/include/uv-win.h

@@ -0,0 +1,660 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _WIN32_WINNT
+# define _WIN32_WINNT   0x0502
+#endif
+
+#if !defined(_SSIZE_T_) && !defined(_SSIZE_T_DEFINED)
+typedef intptr_t ssize_t;
+# define _SSIZE_T_
+# define _SSIZE_T_DEFINED
+#endif
+
+#include <winsock2.h>
+
+#if defined(__MINGW32__) && !defined(__MINGW64_VERSION_MAJOR)
+typedef struct pollfd {
+  SOCKET fd;
+  short  events;
+  short  revents;
+} WSAPOLLFD, *PWSAPOLLFD, *LPWSAPOLLFD;
+#endif
+
+#ifndef LOCALE_INVARIANT
+# define LOCALE_INVARIANT 0x007f
+#endif
+
+#include <mswsock.h>
+#include <ws2tcpip.h>
+#include <windows.h>
+
+#include <process.h>
+#include <signal.h>
+#include <sys/stat.h>
+
+#if defined(_MSC_VER) && _MSC_VER < 1600
+# include "stdint-msvc2008.h"
+#else
+# include <stdint.h>
+#endif
+
+#include "tree.h"
+#include "uv-threadpool.h"
+
+#define MAX_PIPENAME_LEN 256
+
+#ifndef S_IFLNK
+# define S_IFLNK 0xA000
+#endif
+
+/* Additional signals supported by uv_signal and/or uv_kill. The CRT defines
+ * the following signals already:
+ *
+ *   #define SIGINT           2
+ *   #define SIGILL           4
+ *   #define SIGABRT_COMPAT   6
+ *   #define SIGFPE           8
+ *   #define SIGSEGV         11
+ *   #define SIGTERM         15
+ *   #define SIGBREAK        21
+ *   #define SIGABRT         22
+ *
+ * The additional signals have values that are common on other Unix
+ * variants (Linux and Darwin)
+ */
+#define SIGHUP                1
+#define SIGKILL               9
+#define SIGWINCH             28
+
+/* The CRT defines SIGABRT_COMPAT as 6, which equals SIGABRT on many */
+/* unix-like platforms. However MinGW doesn't define it, so we do. */
+#ifndef SIGABRT_COMPAT
+# define SIGABRT_COMPAT       6
+#endif
+
+/*
+ * Guids and typedefs for winsock extension functions
+ * Mingw32 doesn't have these :-(
+ */
+#ifndef WSAID_ACCEPTEX
+# define WSAID_ACCEPTEX                                                       \
+         {0xb5367df1, 0xcbac, 0x11cf,                                         \
+         {0x95, 0xca, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92}}
+#endif
+
+#ifndef WSAID_CONNECTEX
+# define WSAID_CONNECTEX                                                      \
+         {0x25a207b9, 0xddf3, 0x4660,                                         \
+         {0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e}}
+#endif
+
+#ifndef WSAID_GETACCEPTEXSOCKADDRS
+# define WSAID_GETACCEPTEXSOCKADDRS                                           \
+         {0xb5367df2, 0xcbac, 0x11cf,                                         \
+         {0x95, 0xca, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92}}
+#endif
+
+#ifndef WSAID_DISCONNECTEX
+# define WSAID_DISCONNECTEX                                                   \
+         {0x7fda2e11, 0x8630, 0x436f,                                         \
+         {0xa0, 0x31, 0xf5, 0x36, 0xa6, 0xee, 0xc1, 0x57}}
+#endif
+
+#ifndef WSAID_TRANSMITFILE
+# define WSAID_TRANSMITFILE                                                   \
+         {0xb5367df0, 0xcbac, 0x11cf,                                         \
+         {0x95, 0xca, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92}}
+#endif
+
+#if (defined(__MINGW32__) && !defined(__MINGW64_VERSION_MAJOR)) \
+ || (defined(_MSC_VER) && _MSC_VER < 1500)
+  typedef BOOL (PASCAL *LPFN_ACCEPTEX)
+                      (SOCKET sListenSocket,
+                       SOCKET sAcceptSocket,
+                       PVOID lpOutputBuffer,
+                       DWORD dwReceiveDataLength,
+                       DWORD dwLocalAddressLength,
+                       DWORD dwRemoteAddressLength,
+                       LPDWORD lpdwBytesReceived,
+                       LPOVERLAPPED lpOverlapped);
+
+  typedef BOOL (PASCAL *LPFN_CONNECTEX)
+                      (SOCKET s,
+                       const struct sockaddr* name,
+                       int namelen,
+                       PVOID lpSendBuffer,
+                       DWORD dwSendDataLength,
+                       LPDWORD lpdwBytesSent,
+                       LPOVERLAPPED lpOverlapped);
+
+  typedef void (PASCAL *LPFN_GETACCEPTEXSOCKADDRS)
+                      (PVOID lpOutputBuffer,
+                       DWORD dwReceiveDataLength,
+                       DWORD dwLocalAddressLength,
+                       DWORD dwRemoteAddressLength,
+                       LPSOCKADDR* LocalSockaddr,
+                       LPINT LocalSockaddrLength,
+                       LPSOCKADDR* RemoteSockaddr,
+                       LPINT RemoteSockaddrLength);
+
+  typedef BOOL (PASCAL *LPFN_DISCONNECTEX)
+                      (SOCKET hSocket,
+                       LPOVERLAPPED lpOverlapped,
+                       DWORD dwFlags,
+                       DWORD reserved);
+
+  typedef BOOL (PASCAL *LPFN_TRANSMITFILE)
+                      (SOCKET hSocket,
+                       HANDLE hFile,
+                       DWORD nNumberOfBytesToWrite,
+                       DWORD nNumberOfBytesPerSend,
+                       LPOVERLAPPED lpOverlapped,
+                       LPTRANSMIT_FILE_BUFFERS lpTransmitBuffers,
+                       DWORD dwFlags);
+
+  typedef PVOID RTL_SRWLOCK;
+  typedef RTL_SRWLOCK SRWLOCK, *PSRWLOCK;
+#endif
+
+typedef int (WSAAPI* LPFN_WSARECV)
+            (SOCKET socket,
+             LPWSABUF buffers,
+             DWORD buffer_count,
+             LPDWORD bytes,
+             LPDWORD flags,
+             LPWSAOVERLAPPED overlapped,
+             LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine);
+
+typedef int (WSAAPI* LPFN_WSARECVFROM)
+            (SOCKET socket,
+             LPWSABUF buffers,
+             DWORD buffer_count,
+             LPDWORD bytes,
+             LPDWORD flags,
+             struct sockaddr* addr,
+             LPINT addr_len,
+             LPWSAOVERLAPPED overlapped,
+             LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine);
+
+#ifndef _NTDEF_
+  typedef LONG NTSTATUS;
+  typedef NTSTATUS *PNTSTATUS;
+#endif
+
+#ifndef RTL_CONDITION_VARIABLE_INIT
+  typedef PVOID CONDITION_VARIABLE, *PCONDITION_VARIABLE;
+#endif
+
+typedef struct _AFD_POLL_HANDLE_INFO {
+  HANDLE Handle;
+  ULONG Events;
+  NTSTATUS Status;
+} AFD_POLL_HANDLE_INFO, *PAFD_POLL_HANDLE_INFO;
+
+typedef struct _AFD_POLL_INFO {
+  LARGE_INTEGER Timeout;
+  ULONG NumberOfHandles;
+  ULONG Exclusive;
+  AFD_POLL_HANDLE_INFO Handles[1];
+} AFD_POLL_INFO, *PAFD_POLL_INFO;
+
+#define UV_MSAFD_PROVIDER_COUNT 3
+
+
+/**
+ * It should be possible to cast uv_buf_t[] to WSABUF[]
+ * see http://msdn.microsoft.com/en-us/library/ms741542(v=vs.85).aspx
+ */
+typedef struct uv_buf_t {
+  ULONG len;
+  char* base;
+} uv_buf_t;
+
+typedef int uv_file;
+typedef SOCKET uv_os_sock_t;
+typedef HANDLE uv_os_fd_t;
+
+typedef HANDLE uv_thread_t;
+
+typedef HANDLE uv_sem_t;
+
+typedef CRITICAL_SECTION uv_mutex_t;
+
+/* This condition variable implementation is based on the SetEvent solution
+ * (section 3.2) at http://www.cs.wustl.edu/~schmidt/win32-cv-1.html
+ * We could not use the SignalObjectAndWait solution (section 3.4) because
+ * it wants the 2nd argument (type uv_mutex_t) of uv_cond_wait() and
+ * uv_cond_timedwait() to be HANDLEs, but we use CRITICAL_SECTIONs.
+ */
+
+typedef union {
+  CONDITION_VARIABLE cond_var;
+  struct {
+    unsigned int waiters_count;
+    CRITICAL_SECTION waiters_count_lock;
+    HANDLE signal_event;
+    HANDLE broadcast_event;
+  } fallback;
+} uv_cond_t;
+
+typedef union {
+  struct {
+    unsigned int num_readers_;
+    CRITICAL_SECTION num_readers_lock_;
+    HANDLE write_semaphore_;
+  } state_;
+  /* TODO: remove me in v2.x. */
+  struct {
+    SRWLOCK unused_;
+  } unused1_;
+  /* TODO: remove me in v2.x. */
+  struct {
+    uv_mutex_t unused1_;
+    uv_mutex_t unused2_;
+  } unused2_;
+} uv_rwlock_t;
+
+typedef struct {
+  unsigned int n;
+  unsigned int count;
+  uv_mutex_t mutex;
+  uv_sem_t turnstile1;
+  uv_sem_t turnstile2;
+} uv_barrier_t;
+
+typedef struct {
+  DWORD tls_index;
+} uv_key_t;
+
+#define UV_ONCE_INIT { 0, NULL }
+
+typedef struct uv_once_s {
+  unsigned char ran;
+  HANDLE event;
+} uv_once_t;
+
+/* Platform-specific definitions for uv_spawn support. */
+typedef unsigned char uv_uid_t;
+typedef unsigned char uv_gid_t;
+
+typedef struct uv__dirent_s {
+  int d_type;
+  char d_name[1];
+} uv__dirent_t;
+
+#define HAVE_DIRENT_TYPES
+#define UV__DT_DIR     UV_DIRENT_DIR
+#define UV__DT_FILE    UV_DIRENT_FILE
+#define UV__DT_LINK    UV_DIRENT_LINK
+#define UV__DT_FIFO    UV_DIRENT_FIFO
+#define UV__DT_SOCKET  UV_DIRENT_SOCKET
+#define UV__DT_CHAR    UV_DIRENT_CHAR
+#define UV__DT_BLOCK   UV_DIRENT_BLOCK
+
+/* Platform-specific definitions for uv_dlopen support. */
+#define UV_DYNAMIC FAR WINAPI
+typedef struct {
+  HMODULE handle;
+  char* errmsg;
+} uv_lib_t;
+
+RB_HEAD(uv_timer_tree_s, uv_timer_s);
+
+#define UV_LOOP_PRIVATE_FIELDS                                                \
+    /* The loop's I/O completion port */                                      \
+  HANDLE iocp;                                                                \
+  /* The current time according to the event loop, in msecs. */               \
+  uint64_t time;                                                              \
+  /* Tail of a single-linked circular queue of pending reqs. If the queue */  \
+  /* is empty, tail_ is NULL. If there is only one item, */                   \
+  /* tail_->next_req == tail_ */                                              \
+  uv_req_t* pending_reqs_tail;                                                \
+  /* Head of a single-linked list of closed handles */                        \
+  uv_handle_t* endgame_handles;                                               \
+  /* The head of the timers tree */                                           \
+  struct uv_timer_tree_s timers;                                              \
+    /* Lists of active loop (prepare / check / idle) watchers */              \
+  uv_prepare_t* prepare_handles;                                              \
+  uv_check_t* check_handles;                                                  \
+  uv_idle_t* idle_handles;                                                    \
+  /* This pointer will refer to the prepare/check/idle handle whose */        \
+  /* callback is scheduled to be called next. This is needed to allow */      \
+  /* safe removal from one of the lists above while that list is being */     \
+  /* iterated over. */                                                        \
+  uv_prepare_t* next_prepare_handle;                                          \
+  uv_check_t* next_check_handle;                                              \
+  uv_idle_t* next_idle_handle;                                                \
+  /* This handle holds the peer sockets for the fast variant of uv_poll_t */  \
+  SOCKET poll_peer_sockets[UV_MSAFD_PROVIDER_COUNT];                          \
+  /* Counter to keep track of active tcp streams */                           \
+  unsigned int active_tcp_streams;                                            \
+  /* Counter to keep track of active udp streams */                           \
+  unsigned int active_udp_streams;                                            \
+  /* Counter for started timers */                                            \
+  uint64_t timer_counter;                                                     \
+  /* Threadpool */                                                            \
+  void* wq[2];                                                                \
+  uv_mutex_t wq_mutex;                                                        \
+  uv_async_t wq_async;
+
+#define UV_REQ_TYPE_PRIVATE                                                   \
+  /* TODO: remove the req suffix */                                           \
+  UV_ACCEPT,                                                                  \
+  UV_FS_EVENT_REQ,                                                            \
+  UV_POLL_REQ,                                                                \
+  UV_PROCESS_EXIT,                                                            \
+  UV_READ,                                                                    \
+  UV_UDP_RECV,                                                                \
+  UV_WAKEUP,                                                                  \
+  UV_SIGNAL_REQ,
+
+#define UV_REQ_PRIVATE_FIELDS                                                 \
+  union {                                                                     \
+    /* Used by I/O operations */                                              \
+    struct {                                                                  \
+      OVERLAPPED overlapped;                                                  \
+      size_t queued_bytes;                                                    \
+    } io;                                                                     \
+  } u;                                                                        \
+  struct uv_req_s* next_req;
+
+#define UV_WRITE_PRIVATE_FIELDS                                               \
+  int ipc_header;                                                             \
+  uv_buf_t write_buffer;                                                      \
+  HANDLE event_handle;                                                        \
+  HANDLE wait_handle;
+
+#define UV_CONNECT_PRIVATE_FIELDS                                             \
+  /* empty */
+
+#define UV_SHUTDOWN_PRIVATE_FIELDS                                            \
+  /* empty */
+
+#define UV_UDP_SEND_PRIVATE_FIELDS                                            \
+  /* empty */
+
+#define UV_PRIVATE_REQ_TYPES                                                  \
+  typedef struct uv_pipe_accept_s {                                           \
+    UV_REQ_FIELDS                                                             \
+    HANDLE pipeHandle;                                                        \
+    struct uv_pipe_accept_s* next_pending;                                    \
+  } uv_pipe_accept_t;                                                         \
+                                                                              \
+  typedef struct uv_tcp_accept_s {                                            \
+    UV_REQ_FIELDS                                                             \
+    SOCKET accept_socket;                                                     \
+    char accept_buffer[sizeof(struct sockaddr_storage) * 2 + 32];             \
+    HANDLE event_handle;                                                      \
+    HANDLE wait_handle;                                                       \
+    struct uv_tcp_accept_s* next_pending;                                     \
+  } uv_tcp_accept_t;                                                          \
+                                                                              \
+  typedef struct uv_read_s {                                                  \
+    UV_REQ_FIELDS                                                             \
+    HANDLE event_handle;                                                      \
+    HANDLE wait_handle;                                                       \
+  } uv_read_t;
+
+#define uv_stream_connection_fields                                           \
+  unsigned int write_reqs_pending;                                            \
+  uv_shutdown_t* shutdown_req;
+
+#define uv_stream_server_fields                                               \
+  uv_connection_cb connection_cb;
+
+#define UV_STREAM_PRIVATE_FIELDS                                              \
+  unsigned int reqs_pending;                                                  \
+  int activecnt;                                                              \
+  uv_read_t read_req;                                                         \
+  union {                                                                     \
+    struct { uv_stream_connection_fields } conn;                              \
+    struct { uv_stream_server_fields     } serv;                              \
+  } stream;
+
+#define uv_tcp_server_fields                                                  \
+  uv_tcp_accept_t* accept_reqs;                                               \
+  unsigned int processed_accepts;                                             \
+  uv_tcp_accept_t* pending_accepts;                                           \
+  LPFN_ACCEPTEX func_acceptex;
+
+#define uv_tcp_connection_fields                                              \
+  uv_buf_t read_buffer;                                                       \
+  LPFN_CONNECTEX func_connectex;
+
+#define UV_TCP_PRIVATE_FIELDS                                                 \
+  SOCKET socket;                                                              \
+  int delayed_error;                                                          \
+  union {                                                                     \
+    struct { uv_tcp_server_fields } serv;                                     \
+    struct { uv_tcp_connection_fields } conn;                                 \
+  } tcp;
+
+#define UV_UDP_PRIVATE_FIELDS                                                 \
+  SOCKET socket;                                                              \
+  unsigned int reqs_pending;                                                  \
+  int activecnt;                                                              \
+  uv_req_t recv_req;                                                          \
+  uv_buf_t recv_buffer;                                                       \
+  struct sockaddr_storage recv_from;                                          \
+  int recv_from_len;                                                          \
+  uv_udp_recv_cb recv_cb;                                                     \
+  uv_alloc_cb alloc_cb;                                                       \
+  LPFN_WSARECV func_wsarecv;                                                  \
+  LPFN_WSARECVFROM func_wsarecvfrom;
+
+#define uv_pipe_server_fields                                                 \
+  int pending_instances;                                                      \
+  uv_pipe_accept_t* accept_reqs;                                              \
+  uv_pipe_accept_t* pending_accepts;
+
+#define uv_pipe_connection_fields                                             \
+  uv_timer_t* eof_timer;                                                      \
+  uv_write_t ipc_header_write_req;                                            \
+  int ipc_pid;                                                                \
+  uint64_t remaining_ipc_rawdata_bytes;                                       \
+  struct {                                                                    \
+    void* queue[2];                                                           \
+    int queue_len;                                                            \
+  } pending_ipc_info;                                                         \
+  uv_write_t* non_overlapped_writes_tail;                                     \
+  uv_mutex_t readfile_mutex;                                                  \
+  volatile HANDLE readfile_thread;
+
+#define UV_PIPE_PRIVATE_FIELDS                                                \
+  HANDLE handle;                                                              \
+  WCHAR* name;                                                                \
+  union {                                                                     \
+    struct { uv_pipe_server_fields } serv;                                    \
+    struct { uv_pipe_connection_fields } conn;                                \
+  } pipe;
+
+/* TODO: put the parser states in a union - TTY handles are always */
+/* half-duplex so read-state can safely overlap write-state. */
+#define UV_TTY_PRIVATE_FIELDS                                                 \
+  HANDLE handle;                                                              \
+  union {                                                                     \
+    struct {                                                                  \
+      /* Used for readable TTY handles */                                     \
+      /* TODO: remove me in v2.x. */                                          \
+      HANDLE unused_;                                                         \
+      uv_buf_t read_line_buffer;                                              \
+      HANDLE read_raw_wait;                                                   \
+      /* Fields used for translating win keystrokes into vt100 characters */  \
+      char last_key[8];                                                       \
+      unsigned char last_key_offset;                                          \
+      unsigned char last_key_len;                                             \
+      WCHAR last_utf16_high_surrogate;                                        \
+      INPUT_RECORD last_input_record;                                         \
+    } rd;                                                                     \
+    struct {                                                                  \
+      /* Used for writable TTY handles */                                     \
+      /* utf8-to-utf16 conversion state */                                    \
+      unsigned int utf8_codepoint;                                            \
+      unsigned char utf8_bytes_left;                                          \
+      /* eol conversion state */                                              \
+      unsigned char previous_eol;                                             \
+      /* ansi parser state */                                                 \
+      unsigned char ansi_parser_state;                                        \
+      unsigned char ansi_csi_argc;                                            \
+      unsigned short ansi_csi_argv[4];                                        \
+      COORD saved_position;                                                   \
+      WORD saved_attributes;                                                  \
+    } wr;                                                                     \
+  } tty;
+
+#define UV_POLL_PRIVATE_FIELDS                                                \
+  SOCKET socket;                                                              \
+  /* Used in fast mode */                                                     \
+  SOCKET peer_socket;                                                         \
+  AFD_POLL_INFO afd_poll_info_1;                                              \
+  AFD_POLL_INFO afd_poll_info_2;                                              \
+  /* Used in fast and slow mode. */                                           \
+  uv_req_t poll_req_1;                                                        \
+  uv_req_t poll_req_2;                                                        \
+  unsigned char submitted_events_1;                                           \
+  unsigned char submitted_events_2;                                           \
+  unsigned char mask_events_1;                                                \
+  unsigned char mask_events_2;                                                \
+  unsigned char events;
+
+#define UV_TIMER_PRIVATE_FIELDS                                               \
+  RB_ENTRY(uv_timer_s) tree_entry;                                            \
+  uint64_t due;                                                               \
+  uint64_t repeat;                                                            \
+  uint64_t start_id;                                                          \
+  uv_timer_cb timer_cb;
+
+#define UV_ASYNC_PRIVATE_FIELDS                                               \
+  struct uv_req_s async_req;                                                  \
+  uv_async_cb async_cb;                                                       \
+  /* char to avoid alignment issues */                                        \
+  char volatile async_sent;
+
+#define UV_PREPARE_PRIVATE_FIELDS                                             \
+  uv_prepare_t* prepare_prev;                                                 \
+  uv_prepare_t* prepare_next;                                                 \
+  uv_prepare_cb prepare_cb;
+
+#define UV_CHECK_PRIVATE_FIELDS                                               \
+  uv_check_t* check_prev;                                                     \
+  uv_check_t* check_next;                                                     \
+  uv_check_cb check_cb;
+
+#define UV_IDLE_PRIVATE_FIELDS                                                \
+  uv_idle_t* idle_prev;                                                       \
+  uv_idle_t* idle_next;                                                       \
+  uv_idle_cb idle_cb;
+
+#define UV_HANDLE_PRIVATE_FIELDS                                              \
+  uv_handle_t* endgame_next;                                                  \
+  unsigned int flags;
+
+#define UV_GETADDRINFO_PRIVATE_FIELDS                                         \
+  struct uv__work work_req;                                                   \
+  uv_getaddrinfo_cb getaddrinfo_cb;                                           \
+  void* alloc;                                                                \
+  WCHAR* node;                                                                \
+  WCHAR* service;                                                             \
+  /* The addrinfoW field is used to store a pointer to the hints, and    */   \
+  /* later on to store the result of GetAddrInfoW. The final result will */   \
+  /* be converted to struct addrinfo* and stored in the addrinfo field.  */   \
+  struct addrinfoW* addrinfow;                                                \
+  struct addrinfo* addrinfo;                                                  \
+  int retcode;
+
+#define UV_GETNAMEINFO_PRIVATE_FIELDS                                         \
+  struct uv__work work_req;                                                   \
+  uv_getnameinfo_cb getnameinfo_cb;                                           \
+  struct sockaddr_storage storage;                                            \
+  int flags;                                                                  \
+  char host[NI_MAXHOST];                                                      \
+  char service[NI_MAXSERV];                                                   \
+  int retcode;
+
+#define UV_PROCESS_PRIVATE_FIELDS                                             \
+  struct uv_process_exit_s {                                                  \
+    UV_REQ_FIELDS                                                             \
+  } exit_req;                                                                 \
+  BYTE* child_stdio_buffer;                                                   \
+  int exit_signal;                                                            \
+  HANDLE wait_handle;                                                         \
+  HANDLE process_handle;                                                      \
+  volatile char exit_cb_pending;
+
+#define UV_FS_PRIVATE_FIELDS                                                  \
+  struct uv__work work_req;                                                   \
+  int flags;                                                                  \
+  DWORD sys_errno_;                                                           \
+  union {                                                                     \
+    /* TODO: remove me in 0.9. */                                             \
+    WCHAR* pathw;                                                             \
+    int fd;                                                                   \
+  } file;                                                                     \
+  union {                                                                     \
+    struct {                                                                  \
+      int mode;                                                               \
+      WCHAR* new_pathw;                                                       \
+      int file_flags;                                                         \
+      int fd_out;                                                             \
+      unsigned int nbufs;                                                     \
+      uv_buf_t* bufs;                                                         \
+      int64_t offset;                                                         \
+      uv_buf_t bufsml[4];                                                     \
+    } info;                                                                   \
+    struct {                                                                  \
+      double atime;                                                           \
+      double mtime;                                                           \
+    } time;                                                                   \
+  } fs;
+
+#define UV_WORK_PRIVATE_FIELDS                                                \
+  struct uv__work work_req;
+
+#define UV_FS_EVENT_PRIVATE_FIELDS                                            \
+  struct uv_fs_event_req_s {                                                  \
+    UV_REQ_FIELDS                                                             \
+  } req;                                                                      \
+  HANDLE dir_handle;                                                          \
+  int req_pending;                                                            \
+  uv_fs_event_cb cb;                                                          \
+  WCHAR* filew;                                                               \
+  WCHAR* short_filew;                                                         \
+  WCHAR* dirw;                                                                \
+  char* buffer;
+
+#define UV_SIGNAL_PRIVATE_FIELDS                                              \
+  RB_ENTRY(uv_signal_s) tree_entry;                                           \
+  struct uv_req_s signal_req;                                                 \
+  unsigned long pending_signum;
+
+#ifndef F_OK
+#define F_OK 0
+#endif
+#ifndef R_OK
+#define R_OK 4
+#endif
+#ifndef W_OK
+#define W_OK 2
+#endif
+#ifndef X_OK
+#define X_OK 1
+#endif
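
(Editorial note: on Windows the uv_buf_t above mirrors WSABUF, ULONG length first and then the data pointer, which is what the "cast uv_buf_t[] to WSABUF[]" comment in this header depends on. A minimal sketch under that assumption, passing uv_buf_t buffers straight to the Winsock WSASend() call on an already-connected socket:

#include <winsock2.h>
#include "uv.h"

/* Send two uv_buf_t buffers over a connected socket in one WSASend() call,
 * reinterpreting the array as WSABUF[] per the header comment. */
int send_bufs(SOCKET s) {
  uv_buf_t bufs[2];
  DWORD sent = 0;
  bufs[0] = uv_buf_init("hello, ", 7);
  bufs[1] = uv_buf_init("world\n", 6);
  return WSASend(s, (WSABUF*)bufs, 2, &sent, 0, NULL, NULL);
}
)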

+ 1499 - 0
Utilities/cmlibuv/include/uv.h

@@ -0,0 +1,1499 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/* See https://github.com/libuv/libuv#documentation for documentation. */
+
+#ifndef UV_H
+#define UV_H
+
+/* Include KWSys Large File Support configuration. */
+#include <cmsys/Configure.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef _WIN32
+  /* Windows - set up dll import/export decorators. */
+# if defined(BUILDING_UV_SHARED)
+    /* Building shared library. */
+#   define UV_EXTERN __declspec(dllexport)
+# elif defined(USING_UV_SHARED)
+    /* Using shared library. */
+#   define UV_EXTERN __declspec(dllimport)
+# else
+    /* Building static library. */
+#   define UV_EXTERN /* nothing */
+# endif
+#elif __GNUC__ >= 4
+# define UV_EXTERN __attribute__((visibility("default")))
+#else
+# define UV_EXTERN /* nothing */
+#endif
+
+#include "uv-errno.h"
+#include "uv-version.h"
+#include <stddef.h>
+#include <stdio.h>
+
+#if defined(_MSC_VER) && _MSC_VER < 1600
+# include "stdint-msvc2008.h"
+#else
+# include <stdint.h>
+#endif
+
+#if defined(_WIN32)
+# include "uv-win.h"
+#else
+# include "uv-unix.h"
+#endif
+
+/* Expand this list if necessary. */
+#define UV_ERRNO_MAP(XX)                                                      \
+  XX(E2BIG, "argument list too long")                                         \
+  XX(EACCES, "permission denied")                                             \
+  XX(EADDRINUSE, "address already in use")                                    \
+  XX(EADDRNOTAVAIL, "address not available")                                  \
+  XX(EAFNOSUPPORT, "address family not supported")                            \
+  XX(EAGAIN, "resource temporarily unavailable")                              \
+  XX(EAI_ADDRFAMILY, "address family not supported")                          \
+  XX(EAI_AGAIN, "temporary failure")                                          \
+  XX(EAI_BADFLAGS, "bad ai_flags value")                                      \
+  XX(EAI_BADHINTS, "invalid value for hints")                                 \
+  XX(EAI_CANCELED, "request canceled")                                        \
+  XX(EAI_FAIL, "permanent failure")                                           \
+  XX(EAI_FAMILY, "ai_family not supported")                                   \
+  XX(EAI_MEMORY, "out of memory")                                             \
+  XX(EAI_NODATA, "no address")                                                \
+  XX(EAI_NONAME, "unknown node or service")                                   \
+  XX(EAI_OVERFLOW, "argument buffer overflow")                                \
+  XX(EAI_PROTOCOL, "resolved protocol is unknown")                            \
+  XX(EAI_SERVICE, "service not available for socket type")                    \
+  XX(EAI_SOCKTYPE, "socket type not supported")                               \
+  XX(EALREADY, "connection already in progress")                              \
+  XX(EBADF, "bad file descriptor")                                            \
+  XX(EBUSY, "resource busy or locked")                                        \
+  XX(ECANCELED, "operation canceled")                                         \
+  XX(ECHARSET, "invalid Unicode character")                                   \
+  XX(ECONNABORTED, "software caused connection abort")                        \
+  XX(ECONNREFUSED, "connection refused")                                      \
+  XX(ECONNRESET, "connection reset by peer")                                  \
+  XX(EDESTADDRREQ, "destination address required")                            \
+  XX(EEXIST, "file already exists")                                           \
+  XX(EFAULT, "bad address in system call argument")                           \
+  XX(EFBIG, "file too large")                                                 \
+  XX(EHOSTUNREACH, "host is unreachable")                                     \
+  XX(EINTR, "interrupted system call")                                        \
+  XX(EINVAL, "invalid argument")                                              \
+  XX(EIO, "i/o error")                                                        \
+  XX(EISCONN, "socket is already connected")                                  \
+  XX(EISDIR, "illegal operation on a directory")                              \
+  XX(ELOOP, "too many symbolic links encountered")                            \
+  XX(EMFILE, "too many open files")                                           \
+  XX(EMSGSIZE, "message too long")                                            \
+  XX(ENAMETOOLONG, "name too long")                                           \
+  XX(ENETDOWN, "network is down")                                             \
+  XX(ENETUNREACH, "network is unreachable")                                   \
+  XX(ENFILE, "file table overflow")                                           \
+  XX(ENOBUFS, "no buffer space available")                                    \
+  XX(ENODEV, "no such device")                                                \
+  XX(ENOENT, "no such file or directory")                                     \
+  XX(ENOMEM, "not enough memory")                                             \
+  XX(ENONET, "machine is not on the network")                                 \
+  XX(ENOPROTOOPT, "protocol not available")                                   \
+  XX(ENOSPC, "no space left on device")                                       \
+  XX(ENOSYS, "function not implemented")                                      \
+  XX(ENOTCONN, "socket is not connected")                                     \
+  XX(ENOTDIR, "not a directory")                                              \
+  XX(ENOTEMPTY, "directory not empty")                                        \
+  XX(ENOTSOCK, "socket operation on non-socket")                              \
+  XX(ENOTSUP, "operation not supported on socket")                            \
+  XX(EPERM, "operation not permitted")                                        \
+  XX(EPIPE, "broken pipe")                                                    \
+  XX(EPROTO, "protocol error")                                                \
+  XX(EPROTONOSUPPORT, "protocol not supported")                               \
+  XX(EPROTOTYPE, "protocol wrong type for socket")                            \
+  XX(ERANGE, "result too large")                                              \
+  XX(EROFS, "read-only file system")                                          \
+  XX(ESHUTDOWN, "cannot send after transport endpoint shutdown")              \
+  XX(ESPIPE, "invalid seek")                                                  \
+  XX(ESRCH, "no such process")                                                \
+  XX(ETIMEDOUT, "connection timed out")                                       \
+  XX(ETXTBSY, "text file is busy")                                            \
+  XX(EXDEV, "cross-device link not permitted")                                \
+  XX(UNKNOWN, "unknown error")                                                \
+  XX(EOF, "end of file")                                                      \
+  XX(ENXIO, "no such device or address")                                      \
+  XX(EMLINK, "too many links")                                                \
+  XX(EHOSTDOWN, "host is down")                                               \
+
+#define UV_HANDLE_TYPE_MAP(XX)                                                \
+  XX(ASYNC, async)                                                            \
+  XX(CHECK, check)                                                            \
+  XX(FS_EVENT, fs_event)                                                      \
+  XX(FS_POLL, fs_poll)                                                        \
+  XX(HANDLE, handle)                                                          \
+  XX(IDLE, idle)                                                              \
+  XX(NAMED_PIPE, pipe)                                                        \
+  XX(POLL, poll)                                                              \
+  XX(PREPARE, prepare)                                                        \
+  XX(PROCESS, process)                                                        \
+  XX(STREAM, stream)                                                          \
+  XX(TCP, tcp)                                                                \
+  XX(TIMER, timer)                                                            \
+  XX(TTY, tty)                                                                \
+  XX(UDP, udp)                                                                \
+  XX(SIGNAL, signal)                                                          \
+
+#define UV_REQ_TYPE_MAP(XX)                                                   \
+  XX(REQ, req)                                                                \
+  XX(CONNECT, connect)                                                        \
+  XX(WRITE, write)                                                            \
+  XX(SHUTDOWN, shutdown)                                                      \
+  XX(UDP_SEND, udp_send)                                                      \
+  XX(FS, fs)                                                                  \
+  XX(WORK, work)                                                              \
+  XX(GETADDRINFO, getaddrinfo)                                                \
+  XX(GETNAMEINFO, getnameinfo)                                                \
+
+typedef enum {
+#define XX(code, _) UV_ ## code = UV__ ## code,
+  UV_ERRNO_MAP(XX)
+#undef XX
+  UV_ERRNO_MAX = UV__EOF - 1
+} uv_errno_t;
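
The XX(code, message) lists above are X-macros. A minimal sketch of expanding
UV_ERRNO_MAP with a caller-supplied XX to build a lookup table (the table and
macro names here are illustrative, not part of libuv):

    #define XX(code, msg) { "UV_" #code, msg },
    static const struct { const char* name; const char* message; }
        error_table[] = { UV_ERRNO_MAP(XX) };
    #undef XX
    /* error_table[0] is { "UV_E2BIG", "argument list too long" }, and so on. */
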
+
+typedef enum {
+  UV_UNKNOWN_HANDLE = 0,
+#define XX(uc, lc) UV_##uc,
+  UV_HANDLE_TYPE_MAP(XX)
+#undef XX
+  UV_FILE,
+  UV_HANDLE_TYPE_MAX
+} uv_handle_type;
+
+typedef enum {
+  UV_UNKNOWN_REQ = 0,
+#define XX(uc, lc) UV_##uc,
+  UV_REQ_TYPE_MAP(XX)
+#undef XX
+  UV_REQ_TYPE_PRIVATE
+  UV_REQ_TYPE_MAX
+} uv_req_type;
+
+
+/* Handle types. */
+typedef struct uv_loop_s uv_loop_t;
+typedef struct uv_handle_s uv_handle_t;
+typedef struct uv_stream_s uv_stream_t;
+typedef struct uv_tcp_s uv_tcp_t;
+typedef struct uv_udp_s uv_udp_t;
+typedef struct uv_pipe_s uv_pipe_t;
+typedef struct uv_tty_s uv_tty_t;
+typedef struct uv_poll_s uv_poll_t;
+typedef struct uv_timer_s uv_timer_t;
+typedef struct uv_prepare_s uv_prepare_t;
+typedef struct uv_check_s uv_check_t;
+typedef struct uv_idle_s uv_idle_t;
+typedef struct uv_async_s uv_async_t;
+typedef struct uv_process_s uv_process_t;
+typedef struct uv_fs_event_s uv_fs_event_t;
+typedef struct uv_fs_poll_s uv_fs_poll_t;
+typedef struct uv_signal_s uv_signal_t;
+
+/* Request types. */
+typedef struct uv_req_s uv_req_t;
+typedef struct uv_getaddrinfo_s uv_getaddrinfo_t;
+typedef struct uv_getnameinfo_s uv_getnameinfo_t;
+typedef struct uv_shutdown_s uv_shutdown_t;
+typedef struct uv_write_s uv_write_t;
+typedef struct uv_connect_s uv_connect_t;
+typedef struct uv_udp_send_s uv_udp_send_t;
+typedef struct uv_fs_s uv_fs_t;
+typedef struct uv_work_s uv_work_t;
+
+/* None of the above. */
+typedef struct uv_cpu_info_s uv_cpu_info_t;
+typedef struct uv_interface_address_s uv_interface_address_t;
+typedef struct uv_dirent_s uv_dirent_t;
+typedef struct uv_passwd_s uv_passwd_t;
+
+typedef enum {
+  UV_LOOP_BLOCK_SIGNAL
+} uv_loop_option;
+
+typedef enum {
+  UV_RUN_DEFAULT = 0,
+  UV_RUN_ONCE,
+  UV_RUN_NOWAIT
+} uv_run_mode;
+
+
+UV_EXTERN unsigned int uv_version(void);
+UV_EXTERN const char* uv_version_string(void);
+
+typedef void* (*uv_malloc_func)(size_t size);
+typedef void* (*uv_realloc_func)(void* ptr, size_t size);
+typedef void* (*uv_calloc_func)(size_t count, size_t size);
+typedef void (*uv_free_func)(void* ptr);
+
+UV_EXTERN int uv_replace_allocator(uv_malloc_func malloc_func,
+                                   uv_realloc_func realloc_func,
+                                   uv_calloc_func calloc_func,
+                                   uv_free_func free_func);
+
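A minimal sketch of plugging custom allocation routines into libuv via
uv_replace_allocator(); the wrapper names are illustrative, <stdlib.h> is
assumed, and the call should be made before any other libuv API is used:

    static void* xmalloc(size_t size)             { return malloc(size); }
    static void* xrealloc(void* ptr, size_t size) { return realloc(ptr, size); }
    static void* xcalloc(size_t n, size_t size)   { return calloc(n, size); }
    static void  xfree(void* ptr)                 { free(ptr); }

    /* e.g. at the top of main(): */
    /* uv_replace_allocator(xmalloc, xrealloc, xcalloc, xfree); */
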
+UV_EXTERN uv_loop_t* uv_default_loop(void);
+UV_EXTERN int uv_loop_init(uv_loop_t* loop);
+UV_EXTERN int uv_loop_close(uv_loop_t* loop);
+/*
+ * NOTE:
+ *  This function is DEPRECATED (to be removed after 0.12), users should
+ *  allocate the loop manually and use uv_loop_init instead.
+ */
+UV_EXTERN uv_loop_t* uv_loop_new(void);
+/*
+ * NOTE:
+ *  This function is DEPRECATED (to be removed after 0.12). Users should use
+ *  uv_loop_close and free the memory manually instead.
+ */
+UV_EXTERN void uv_loop_delete(uv_loop_t*);
+UV_EXTERN size_t uv_loop_size(void);
+UV_EXTERN int uv_loop_alive(const uv_loop_t* loop);
+UV_EXTERN int uv_loop_configure(uv_loop_t* loop, uv_loop_option option, ...);
+
+UV_EXTERN int uv_run(uv_loop_t*, uv_run_mode mode);
+UV_EXTERN void uv_stop(uv_loop_t*);
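
Following the deprecation notes above, a minimal loop lifecycle sketch: the
caller allocates the loop, initializes it, runs it, and closes it once all
handles have been closed (error handling abbreviated):

    uv_loop_t loop;

    int run_loop(void) {
      int r = uv_loop_init(&loop);
      if (r != 0)
        return r;
      /* ... init and start handles on &loop here ... */
      uv_run(&loop, UV_RUN_DEFAULT);   /* returns when nothing is active */
      return uv_loop_close(&loop);     /* UV_EBUSY if handles are still open */
    }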
+
+UV_EXTERN void uv_ref(uv_handle_t*);
+UV_EXTERN void uv_unref(uv_handle_t*);
+UV_EXTERN int uv_has_ref(const uv_handle_t*);
+
+UV_EXTERN void uv_update_time(uv_loop_t*);
+UV_EXTERN uint64_t uv_now(const uv_loop_t*);
+
+UV_EXTERN int uv_backend_fd(const uv_loop_t*);
+UV_EXTERN int uv_backend_timeout(const uv_loop_t*);
+
+typedef void (*uv_alloc_cb)(uv_handle_t* handle,
+                            size_t suggested_size,
+                            uv_buf_t* buf);
+typedef void (*uv_read_cb)(uv_stream_t* stream,
+                           ssize_t nread,
+                           const uv_buf_t* buf);
+typedef void (*uv_write_cb)(uv_write_t* req, int status);
+typedef void (*uv_connect_cb)(uv_connect_t* req, int status);
+typedef void (*uv_shutdown_cb)(uv_shutdown_t* req, int status);
+typedef void (*uv_connection_cb)(uv_stream_t* server, int status);
+typedef void (*uv_close_cb)(uv_handle_t* handle);
+typedef void (*uv_poll_cb)(uv_poll_t* handle, int status, int events);
+typedef void (*uv_timer_cb)(uv_timer_t* handle);
+typedef void (*uv_async_cb)(uv_async_t* handle);
+typedef void (*uv_prepare_cb)(uv_prepare_t* handle);
+typedef void (*uv_check_cb)(uv_check_t* handle);
+typedef void (*uv_idle_cb)(uv_idle_t* handle);
+typedef void (*uv_exit_cb)(uv_process_t*, int64_t exit_status, int term_signal);
+typedef void (*uv_walk_cb)(uv_handle_t* handle, void* arg);
+typedef void (*uv_fs_cb)(uv_fs_t* req);
+typedef void (*uv_work_cb)(uv_work_t* req);
+typedef void (*uv_after_work_cb)(uv_work_t* req, int status);
+typedef void (*uv_getaddrinfo_cb)(uv_getaddrinfo_t* req,
+                                  int status,
+                                  struct addrinfo* res);
+typedef void (*uv_getnameinfo_cb)(uv_getnameinfo_t* req,
+                                  int status,
+                                  const char* hostname,
+                                  const char* service);
+
+typedef struct {
+  long tv_sec;
+  long tv_nsec;
+} uv_timespec_t;
+
+
+typedef struct {
+  uint64_t st_dev;
+  uint64_t st_mode;
+  uint64_t st_nlink;
+  uint64_t st_uid;
+  uint64_t st_gid;
+  uint64_t st_rdev;
+  uint64_t st_ino;
+  uint64_t st_size;
+  uint64_t st_blksize;
+  uint64_t st_blocks;
+  uint64_t st_flags;
+  uint64_t st_gen;
+  uv_timespec_t st_atim;
+  uv_timespec_t st_mtim;
+  uv_timespec_t st_ctim;
+  uv_timespec_t st_birthtim;
+} uv_stat_t;
+
+
+typedef void (*uv_fs_event_cb)(uv_fs_event_t* handle,
+                               const char* filename,
+                               int events,
+                               int status);
+
+typedef void (*uv_fs_poll_cb)(uv_fs_poll_t* handle,
+                              int status,
+                              const uv_stat_t* prev,
+                              const uv_stat_t* curr);
+
+typedef void (*uv_signal_cb)(uv_signal_t* handle, int signum);
+
+
+typedef enum {
+  UV_LEAVE_GROUP = 0,
+  UV_JOIN_GROUP
+} uv_membership;
+
+
+UV_EXTERN const char* uv_strerror(int err);
+UV_EXTERN const char* uv_err_name(int err);
+
+
+#define UV_REQ_FIELDS                                                         \
+  /* public */                                                                \
+  void* data;                                                                 \
+  /* read-only */                                                             \
+  uv_req_type type;                                                           \
+  /* private */                                                               \
+  void* active_queue[2];                                                      \
+  void* reserved[4];                                                          \
+  UV_REQ_PRIVATE_FIELDS                                                       \
+
+/* Abstract base class of all requests. */
+struct uv_req_s {
+  UV_REQ_FIELDS
+};
+
+
+/* Platform-specific request types. */
+UV_PRIVATE_REQ_TYPES
+
+
+UV_EXTERN int uv_shutdown(uv_shutdown_t* req,
+                          uv_stream_t* handle,
+                          uv_shutdown_cb cb);
+
+struct uv_shutdown_s {
+  UV_REQ_FIELDS
+  uv_stream_t* handle;
+  uv_shutdown_cb cb;
+  UV_SHUTDOWN_PRIVATE_FIELDS
+};
+
+
+#define UV_HANDLE_FIELDS                                                      \
+  /* public */                                                                \
+  void* data;                                                                 \
+  /* read-only */                                                             \
+  uv_loop_t* loop;                                                            \
+  uv_handle_type type;                                                        \
+  /* private */                                                               \
+  uv_close_cb close_cb;                                                       \
+  void* handle_queue[2];                                                      \
+  union {                                                                     \
+    int fd;                                                                   \
+    void* reserved[4];                                                        \
+  } u;                                                                        \
+  UV_HANDLE_PRIVATE_FIELDS                                                    \
+
+/* The abstract base class of all handles. */
+struct uv_handle_s {
+  UV_HANDLE_FIELDS
+};
+
+UV_EXTERN size_t uv_handle_size(uv_handle_type type);
+UV_EXTERN size_t uv_req_size(uv_req_type type);
+
+UV_EXTERN int uv_is_active(const uv_handle_t* handle);
+
+UV_EXTERN void uv_walk(uv_loop_t* loop, uv_walk_cb walk_cb, void* arg);
+
+/* Helpers for ad hoc debugging, no API/ABI stability guaranteed. */
+UV_EXTERN void uv_print_all_handles(uv_loop_t* loop, FILE* stream);
+UV_EXTERN void uv_print_active_handles(uv_loop_t* loop, FILE* stream);
+
+UV_EXTERN void uv_close(uv_handle_t* handle, uv_close_cb close_cb);
+
+UV_EXTERN int uv_send_buffer_size(uv_handle_t* handle, int* value);
+UV_EXTERN int uv_recv_buffer_size(uv_handle_t* handle, int* value);
+
+UV_EXTERN int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd);
+
+UV_EXTERN uv_buf_t uv_buf_init(char* base, unsigned int len);
+
+
+#define UV_STREAM_FIELDS                                                      \
+  /* number of bytes queued for writing */                                    \
+  size_t write_queue_size;                                                    \
+  uv_alloc_cb alloc_cb;                                                       \
+  uv_read_cb read_cb;                                                         \
+  /* private */                                                               \
+  UV_STREAM_PRIVATE_FIELDS
+
+/*
+ * uv_stream_t is a subclass of uv_handle_t.
+ *
+ * uv_stream is an abstract class.
+ *
+ * uv_stream_t is the parent class of uv_tcp_t, uv_pipe_t and uv_tty_t.
+ */
+struct uv_stream_s {
+  UV_HANDLE_FIELDS
+  UV_STREAM_FIELDS
+};
+
+UV_EXTERN int uv_listen(uv_stream_t* stream, int backlog, uv_connection_cb cb);
+UV_EXTERN int uv_accept(uv_stream_t* server, uv_stream_t* client);
+
+UV_EXTERN int uv_read_start(uv_stream_t*,
+                            uv_alloc_cb alloc_cb,
+                            uv_read_cb read_cb);
+UV_EXTERN int uv_read_stop(uv_stream_t*);
+
+UV_EXTERN int uv_write(uv_write_t* req,
+                       uv_stream_t* handle,
+                       const uv_buf_t bufs[],
+                       unsigned int nbufs,
+                       uv_write_cb cb);
+UV_EXTERN int uv_write2(uv_write_t* req,
+                        uv_stream_t* handle,
+                        const uv_buf_t bufs[],
+                        unsigned int nbufs,
+                        uv_stream_t* send_handle,
+                        uv_write_cb cb);
+UV_EXTERN int uv_try_write(uv_stream_t* handle,
+                           const uv_buf_t bufs[],
+                           unsigned int nbufs);
+
+/* uv_write_t is a subclass of uv_req_t. */
+struct uv_write_s {
+  UV_REQ_FIELDS
+  uv_write_cb cb;
+  uv_stream_t* send_handle;
+  uv_stream_t* handle;
+  UV_WRITE_PRIVATE_FIELDS
+};
+
+
+UV_EXTERN int uv_is_readable(const uv_stream_t* handle);
+UV_EXTERN int uv_is_writable(const uv_stream_t* handle);
+
+UV_EXTERN int uv_stream_set_blocking(uv_stream_t* handle, int blocking);
+
+UV_EXTERN int uv_is_closing(const uv_handle_t* handle);
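
A sketch of the alloc/read callback pair used with uv_read_start() on any
uv_stream_t; the per-read malloc/free buffer policy is just one option and
assumes <stdlib.h>:

    static void on_alloc(uv_handle_t* handle, size_t suggested, uv_buf_t* buf) {
      buf->base = (char*) malloc(suggested);
      buf->len = buf->base == NULL ? 0 : suggested;
    }

    static void on_read(uv_stream_t* stream, ssize_t nread, const uv_buf_t* buf) {
      if (nread > 0) {
        /* consume nread bytes starting at buf->base */
      } else if (nread < 0) {
        uv_close((uv_handle_t*) stream, NULL);  /* UV_EOF or an error code */
      }
      free(buf->base);  /* nread == 0 just means "try again later" */
    }

    /* uv_read_start((uv_stream_t*) &some_tcp_handle, on_alloc, on_read); */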
+
+
+/*
+ * uv_tcp_t is a subclass of uv_stream_t.
+ *
+ * Represents a TCP stream or TCP server.
+ */
+struct uv_tcp_s {
+  UV_HANDLE_FIELDS
+  UV_STREAM_FIELDS
+  UV_TCP_PRIVATE_FIELDS
+};
+
+UV_EXTERN int uv_tcp_init(uv_loop_t*, uv_tcp_t* handle);
+UV_EXTERN int uv_tcp_init_ex(uv_loop_t*, uv_tcp_t* handle, unsigned int flags);
+UV_EXTERN int uv_tcp_open(uv_tcp_t* handle, uv_os_sock_t sock);
+UV_EXTERN int uv_tcp_nodelay(uv_tcp_t* handle, int enable);
+UV_EXTERN int uv_tcp_keepalive(uv_tcp_t* handle,
+                               int enable,
+                               unsigned int delay);
+UV_EXTERN int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable);
+
+enum uv_tcp_flags {
+  /* Used with uv_tcp_bind, when an IPv6 address is used. */
+  UV_TCP_IPV6ONLY = 1
+};
+
+UV_EXTERN int uv_tcp_bind(uv_tcp_t* handle,
+                          const struct sockaddr* addr,
+                          unsigned int flags);
+UV_EXTERN int uv_tcp_getsockname(const uv_tcp_t* handle,
+                                 struct sockaddr* name,
+                                 int* namelen);
+UV_EXTERN int uv_tcp_getpeername(const uv_tcp_t* handle,
+                                 struct sockaddr* name,
+                                 int* namelen);
+UV_EXTERN int uv_tcp_connect(uv_connect_t* req,
+                             uv_tcp_t* handle,
+                             const struct sockaddr* addr,
+                             uv_connect_cb cb);
+
+/* uv_connect_t is a subclass of uv_req_t. */
+struct uv_connect_s {
+  UV_REQ_FIELDS
+  uv_connect_cb cb;
+  uv_stream_t* handle;
+  UV_CONNECT_PRIVATE_FIELDS
+};
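
A sketch of binding and listening on a TCP socket with the calls above; the
address, port and backlog are illustrative, uv_ip4_addr() is declared further
down in this header, and error handling is abbreviated:

    static void on_connection(uv_stream_t* server, int status) {
      /* status < 0 is an error; otherwise uv_accept() a freshly initialized
       * client handle and start reading from it. */
    }

    int start_server(uv_loop_t* loop, uv_tcp_t* server) {
      struct sockaddr_in addr;
      uv_tcp_init(loop, server);
      uv_ip4_addr("0.0.0.0", 7000, &addr);
      uv_tcp_bind(server, (const struct sockaddr*) &addr, 0);
      return uv_listen((uv_stream_t*) server, 128, on_connection);
    }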
+
+
+/*
+ * UDP support.
+ */
+
+enum uv_udp_flags {
+  /* Disables dual stack mode. */
+  UV_UDP_IPV6ONLY = 1,
+  /*
+   * Indicates that the message was truncated because the read buffer was too
+   * small. The remainder was discarded by the OS. Used in uv_udp_recv_cb.
+   */
+  UV_UDP_PARTIAL = 2,
+  /*
+   * Indicates if SO_REUSEADDR will be set when binding the handle.
+   * This sets the SO_REUSEPORT socket flag on the BSDs and OS X. On other
+   * Unix platforms, it sets the SO_REUSEADDR flag.  What that means is that
+   * multiple threads or processes can bind to the same address without error
+   * (provided they all set the flag) but only the last one to bind will receive
+   * any traffic, in effect "stealing" the port from the previous listener.
+   */
+  UV_UDP_REUSEADDR = 4
+};
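
A sketch of the UV_UDP_REUSEADDR behaviour described above; the address and
port are illustrative:

    int bind_shared(uv_loop_t* loop, uv_udp_t* handle) {
      struct sockaddr_in addr;
      uv_udp_init(loop, handle);
      uv_ip4_addr("0.0.0.0", 35000, &addr);
      /* Without UV_UDP_REUSEADDR a second binder would get UV_EADDRINUSE;
       * with it, the last one to bind receives the traffic. */
      return uv_udp_bind(handle, (const struct sockaddr*) &addr, UV_UDP_REUSEADDR);
    }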
+
+typedef void (*uv_udp_send_cb)(uv_udp_send_t* req, int status);
+typedef void (*uv_udp_recv_cb)(uv_udp_t* handle,
+                               ssize_t nread,
+                               const uv_buf_t* buf,
+                               const struct sockaddr* addr,
+                               unsigned flags);
+
+/* uv_udp_t is a subclass of uv_handle_t. */
+struct uv_udp_s {
+  UV_HANDLE_FIELDS
+  /* read-only */
+  /*
+   * Number of bytes queued for sending. This field strictly shows how much
+   * information is currently queued.
+   */
+  size_t send_queue_size;
+  /*
+   * Number of send requests currently in the queue, waiting to be processed.
+   */
+  size_t send_queue_count;
+  UV_UDP_PRIVATE_FIELDS
+};
+
+/* uv_udp_send_t is a subclass of uv_req_t. */
+struct uv_udp_send_s {
+  UV_REQ_FIELDS
+  uv_udp_t* handle;
+  uv_udp_send_cb cb;
+  UV_UDP_SEND_PRIVATE_FIELDS
+};
+
+UV_EXTERN int uv_udp_init(uv_loop_t*, uv_udp_t* handle);
+UV_EXTERN int uv_udp_init_ex(uv_loop_t*, uv_udp_t* handle, unsigned int flags);
+UV_EXTERN int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock);
+UV_EXTERN int uv_udp_bind(uv_udp_t* handle,
+                          const struct sockaddr* addr,
+                          unsigned int flags);
+
+UV_EXTERN int uv_udp_getsockname(const uv_udp_t* handle,
+                                 struct sockaddr* name,
+                                 int* namelen);
+UV_EXTERN int uv_udp_set_membership(uv_udp_t* handle,
+                                    const char* multicast_addr,
+                                    const char* interface_addr,
+                                    uv_membership membership);
+UV_EXTERN int uv_udp_set_multicast_loop(uv_udp_t* handle, int on);
+UV_EXTERN int uv_udp_set_multicast_ttl(uv_udp_t* handle, int ttl);
+UV_EXTERN int uv_udp_set_multicast_interface(uv_udp_t* handle,
+                                             const char* interface_addr);
+UV_EXTERN int uv_udp_set_broadcast(uv_udp_t* handle, int on);
+UV_EXTERN int uv_udp_set_ttl(uv_udp_t* handle, int ttl);
+UV_EXTERN int uv_udp_send(uv_udp_send_t* req,
+                          uv_udp_t* handle,
+                          const uv_buf_t bufs[],
+                          unsigned int nbufs,
+                          const struct sockaddr* addr,
+                          uv_udp_send_cb send_cb);
+UV_EXTERN int uv_udp_try_send(uv_udp_t* handle,
+                              const uv_buf_t bufs[],
+                              unsigned int nbufs,
+                              const struct sockaddr* addr);
+UV_EXTERN int uv_udp_recv_start(uv_udp_t* handle,
+                                uv_alloc_cb alloc_cb,
+                                uv_udp_recv_cb recv_cb);
+UV_EXTERN int uv_udp_recv_stop(uv_udp_t* handle);
+
+
+/*
+ * uv_tty_t is a subclass of uv_stream_t.
+ *
+ * Represents a stream for the console.
+ */
+struct uv_tty_s {
+  UV_HANDLE_FIELDS
+  UV_STREAM_FIELDS
+  UV_TTY_PRIVATE_FIELDS
+};
+
+typedef enum {
+  /* Initial/normal terminal mode */
+  UV_TTY_MODE_NORMAL,
+  /* Raw input mode (On Windows, ENABLE_WINDOW_INPUT is also enabled) */
+  UV_TTY_MODE_RAW,
+  /* Binary-safe I/O mode for IPC (Unix-only) */
+  UV_TTY_MODE_IO
+} uv_tty_mode_t;
+
+UV_EXTERN int uv_tty_init(uv_loop_t*, uv_tty_t*, uv_file fd, int readable);
+UV_EXTERN int uv_tty_set_mode(uv_tty_t*, uv_tty_mode_t mode);
+UV_EXTERN int uv_tty_reset_mode(void);
+UV_EXTERN int uv_tty_get_winsize(uv_tty_t*, int* width, int* height);
+
+#ifdef __cplusplus
+extern "C++" {
+
+inline int uv_tty_set_mode(uv_tty_t* handle, int mode) {
+  return uv_tty_set_mode(handle, static_cast<uv_tty_mode_t>(mode));
+}
+
+}
+#endif
+
+UV_EXTERN uv_handle_type uv_guess_handle(uv_file file);
+
+/*
+ * uv_pipe_t is a subclass of uv_stream_t.
+ *
+ * Represents a pipe stream or pipe server. On Windows this is a Named
+ * Pipe. On Unix this is a Unix domain socket.
+ */
+struct uv_pipe_s {
+  UV_HANDLE_FIELDS
+  UV_STREAM_FIELDS
+  int ipc; /* non-zero if this pipe is used for passing handles */
+  UV_PIPE_PRIVATE_FIELDS
+};
+
+UV_EXTERN int uv_pipe_init(uv_loop_t*, uv_pipe_t* handle, int ipc);
+UV_EXTERN int uv_pipe_open(uv_pipe_t*, uv_file file);
+UV_EXTERN int uv_pipe_bind(uv_pipe_t* handle, const char* name);
+UV_EXTERN void uv_pipe_connect(uv_connect_t* req,
+                               uv_pipe_t* handle,
+                               const char* name,
+                               uv_connect_cb cb);
+UV_EXTERN int uv_pipe_getsockname(const uv_pipe_t* handle,
+                                  char* buffer,
+                                  size_t* size);
+UV_EXTERN int uv_pipe_getpeername(const uv_pipe_t* handle,
+                                  char* buffer,
+                                  size_t* size);
+UV_EXTERN void uv_pipe_pending_instances(uv_pipe_t* handle, int count);
+UV_EXTERN int uv_pipe_pending_count(uv_pipe_t* handle);
+UV_EXTERN uv_handle_type uv_pipe_pending_type(uv_pipe_t* handle);
+
+
+struct uv_poll_s {
+  UV_HANDLE_FIELDS
+  uv_poll_cb poll_cb;
+  UV_POLL_PRIVATE_FIELDS
+};
+
+enum uv_poll_event {
+  UV_READABLE = 1,
+  UV_WRITABLE = 2,
+  UV_DISCONNECT = 4
+};
+
+UV_EXTERN int uv_poll_init(uv_loop_t* loop, uv_poll_t* handle, int fd);
+UV_EXTERN int uv_poll_init_socket(uv_loop_t* loop,
+                                  uv_poll_t* handle,
+                                  uv_os_sock_t socket);
+UV_EXTERN int uv_poll_start(uv_poll_t* handle, int events, uv_poll_cb cb);
+UV_EXTERN int uv_poll_stop(uv_poll_t* handle);
+
+
+struct uv_prepare_s {
+  UV_HANDLE_FIELDS
+  UV_PREPARE_PRIVATE_FIELDS
+};
+
+UV_EXTERN int uv_prepare_init(uv_loop_t*, uv_prepare_t* prepare);
+UV_EXTERN int uv_prepare_start(uv_prepare_t* prepare, uv_prepare_cb cb);
+UV_EXTERN int uv_prepare_stop(uv_prepare_t* prepare);
+
+
+struct uv_check_s {
+  UV_HANDLE_FIELDS
+  UV_CHECK_PRIVATE_FIELDS
+};
+
+UV_EXTERN int uv_check_init(uv_loop_t*, uv_check_t* check);
+UV_EXTERN int uv_check_start(uv_check_t* check, uv_check_cb cb);
+UV_EXTERN int uv_check_stop(uv_check_t* check);
+
+
+struct uv_idle_s {
+  UV_HANDLE_FIELDS
+  UV_IDLE_PRIVATE_FIELDS
+};
+
+UV_EXTERN int uv_idle_init(uv_loop_t*, uv_idle_t* idle);
+UV_EXTERN int uv_idle_start(uv_idle_t* idle, uv_idle_cb cb);
+UV_EXTERN int uv_idle_stop(uv_idle_t* idle);
+
+
+struct uv_async_s {
+  UV_HANDLE_FIELDS
+  UV_ASYNC_PRIVATE_FIELDS
+};
+
+UV_EXTERN int uv_async_init(uv_loop_t*,
+                            uv_async_t* async,
+                            uv_async_cb async_cb);
+UV_EXTERN int uv_async_send(uv_async_t* async);
+
+
+/*
+ * uv_timer_t is a subclass of uv_handle_t.
+ *
+ * Used to get woken up at a specified time in the future.
+ */
+struct uv_timer_s {
+  UV_HANDLE_FIELDS
+  UV_TIMER_PRIVATE_FIELDS
+};
+
+UV_EXTERN int uv_timer_init(uv_loop_t*, uv_timer_t* handle);
+UV_EXTERN int uv_timer_start(uv_timer_t* handle,
+                             uv_timer_cb cb,
+                             uint64_t timeout,
+                             uint64_t repeat);
+UV_EXTERN int uv_timer_stop(uv_timer_t* handle);
+UV_EXTERN int uv_timer_again(uv_timer_t* handle);
+UV_EXTERN void uv_timer_set_repeat(uv_timer_t* handle, uint64_t repeat);
+UV_EXTERN uint64_t uv_timer_get_repeat(const uv_timer_t* handle);
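
A sketch of a repeating timer using the declarations above: it first fires
after the timeout, then on every repeat interval (values in milliseconds and
purely illustrative):

    static void on_timer(uv_timer_t* handle) {
      /* runs after 1000 ms, then every 500 ms until uv_timer_stop() */
    }

    int start_timer(uv_loop_t* loop, uv_timer_t* timer) {
      uv_timer_init(loop, timer);
      return uv_timer_start(timer, on_timer, 1000, 500);
    }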
+
+
+/*
+ * uv_getaddrinfo_t is a subclass of uv_req_t.
+ *
+ * Request object for uv_getaddrinfo.
+ */
+struct uv_getaddrinfo_s {
+  UV_REQ_FIELDS
+  /* read-only */
+  uv_loop_t* loop;
+  /* struct addrinfo* addrinfo is marked as private, but it really isn't. */
+  UV_GETADDRINFO_PRIVATE_FIELDS
+};
+
+
+UV_EXTERN int uv_getaddrinfo(uv_loop_t* loop,
+                             uv_getaddrinfo_t* req,
+                             uv_getaddrinfo_cb getaddrinfo_cb,
+                             const char* node,
+                             const char* service,
+                             const struct addrinfo* hints);
+UV_EXTERN void uv_freeaddrinfo(struct addrinfo* ai);
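
A sketch of an asynchronous lookup with uv_getaddrinfo(); the node and service
strings are illustrative, NULL hints accept any address family, and only the
IPv4 case is handled here (uv_ip4_name() is declared further down in this
header):

    static void on_resolved(uv_getaddrinfo_t* req, int status, struct addrinfo* res) {
      char ip[64];
      if (status == 0 && res != NULL && res->ai_family == AF_INET) {
        uv_ip4_name((const struct sockaddr_in*) res->ai_addr, ip, sizeof(ip));
        /* connect to ip, etc. */
      }
      uv_freeaddrinfo(res);  /* always release the result list */
    }

    /* uv_getaddrinfo(loop, &req, on_resolved, "example.com", "80", NULL); */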
+
+
+/*
+* uv_getnameinfo_t is a subclass of uv_req_t.
+*
+* Request object for uv_getnameinfo.
+*/
+struct uv_getnameinfo_s {
+  UV_REQ_FIELDS
+  /* read-only */
+  uv_loop_t* loop;
+  /* host and service are marked as private, but they really aren't. */
+  UV_GETNAMEINFO_PRIVATE_FIELDS
+};
+
+UV_EXTERN int uv_getnameinfo(uv_loop_t* loop,
+                             uv_getnameinfo_t* req,
+                             uv_getnameinfo_cb getnameinfo_cb,
+                             const struct sockaddr* addr,
+                             int flags);
+
+
+/* uv_spawn() options. */
+typedef enum {
+  UV_IGNORE         = 0x00,
+  UV_CREATE_PIPE    = 0x01,
+  UV_INHERIT_FD     = 0x02,
+  UV_INHERIT_STREAM = 0x04,
+
+  /*
+   * When UV_CREATE_PIPE is specified, UV_READABLE_PIPE and UV_WRITABLE_PIPE
+   * determine the direction of flow, from the child process' perspective. Both
+   * flags may be specified to create a duplex data stream.
+   */
+  UV_READABLE_PIPE  = 0x10,
+  UV_WRITABLE_PIPE  = 0x20
+} uv_stdio_flags;
+
+typedef struct uv_stdio_container_s {
+  uv_stdio_flags flags;
+
+  union {
+    uv_stream_t* stream;
+    int fd;
+  } data;
+} uv_stdio_container_t;
+
+typedef struct uv_process_options_s {
+  uv_exit_cb exit_cb; /* Called after the process exits. */
+  const char* file;   /* Path to program to execute. */
+  /*
+   * Command line arguments. args[0] should be the path to the program. On
+   * Windows this uses CreateProcess, which concatenates the arguments into a
+   * single string; this can cause some strange errors. See the note at
+   * windows_verbatim_arguments.
+   */
+  char** args;
+  /*
+   * This will be set as the environ variable in the subprocess. If this is
+   * NULL then the parent's environ will be used.
+   */
+  char** env;
+  /*
+   * If non-NULL, this is the directory the subprocess should execute
+   * in, i.e. its current working directory.
+   */
+  const char* cwd;
+  /*
+   * Various flags that control how uv_spawn() behaves. See the definition of
+   * `enum uv_process_flags` below.
+   */
+  unsigned int flags;
+  /*
+   * The `stdio` field points to an array of uv_stdio_container_t structs that
+   * describe the file descriptors that will be made available to the child
+   * process. The convention is that stdio[0] points to stdin, fd 1 is used for
+   * stdout, and fd 2 is stderr.
+   *
+   * Note that on Windows file descriptors greater than 2 are available to the
+   * child process only if the child process uses the MSVCRT runtime.
+   */
+  int stdio_count;
+  uv_stdio_container_t* stdio;
+  /*
+   * Libuv can change the child process' user/group id. This happens only when
+   * the appropriate bits are set in the flags field. This is not supported on
+   * Windows; uv_spawn() will fail and set the error to UV_ENOTSUP.
+   */
+  uv_uid_t uid;
+  uv_gid_t gid;
+} uv_process_options_t;
+
+/*
+ * These are the flags that can be used for the uv_process_options.flags field.
+ */
+enum uv_process_flags {
+  /*
+   * Set the child process' user id. The user id is supplied in the `uid` field
+   * of the options struct. This does not work on Windows; setting this flag
+   * will cause uv_spawn() to fail.
+   */
+  UV_PROCESS_SETUID = (1 << 0),
+  /*
+   * Set the child process' group id. The group id is supplied in the `gid`
+   * field of the options struct. This does not work on Windows; setting this
+   * flag will cause uv_spawn() to fail.
+   */
+  UV_PROCESS_SETGID = (1 << 1),
+  /*
+   * Do not wrap any arguments in quotes, or perform any other escaping, when
+   * converting the argument list into a command line string. This option is
+   * only meaningful on Windows systems. On Unix it is silently ignored.
+   */
+  UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS = (1 << 2),
+  /*
+   * Spawn the child process in a detached state - this will make it a process
+   * group leader, and will effectively enable the child to keep running after
+   * the parent exits.  Note that the child process will still keep the
+   * parent's event loop alive unless the parent process calls uv_unref() on
+   * the child's process handle.
+   */
+  UV_PROCESS_DETACHED = (1 << 3),
+  /*
+   * Hide the subprocess console window that would normally be created. This
+   * option is only meaningful on Windows systems. On Unix it is silently
+   * ignored.
+   */
+  UV_PROCESS_WINDOWS_HIDE = (1 << 4)
+};
+
+/*
+ * uv_process_t is a subclass of uv_handle_t.
+ */
+struct uv_process_s {
+  UV_HANDLE_FIELDS
+  uv_exit_cb exit_cb;
+  int pid;
+  UV_PROCESS_PRIVATE_FIELDS
+};
+
+UV_EXTERN int uv_spawn(uv_loop_t* loop,
+                       uv_process_t* handle,
+                       const uv_process_options_t* options);
+UV_EXTERN int uv_process_kill(uv_process_t*, int signum);
+UV_EXTERN int uv_kill(int pid, int signum);
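
A sketch of spawning a child with the options struct documented above; the
command line is illustrative, error handling is abbreviated, and <string.h>
is assumed for memset():

    static void on_child_exit(uv_process_t* proc, int64_t exit_status, int term_signal) {
      uv_close((uv_handle_t*) proc, NULL);
    }

    int spawn_child(uv_loop_t* loop, uv_process_t* child) {
      char* args[] = { "cmake", "--version", NULL };  /* argv-style, NULL-terminated */
      uv_process_options_t options;
      memset(&options, 0, sizeof(options));           /* leave unused fields zeroed */
      options.exit_cb = on_child_exit;
      options.file = args[0];
      options.args = args;
      return uv_spawn(loop, child, &options);
    }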
+
+
+/*
+ * uv_work_t is a subclass of uv_req_t.
+ */
+struct uv_work_s {
+  UV_REQ_FIELDS
+  uv_loop_t* loop;
+  uv_work_cb work_cb;
+  uv_after_work_cb after_work_cb;
+  UV_WORK_PRIVATE_FIELDS
+};
+
+UV_EXTERN int uv_queue_work(uv_loop_t* loop,
+                            uv_work_t* req,
+                            uv_work_cb work_cb,
+                            uv_after_work_cb after_work_cb);
+
+UV_EXTERN int uv_cancel(uv_req_t* req);
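
A sketch of pushing blocking work onto the libuv thread pool with
uv_queue_work(); what the callbacks do is illustrative:

    static void do_work(uv_work_t* req) {
      /* runs on a thread-pool thread: do the blocking operation here */
    }

    static void after_work(uv_work_t* req, int status) {
      /* runs back on the loop thread; status is UV_ECANCELED if uv_cancel()
       * stopped the request before do_work() started */
    }

    /* uv_queue_work(loop, &req, do_work, after_work); */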
+
+
+struct uv_cpu_info_s {
+  char* model;
+  int speed;
+  struct uv_cpu_times_s {
+    uint64_t user;
+    uint64_t nice;
+    uint64_t sys;
+    uint64_t idle;
+    uint64_t irq;
+  } cpu_times;
+};
+
+struct uv_interface_address_s {
+  char* name;
+  char phys_addr[6];
+  int is_internal;
+  union {
+    struct sockaddr_in address4;
+    struct sockaddr_in6 address6;
+  } address;
+  union {
+    struct sockaddr_in netmask4;
+    struct sockaddr_in6 netmask6;
+  } netmask;
+};
+
+struct uv_passwd_s {
+  char* username;
+  long uid;
+  long gid;
+  char* shell;
+  char* homedir;
+};
+
+typedef enum {
+  UV_DIRENT_UNKNOWN,
+  UV_DIRENT_FILE,
+  UV_DIRENT_DIR,
+  UV_DIRENT_LINK,
+  UV_DIRENT_FIFO,
+  UV_DIRENT_SOCKET,
+  UV_DIRENT_CHAR,
+  UV_DIRENT_BLOCK
+} uv_dirent_type_t;
+
+struct uv_dirent_s {
+  const char* name;
+  uv_dirent_type_t type;
+};
+
+UV_EXTERN char** uv_setup_args(int argc, char** argv);
+UV_EXTERN int uv_get_process_title(char* buffer, size_t size);
+UV_EXTERN int uv_set_process_title(const char* title);
+UV_EXTERN int uv_resident_set_memory(size_t* rss);
+UV_EXTERN int uv_uptime(double* uptime);
+
+typedef struct {
+  long tv_sec;
+  long tv_usec;
+} uv_timeval_t;
+
+typedef struct {
+   uv_timeval_t ru_utime; /* user CPU time used */
+   uv_timeval_t ru_stime; /* system CPU time used */
+   uint64_t ru_maxrss;    /* maximum resident set size */
+   uint64_t ru_ixrss;     /* integral shared memory size */
+   uint64_t ru_idrss;     /* integral unshared data size */
+   uint64_t ru_isrss;     /* integral unshared stack size */
+   uint64_t ru_minflt;    /* page reclaims (soft page faults) */
+   uint64_t ru_majflt;    /* page faults (hard page faults) */
+   uint64_t ru_nswap;     /* swaps */
+   uint64_t ru_inblock;   /* block input operations */
+   uint64_t ru_oublock;   /* block output operations */
+   uint64_t ru_msgsnd;    /* IPC messages sent */
+   uint64_t ru_msgrcv;    /* IPC messages received */
+   uint64_t ru_nsignals;  /* signals received */
+   uint64_t ru_nvcsw;     /* voluntary context switches */
+   uint64_t ru_nivcsw;    /* involuntary context switches */
+} uv_rusage_t;
+
+UV_EXTERN int uv_getrusage(uv_rusage_t* rusage);
+
+UV_EXTERN int uv_os_homedir(char* buffer, size_t* size);
+UV_EXTERN int uv_os_tmpdir(char* buffer, size_t* size);
+UV_EXTERN int uv_os_get_passwd(uv_passwd_t* pwd);
+UV_EXTERN void uv_os_free_passwd(uv_passwd_t* pwd);
+
+UV_EXTERN int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count);
+UV_EXTERN void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count);
+
+UV_EXTERN int uv_interface_addresses(uv_interface_address_t** addresses,
+                                     int* count);
+UV_EXTERN void uv_free_interface_addresses(uv_interface_address_t* addresses,
+                                           int count);
+
+
+typedef enum {
+  UV_FS_UNKNOWN = -1,
+  UV_FS_CUSTOM,
+  UV_FS_OPEN,
+  UV_FS_CLOSE,
+  UV_FS_READ,
+  UV_FS_WRITE,
+  UV_FS_SENDFILE,
+  UV_FS_STAT,
+  UV_FS_LSTAT,
+  UV_FS_FSTAT,
+  UV_FS_FTRUNCATE,
+  UV_FS_UTIME,
+  UV_FS_FUTIME,
+  UV_FS_ACCESS,
+  UV_FS_CHMOD,
+  UV_FS_FCHMOD,
+  UV_FS_FSYNC,
+  UV_FS_FDATASYNC,
+  UV_FS_UNLINK,
+  UV_FS_RMDIR,
+  UV_FS_MKDIR,
+  UV_FS_MKDTEMP,
+  UV_FS_RENAME,
+  UV_FS_SCANDIR,
+  UV_FS_LINK,
+  UV_FS_SYMLINK,
+  UV_FS_READLINK,
+  UV_FS_CHOWN,
+  UV_FS_FCHOWN,
+  UV_FS_REALPATH
+} uv_fs_type;
+
+/* uv_fs_t is a subclass of uv_req_t. */
+struct uv_fs_s {
+  UV_REQ_FIELDS
+  uv_fs_type fs_type;
+  uv_loop_t* loop;
+  uv_fs_cb cb;
+  ssize_t result;
+  void* ptr;
+  const char* path;
+  uv_stat_t statbuf;  /* Stores the result of uv_fs_stat() and uv_fs_fstat(). */
+  UV_FS_PRIVATE_FIELDS
+};
+
+UV_EXTERN void uv_fs_req_cleanup(uv_fs_t* req);
+UV_EXTERN int uv_fs_close(uv_loop_t* loop,
+                          uv_fs_t* req,
+                          uv_file file,
+                          uv_fs_cb cb);
+UV_EXTERN int uv_fs_open(uv_loop_t* loop,
+                         uv_fs_t* req,
+                         const char* path,
+                         int flags,
+                         int mode,
+                         uv_fs_cb cb);
+UV_EXTERN int uv_fs_read(uv_loop_t* loop,
+                         uv_fs_t* req,
+                         uv_file file,
+                         const uv_buf_t bufs[],
+                         unsigned int nbufs,
+                         int64_t offset,
+                         uv_fs_cb cb);
+UV_EXTERN int uv_fs_unlink(uv_loop_t* loop,
+                           uv_fs_t* req,
+                           const char* path,
+                           uv_fs_cb cb);
+UV_EXTERN int uv_fs_write(uv_loop_t* loop,
+                          uv_fs_t* req,
+                          uv_file file,
+                          const uv_buf_t bufs[],
+                          unsigned int nbufs,
+                          int64_t offset,
+                          uv_fs_cb cb);
+UV_EXTERN int uv_fs_mkdir(uv_loop_t* loop,
+                          uv_fs_t* req,
+                          const char* path,
+                          int mode,
+                          uv_fs_cb cb);
+UV_EXTERN int uv_fs_mkdtemp(uv_loop_t* loop,
+                            uv_fs_t* req,
+                            const char* tpl,
+                            uv_fs_cb cb);
+UV_EXTERN int uv_fs_rmdir(uv_loop_t* loop,
+                          uv_fs_t* req,
+                          const char* path,
+                          uv_fs_cb cb);
+UV_EXTERN int uv_fs_scandir(uv_loop_t* loop,
+                            uv_fs_t* req,
+                            const char* path,
+                            int flags,
+                            uv_fs_cb cb);
+UV_EXTERN int uv_fs_scandir_next(uv_fs_t* req,
+                                 uv_dirent_t* ent);
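
A sketch of a synchronous directory listing: a NULL callback makes the
uv_fs_* call run synchronously, and uv_fs_scandir_next() returns UV_EOF once
the entries are exhausted (printf is available because this header pulls in
<stdio.h>):

    int list_dir(uv_loop_t* loop, const char* path) {
      uv_fs_t req;
      uv_dirent_t ent;
      int r = uv_fs_scandir(loop, &req, path, 0, NULL);  /* NULL cb => synchronous */
      if (r >= 0)
        while (uv_fs_scandir_next(&req, &ent) != UV_EOF)
          printf("%s\n", ent.name);
      uv_fs_req_cleanup(&req);
      return r < 0 ? r : 0;
    }
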
+UV_EXTERN int uv_fs_stat(uv_loop_t* loop,
+                         uv_fs_t* req,
+                         const char* path,
+                         uv_fs_cb cb);
+UV_EXTERN int uv_fs_fstat(uv_loop_t* loop,
+                          uv_fs_t* req,
+                          uv_file file,
+                          uv_fs_cb cb);
+UV_EXTERN int uv_fs_rename(uv_loop_t* loop,
+                           uv_fs_t* req,
+                           const char* path,
+                           const char* new_path,
+                           uv_fs_cb cb);
+UV_EXTERN int uv_fs_fsync(uv_loop_t* loop,
+                          uv_fs_t* req,
+                          uv_file file,
+                          uv_fs_cb cb);
+UV_EXTERN int uv_fs_fdatasync(uv_loop_t* loop,
+                              uv_fs_t* req,
+                              uv_file file,
+                              uv_fs_cb cb);
+UV_EXTERN int uv_fs_ftruncate(uv_loop_t* loop,
+                              uv_fs_t* req,
+                              uv_file file,
+                              int64_t offset,
+                              uv_fs_cb cb);
+UV_EXTERN int uv_fs_sendfile(uv_loop_t* loop,
+                             uv_fs_t* req,
+                             uv_file out_fd,
+                             uv_file in_fd,
+                             int64_t in_offset,
+                             size_t length,
+                             uv_fs_cb cb);
+UV_EXTERN int uv_fs_access(uv_loop_t* loop,
+                           uv_fs_t* req,
+                           const char* path,
+                           int mode,
+                           uv_fs_cb cb);
+UV_EXTERN int uv_fs_chmod(uv_loop_t* loop,
+                          uv_fs_t* req,
+                          const char* path,
+                          int mode,
+                          uv_fs_cb cb);
+UV_EXTERN int uv_fs_utime(uv_loop_t* loop,
+                          uv_fs_t* req,
+                          const char* path,
+                          double atime,
+                          double mtime,
+                          uv_fs_cb cb);
+UV_EXTERN int uv_fs_futime(uv_loop_t* loop,
+                           uv_fs_t* req,
+                           uv_file file,
+                           double atime,
+                           double mtime,
+                           uv_fs_cb cb);
+UV_EXTERN int uv_fs_lstat(uv_loop_t* loop,
+                          uv_fs_t* req,
+                          const char* path,
+                          uv_fs_cb cb);
+UV_EXTERN int uv_fs_link(uv_loop_t* loop,
+                         uv_fs_t* req,
+                         const char* path,
+                         const char* new_path,
+                         uv_fs_cb cb);
+
+/*
+ * This flag can be used with uv_fs_symlink() on Windows to specify whether
+ * the path argument points to a directory.
+ */
+#define UV_FS_SYMLINK_DIR          0x0001
+
+/*
+ * This flag can be used with uv_fs_symlink() on Windows to specify whether
+ * the symlink is to be created using junction points.
+ */
+#define UV_FS_SYMLINK_JUNCTION     0x0002
+
+UV_EXTERN int uv_fs_symlink(uv_loop_t* loop,
+                            uv_fs_t* req,
+                            const char* path,
+                            const char* new_path,
+                            int flags,
+                            uv_fs_cb cb);
+UV_EXTERN int uv_fs_readlink(uv_loop_t* loop,
+                             uv_fs_t* req,
+                             const char* path,
+                             uv_fs_cb cb);
+UV_EXTERN int uv_fs_realpath(uv_loop_t* loop,
+                             uv_fs_t* req,
+                             const char* path,
+                             uv_fs_cb cb);
+UV_EXTERN int uv_fs_fchmod(uv_loop_t* loop,
+                           uv_fs_t* req,
+                           uv_file file,
+                           int mode,
+                           uv_fs_cb cb);
+UV_EXTERN int uv_fs_chown(uv_loop_t* loop,
+                          uv_fs_t* req,
+                          const char* path,
+                          uv_uid_t uid,
+                          uv_gid_t gid,
+                          uv_fs_cb cb);
+UV_EXTERN int uv_fs_fchown(uv_loop_t* loop,
+                           uv_fs_t* req,
+                           uv_file file,
+                           uv_uid_t uid,
+                           uv_gid_t gid,
+                           uv_fs_cb cb);
+
+
+enum uv_fs_event {
+  UV_RENAME = 1,
+  UV_CHANGE = 2
+};
+
+
+struct uv_fs_event_s {
+  UV_HANDLE_FIELDS
+  /* private */
+  char* path;
+  UV_FS_EVENT_PRIVATE_FIELDS
+};
+
+
+/*
+ * uv_fs_stat() based polling file watcher.
+ */
+struct uv_fs_poll_s {
+  UV_HANDLE_FIELDS
+  /* Private, don't touch. */
+  void* poll_ctx;
+};
+
+UV_EXTERN int uv_fs_poll_init(uv_loop_t* loop, uv_fs_poll_t* handle);
+UV_EXTERN int uv_fs_poll_start(uv_fs_poll_t* handle,
+                               uv_fs_poll_cb poll_cb,
+                               const char* path,
+                               unsigned int interval);
+UV_EXTERN int uv_fs_poll_stop(uv_fs_poll_t* handle);
+UV_EXTERN int uv_fs_poll_getpath(uv_fs_poll_t* handle,
+                                 char* buffer,
+                                 size_t* size);
+
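A sketch of the stat()-based watcher declared above; the path and the 2000 ms
interval are illustrative:

    static void on_poll(uv_fs_poll_t* handle,
                        int status,
                        const uv_stat_t* prev,
                        const uv_stat_t* curr) {
      if (status < 0) {
        /* e.g. UV_ENOENT while the file does not exist yet */
      } else if (prev->st_mtim.tv_sec != curr->st_mtim.tv_sec ||
                 prev->st_mtim.tv_nsec != curr->st_mtim.tv_nsec) {
        /* the file changed between two polls */
      }
    }

    int watch_file(uv_loop_t* loop, uv_fs_poll_t* handle, const char* path) {
      uv_fs_poll_init(loop, handle);
      return uv_fs_poll_start(handle, on_poll, path, 2000);
    }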
+
+struct uv_signal_s {
+  UV_HANDLE_FIELDS
+  uv_signal_cb signal_cb;
+  int signum;
+  UV_SIGNAL_PRIVATE_FIELDS
+};
+
+UV_EXTERN int uv_signal_init(uv_loop_t* loop, uv_signal_t* handle);
+UV_EXTERN int uv_signal_start(uv_signal_t* handle,
+                              uv_signal_cb signal_cb,
+                              int signum);
+UV_EXTERN int uv_signal_stop(uv_signal_t* handle);
+
+UV_EXTERN void uv_loadavg(double avg[3]);
+
+
+/*
+ * Flags to be passed to uv_fs_event_start().
+ */
+enum uv_fs_event_flags {
+  /*
+   * By default, if the fs event watcher is given a directory name, we will
+   * watch for all events in that directory. This flag overrides this behavior
+   * and makes fs_event report only changes to the directory entry itself. This
+   * flag does not affect individual files watched.
+   * This flag is currently not implemented on any backend.
+   */
+  UV_FS_EVENT_WATCH_ENTRY = 1,
+
+  /*
+   * By default uv_fs_event will try to use a kernel interface such as inotify
+   * or kqueue to detect events. This may not work on remote filesystems such
+   * as NFS mounts. This flag makes fs_event fall back to calling stat() at a
+   * regular interval.
+   * This flag is currently not implemented on any backend.
+   */
+  UV_FS_EVENT_STAT = 2,
+
+  /*
+   * By default, when watching a directory, the event watcher does not register
+   * (i.e. ignores) changes in its subdirectories.
+   * This flag overrides that behaviour on platforms that support it.
+   */
+  UV_FS_EVENT_RECURSIVE = 4
+};
+
+
+UV_EXTERN int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle);
+UV_EXTERN int uv_fs_event_start(uv_fs_event_t* handle,
+                                uv_fs_event_cb cb,
+                                const char* path,
+                                unsigned int flags);
+UV_EXTERN int uv_fs_event_stop(uv_fs_event_t* handle);
+UV_EXTERN int uv_fs_event_getpath(uv_fs_event_t* handle,
+                                  char* buffer,
+                                  size_t* size);
+
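A sketch of a directory watcher using the flags described above;
UV_FS_EVENT_RECURSIVE is only honoured on platforms that support it, and the
path is illustrative:

    static void on_fs_event(uv_fs_event_t* handle,
                            const char* filename,
                            int events,
                            int status) {
      if (status < 0)
        return;
      if (events & UV_RENAME) { /* created, removed or renamed */ }
      if (events & UV_CHANGE) { /* contents or metadata changed */ }
    }

    int watch_dir(uv_loop_t* loop, uv_fs_event_t* handle, const char* dir) {
      uv_fs_event_init(loop, handle);
      return uv_fs_event_start(handle, on_fs_event, dir, UV_FS_EVENT_RECURSIVE);
    }
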
+UV_EXTERN int uv_ip4_addr(const char* ip, int port, struct sockaddr_in* addr);
+UV_EXTERN int uv_ip6_addr(const char* ip, int port, struct sockaddr_in6* addr);
+
+UV_EXTERN int uv_ip4_name(const struct sockaddr_in* src, char* dst, size_t size);
+UV_EXTERN int uv_ip6_name(const struct sockaddr_in6* src, char* dst, size_t size);
+
+UV_EXTERN int uv_inet_ntop(int af, const void* src, char* dst, size_t size);
+UV_EXTERN int uv_inet_pton(int af, const char* src, void* dst);
+
+UV_EXTERN int uv_exepath(char* buffer, size_t* size);
+
+UV_EXTERN int uv_cwd(char* buffer, size_t* size);
+
+UV_EXTERN int uv_chdir(const char* dir);
+
+UV_EXTERN uint64_t uv_get_free_memory(void);
+UV_EXTERN uint64_t uv_get_total_memory(void);
+
+UV_EXTERN uint64_t uv_hrtime(void);
+
+UV_EXTERN void uv_disable_stdio_inheritance(void);
+
+UV_EXTERN int uv_dlopen(const char* filename, uv_lib_t* lib);
+UV_EXTERN void uv_dlclose(uv_lib_t* lib);
+UV_EXTERN int uv_dlsym(uv_lib_t* lib, const char* name, void** ptr);
+UV_EXTERN const char* uv_dlerror(const uv_lib_t* lib);
+
+UV_EXTERN int uv_mutex_init(uv_mutex_t* handle);
+UV_EXTERN void uv_mutex_destroy(uv_mutex_t* handle);
+UV_EXTERN void uv_mutex_lock(uv_mutex_t* handle);
+UV_EXTERN int uv_mutex_trylock(uv_mutex_t* handle);
+UV_EXTERN void uv_mutex_unlock(uv_mutex_t* handle);
+
+UV_EXTERN int uv_rwlock_init(uv_rwlock_t* rwlock);
+UV_EXTERN void uv_rwlock_destroy(uv_rwlock_t* rwlock);
+UV_EXTERN void uv_rwlock_rdlock(uv_rwlock_t* rwlock);
+UV_EXTERN int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock);
+UV_EXTERN void uv_rwlock_rdunlock(uv_rwlock_t* rwlock);
+UV_EXTERN void uv_rwlock_wrlock(uv_rwlock_t* rwlock);
+UV_EXTERN int uv_rwlock_trywrlock(uv_rwlock_t* rwlock);
+UV_EXTERN void uv_rwlock_wrunlock(uv_rwlock_t* rwlock);
+
+UV_EXTERN int uv_sem_init(uv_sem_t* sem, unsigned int value);
+UV_EXTERN void uv_sem_destroy(uv_sem_t* sem);
+UV_EXTERN void uv_sem_post(uv_sem_t* sem);
+UV_EXTERN void uv_sem_wait(uv_sem_t* sem);
+UV_EXTERN int uv_sem_trywait(uv_sem_t* sem);
+
+UV_EXTERN int uv_cond_init(uv_cond_t* cond);
+UV_EXTERN void uv_cond_destroy(uv_cond_t* cond);
+UV_EXTERN void uv_cond_signal(uv_cond_t* cond);
+UV_EXTERN void uv_cond_broadcast(uv_cond_t* cond);
+
+UV_EXTERN int uv_barrier_init(uv_barrier_t* barrier, unsigned int count);
+UV_EXTERN void uv_barrier_destroy(uv_barrier_t* barrier);
+UV_EXTERN int uv_barrier_wait(uv_barrier_t* barrier);
+
+UV_EXTERN void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex);
+UV_EXTERN int uv_cond_timedwait(uv_cond_t* cond,
+                                uv_mutex_t* mutex,
+                                uint64_t timeout);
+
+UV_EXTERN void uv_once(uv_once_t* guard, void (*callback)(void));
+
+UV_EXTERN int uv_key_create(uv_key_t* key);
+UV_EXTERN void uv_key_delete(uv_key_t* key);
+UV_EXTERN void* uv_key_get(uv_key_t* key);
+UV_EXTERN void uv_key_set(uv_key_t* key, void* value);
+
+typedef void (*uv_thread_cb)(void* arg);
+
+UV_EXTERN int uv_thread_create(uv_thread_t* tid, uv_thread_cb entry, void* arg);
+UV_EXTERN uv_thread_t uv_thread_self(void);
+UV_EXTERN int uv_thread_join(uv_thread_t *tid);
+UV_EXTERN int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2);
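
A sketch of the thread primitives above: create a worker, pass it an argument,
and join it (what the worker does is illustrative):

    static void worker(void* arg) {
      *(int*) arg = 42;  /* stand-in for real work */
    }

    int run_worker(void) {
      uv_thread_t tid;
      int value = 0;
      if (uv_thread_create(&tid, worker, &value) != 0)
        return -1;
      uv_thread_join(&tid);  /* blocks until worker() returns */
      return value;
    }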
+
+/* The presence of these unions forces a similar struct layout. */
+#define XX(_, name) uv_ ## name ## _t name;
+union uv_any_handle {
+  UV_HANDLE_TYPE_MAP(XX)
+};
+
+union uv_any_req {
+  UV_REQ_TYPE_MAP(XX)
+};
+#undef XX
+
+
+struct uv_loop_s {
+  /* User data - use this for whatever. */
+  void* data;
+  /* Loop reference counting. */
+  unsigned int active_handles;
+  void* handle_queue[2];
+  void* active_reqs[2];
+  /* Internal flag to signal loop stop. */
+  unsigned int stop_flag;
+  UV_LOOP_PRIVATE_FIELDS
+};
+
+
+/* Don't export the private CPP symbols. */
+#undef UV_HANDLE_TYPE_PRIVATE
+#undef UV_REQ_TYPE_PRIVATE
+#undef UV_REQ_PRIVATE_FIELDS
+#undef UV_STREAM_PRIVATE_FIELDS
+#undef UV_TCP_PRIVATE_FIELDS
+#undef UV_PREPARE_PRIVATE_FIELDS
+#undef UV_CHECK_PRIVATE_FIELDS
+#undef UV_IDLE_PRIVATE_FIELDS
+#undef UV_ASYNC_PRIVATE_FIELDS
+#undef UV_TIMER_PRIVATE_FIELDS
+#undef UV_GETADDRINFO_PRIVATE_FIELDS
+#undef UV_GETNAMEINFO_PRIVATE_FIELDS
+#undef UV_FS_REQ_PRIVATE_FIELDS
+#undef UV_WORK_PRIVATE_FIELDS
+#undef UV_FS_EVENT_PRIVATE_FIELDS
+#undef UV_SIGNAL_PRIVATE_FIELDS
+#undef UV_LOOP_PRIVATE_FIELDS
+#undef UV_LOOP_PRIVATE_PLATFORM_FIELDS
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* UV_H */

+ 256 - 0
Utilities/cmlibuv/src/fs-poll.c

@@ -0,0 +1,256 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "uv-common.h"
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+struct poll_ctx {
+  uv_fs_poll_t* parent_handle; /* NULL if parent has been stopped or closed */
+  int busy_polling;
+  unsigned int interval;
+  uint64_t start_time;
+  uv_loop_t* loop;
+  uv_fs_poll_cb poll_cb;
+  uv_timer_t timer_handle;
+  uv_fs_t fs_req; /* TODO(bnoordhuis) mark fs_req internal */
+  uv_stat_t statbuf;
+  char path[1]; /* variable length */
+};
+
+static int statbuf_eq(const uv_stat_t* a, const uv_stat_t* b);
+static void poll_cb(uv_fs_t* req);
+static void timer_cb(uv_timer_t* timer);
+static void timer_close_cb(uv_handle_t* handle);
+
+static uv_stat_t zero_statbuf;
+
+
+int uv_fs_poll_init(uv_loop_t* loop, uv_fs_poll_t* handle) {
+  uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_POLL);
+  return 0;
+}
+
+
+int uv_fs_poll_start(uv_fs_poll_t* handle,
+                     uv_fs_poll_cb cb,
+                     const char* path,
+                     unsigned int interval) {
+  struct poll_ctx* ctx;
+  uv_loop_t* loop;
+  size_t len;
+  int err;
+
+  if (uv__is_active(handle))
+    return 0;
+
+  loop = handle->loop;
+  len = strlen(path);
+  ctx = uv__calloc(1, sizeof(*ctx) + len);
+
+  if (ctx == NULL)
+    return UV_ENOMEM;
+
+  ctx->loop = loop;
+  ctx->poll_cb = cb;
+  ctx->interval = interval ? interval : 1;
+  ctx->start_time = uv_now(loop);
+  ctx->parent_handle = handle;
+  memcpy(ctx->path, path, len + 1);
+
+  err = uv_timer_init(loop, &ctx->timer_handle);
+  if (err < 0)
+    goto error;
+
+  ctx->timer_handle.flags |= UV__HANDLE_INTERNAL;
+  uv__handle_unref(&ctx->timer_handle);
+
+  err = uv_fs_stat(loop, &ctx->fs_req, ctx->path, poll_cb);
+  if (err < 0)
+    goto error;
+
+  handle->poll_ctx = ctx;
+  uv__handle_start(handle);
+
+  return 0;
+
+error:
+  uv__free(ctx);
+  return err;
+}
+
+
+int uv_fs_poll_stop(uv_fs_poll_t* handle) {
+  struct poll_ctx* ctx;
+
+  if (!uv__is_active(handle))
+    return 0;
+
+  ctx = handle->poll_ctx;
+  assert(ctx != NULL);
+  assert(ctx->parent_handle != NULL);
+  ctx->parent_handle = NULL;
+  handle->poll_ctx = NULL;
+
+  /* Close the timer if it's active. If it's inactive, there's a stat request
+   * in progress and poll_cb will take care of the cleanup.
+   */
+  if (uv__is_active(&ctx->timer_handle))
+    uv_close((uv_handle_t*)&ctx->timer_handle, timer_close_cb);
+
+  uv__handle_stop(handle);
+
+  return 0;
+}
+
+
+int uv_fs_poll_getpath(uv_fs_poll_t* handle, char* buffer, size_t* size) {
+  struct poll_ctx* ctx;
+  size_t required_len;
+
+  if (!uv__is_active(handle)) {
+    *size = 0;
+    return UV_EINVAL;
+  }
+
+  ctx = handle->poll_ctx;
+  assert(ctx != NULL);
+
+  required_len = strlen(ctx->path);
+  if (required_len >= *size) {
+    *size = required_len + 1;
+    return UV_ENOBUFS;
+  }
+
+  memcpy(buffer, ctx->path, required_len);
+  *size = required_len;
+  buffer[required_len] = '\0';
+
+  return 0;
+}
+
+
+void uv__fs_poll_close(uv_fs_poll_t* handle) {
+  uv_fs_poll_stop(handle);
+}
+
+
+static void timer_cb(uv_timer_t* timer) {
+  struct poll_ctx* ctx;
+
+  ctx = container_of(timer, struct poll_ctx, timer_handle);
+  assert(ctx->parent_handle != NULL);
+  assert(ctx->parent_handle->poll_ctx == ctx);
+  ctx->start_time = uv_now(ctx->loop);
+
+  if (uv_fs_stat(ctx->loop, &ctx->fs_req, ctx->path, poll_cb))
+    abort();
+}
+
+
+static void poll_cb(uv_fs_t* req) {
+  uv_stat_t* statbuf;
+  struct poll_ctx* ctx;
+  uint64_t interval;
+
+  ctx = container_of(req, struct poll_ctx, fs_req);
+
+  if (ctx->parent_handle == NULL) { /* handle has been stopped or closed */
+    uv_close((uv_handle_t*)&ctx->timer_handle, timer_close_cb);
+    uv_fs_req_cleanup(req);
+    return;
+  }
+
+  if (req->result != 0) {
+    if (ctx->busy_polling != req->result) {
+      ctx->poll_cb(ctx->parent_handle,
+                   req->result,
+                   &ctx->statbuf,
+                   &zero_statbuf);
+      ctx->busy_polling = req->result;
+    }
+    goto out;
+  }
+
+  statbuf = &req->statbuf;
+
+  if (ctx->busy_polling != 0)
+    if (ctx->busy_polling < 0 || !statbuf_eq(&ctx->statbuf, statbuf))
+      ctx->poll_cb(ctx->parent_handle, 0, &ctx->statbuf, statbuf);
+
+  ctx->statbuf = *statbuf;
+  ctx->busy_polling = 1;
+
+out:
+  uv_fs_req_cleanup(req);
+
+  if (ctx->parent_handle == NULL) { /* handle has been stopped by callback */
+    uv_close((uv_handle_t*)&ctx->timer_handle, timer_close_cb);
+    return;
+  }
+
+  /* Reschedule timer, subtract the delay from doing the stat(). */
+  interval = ctx->interval;
+  interval -= (uv_now(ctx->loop) - ctx->start_time) % interval;
+
+  if (uv_timer_start(&ctx->timer_handle, timer_cb, interval, 0))
+    abort();
+}
+
+
+static void timer_close_cb(uv_handle_t* handle) {
+  uv__free(container_of(handle, struct poll_ctx, timer_handle));
+}
+
+
+static int statbuf_eq(const uv_stat_t* a, const uv_stat_t* b) {
+  return a->st_ctim.tv_nsec == b->st_ctim.tv_nsec
+      && a->st_mtim.tv_nsec == b->st_mtim.tv_nsec
+      && a->st_birthtim.tv_nsec == b->st_birthtim.tv_nsec
+      && a->st_ctim.tv_sec == b->st_ctim.tv_sec
+      && a->st_mtim.tv_sec == b->st_mtim.tv_sec
+      && a->st_birthtim.tv_sec == b->st_birthtim.tv_sec
+      && a->st_size == b->st_size
+      && a->st_mode == b->st_mode
+      && a->st_uid == b->st_uid
+      && a->st_gid == b->st_gid
+      && a->st_ino == b->st_ino
+      && a->st_dev == b->st_dev
+      && a->st_flags == b->st_flags
+      && a->st_gen == b->st_gen;
+}
+
+
+#if defined(_WIN32)
+
+#include "win/internal.h"
+#include "win/handle-inl.h"
+
+void uv__fs_poll_endgame(uv_loop_t* loop, uv_fs_poll_t* handle) {
+  assert(handle->flags & UV__HANDLE_CLOSING);
+  assert(!(handle->flags & UV_HANDLE_CLOSED));
+  uv__handle_close(handle);
+}
+
+#endif /* _WIN32 */
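
A minimal usage sketch of the fs-poll machinery above, driven through libuv's public uv_fs_poll API. Illustrative only, not part of the imported sources; the watched file name and the 2000 ms interval are placeholders.

#include <stdio.h>
#include <uv.h>

/* Called whenever the stat() result for the watched path changes
 * (or the stat itself starts failing). */
static void on_change(uv_fs_poll_t* poller, int status,
                      const uv_stat_t* prev, const uv_stat_t* curr) {
  if (status < 0)
    fprintf(stderr, "stat failed: %s\n", uv_strerror(status));
  else
    printf("size changed: %llu -> %llu\n",
           (unsigned long long) prev->st_size,
           (unsigned long long) curr->st_size);
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_fs_poll_t poller;

  uv_fs_poll_init(loop, &poller);
  /* Stat "watched.txt" every 2000 ms; an interval of 0 is coerced to
   * 1 ms by uv_fs_poll_start() above. */
  uv_fs_poll_start(&poller, on_change, "watched.txt", 2000);
  return uv_run(loop, UV_RUN_DEFAULT);
}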

+ 245 - 0
Utilities/cmlibuv/src/heap-inl.h

@@ -0,0 +1,245 @@
+/* Copyright (c) 2013, Ben Noordhuis <[email protected]>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef UV_SRC_HEAP_H_
+#define UV_SRC_HEAP_H_
+
+#include <stddef.h>  /* NULL */
+
+#if defined(__GNUC__)
+# define HEAP_EXPORT(declaration) __attribute__((unused)) static declaration
+#else
+# define HEAP_EXPORT(declaration) static declaration
+#endif
+
+struct heap_node {
+  struct heap_node* left;
+  struct heap_node* right;
+  struct heap_node* parent;
+};
+
+/* A binary min heap.  The usual properties hold: the root is the lowest
+ * element in the set, the height of the tree is at most log2(nodes) and
+ * it's always a complete binary tree.
+ *
+ * The heap functions try hard to detect corrupted tree nodes at the cost
+ * of a minor reduction in performance.  Compile with -DNDEBUG to disable.
+ */
+struct heap {
+  struct heap_node* min;
+  unsigned int nelts;
+};
+
+/* Return non-zero if a < b. */
+typedef int (*heap_compare_fn)(const struct heap_node* a,
+                               const struct heap_node* b);
+
+/* Public functions. */
+HEAP_EXPORT(void heap_init(struct heap* heap));
+HEAP_EXPORT(struct heap_node* heap_min(const struct heap* heap));
+HEAP_EXPORT(void heap_insert(struct heap* heap,
+                             struct heap_node* newnode,
+                             heap_compare_fn less_than));
+HEAP_EXPORT(void heap_remove(struct heap* heap,
+                             struct heap_node* node,
+                             heap_compare_fn less_than));
+HEAP_EXPORT(void heap_dequeue(struct heap* heap, heap_compare_fn less_than));
+
+/* Implementation follows. */
+
+HEAP_EXPORT(void heap_init(struct heap* heap)) {
+  heap->min = NULL;
+  heap->nelts = 0;
+}
+
+HEAP_EXPORT(struct heap_node* heap_min(const struct heap* heap)) {
+  return heap->min;
+}
+
+/* Swap parent with child. Child moves closer to the root, parent moves away. */
+static void heap_node_swap(struct heap* heap,
+                           struct heap_node* parent,
+                           struct heap_node* child) {
+  struct heap_node* sibling;
+  struct heap_node t;
+
+  t = *parent;
+  *parent = *child;
+  *child = t;
+
+  parent->parent = child;
+  if (child->left == child) {
+    child->left = parent;
+    sibling = child->right;
+  } else {
+    child->right = parent;
+    sibling = child->left;
+  }
+  if (sibling != NULL)
+    sibling->parent = child;
+
+  if (parent->left != NULL)
+    parent->left->parent = parent;
+  if (parent->right != NULL)
+    parent->right->parent = parent;
+
+  if (child->parent == NULL)
+    heap->min = child;
+  else if (child->parent->left == parent)
+    child->parent->left = child;
+  else
+    child->parent->right = child;
+}
+
+HEAP_EXPORT(void heap_insert(struct heap* heap,
+                             struct heap_node* newnode,
+                             heap_compare_fn less_than)) {
+  struct heap_node** parent;
+  struct heap_node** child;
+  unsigned int path;
+  unsigned int n;
+  unsigned int k;
+
+  newnode->left = NULL;
+  newnode->right = NULL;
+  newnode->parent = NULL;
+
+  /* Calculate the path from the root to the insertion point.  This is a min
+   * heap so we always insert at the left-most free node of the bottom row.
+   */
+  path = 0;
+  for (k = 0, n = 1 + heap->nelts; n >= 2; k += 1, n /= 2)
+    path = (path << 1) | (n & 1);
+
+  /* Now traverse the heap using the path we calculated in the previous step. */
+  parent = child = &heap->min;
+  while (k > 0) {
+    parent = child;
+    if (path & 1)
+      child = &(*child)->right;
+    else
+      child = &(*child)->left;
+    path >>= 1;
+    k -= 1;
+  }
+
+  /* Insert the new node. */
+  newnode->parent = *parent;
+  *child = newnode;
+  heap->nelts += 1;
+
+  /* Walk up the tree and check at each node if the heap property holds.
+   * It's a min heap so parent < child must be true.
+   */
+  while (newnode->parent != NULL && less_than(newnode, newnode->parent))
+    heap_node_swap(heap, newnode->parent, newnode);
+}
+
+HEAP_EXPORT(void heap_remove(struct heap* heap,
+                             struct heap_node* node,
+                             heap_compare_fn less_than)) {
+  struct heap_node* smallest;
+  struct heap_node** max;
+  struct heap_node* child;
+  unsigned int path;
+  unsigned int k;
+  unsigned int n;
+
+  if (heap->nelts == 0)
+    return;
+
+  /* Calculate the path from the min (the root) to the max, the left-most node
+   * of the bottom row.
+   */
+  path = 0;
+  for (k = 0, n = heap->nelts; n >= 2; k += 1, n /= 2)
+    path = (path << 1) | (n & 1);
+
+  /* Now traverse the heap using the path we calculated in the previous step. */
+  max = &heap->min;
+  while (k > 0) {
+    if (path & 1)
+      max = &(*max)->right;
+    else
+      max = &(*max)->left;
+    path >>= 1;
+    k -= 1;
+  }
+
+  heap->nelts -= 1;
+
+  /* Unlink the max node. */
+  child = *max;
+  *max = NULL;
+
+  if (child == node) {
+    /* We're removing either the max or the last node in the tree. */
+    if (child == heap->min) {
+      heap->min = NULL;
+    }
+    return;
+  }
+
+  /* Replace the to be deleted node with the max node. */
+  child->left = node->left;
+  child->right = node->right;
+  child->parent = node->parent;
+
+  if (child->left != NULL) {
+    child->left->parent = child;
+  }
+
+  if (child->right != NULL) {
+    child->right->parent = child;
+  }
+
+  if (node->parent == NULL) {
+    heap->min = child;
+  } else if (node->parent->left == node) {
+    node->parent->left = child;
+  } else {
+    node->parent->right = child;
+  }
+
+  /* Walk down the subtree and check at each node if the heap property holds.
+   * It's a min heap so parent < child must be true.  If the parent is bigger,
+   * swap it with the smallest child.
+   */
+  for (;;) {
+    smallest = child;
+    if (child->left != NULL && less_than(child->left, smallest))
+      smallest = child->left;
+    if (child->right != NULL && less_than(child->right, smallest))
+      smallest = child->right;
+    if (smallest == child)
+      break;
+    heap_node_swap(heap, child, smallest);
+  }
+
+  /* Walk up the subtree and check that each parent is less than the node.
+   * This is required because the `max` node is not guaranteed to be the
+   * actual maximum in the tree.
+   */
+  while (child->parent != NULL && less_than(child, child->parent))
+    heap_node_swap(heap, child->parent, child);
+}
+
+HEAP_EXPORT(void heap_dequeue(struct heap* heap, heap_compare_fn less_than)) {
+  heap_remove(heap, heap->min, less_than);
+}
+
+#undef HEAP_EXPORT
+
+#endif  /* UV_SRC_HEAP_H_ */
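
heap-inl.h is an internal, header-only structure with no public wrapper; the sketch below (illustrative only, assuming the header can be included directly) shows the intrusive pattern it expects — a struct heap_node embedded in the element plus a "less than" comparator passed to every mutating call. libuv itself uses this pattern for its timer heap, which is why the nodes carry no payload of their own.

#include <stdio.h>
#include <stddef.h>
#include "heap-inl.h"

struct item {
  int value;
  struct heap_node node;  /* intrusive link used by the heap */
};

/* Comparator required by heap_insert()/heap_remove(): non-zero if a < b. */
static int item_less(const struct heap_node* a, const struct heap_node* b) {
  const struct item* ia =
      (const struct item*) ((const char*) a - offsetof(struct item, node));
  const struct item* ib =
      (const struct item*) ((const char*) b - offsetof(struct item, node));
  return ia->value < ib->value;
}

int main(void) {
  struct heap h;
  struct item items[3];
  int values[3] = {5, 2, 9};
  int i;

  heap_init(&h);
  for (i = 0; i < 3; i++) {
    items[i].value = values[i];
    heap_insert(&h, &items[i].node, item_less);
  }

  /* The root is always the smallest element: prints 2. */
  printf("%d\n",
         ((struct item*)
          ((char*) heap_min(&h) - offsetof(struct item, node)))->value);

  heap_dequeue(&h, item_less);  /* removes the minimum */
  return 0;
}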

+ 309 - 0
Utilities/cmlibuv/src/inet.c

@@ -0,0 +1,309 @@
+/*
+ * Copyright (c) 2004 by Internet Systems Consortium, Inc. ("ISC")
+ * Copyright (c) 1996-1999 by Internet Software Consortium.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "uv.h"
+#include "uv-common.h"
+
+#include <stdio.h>
+#include <string.h>
+
+#if defined(_MSC_VER) && _MSC_VER < 1600
+# include "stdint-msvc2008.h"
+#else
+# include <stdint.h>
+#endif
+
+#define UV__INET_ADDRSTRLEN         16
+#define UV__INET6_ADDRSTRLEN        46
+
+
+static int inet_ntop4(const unsigned char *src, char *dst, size_t size);
+static int inet_ntop6(const unsigned char *src, char *dst, size_t size);
+static int inet_pton4(const char *src, unsigned char *dst);
+static int inet_pton6(const char *src, unsigned char *dst);
+
+
+int uv_inet_ntop(int af, const void* src, char* dst, size_t size) {
+  switch (af) {
+  case AF_INET:
+    return (inet_ntop4(src, dst, size));
+  case AF_INET6:
+    return (inet_ntop6(src, dst, size));
+  default:
+    return UV_EAFNOSUPPORT;
+  }
+  /* NOTREACHED */
+}
+
+
+static int inet_ntop4(const unsigned char *src, char *dst, size_t size) {
+  static const char fmt[] = "%u.%u.%u.%u";
+  char tmp[UV__INET_ADDRSTRLEN];
+  int l;
+
+  l = snprintf(tmp, sizeof(tmp), fmt, src[0], src[1], src[2], src[3]);
+  if (l <= 0 || (size_t) l >= size) {
+    return UV_ENOSPC;
+  }
+  strncpy(dst, tmp, size);
+  dst[size - 1] = '\0';
+  return 0;
+}
+
+
+static int inet_ntop6(const unsigned char *src, char *dst, size_t size) {
+  /*
+   * Note that int32_t and int16_t need only be "at least" large enough
+   * to contain a value of the specified size.  On some systems, like
+   * Crays, there is no such thing as an integer variable with 16 bits.
+   * Keep this in mind if you think this function should have been coded
+   * to use pointer overlays.  All the world's not a VAX.
+   */
+  char tmp[UV__INET6_ADDRSTRLEN], *tp;
+  struct { int base, len; } best, cur;
+  unsigned int words[sizeof(struct in6_addr) / sizeof(uint16_t)];
+  int i;
+
+  /*
+   * Preprocess:
+   *  Copy the input (bytewise) array into a wordwise array.
+   *  Find the longest run of 0x00's in src[] for :: shorthanding.
+   */
+  memset(words, '\0', sizeof words);
+  for (i = 0; i < (int) sizeof(struct in6_addr); i++)
+    words[i / 2] |= (src[i] << ((1 - (i % 2)) << 3));
+  best.base = -1;
+  best.len = 0;
+  cur.base = -1;
+  cur.len = 0;
+  for (i = 0; i < (int) ARRAY_SIZE(words); i++) {
+    if (words[i] == 0) {
+      if (cur.base == -1)
+        cur.base = i, cur.len = 1;
+      else
+        cur.len++;
+    } else {
+      if (cur.base != -1) {
+        if (best.base == -1 || cur.len > best.len)
+          best = cur;
+        cur.base = -1;
+      }
+    }
+  }
+  if (cur.base != -1) {
+    if (best.base == -1 || cur.len > best.len)
+      best = cur;
+  }
+  if (best.base != -1 && best.len < 2)
+    best.base = -1;
+
+  /*
+   * Format the result.
+   */
+  tp = tmp;
+  for (i = 0; i < (int) ARRAY_SIZE(words); i++) {
+    /* Are we inside the best run of 0x00's? */
+    if (best.base != -1 && i >= best.base &&
+        i < (best.base + best.len)) {
+      if (i == best.base)
+        *tp++ = ':';
+      continue;
+    }
+    /* Are we following an initial run of 0x00s or any real hex? */
+    if (i != 0)
+      *tp++ = ':';
+    /* Is this address an encapsulated IPv4? */
+    if (i == 6 && best.base == 0 && (best.len == 6 ||
+        (best.len == 7 && words[7] != 0x0001) ||
+        (best.len == 5 && words[5] == 0xffff))) {
+      int err = inet_ntop4(src+12, tp, sizeof tmp - (tp - tmp));
+      if (err)
+        return err;
+      tp += strlen(tp);
+      break;
+    }
+    tp += sprintf(tp, "%x", words[i]);
+  }
+  /* Was it a trailing run of 0x00's? */
+  if (best.base != -1 && (best.base + best.len) == ARRAY_SIZE(words))
+    *tp++ = ':';
+  *tp++ = '\0';
+
+  /*
+   * Check for overflow, copy, and we're done.
+   */
+  if ((size_t)(tp - tmp) > size) {
+    return UV_ENOSPC;
+  }
+  strcpy(dst, tmp);
+  return 0;
+}
+
+
+int uv_inet_pton(int af, const char* src, void* dst) {
+  if (src == NULL || dst == NULL)
+    return UV_EINVAL;
+
+  switch (af) {
+  case AF_INET:
+    return (inet_pton4(src, dst));
+  case AF_INET6: {
+    int len;
+    char tmp[UV__INET6_ADDRSTRLEN], *s, *p;
+    s = (char*) src;
+    p = strchr(src, '%');
+    if (p != NULL) {
+      s = tmp;
+      len = p - src;
+      if (len > UV__INET6_ADDRSTRLEN-1)
+        return UV_EINVAL;
+      memcpy(s, src, len);
+      s[len] = '\0';
+    }
+    return inet_pton6(s, dst);
+  }
+  default:
+    return UV_EAFNOSUPPORT;
+  }
+  /* NOTREACHED */
+}
+
+
+static int inet_pton4(const char *src, unsigned char *dst) {
+  static const char digits[] = "0123456789";
+  int saw_digit, octets, ch;
+  unsigned char tmp[sizeof(struct in_addr)], *tp;
+
+  saw_digit = 0;
+  octets = 0;
+  *(tp = tmp) = 0;
+  while ((ch = *src++) != '\0') {
+    const char *pch;
+
+    if ((pch = strchr(digits, ch)) != NULL) {
+      unsigned int nw = *tp * 10 + (pch - digits);
+
+      if (saw_digit && *tp == 0)
+        return UV_EINVAL;
+      if (nw > 255)
+        return UV_EINVAL;
+      *tp = nw;
+      if (!saw_digit) {
+        if (++octets > 4)
+          return UV_EINVAL;
+        saw_digit = 1;
+      }
+    } else if (ch == '.' && saw_digit) {
+      if (octets == 4)
+        return UV_EINVAL;
+      *++tp = 0;
+      saw_digit = 0;
+    } else
+      return UV_EINVAL;
+  }
+  if (octets < 4)
+    return UV_EINVAL;
+  memcpy(dst, tmp, sizeof(struct in_addr));
+  return 0;
+}
+
+
+static int inet_pton6(const char *src, unsigned char *dst) {
+  static const char xdigits_l[] = "0123456789abcdef",
+                    xdigits_u[] = "0123456789ABCDEF";
+  unsigned char tmp[sizeof(struct in6_addr)], *tp, *endp, *colonp;
+  const char *xdigits, *curtok;
+  int ch, seen_xdigits;
+  unsigned int val;
+
+  memset((tp = tmp), '\0', sizeof tmp);
+  endp = tp + sizeof tmp;
+  colonp = NULL;
+  /* Leading :: requires some special handling. */
+  if (*src == ':')
+    if (*++src != ':')
+      return UV_EINVAL;
+  curtok = src;
+  seen_xdigits = 0;
+  val = 0;
+  while ((ch = *src++) != '\0') {
+    const char *pch;
+
+    if ((pch = strchr((xdigits = xdigits_l), ch)) == NULL)
+      pch = strchr((xdigits = xdigits_u), ch);
+    if (pch != NULL) {
+      val <<= 4;
+      val |= (pch - xdigits);
+      if (++seen_xdigits > 4)
+        return UV_EINVAL;
+      continue;
+    }
+    if (ch == ':') {
+      curtok = src;
+      if (!seen_xdigits) {
+        if (colonp)
+          return UV_EINVAL;
+        colonp = tp;
+        continue;
+      } else if (*src == '\0') {
+        return UV_EINVAL;
+      }
+      if (tp + sizeof(uint16_t) > endp)
+        return UV_EINVAL;
+      *tp++ = (unsigned char) (val >> 8) & 0xff;
+      *tp++ = (unsigned char) val & 0xff;
+      seen_xdigits = 0;
+      val = 0;
+      continue;
+    }
+    if (ch == '.' && ((tp + sizeof(struct in_addr)) <= endp)) {
+      int err = inet_pton4(curtok, tp);
+      if (err == 0) {
+        tp += sizeof(struct in_addr);
+        seen_xdigits = 0;
+        break;  /*%< '\\0' was seen by inet_pton4(). */
+      }
+    }
+    return UV_EINVAL;
+  }
+  if (seen_xdigits) {
+    if (tp + sizeof(uint16_t) > endp)
+      return UV_EINVAL;
+    *tp++ = (unsigned char) (val >> 8) & 0xff;
+    *tp++ = (unsigned char) val & 0xff;
+  }
+  if (colonp != NULL) {
+    /*
+     * Since some memmove()'s erroneously fail to handle
+     * overlapping regions, we'll do the shift by hand.
+     */
+    const int n = tp - colonp;
+    int i;
+
+    if (tp == endp)
+      return UV_EINVAL;
+    for (i = 1; i <= n; i++) {
+      endp[- i] = colonp[n - i];
+      colonp[n - i] = 0;
+    }
+    tp = endp;
+  }
+  if (tp != endp)
+    return UV_EINVAL;
+  memcpy(dst, tmp, sizeof tmp);
+  return 0;
+}
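
The converters above back the public uv_inet_pton() and uv_inet_ntop() entry points. A minimal round-trip sketch (illustrative only; the address literal is a placeholder and the socket headers assume a POSIX system):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <uv.h>

int main(void) {
  struct in_addr binary;
  char text[46];  /* 46 bytes also fits the longest IPv6 string */

  /* Text -> binary, then back.  Both calls return 0 on success or a
   * negative UV_E* code (UV_EINVAL, UV_ENOSPC, UV_EAFNOSUPPORT). */
  if (uv_inet_pton(AF_INET, "192.168.0.1", &binary) != 0)
    return 1;
  if (uv_inet_ntop(AF_INET, &binary, text, sizeof(text)) != 0)
    return 1;

  printf("%s\n", text);  /* prints 192.168.0.1 */
  return 0;
}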

+ 108 - 0
Utilities/cmlibuv/src/queue.h

@@ -0,0 +1,108 @@
+/* Copyright (c) 2013, Ben Noordhuis <[email protected]>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef QUEUE_H_
+#define QUEUE_H_
+
+#include <stddef.h>
+
+typedef void *QUEUE[2];
+
+/* Private macros. */
+#define QUEUE_NEXT(q)       (*(QUEUE **) &((*(q))[0]))
+#define QUEUE_PREV(q)       (*(QUEUE **) &((*(q))[1]))
+#define QUEUE_PREV_NEXT(q)  (QUEUE_NEXT(QUEUE_PREV(q)))
+#define QUEUE_NEXT_PREV(q)  (QUEUE_PREV(QUEUE_NEXT(q)))
+
+/* Public macros. */
+#define QUEUE_DATA(ptr, type, field)                                          \
+  ((type *) ((char *) (ptr) - offsetof(type, field)))
+
+/* Important note: mutating the list while QUEUE_FOREACH is
+ * iterating over its elements results in undefined behavior.
+ */
+#define QUEUE_FOREACH(q, h)                                                   \
+  for ((q) = QUEUE_NEXT(h); (q) != (h); (q) = QUEUE_NEXT(q))
+
+#define QUEUE_EMPTY(q)                                                        \
+  ((const QUEUE *) (q) == (const QUEUE *) QUEUE_NEXT(q))
+
+#define QUEUE_HEAD(q)                                                         \
+  (QUEUE_NEXT(q))
+
+#define QUEUE_INIT(q)                                                         \
+  do {                                                                        \
+    QUEUE_NEXT(q) = (q);                                                      \
+    QUEUE_PREV(q) = (q);                                                      \
+  }                                                                           \
+  while (0)
+
+#define QUEUE_ADD(h, n)                                                       \
+  do {                                                                        \
+    QUEUE_PREV_NEXT(h) = QUEUE_NEXT(n);                                       \
+    QUEUE_NEXT_PREV(n) = QUEUE_PREV(h);                                       \
+    QUEUE_PREV(h) = QUEUE_PREV(n);                                            \
+    QUEUE_PREV_NEXT(h) = (h);                                                 \
+  }                                                                           \
+  while (0)
+
+#define QUEUE_SPLIT(h, q, n)                                                  \
+  do {                                                                        \
+    QUEUE_PREV(n) = QUEUE_PREV(h);                                            \
+    QUEUE_PREV_NEXT(n) = (n);                                                 \
+    QUEUE_NEXT(n) = (q);                                                      \
+    QUEUE_PREV(h) = QUEUE_PREV(q);                                            \
+    QUEUE_PREV_NEXT(h) = (h);                                                 \
+    QUEUE_PREV(q) = (n);                                                      \
+  }                                                                           \
+  while (0)
+
+#define QUEUE_MOVE(h, n)                                                      \
+  do {                                                                        \
+    if (QUEUE_EMPTY(h))                                                       \
+      QUEUE_INIT(n);                                                          \
+    else {                                                                    \
+      QUEUE* q = QUEUE_HEAD(h);                                               \
+      QUEUE_SPLIT(h, q, n);                                                   \
+    }                                                                         \
+  }                                                                           \
+  while (0)
+
+#define QUEUE_INSERT_HEAD(h, q)                                               \
+  do {                                                                        \
+    QUEUE_NEXT(q) = QUEUE_NEXT(h);                                            \
+    QUEUE_PREV(q) = (h);                                                      \
+    QUEUE_NEXT_PREV(q) = (q);                                                 \
+    QUEUE_NEXT(h) = (q);                                                      \
+  }                                                                           \
+  while (0)
+
+#define QUEUE_INSERT_TAIL(h, q)                                               \
+  do {                                                                        \
+    QUEUE_NEXT(q) = (h);                                                      \
+    QUEUE_PREV(q) = QUEUE_PREV(h);                                            \
+    QUEUE_PREV_NEXT(q) = (q);                                                 \
+    QUEUE_PREV(h) = (q);                                                      \
+  }                                                                           \
+  while (0)
+
+#define QUEUE_REMOVE(q)                                                       \
+  do {                                                                        \
+    QUEUE_PREV_NEXT(q) = QUEUE_NEXT(q);                                       \
+    QUEUE_NEXT_PREV(q) = QUEUE_PREV(q);                                       \
+  }                                                                           \
+  while (0)
+
+#endif /* QUEUE_H_ */
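
An illustrative sketch (not part of the imported sources) of the intrusive QUEUE idiom defined above: the list link is a QUEUE field embedded in the element, and QUEUE_DATA() recovers the containing struct from it.

#include <stdio.h>
#include "queue.h"

struct job {
  int id;
  QUEUE member;  /* intrusive link; the element lives inside the list */
};

int main(void) {
  QUEUE head;
  QUEUE* q;
  struct job a;
  struct job b;

  a.id = 1;
  b.id = 2;

  QUEUE_INIT(&head);
  QUEUE_INSERT_TAIL(&head, &a.member);
  QUEUE_INSERT_TAIL(&head, &b.member);

  /* Mutating the list inside QUEUE_FOREACH is undefined (see note above). */
  QUEUE_FOREACH(q, &head) {
    struct job* j = QUEUE_DATA(q, struct job, member);
    printf("job %d\n", j->id);  /* prints job 1, then job 2 */
  }

  QUEUE_REMOVE(&a.member);
  return 0;
}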

+ 303 - 0
Utilities/cmlibuv/src/threadpool.c

@@ -0,0 +1,303 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv-common.h"
+
+#if !defined(_WIN32)
+# include "unix/internal.h"
+#else
+# include "win/req-inl.h"
+/* TODO(saghul): unify internal req functions */
+static void uv__req_init(uv_loop_t* loop,
+                         uv_req_t* req,
+                         uv_req_type type) {
+  uv_req_init(loop, req);
+  req->type = type;
+  uv__req_register(loop, req);
+}
+# define uv__req_init(loop, req, type) \
+    uv__req_init((loop), (uv_req_t*)(req), (type))
+#endif
+
+#include <stdlib.h>
+
+#define MAX_THREADPOOL_SIZE 128
+
+static uv_once_t once = UV_ONCE_INIT;
+static uv_cond_t cond;
+static uv_mutex_t mutex;
+static unsigned int idle_threads;
+static unsigned int nthreads;
+static uv_thread_t* threads;
+static uv_thread_t default_threads[4];
+static QUEUE exit_message;
+static QUEUE wq;
+static volatile int initialized;
+
+
+static void uv__cancelled(struct uv__work* w) {
+  abort();
+}
+
+
+/* To avoid deadlock with uv_cancel() it's crucial that the worker
+ * never holds the global mutex and the loop-local mutex at the same time.
+ */
+static void worker(void* arg) {
+  struct uv__work* w;
+  QUEUE* q;
+
+  (void) arg;
+
+  for (;;) {
+    uv_mutex_lock(&mutex);
+
+    while (QUEUE_EMPTY(&wq)) {
+      idle_threads += 1;
+      uv_cond_wait(&cond, &mutex);
+      idle_threads -= 1;
+    }
+
+    q = QUEUE_HEAD(&wq);
+
+    if (q == &exit_message)
+      uv_cond_signal(&cond);
+    else {
+      QUEUE_REMOVE(q);
+      QUEUE_INIT(q);  /* Signal uv_cancel() that the work req is
+                             executing. */
+    }
+
+    uv_mutex_unlock(&mutex);
+
+    if (q == &exit_message)
+      break;
+
+    w = QUEUE_DATA(q, struct uv__work, wq);
+    w->work(w);
+
+    uv_mutex_lock(&w->loop->wq_mutex);
+    w->work = NULL;  /* Signal uv_cancel() that the work req is done
+                        executing. */
+    QUEUE_INSERT_TAIL(&w->loop->wq, &w->wq);
+    uv_async_send(&w->loop->wq_async);
+    uv_mutex_unlock(&w->loop->wq_mutex);
+  }
+}
+
+
+static void post(QUEUE* q) {
+  uv_mutex_lock(&mutex);
+  QUEUE_INSERT_TAIL(&wq, q);
+  if (idle_threads > 0)
+    uv_cond_signal(&cond);
+  uv_mutex_unlock(&mutex);
+}
+
+
+#ifndef _WIN32
+UV_DESTRUCTOR(static void cleanup(void)) {
+  unsigned int i;
+
+  if (initialized == 0)
+    return;
+
+  post(&exit_message);
+
+  for (i = 0; i < nthreads; i++)
+    if (uv_thread_join(threads + i))
+      abort();
+
+  if (threads != default_threads)
+    uv__free(threads);
+
+  uv_mutex_destroy(&mutex);
+  uv_cond_destroy(&cond);
+
+  threads = NULL;
+  nthreads = 0;
+  initialized = 0;
+}
+#endif
+
+
+static void init_once(void) {
+  unsigned int i;
+  const char* val;
+
+  nthreads = ARRAY_SIZE(default_threads);
+  val = getenv("UV_THREADPOOL_SIZE");
+  if (val != NULL)
+    nthreads = atoi(val);
+  if (nthreads == 0)
+    nthreads = 1;
+  if (nthreads > MAX_THREADPOOL_SIZE)
+    nthreads = MAX_THREADPOOL_SIZE;
+
+  threads = default_threads;
+  if (nthreads > ARRAY_SIZE(default_threads)) {
+    threads = uv__malloc(nthreads * sizeof(threads[0]));
+    if (threads == NULL) {
+      nthreads = ARRAY_SIZE(default_threads);
+      threads = default_threads;
+    }
+  }
+
+  if (uv_cond_init(&cond))
+    abort();
+
+  if (uv_mutex_init(&mutex))
+    abort();
+
+  QUEUE_INIT(&wq);
+
+  for (i = 0; i < nthreads; i++)
+    if (uv_thread_create(threads + i, worker, NULL))
+      abort();
+
+  initialized = 1;
+}
+
+
+void uv__work_submit(uv_loop_t* loop,
+                     struct uv__work* w,
+                     void (*work)(struct uv__work* w),
+                     void (*done)(struct uv__work* w, int status)) {
+  uv_once(&once, init_once);
+  w->loop = loop;
+  w->work = work;
+  w->done = done;
+  post(&w->wq);
+}
+
+
+static int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) {
+  int cancelled;
+
+  uv_mutex_lock(&mutex);
+  uv_mutex_lock(&w->loop->wq_mutex);
+
+  cancelled = !QUEUE_EMPTY(&w->wq) && w->work != NULL;
+  if (cancelled)
+    QUEUE_REMOVE(&w->wq);
+
+  uv_mutex_unlock(&w->loop->wq_mutex);
+  uv_mutex_unlock(&mutex);
+
+  if (!cancelled)
+    return UV_EBUSY;
+
+  w->work = uv__cancelled;
+  uv_mutex_lock(&loop->wq_mutex);
+  QUEUE_INSERT_TAIL(&loop->wq, &w->wq);
+  uv_async_send(&loop->wq_async);
+  uv_mutex_unlock(&loop->wq_mutex);
+
+  return 0;
+}
+
+
+void uv__work_done(uv_async_t* handle) {
+  struct uv__work* w;
+  uv_loop_t* loop;
+  QUEUE* q;
+  QUEUE wq;
+  int err;
+
+  loop = container_of(handle, uv_loop_t, wq_async);
+  uv_mutex_lock(&loop->wq_mutex);
+  QUEUE_MOVE(&loop->wq, &wq);
+  uv_mutex_unlock(&loop->wq_mutex);
+
+  while (!QUEUE_EMPTY(&wq)) {
+    q = QUEUE_HEAD(&wq);
+    QUEUE_REMOVE(q);
+
+    w = container_of(q, struct uv__work, wq);
+    err = (w->work == uv__cancelled) ? UV_ECANCELED : 0;
+    w->done(w, err);
+  }
+}
+
+
+static void uv__queue_work(struct uv__work* w) {
+  uv_work_t* req = container_of(w, uv_work_t, work_req);
+
+  req->work_cb(req);
+}
+
+
+static void uv__queue_done(struct uv__work* w, int err) {
+  uv_work_t* req;
+
+  req = container_of(w, uv_work_t, work_req);
+  uv__req_unregister(req->loop, req);
+
+  if (req->after_work_cb == NULL)
+    return;
+
+  req->after_work_cb(req, err);
+}
+
+
+int uv_queue_work(uv_loop_t* loop,
+                  uv_work_t* req,
+                  uv_work_cb work_cb,
+                  uv_after_work_cb after_work_cb) {
+  if (work_cb == NULL)
+    return UV_EINVAL;
+
+  uv__req_init(loop, req, UV_WORK);
+  req->loop = loop;
+  req->work_cb = work_cb;
+  req->after_work_cb = after_work_cb;
+  uv__work_submit(loop, &req->work_req, uv__queue_work, uv__queue_done);
+  return 0;
+}
+
+
+int uv_cancel(uv_req_t* req) {
+  struct uv__work* wreq;
+  uv_loop_t* loop;
+
+  switch (req->type) {
+  case UV_FS:
+    loop =  ((uv_fs_t*) req)->loop;
+    wreq = &((uv_fs_t*) req)->work_req;
+    break;
+  case UV_GETADDRINFO:
+    loop =  ((uv_getaddrinfo_t*) req)->loop;
+    wreq = &((uv_getaddrinfo_t*) req)->work_req;
+    break;
+  case UV_GETNAMEINFO:
+    loop = ((uv_getnameinfo_t*) req)->loop;
+    wreq = &((uv_getnameinfo_t*) req)->work_req;
+    break;
+  case UV_WORK:
+    loop =  ((uv_work_t*) req)->loop;
+    wreq = &((uv_work_t*) req)->work_req;
+    break;
+  default:
+    return UV_EINVAL;
+  }
+
+  return uv__work_cancel(loop, req, wreq);
+}
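
A minimal uv_queue_work()/uv_cancel() sketch (illustrative only; doubling an integer stands in for real work). The work callback runs on one of the pool threads created by init_once() above — the pool defaults to 4 threads and can be resized with UV_THREADPOOL_SIZE up to MAX_THREADPOOL_SIZE — while the after-work callback runs back on the loop thread and receives UV_ECANCELED if uv_cancel() won the race while the request was still queued.

#include <stdio.h>
#include <uv.h>

static void do_work(uv_work_t* req) {
  /* Thread-pool thread: no access to the loop or its handles here. */
  int* n = req->data;
  *n *= 2;
}

static void after_work(uv_work_t* req, int status) {
  /* Loop thread. */
  if (status == UV_ECANCELED)
    printf("cancelled\n");
  else
    printf("result: %d\n", *(int*) req->data);
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_work_t req;
  int value = 21;

  req.data = &value;
  uv_queue_work(loop, &req, do_work, after_work);
  /* uv_cancel((uv_req_t*) &req) succeeds only while the request is still
   * queued; once a worker has picked it up it returns UV_EBUSY. */
  return uv_run(loop, UV_RUN_DEFAULT);
}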

+ 1154 - 0
Utilities/cmlibuv/src/unix/aix.c

@@ -0,0 +1,1154 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/ioctl.h>
+#include <net/if.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+#include <sys/time.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <utmp.h>
+#include <libgen.h>
+
+#include <sys/protosw.h>
+#include <libperfstat.h>
+#include <procinfo.h>
+#include <sys/proc.h>
+#include <sys/procfs.h>
+
+#include <sys/poll.h>
+
+#include <sys/pollset.h>
+#include <ctype.h>
+#ifdef HAVE_SYS_AHAFS_EVPRODS_H
+#include <sys/ahafs_evProds.h>
+#endif
+
+#include <sys/mntctl.h>
+#include <sys/vmount.h>
+#include <limits.h>
+#include <strings.h>
+#include <sys/vnode.h>
+
+#define RDWR_BUF_SIZE   4096
+#define EQ(a,b)         (strcmp(a,b) == 0)
+
+int uv__platform_loop_init(uv_loop_t* loop) {
+  loop->fs_fd = -1;
+
+  /* Passing maxfd of -1 should mean the limit is determined
+   * by the user's ulimit or the global limit as per the doc */
+  loop->backend_fd = pollset_create(-1);
+
+  if (loop->backend_fd == -1)
+    return -1;
+
+  return 0;
+}
+
+
+void uv__platform_loop_delete(uv_loop_t* loop) {
+  if (loop->fs_fd != -1) {
+    uv__close(loop->fs_fd);
+    loop->fs_fd = -1;
+  }
+
+  if (loop->backend_fd != -1) {
+    pollset_destroy(loop->backend_fd);
+    loop->backend_fd = -1;
+  }
+}
+
+
+int uv__io_check_fd(uv_loop_t* loop, int fd) {
+  struct poll_ctl pc;
+
+  pc.events = POLLIN;
+  pc.cmd = PS_MOD;  /* Equivalent to PS_ADD if the fd is not in the pollset. */
+  pc.fd = fd;
+
+  if (pollset_ctl(loop->backend_fd, &pc, 1))
+    return -errno;
+
+  pc.cmd = PS_DELETE;
+  if (pollset_ctl(loop->backend_fd, &pc, 1))
+    abort();
+
+  return 0;
+}
+
+
+void uv__io_poll(uv_loop_t* loop, int timeout) {
+  struct pollfd events[1024];
+  struct pollfd pqry;
+  struct pollfd* pe;
+  struct poll_ctl pc;
+  QUEUE* q;
+  uv__io_t* w;
+  uint64_t base;
+  uint64_t diff;
+  int have_signals;
+  int nevents;
+  int count;
+  int nfds;
+  int i;
+  int rc;
+  int add_failed;
+
+  if (loop->nfds == 0) {
+    assert(QUEUE_EMPTY(&loop->watcher_queue));
+    return;
+  }
+
+  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
+    q = QUEUE_HEAD(&loop->watcher_queue);
+    QUEUE_REMOVE(q);
+    QUEUE_INIT(q);
+
+    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
+    assert(w->pevents != 0);
+    assert(w->fd >= 0);
+    assert(w->fd < (int) loop->nwatchers);
+
+    pc.events = w->pevents;
+    pc.fd = w->fd;
+
+    add_failed = 0;
+    if (w->events == 0) {
+      pc.cmd = PS_ADD;
+      if (pollset_ctl(loop->backend_fd, &pc, 1)) {
+        if (errno != EINVAL) {
+          assert(0 && "Failed to add file descriptor (pc.fd) to pollset");
+          abort();
+        }
+        /* Check if the fd is already in the pollset */
+        pqry.fd = pc.fd;
+        rc = pollset_query(loop->backend_fd, &pqry);
+        switch (rc) {
+        case -1:
+          assert(0 && "Failed to query pollset for file descriptor");
+          abort();
+        case 0:
+          assert(0 && "Pollset does not contain file descriptor");
+          abort();
+        }
+        /* If we got here then the pollset already contained the file descriptor even though
+         * we didn't think it should. This probably shouldn't happen, but we can continue. */
+        add_failed = 1;
+      }
+    }
+    if (w->events != 0 || add_failed) {
+      /* Modify, potentially removing events -- need to delete then add.
+       * Could maybe mod if we knew for sure no events are removed, but
+       * content of w->events is handled above as not reliable (falls back)
+       * so may require a pollset_query() which would have to be pretty cheap
+       * compared to a PS_DELETE to be worth optimizing. Alternatively, could
+       * lazily remove events, squelching them in the mean time. */
+      pc.cmd = PS_DELETE;
+      if (pollset_ctl(loop->backend_fd, &pc, 1)) {
+        assert(0 && "Failed to delete file descriptor (pc.fd) from pollset");
+        abort();
+      }
+      pc.cmd = PS_ADD;
+      if (pollset_ctl(loop->backend_fd, &pc, 1)) {
+        assert(0 && "Failed to add file descriptor (pc.fd) to pollset");
+        abort();
+      }
+    }
+
+    w->events = w->pevents;
+  }
+
+  assert(timeout >= -1);
+  base = loop->time;
+  count = 48; /* Benchmarks suggest this gives the best throughput. */
+
+  for (;;) {
+    nfds = pollset_poll(loop->backend_fd,
+                        events,
+                        ARRAY_SIZE(events),
+                        timeout);
+
+    /* Update loop->time unconditionally. It's tempting to skip the update when
+     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
+     * operating system didn't reschedule our process while in the syscall.
+     */
+    SAVE_ERRNO(uv__update_time(loop));
+
+    if (nfds == 0) {
+      assert(timeout != -1);
+      return;
+    }
+
+    if (nfds == -1) {
+      if (errno != EINTR) {
+        abort();
+      }
+
+      if (timeout == -1)
+        continue;
+
+      if (timeout == 0)
+        return;
+
+      /* Interrupted by a signal. Update timeout and poll again. */
+      goto update_timeout;
+    }
+
+    have_signals = 0;
+    nevents = 0;
+
+    assert(loop->watchers != NULL);
+    loop->watchers[loop->nwatchers] = (void*) events;
+    loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
+
+    for (i = 0; i < nfds; i++) {
+      pe = events + i;
+      pc.cmd = PS_DELETE;
+      pc.fd = pe->fd;
+
+      /* Skip invalidated events, see uv__platform_invalidate_fd */
+      if (pc.fd == -1)
+        continue;
+
+      assert(pc.fd >= 0);
+      assert((unsigned) pc.fd < loop->nwatchers);
+
+      w = loop->watchers[pc.fd];
+
+      if (w == NULL) {
+        /* File descriptor that we've stopped watching, disarm it.
+         *
+         * Ignore all errors because we may be racing with another thread
+         * when the file descriptor is closed.
+         */
+        pollset_ctl(loop->backend_fd, &pc, 1);
+        continue;
+      }
+
+      /* Run signal watchers last.  This also affects child process watchers
+       * because those are implemented in terms of signal watchers.
+       */
+      if (w == &loop->signal_io_watcher)
+        have_signals = 1;
+      else
+        w->cb(loop, w, pe->revents);
+
+      nevents++;
+    }
+
+    if (have_signals != 0)
+      loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
+
+    loop->watchers[loop->nwatchers] = NULL;
+    loop->watchers[loop->nwatchers + 1] = NULL;
+
+    if (have_signals != 0)
+      return;  /* Event loop should cycle now so don't poll again. */
+
+    if (nevents != 0) {
+      if (nfds == ARRAY_SIZE(events) && --count != 0) {
+        /* Poll for more events but don't block this time. */
+        timeout = 0;
+        continue;
+      }
+      return;
+    }
+
+    if (timeout == 0)
+      return;
+
+    if (timeout == -1)
+      continue;
+
+update_timeout:
+    assert(timeout > 0);
+
+    diff = loop->time - base;
+    if (diff >= (uint64_t) timeout)
+      return;
+
+    timeout -= diff;
+  }
+}
+
+
+uint64_t uv__hrtime(uv_clocktype_t type) {
+  uint64_t G = 1000000000;
+  timebasestruct_t t;
+  read_wall_time(&t, TIMEBASE_SZ);
+  time_base_to_time(&t, TIMEBASE_SZ);
+  return (uint64_t) t.tb_high * G + t.tb_low;
+}
+
+
+/*
+ * A static buffer could be used for the path manipulations here, but this
+ * function may be called by multiple consumers at once and we do not want to
+ * risk a race condition in the use of snprintf.
+ * There is no direct way to get the executable path on AIX, either through
+ * procfs or through libc APIs.  The approach below parses argv[0] and, when
+ * it is a bare filename, combines it with the PATH environment variable to
+ * construct an absolute path.
+ */
+int uv_exepath(char* buffer, size_t* size) {
+  int res;
+  char args[PATH_MAX];
+  char abspath[PATH_MAX];
+  size_t abspath_size;
+  struct procsinfo pi;
+
+  if (buffer == NULL || size == NULL || *size == 0)
+    return -EINVAL;
+
+  pi.pi_pid = getpid();
+  res = getargs(&pi, sizeof(pi), args, sizeof(args));
+  if (res < 0)
+    return -EINVAL;
+
+  /*
+   * Possibilities for args:
+   * i) an absolute path such as: /home/user/myprojects/nodejs/node
+   * ii) a relative path such as: ./node or ../myprojects/nodejs/node
+   * iii) a bare filename such as "node", after exporting PATH variable
+   *     to its location.
+   */
+
+  /* Case i) and ii) absolute or relative paths */
+  if (strchr(args, '/') != NULL) {
+    if (realpath(args, abspath) != abspath)
+      return -errno;
+
+    abspath_size = strlen(abspath);
+
+    *size -= 1;
+    if (*size > abspath_size)
+      *size = abspath_size;
+
+    memcpy(buffer, abspath, *size);
+    buffer[*size] = '\0';
+
+    return 0;
+  } else {
+  /* Case iii). Search PATH environment variable */
+    char trypath[PATH_MAX];
+    char *clonedpath = NULL;
+    char *token = NULL;
+    char *path = getenv("PATH");
+
+    if (path == NULL)
+      return -EINVAL;
+
+    clonedpath = uv__strdup(path);
+    if (clonedpath == NULL)
+      return -ENOMEM;
+
+    token = strtok(clonedpath, ":");
+    while (token != NULL) {
+      snprintf(trypath, sizeof(trypath) - 1, "%s/%s", token, args);
+      if (realpath(trypath, abspath) == abspath) {
+        /* Check the match is executable */
+        if (access(abspath, X_OK) == 0) {
+          abspath_size = strlen(abspath);
+
+          *size -= 1;
+          if (*size > abspath_size)
+            *size = abspath_size;
+
+          memcpy(buffer, abspath, *size);
+          buffer[*size] = '\0';
+
+          uv__free(clonedpath);
+          return 0;
+        }
+      }
+      token = strtok(NULL, ":");
+    }
+    uv__free(clonedpath);
+
+    /* Out of tokens (path entries), and no match found */
+    return -EINVAL;
+  }
+}
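
Whatever platform strategy sits behind it (the argv[0]/PATH reconstruction above on AIX), callers reach this code through the portable uv_exepath() entry point. A minimal sketch, with an arbitrary buffer size:

#include <stdio.h>
#include <uv.h>

int main(void) {
  char path[1024];
  size_t size = sizeof(path);  /* in: capacity, out: length written */

  if (uv_exepath(path, &size) == 0)
    printf("running from %s (%lu bytes)\n", path, (unsigned long) size);
  else
    fprintf(stderr, "uv_exepath failed\n");

  return 0;
}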
+
+
+uint64_t uv_get_free_memory(void) {
+  perfstat_memory_total_t mem_total;
+  int result = perfstat_memory_total(NULL, &mem_total, sizeof(mem_total), 1);
+  if (result == -1) {
+    return 0;
+  }
+  return mem_total.real_free * 4096;
+}
+
+
+uint64_t uv_get_total_memory(void) {
+  perfstat_memory_total_t mem_total;
+  int result = perfstat_memory_total(NULL, &mem_total, sizeof(mem_total), 1);
+  if (result == -1) {
+    return 0;
+  }
+  return mem_total.real_total * 4096;
+}
+
+
+void uv_loadavg(double avg[3]) {
+  perfstat_cpu_total_t ps_total;
+  int result = perfstat_cpu_total(NULL, &ps_total, sizeof(ps_total), 1);
+  if (result == -1) {
+    avg[0] = 0.; avg[1] = 0.; avg[2] = 0.;
+    return;
+  }
+  avg[0] = ps_total.loadavg[0] / (double)(1 << SBITS);
+  avg[1] = ps_total.loadavg[1] / (double)(1 << SBITS);
+  avg[2] = ps_total.loadavg[2] / (double)(1 << SBITS);
+}
+
+
+#ifdef HAVE_SYS_AHAFS_EVPRODS_H
+static char *uv__rawname(char *cp) {
+  static char rawbuf[FILENAME_MAX+1];
+  char *dp = rindex(cp, '/');
+
+  if (dp == 0)
+    return 0;
+
+  *dp = 0;
+  strcpy(rawbuf, cp);
+  *dp = '/';
+  strcat(rawbuf, "/r");
+  strcat(rawbuf, dp+1);
+  return rawbuf;
+}
+
+
+/*
+ * Determine whether the given pathname is a directory.
+ * Returns 0 if the path is a directory, -1 if not.
+ *
+ * Note: there is an opportunity here for more detailed error information,
+ *       but that would require changing this function's callers as well.
+ */
+static int uv__path_is_a_directory(char* filename) {
+  struct stat statbuf;
+
+  if (stat(filename, &statbuf) < 0)
+    return -1;  /* failed: not a directory, assume it is a file */
+
+  if (statbuf.st_type == VDIR)
+    return 0;
+
+  return -1;
+}
+
+
+/*
+ * Check whether AHAFS is mounted.
+ * Returns 0 if AHAFS is mounted, or an error code < 0 on failure
+ */
+static int uv__is_ahafs_mounted(void){
+  int rv, i = 2;
+  struct vmount *p;
+  int size_multiplier = 10;
+  size_t siz = sizeof(struct vmount)*size_multiplier;
+  struct vmount *vmt;
+  const char *dev = "/aha";
+  char *obj, *stub;
+
+  p = uv__malloc(siz);
+  if (p == NULL)
+    return -errno;
+
+  /* Retrieve all mounted filesystems */
+  rv = mntctl(MCTL_QUERY, siz, (char*)p);
+  if (rv < 0)
+    return -errno;
+  if (rv == 0) {
+    /* buffer was not large enough, reallocate to correct size */
+    siz = *(int*)p;
+    uv__free(p);
+    p = uv__malloc(siz);
+    if (p == NULL)
+      return -errno;
+    rv = mntctl(MCTL_QUERY, siz, (char*)p);
+    if (rv < 0)
+      return -errno;
+  }
+
+  /* Look for dev in filesystems mount info */
+  for(vmt = p, i = 0; i < rv; i++) {
+    obj = vmt2dataptr(vmt, VMT_OBJECT);     /* device */
+    stub = vmt2dataptr(vmt, VMT_STUB);      /* mount point */
+
+    if (EQ(obj, dev) || EQ(uv__rawname(obj), dev) || EQ(stub, dev)) {
+      uv__free(p);  /* Found a match */
+      return 0;
+    }
+    vmt = (struct vmount *) ((char *) vmt + vmt->vmt_length);
+  }
+
+  /* /aha is required for monitoring filesystem changes */
+  return -1;
+}
+
+/*
+ * Create the directory and any missing intermediate directories,
+ * similar to `mkdir -p`.  Returns the code from the final mkdir() call.
+ */
+static int uv__makedir_p(const char *dir) {
+  char tmp[256];
+  char *p = NULL;
+  size_t len;
+  int err;
+
+  snprintf(tmp, sizeof(tmp),"%s",dir);
+  len = strlen(tmp);
+  if (tmp[len - 1] == '/')
+    tmp[len - 1] = 0;
+  for (p = tmp + 1; *p; p++) {
+    if (*p == '/') {
+      *p = 0;
+      err = mkdir(tmp, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
+      if (err != 0 && errno != EEXIST)
+        return err;
+      *p = '/';
+    }
+  }
+  return mkdir(tmp, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
+}
+
+/*
+ * Creates necessary subdirectories in the AIX Event Infrastructure
+ * file system for monitoring the object specified.
+ * Returns code from mkdir call
+ */
+static int uv__make_subdirs_p(const char *filename) {
+  char cmd[2048];
+  char *p;
+  int rc = 0;
+
+  /* Strip off the monitor file name */
+  p = strrchr(filename, '/');
+
+  if (p == NULL)
+    return 0;
+
+  if (uv__path_is_a_directory((char*)filename) == 0) {
+    sprintf(cmd, "/aha/fs/modDir.monFactory");
+  } else {
+    sprintf(cmd, "/aha/fs/modFile.monFactory");
+  }
+
+  strncat(cmd, filename, (p - filename));
+  rc = uv__makedir_p(cmd);
+
+  if (rc == -1 && errno != EEXIST){
+    return -errno;
+  }
+
+  return rc;
+}
+
+
+/*
+ * Checks if /aha is mounted, then proceeds to set up the monitoring
+ * objects for the specified file.
+ * Returns 0 on success, or an error code < 0 on failure
+ */
+static int uv__setup_ahafs(const char* filename, int *fd) {
+  int rc = 0;
+  char mon_file_write_string[RDWR_BUF_SIZE];
+  char mon_file[PATH_MAX];
+  int file_is_directory = 0; /* -1 == NO, 0 == YES  */
+
+  /* Create monitor file name for object */
+  file_is_directory = uv__path_is_a_directory((char*)filename);
+
+  if (file_is_directory == 0)
+    sprintf(mon_file, "/aha/fs/modDir.monFactory");
+  else
+    sprintf(mon_file, "/aha/fs/modFile.monFactory");
+
+  if ((strlen(mon_file) + strlen(filename) + 5) > PATH_MAX)
+    return -ENAMETOOLONG;
+
+  /* Make the necessary subdirectories for the monitor file */
+  rc = uv__make_subdirs_p(filename);
+  if (rc == -1 && errno != EEXIST)
+    return rc;
+
+  strcat(mon_file, filename);
+  strcat(mon_file, ".mon");
+
+  *fd = 0; errno = 0;
+
+  /* Open the monitor file, creating it if necessary */
+  *fd = open(mon_file, O_CREAT|O_RDWR);
+  if (*fd < 0)
+    return -errno;
+
+  /* Write out the monitoring specifications.
+   * In this case, we are monitoring for a state change event type
+   *    CHANGED=YES
+   * We will be waiting in select call, rather than a read:
+   *    WAIT_TYPE=WAIT_IN_SELECT
+   * We only want minimal information for files:
+   *      INFO_LVL=1
+   * For directories, we want more information to track what file
+   * caused the change
+   *      INFO_LVL=2
+   */
+
+  if (file_is_directory == 0)
+    sprintf(mon_file_write_string, "CHANGED=YES;WAIT_TYPE=WAIT_IN_SELECT;INFO_LVL=2");
+  else
+    sprintf(mon_file_write_string, "CHANGED=YES;WAIT_TYPE=WAIT_IN_SELECT;INFO_LVL=1");
+
+  rc = write(*fd, mon_file_write_string, strlen(mon_file_write_string)+1);
+  if (rc < 0)
+    return -errno;
+
+  return 0;
+}
+
+/*
+ * Skips a specified number of lines in the buffer passed in.
+ * Walks the buffer pointed to by p and attempts to skip n lines.
+ * Returns the total number of lines skipped
+ */
+static int uv__skip_lines(char **p, int n) {
+  int lines = 0;
+
+  while(n > 0) {
+    *p = strchr(*p, '\n');
+    if (!p)
+      return lines;
+
+    (*p)++;
+    n--;
+    lines++;
+  }
+  return lines;
+}
+
+
+/*
+ * Parse the event occurrence data to figure out what event just occurred
+ * and take the proper action.
+ *
+ * buf points to the buffer containing the event occurrence data.
+ * Returns 0 on success, -1 on an unrecoverable parsing error.
+ */
+static int uv__parse_data(char *buf, int *events, uv_fs_event_t* handle) {
+  int    evp_rc, i;
+  char   *p;
+  char   filename[PATH_MAX]; /* To be used when handling directories */
+
+  p = buf;
+  *events = 0;
+
+  /* Clean the filename buffer */
+  for(i = 0; i < PATH_MAX; i++) {
+    filename[i] = 0;
+  }
+  i = 0;
+
+  /* Check for BUF_WRAP */
+  if (strncmp(buf, "BUF_WRAP", strlen("BUF_WRAP")) == 0) {
+    assert(0 && "Buffer wrap detected, some event occurrences lost!");
+    return 0;
+  }
+
+  /* Since we are using the default buffer size (4K), and have specified
+   * INFO_LVL=1, we won't see any EVENT_OVERFLOW conditions.  Applications
+   * should check for this keyword if they are using an INFO_LVL of 2 or
+   * higher, and have a buffer size of <= 4K
+   */
+
+  /* Skip to RC_FROM_EVPROD */
+  if (uv__skip_lines(&p, 9) != 9)
+    return -1;
+
+  if (sscanf(p, "RC_FROM_EVPROD=%d\nEND_EVENT_DATA", &evp_rc) == 1) {
+    if (uv__path_is_a_directory(handle->path) == 0) { /* Directory */
+      if (evp_rc == AHAFS_MODDIR_UNMOUNT || evp_rc == AHAFS_MODDIR_REMOVE_SELF) {
+        /* The directory is no longer available for monitoring */
+        *events = UV_RENAME;
+        handle->dir_filename = NULL;
+      } else {
+        /* A file was added/removed inside the directory */
+        *events = UV_CHANGE;
+
+        /* Get the EVPROD_INFO */
+        if (uv__skip_lines(&p, 1) != 1)
+          return -1;
+
+        /* Scan out the name of the file that triggered the event */
+        if (sscanf(p, "BEGIN_EVPROD_INFO\n%sEND_EVPROD_INFO", filename) == 1) {
+          handle->dir_filename = uv__strdup((const char*)&filename);
+        } else
+          return -1;
+        }
+    } else { /* Regular File */
+      if (evp_rc == AHAFS_MODFILE_RENAME)
+        *events = UV_RENAME;
+      else
+        *events = UV_CHANGE;
+    }
+  }
+  else
+    return -1;
+
+  return 0;
+}
+
+
+/* This is the internal callback */
+static void uv__ahafs_event(uv_loop_t* loop, uv__io_t* event_watch, unsigned int fflags) {
+  char   result_data[RDWR_BUF_SIZE];
+  int bytes, rc = 0;
+  uv_fs_event_t* handle;
+  int events = 0;
+  char fname[PATH_MAX];
+  char *p;
+
+  handle = container_of(event_watch, uv_fs_event_t, event_watcher);
+
+  /* At this point, we assume that polling has been done on the
+   * file descriptor, so we can just read the AHAFS event occurrence
+   * data and parse its results without having to block anything
+   */
+  bytes = pread(event_watch->fd, result_data, RDWR_BUF_SIZE, 0);
+
+  assert((bytes >= 0) && "uv__ahafs_event - Error reading monitor file");
+
+  /* Parse the data */
+  if(bytes > 0)
+    rc = uv__parse_data(result_data, &events, handle);
+
+  /* Unrecoverable error */
+  if (rc == -1)
+    return;
+
+  /* For directory changes, the names of the files that triggered the change
+   * are never absolute pathnames.
+   */
+  if (uv__path_is_a_directory(handle->path) == 0) {
+    p = handle->dir_filename;
+  } else {
+    p = strrchr(handle->path, '/');
+    if (p == NULL)
+      p = handle->path;
+    else
+      p++;
+  }
+  strncpy(fname, p, sizeof(fname) - 1);
+  /* Just in case */
+  fname[sizeof(fname) - 1] = '\0';
+
+  handle->cb(handle, fname, events, 0);
+}
+#endif
+
+
+int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
+#ifdef HAVE_SYS_AHAFS_EVPRODS_H
+  uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
+  return 0;
+#else
+  return -ENOSYS;
+#endif
+}
+
+
+int uv_fs_event_start(uv_fs_event_t* handle,
+                      uv_fs_event_cb cb,
+                      const char* filename,
+                      unsigned int flags) {
+#ifdef HAVE_SYS_AHAFS_EVPRODS_H
+  int  fd, rc, str_offset = 0;
+  char cwd[PATH_MAX];
+  char absolute_path[PATH_MAX];
+  char readlink_cwd[PATH_MAX];
+
+
+  /* Figure out whether filename is absolute or not */
+  if (filename[0] == '/') {
+    /* We have absolute pathname */
+    snprintf(absolute_path, sizeof(absolute_path), "%s", filename);
+  } else {
+    /* We have a relative pathname, compose the absolute pathname */
+    snprintf(cwd, sizeof(cwd), "/proc/%lu/cwd", (unsigned long) getpid());
+    rc = readlink(cwd, readlink_cwd, sizeof(readlink_cwd) - 1);
+    if (rc < 0)
+      return rc;
+    /* readlink does not null terminate our string */
+    readlink_cwd[rc] = '\0';
+
+    if (filename[0] == '.' && filename[1] == '/')
+      str_offset = 2;
+
+    snprintf(absolute_path, sizeof(absolute_path), "%s%s", readlink_cwd,
+             filename + str_offset);
+  }
+
+  if (uv__is_ahafs_mounted() < 0)  /* /aha checks failed */
+    return UV_ENOSYS;
+
+  /* Setup ahafs */
+  rc = uv__setup_ahafs((const char *)absolute_path, &fd);
+  if (rc != 0)
+    return rc;
+
+  /* Setup/Initialize all the libuv routines */
+  uv__handle_start(handle);
+  uv__io_init(&handle->event_watcher, uv__ahafs_event, fd);
+  handle->path = uv__strdup(filename);
+  handle->cb = cb;
+
+  uv__io_start(handle->loop, &handle->event_watcher, POLLIN);
+
+  return 0;
+#else
+  return -ENOSYS;
+#endif
+}
+
+
+int uv_fs_event_stop(uv_fs_event_t* handle) {
+#ifdef HAVE_SYS_AHAFS_EVPRODS_H
+  if (!uv__is_active(handle))
+    return 0;
+
+  uv__io_close(handle->loop, &handle->event_watcher);
+  uv__handle_stop(handle);
+
+  if (uv__path_is_a_directory(handle->path) == 0) {
+    uv__free(handle->dir_filename);
+    handle->dir_filename = NULL;
+  }
+
+  uv__free(handle->path);
+  handle->path = NULL;
+  uv__close(handle->event_watcher.fd);
+  handle->event_watcher.fd = -1;
+
+  return 0;
+#else
+  return -ENOSYS;
+#endif
+}
+
+
+void uv__fs_event_close(uv_fs_event_t* handle) {
+#ifdef HAVE_SYS_AHAFS_EVPRODS_H
+  uv_fs_event_stop(handle);
+#else
+  UNREACHABLE();
+#endif
+}
+
+
+char** uv_setup_args(int argc, char** argv) {
+  return argv;
+}
+
+
+int uv_set_process_title(const char* title) {
+  return 0;
+}
+
+
+int uv_get_process_title(char* buffer, size_t size) {
+  if (buffer == NULL || size == 0)
+    return -EINVAL;
+
+  buffer[0] = '\0';
+  return 0;
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+  char pp[64];
+  psinfo_t psinfo;
+  int err;
+  int fd;
+
+  snprintf(pp, sizeof(pp), "/proc/%lu/psinfo", (unsigned long) getpid());
+
+  fd = open(pp, O_RDONLY);
+  if (fd == -1)
+    return -errno;
+
+  /* FIXME(bnoordhuis) Handle EINTR. */
+  err = -EINVAL;
+  if (read(fd, &psinfo, sizeof(psinfo)) == sizeof(psinfo)) {
+    *rss = (size_t)psinfo.pr_rssize * 1024;
+    err = 0;
+  }
+  uv__close(fd);
+
+  return err;
+}
+
+
+int uv_uptime(double* uptime) {
+  struct utmp *utmp_buf;
+  size_t entries = 0;
+  time_t boot_time;
+
+  utmpname(UTMP_FILE);
+
+  setutent();
+
+  while ((utmp_buf = getutent()) != NULL) {
+    if (utmp_buf->ut_user[0] && utmp_buf->ut_type == USER_PROCESS)
+      ++entries;
+    if (utmp_buf->ut_type == BOOT_TIME)
+      boot_time = utmp_buf->ut_time;
+  }
+
+  endutent();
+
+  if (boot_time == 0)
+    return -ENOSYS;
+
+  *uptime = time(NULL) - boot_time;
+  return 0;
+}
+
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+  uv_cpu_info_t* cpu_info;
+  perfstat_cpu_total_t ps_total;
+  perfstat_cpu_t* ps_cpus;
+  perfstat_id_t cpu_id;
+  int result, ncpus, idx = 0;
+
+  result = perfstat_cpu_total(NULL, &ps_total, sizeof(ps_total), 1);
+  if (result == -1) {
+    return -ENOSYS;
+  }
+
+  ncpus = result = perfstat_cpu(NULL, NULL, sizeof(perfstat_cpu_t), 0);
+  if (result == -1) {
+    return -ENOSYS;
+  }
+
+  ps_cpus = (perfstat_cpu_t*) uv__malloc(ncpus * sizeof(perfstat_cpu_t));
+  if (!ps_cpus) {
+    return -ENOMEM;
+  }
+
+  strcpy(cpu_id.name, FIRST_CPU);
+  result = perfstat_cpu(&cpu_id, ps_cpus, sizeof(perfstat_cpu_t), ncpus);
+  if (result == -1) {
+    uv__free(ps_cpus);
+    return -ENOSYS;
+  }
+
+  *cpu_infos = (uv_cpu_info_t*) uv__malloc(ncpus * sizeof(uv_cpu_info_t));
+  if (!*cpu_infos) {
+    uv__free(ps_cpus);
+    return -ENOMEM;
+  }
+
+  *count = ncpus;
+
+  cpu_info = *cpu_infos;
+  while (idx < ncpus) {
+    cpu_info->speed = (int)(ps_total.processorHZ / 1000000);
+    cpu_info->model = uv__strdup(ps_total.description);
+    cpu_info->cpu_times.user = ps_cpus[idx].user;
+    cpu_info->cpu_times.sys = ps_cpus[idx].sys;
+    cpu_info->cpu_times.idle = ps_cpus[idx].idle;
+    cpu_info->cpu_times.irq = ps_cpus[idx].wait;
+    cpu_info->cpu_times.nice = 0;
+    cpu_info++;
+    idx++;
+  }
+
+  uv__free(ps_cpus);
+  return 0;
+}
+
+
+void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
+  int i;
+
+  for (i = 0; i < count; ++i) {
+    uv__free(cpu_infos[i].model);
+  }
+
+  uv__free(cpu_infos);
+}
+
+
+int uv_interface_addresses(uv_interface_address_t** addresses,
+  int* count) {
+  uv_interface_address_t* address;
+  int sockfd, size = 1;
+  struct ifconf ifc;
+  struct ifreq *ifr, *p, flg;
+
+  *count = 0;
+
+  if (0 > (sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP))) {
+    return -errno;
+  }
+
+  if (ioctl(sockfd, SIOCGSIZIFCONF, &size) == -1) {
+    uv__close(sockfd);
+    return -errno;
+  }
+
+  ifc.ifc_req = (struct ifreq*)uv__malloc(size);
+  ifc.ifc_len = size;
+  if (ioctl(sockfd, SIOCGIFCONF, &ifc) == -1) {
+    uv__close(sockfd);
+    return -errno;
+  }
+
+#define ADDR_SIZE(p) MAX((p).sa_len, sizeof(p))
+
+  /* Count all up and running ipv4/ipv6 addresses */
+  ifr = ifc.ifc_req;
+  while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) {
+    p = ifr;
+    ifr = (struct ifreq*)
+      ((char*)ifr + sizeof(ifr->ifr_name) + ADDR_SIZE(ifr->ifr_addr));
+
+    if (!(p->ifr_addr.sa_family == AF_INET6 ||
+          p->ifr_addr.sa_family == AF_INET))
+      continue;
+
+    memcpy(flg.ifr_name, p->ifr_name, sizeof(flg.ifr_name));
+    if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1) {
+      uv__close(sockfd);
+      return -errno;
+    }
+
+    if (!(flg.ifr_flags & IFF_UP && flg.ifr_flags & IFF_RUNNING))
+      continue;
+
+    (*count)++;
+  }
+
+  /* Alloc the return interface structs */
+  *addresses = (uv_interface_address_t*)
+    uv__malloc(*count * sizeof(uv_interface_address_t));
+  if (!(*addresses)) {
+    uv__close(sockfd);
+    return -ENOMEM;
+  }
+  address = *addresses;
+
+  ifr = ifc.ifc_req;
+  while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) {
+    p = ifr;
+    ifr = (struct ifreq*)
+      ((char*)ifr + sizeof(ifr->ifr_name) + ADDR_SIZE(ifr->ifr_addr));
+
+    if (!(p->ifr_addr.sa_family == AF_INET6 ||
+          p->ifr_addr.sa_family == AF_INET))
+      continue;
+
+    memcpy(flg.ifr_name, p->ifr_name, sizeof(flg.ifr_name));
+    if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1) {
+      uv__close(sockfd);
+      return -errno;
+    }
+
+    if (!(flg.ifr_flags & IFF_UP && flg.ifr_flags & IFF_RUNNING))
+      continue;
+
+    /* All conditions above must match count loop */
+
+    address->name = uv__strdup(p->ifr_name);
+
+    if (p->ifr_addr.sa_family == AF_INET6) {
+      address->address.address6 = *((struct sockaddr_in6*) &p->ifr_addr);
+    } else {
+      address->address.address4 = *((struct sockaddr_in*) &p->ifr_addr);
+    }
+
+    /* TODO: Retrieve netmask using SIOCGIFNETMASK ioctl */
+
+    address->is_internal = flg.ifr_flags & IFF_LOOPBACK ? 1 : 0;
+
+    address++;
+  }
+
+#undef ADDR_SIZE
+
+  uv__close(sockfd);
+  return 0;
+}
+
+
+void uv_free_interface_addresses(uv_interface_address_t* addresses,
+  int count) {
+  int i;
+
+  for (i = 0; i < count; ++i) {
+    uv__free(addresses[i].name);
+  }
+
+  uv__free(addresses);
+}
+
+void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
+  struct pollfd* events;
+  uintptr_t i;
+  uintptr_t nfds;
+  struct poll_ctl pc;
+
+  assert(loop->watchers != NULL);
+
+  events = (struct pollfd*) loop->watchers[loop->nwatchers];
+  nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
+
+  if (events != NULL)
+    /* Invalidate events with same file descriptor */
+    for (i = 0; i < nfds; i++)
+      if ((int) events[i].fd == fd)
+        events[i].fd = -1;
+
+  /* Remove the file descriptor from the poll set */
+  pc.events = 0;
+  pc.cmd = PS_DELETE;
+  pc.fd = fd;
+  if (loop->backend_fd >= 0)
+    pollset_ctl(loop->backend_fd, &pc, 1);
+}
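The perfstat-backed uv_cpu_info()/uv_free_cpu_info() pair above follows libuv's usual allocate-then-free ownership pattern. A minimal caller sketch (not part of this commit; print_cpus is a hypothetical name):

/* Hypothetical caller: query CPUs through the public API and release the
 * array with the matching free function. */
#include <stdio.h>
#include "uv.h"

static void print_cpus(void) {
  uv_cpu_info_t* cpus;
  int count;
  int i;

  if (uv_cpu_info(&cpus, &count) != 0)
    return;  /* e.g. -ENOSYS when the perfstat queries fail */

  for (i = 0; i < count; i++)
    printf("%s @ %d MHz\n", cpus[i].model, cpus[i].speed);

  uv_free_cpu_info(cpus, count);  /* frees the model strings and the array */
}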

+ 703 - 0
Utilities/cmlibuv/src/unix/android-ifaddrs.c

@@ -0,0 +1,703 @@
+/*
+Copyright (c) 2013, Kenneth MacKay
+Copyright (c) 2014, Emergya (Cloud4all, FP7/2007-2013 grant agreement #289016)
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include "android-ifaddrs.h"
+#include "uv-common.h"
+
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <net/if_arp.h>
+#include <netinet/in.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+
+typedef struct NetlinkList
+{
+    struct NetlinkList *m_next;
+    struct nlmsghdr *m_data;
+    unsigned int m_size;
+} NetlinkList;
+
+static int netlink_socket(void)
+{
+    struct sockaddr_nl l_addr;
+
+    int l_socket = socket(PF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
+    if(l_socket < 0)
+    {
+        return -1;
+    }
+
+    memset(&l_addr, 0, sizeof(l_addr));
+    l_addr.nl_family = AF_NETLINK;
+    if(bind(l_socket, (struct sockaddr *)&l_addr, sizeof(l_addr)) < 0)
+    {
+        close(l_socket);
+        return -1;
+    }
+
+    return l_socket;
+}
+
+static int netlink_send(int p_socket, int p_request)
+{
+    char l_buffer[NLMSG_ALIGN(sizeof(struct nlmsghdr)) + NLMSG_ALIGN(sizeof(struct rtgenmsg))];
+
+    struct nlmsghdr *l_hdr;
+    struct rtgenmsg *l_msg;
+    struct sockaddr_nl l_addr;
+
+    memset(l_buffer, 0, sizeof(l_buffer));
+
+    l_hdr = (struct nlmsghdr *)l_buffer;
+    l_msg = (struct rtgenmsg *)NLMSG_DATA(l_hdr);
+
+    l_hdr->nlmsg_len = NLMSG_LENGTH(sizeof(*l_msg));
+    l_hdr->nlmsg_type = p_request;
+    l_hdr->nlmsg_flags = NLM_F_ROOT | NLM_F_MATCH | NLM_F_REQUEST;
+    l_hdr->nlmsg_pid = 0;
+    l_hdr->nlmsg_seq = p_socket;
+    l_msg->rtgen_family = AF_UNSPEC;
+
+    memset(&l_addr, 0, sizeof(l_addr));
+    l_addr.nl_family = AF_NETLINK;
+    return (sendto(p_socket, l_hdr, l_hdr->nlmsg_len, 0, (struct sockaddr *)&l_addr, sizeof(l_addr)));
+}
+
+static int netlink_recv(int p_socket, void *p_buffer, size_t p_len)
+{
+    struct sockaddr_nl l_addr;
+    struct msghdr l_msg;
+
+    struct iovec l_iov;
+    l_iov.iov_base = p_buffer;
+    l_iov.iov_len = p_len;
+
+    for(;;)
+    {
+        int l_result;
+        l_msg.msg_name = (void *)&l_addr;
+        l_msg.msg_namelen = sizeof(l_addr);
+        l_msg.msg_iov = &l_iov;
+        l_msg.msg_iovlen = 1;
+        l_msg.msg_control = NULL;
+        l_msg.msg_controllen = 0;
+        l_msg.msg_flags = 0;
+        l_result = recvmsg(p_socket, &l_msg, 0);
+
+        if(l_result < 0)
+        {
+            if(errno == EINTR)
+            {
+                continue;
+            }
+            return -2;
+        }
+
+        /* Buffer was too small */
+        if(l_msg.msg_flags & MSG_TRUNC)
+        {
+            return -1;
+        }
+        return l_result;
+    }
+}
+
+static struct nlmsghdr *getNetlinkResponse(int p_socket, int *p_size, int *p_done)
+{
+    size_t l_size = 4096;
+    void *l_buffer = NULL;
+
+    for(;;)
+    {
+        int l_read;
+
+        uv__free(l_buffer);
+        l_buffer = uv__malloc(l_size);
+        if (l_buffer == NULL)
+        {
+            return NULL;
+        }
+
+        l_read = netlink_recv(p_socket, l_buffer, l_size);
+        *p_size = l_read;
+        if(l_read == -2)
+        {
+            uv__free(l_buffer);
+            return NULL;
+        }
+        if(l_read >= 0)
+        {
+            pid_t l_pid = getpid();
+            struct nlmsghdr *l_hdr;
+            for(l_hdr = (struct nlmsghdr *)l_buffer; NLMSG_OK(l_hdr, (unsigned int)l_read); l_hdr = (struct nlmsghdr *)NLMSG_NEXT(l_hdr, l_read))
+            {
+                if((pid_t)l_hdr->nlmsg_pid != l_pid || (int)l_hdr->nlmsg_seq != p_socket)
+                {
+                    continue;
+                }
+
+                if(l_hdr->nlmsg_type == NLMSG_DONE)
+                {
+                    *p_done = 1;
+                    break;
+                }
+
+                if(l_hdr->nlmsg_type == NLMSG_ERROR)
+                {
+                    uv__free(l_buffer);
+                    return NULL;
+                }
+            }
+            return l_buffer;
+        }
+
+        l_size *= 2;
+    }
+}
+
+static NetlinkList *newListItem(struct nlmsghdr *p_data, unsigned int p_size)
+{
+    NetlinkList *l_item = uv__malloc(sizeof(NetlinkList));
+    if (l_item == NULL)
+    {
+        return NULL;
+    }
+
+    l_item->m_next = NULL;
+    l_item->m_data = p_data;
+    l_item->m_size = p_size;
+    return l_item;
+}
+
+static void freeResultList(NetlinkList *p_list)
+{
+    NetlinkList *l_cur;
+    while(p_list)
+    {
+        l_cur = p_list;
+        p_list = p_list->m_next;
+        uv__free(l_cur->m_data);
+        uv__free(l_cur);
+    }
+}
+
+static NetlinkList *getResultList(int p_socket, int p_request)
+{
+    int l_size;
+    int l_done;
+    NetlinkList *l_list;
+    NetlinkList *l_end;
+
+    if(netlink_send(p_socket, p_request) < 0)
+    {
+        return NULL;
+    }
+
+    l_list = NULL;
+    l_end = NULL;
+
+    l_done = 0;
+    while(!l_done)
+    {
+        NetlinkList *l_item;
+
+        struct nlmsghdr *l_hdr = getNetlinkResponse(p_socket, &l_size, &l_done);
+        /* Error */
+        if(!l_hdr)
+        {
+            freeResultList(l_list);
+            return NULL;
+        }
+
+        l_item = newListItem(l_hdr, l_size);
+        if (!l_item)
+        {
+            freeResultList(l_list);
+            return NULL;
+        }
+        if(!l_list)
+        {
+            l_list = l_item;
+        }
+        else
+        {
+            l_end->m_next = l_item;
+        }
+        l_end = l_item;
+    }
+    return l_list;
+}
+
+static size_t maxSize(size_t a, size_t b)
+{
+    return (a > b ? a : b);
+}
+
+static size_t calcAddrLen(sa_family_t p_family, int p_dataSize)
+{
+    switch(p_family)
+    {
+        case AF_INET:
+            return sizeof(struct sockaddr_in);
+        case AF_INET6:
+            return sizeof(struct sockaddr_in6);
+        case AF_PACKET:
+            return maxSize(sizeof(struct sockaddr_ll), offsetof(struct sockaddr_ll, sll_addr) + p_dataSize);
+        default:
+            return maxSize(sizeof(struct sockaddr), offsetof(struct sockaddr, sa_data) + p_dataSize);
+    }
+}
+
+static void makeSockaddr(sa_family_t p_family, struct sockaddr *p_dest, void *p_data, size_t p_size)
+{
+    switch(p_family)
+    {
+        case AF_INET:
+            memcpy(&((struct sockaddr_in*)p_dest)->sin_addr, p_data, p_size);
+            break;
+        case AF_INET6:
+            memcpy(&((struct sockaddr_in6*)p_dest)->sin6_addr, p_data, p_size);
+            break;
+        case AF_PACKET:
+            memcpy(((struct sockaddr_ll*)p_dest)->sll_addr, p_data, p_size);
+            ((struct sockaddr_ll*)p_dest)->sll_halen = p_size;
+            break;
+        default:
+            memcpy(p_dest->sa_data, p_data, p_size);
+            break;
+    }
+    p_dest->sa_family = p_family;
+}
+
+static void addToEnd(struct ifaddrs **p_resultList, struct ifaddrs *p_entry)
+{
+    if(!*p_resultList)
+    {
+        *p_resultList = p_entry;
+    }
+    else
+    {
+        struct ifaddrs *l_cur = *p_resultList;
+        while(l_cur->ifa_next)
+        {
+            l_cur = l_cur->ifa_next;
+        }
+        l_cur->ifa_next = p_entry;
+    }
+}
+
+static int interpretLink(struct nlmsghdr *p_hdr, struct ifaddrs **p_resultList)
+{
+    struct ifaddrs *l_entry;
+
+    char *l_index;
+    char *l_name;
+    char *l_addr;
+    char *l_data;
+
+    struct ifinfomsg *l_info = (struct ifinfomsg *)NLMSG_DATA(p_hdr);
+
+    size_t l_nameSize = 0;
+    size_t l_addrSize = 0;
+    size_t l_dataSize = 0;
+
+    size_t l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifinfomsg));
+    struct rtattr *l_rta;
+    for(l_rta = IFLA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize))
+    {
+        size_t l_rtaDataSize = RTA_PAYLOAD(l_rta);
+        switch(l_rta->rta_type)
+        {
+            case IFLA_ADDRESS:
+            case IFLA_BROADCAST:
+                l_addrSize += NLMSG_ALIGN(calcAddrLen(AF_PACKET, l_rtaDataSize));
+                break;
+            case IFLA_IFNAME:
+                l_nameSize += NLMSG_ALIGN(l_rtaSize + 1);
+                break;
+            case IFLA_STATS:
+                l_dataSize += NLMSG_ALIGN(l_rtaSize);
+                break;
+            default:
+                break;
+        }
+    }
+
+    l_entry = uv__malloc(sizeof(struct ifaddrs) + sizeof(int) + l_nameSize + l_addrSize + l_dataSize);
+    if (l_entry == NULL)
+    {
+        return -1;
+    }
+    memset(l_entry, 0, sizeof(struct ifaddrs));
+    l_entry->ifa_name = "";
+
+    l_index = ((char *)l_entry) + sizeof(struct ifaddrs);
+    l_name = l_index + sizeof(int);
+    l_addr = l_name + l_nameSize;
+    l_data = l_addr + l_addrSize;
+
+    /* Save the interface index so we can look it up when handling the
+     * addresses.
+     */
+    memcpy(l_index, &l_info->ifi_index, sizeof(int));
+
+    l_entry->ifa_flags = l_info->ifi_flags;
+
+    l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifinfomsg));
+    for(l_rta = IFLA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize))
+    {
+        void *l_rtaData = RTA_DATA(l_rta);
+        size_t l_rtaDataSize = RTA_PAYLOAD(l_rta);
+        switch(l_rta->rta_type)
+        {
+            case IFLA_ADDRESS:
+            case IFLA_BROADCAST:
+            {
+                size_t l_addrLen = calcAddrLen(AF_PACKET, l_rtaDataSize);
+                makeSockaddr(AF_PACKET, (struct sockaddr *)l_addr, l_rtaData, l_rtaDataSize);
+                ((struct sockaddr_ll *)l_addr)->sll_ifindex = l_info->ifi_index;
+                ((struct sockaddr_ll *)l_addr)->sll_hatype = l_info->ifi_type;
+                if(l_rta->rta_type == IFLA_ADDRESS)
+                {
+                    l_entry->ifa_addr = (struct sockaddr *)l_addr;
+                }
+                else
+                {
+                    l_entry->ifa_broadaddr = (struct sockaddr *)l_addr;
+                }
+                l_addr += NLMSG_ALIGN(l_addrLen);
+                break;
+            }
+            case IFLA_IFNAME:
+                strncpy(l_name, l_rtaData, l_rtaDataSize);
+                l_name[l_rtaDataSize] = '\0';
+                l_entry->ifa_name = l_name;
+                break;
+            case IFLA_STATS:
+                memcpy(l_data, l_rtaData, l_rtaDataSize);
+                l_entry->ifa_data = l_data;
+                break;
+            default:
+                break;
+        }
+    }
+
+    addToEnd(p_resultList, l_entry);
+    return 0;
+}
+
+static struct ifaddrs *findInterface(int p_index, struct ifaddrs **p_links, int p_numLinks)
+{
+    int l_num = 0;
+    struct ifaddrs *l_cur = *p_links;
+    while(l_cur && l_num < p_numLinks)
+    {
+        char *l_indexPtr = ((char *)l_cur) + sizeof(struct ifaddrs);
+        int l_index;
+        memcpy(&l_index, l_indexPtr, sizeof(int));
+        if(l_index == p_index)
+        {
+            return l_cur;
+        }
+
+        l_cur = l_cur->ifa_next;
+        ++l_num;
+    }
+    return NULL;
+}
+
+static int interpretAddr(struct nlmsghdr *p_hdr, struct ifaddrs **p_resultList, int p_numLinks)
+{
+    struct ifaddrmsg *l_info = (struct ifaddrmsg *)NLMSG_DATA(p_hdr);
+    struct ifaddrs *l_interface = findInterface(l_info->ifa_index, p_resultList, p_numLinks);
+
+    size_t l_nameSize = 0;
+    size_t l_addrSize = 0;
+
+    int l_addedNetmask = 0;
+
+    size_t l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifaddrmsg));
+    struct rtattr *l_rta;
+    struct ifaddrs *l_entry;
+
+    char *l_name;
+    char *l_addr;
+
+    for(l_rta = IFLA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize))
+    {
+        size_t l_rtaDataSize = RTA_PAYLOAD(l_rta);
+        if(l_info->ifa_family == AF_PACKET)
+        {
+            continue;
+        }
+
+        switch(l_rta->rta_type)
+        {
+            case IFA_ADDRESS:
+            case IFA_LOCAL:
+                if((l_info->ifa_family == AF_INET || l_info->ifa_family == AF_INET6) && !l_addedNetmask)
+                {
+                    /* Make room for netmask */
+                    l_addrSize += NLMSG_ALIGN(calcAddrLen(l_info->ifa_family, l_rtaDataSize));
+                    l_addedNetmask = 1;
+                }
+            case IFA_BROADCAST:
+                l_addrSize += NLMSG_ALIGN(calcAddrLen(l_info->ifa_family, l_rtaDataSize));
+                break;
+            case IFA_LABEL:
+                l_nameSize += NLMSG_ALIGN(l_rtaSize + 1);
+                break;
+            default:
+                break;
+        }
+    }
+
+    l_entry = uv__malloc(sizeof(struct ifaddrs) + l_nameSize + l_addrSize);
+    if (l_entry == NULL)
+    {
+        return -1;
+    }
+    memset(l_entry, 0, sizeof(struct ifaddrs));
+    l_entry->ifa_name = (l_interface ? l_interface->ifa_name : "");
+
+    l_name = ((char *)l_entry) + sizeof(struct ifaddrs);
+    l_addr = l_name + l_nameSize;
+
+    l_entry->ifa_flags = l_info->ifa_flags;
+    if(l_interface)
+    {
+        l_entry->ifa_flags |= l_interface->ifa_flags;
+    }
+
+    l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifaddrmsg));
+    for(l_rta = IFLA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize))
+    {
+        void *l_rtaData = RTA_DATA(l_rta);
+        size_t l_rtaDataSize = RTA_PAYLOAD(l_rta);
+        switch(l_rta->rta_type)
+        {
+            case IFA_ADDRESS:
+            case IFA_BROADCAST:
+            case IFA_LOCAL:
+            {
+                size_t l_addrLen = calcAddrLen(l_info->ifa_family, l_rtaDataSize);
+                makeSockaddr(l_info->ifa_family, (struct sockaddr *)l_addr, l_rtaData, l_rtaDataSize);
+                if(l_info->ifa_family == AF_INET6)
+                {
+                    if(IN6_IS_ADDR_LINKLOCAL((struct in6_addr *)l_rtaData) || IN6_IS_ADDR_MC_LINKLOCAL((struct in6_addr *)l_rtaData))
+                    {
+                        ((struct sockaddr_in6 *)l_addr)->sin6_scope_id = l_info->ifa_index;
+                    }
+                }
+
+                /* Apparently in a point-to-point network IFA_ADDRESS contains
+                 * the dest address and IFA_LOCAL contains the local address
+                 */
+                if(l_rta->rta_type == IFA_ADDRESS)
+                {
+                    if(l_entry->ifa_addr)
+                    {
+                        l_entry->ifa_dstaddr = (struct sockaddr *)l_addr;
+                    }
+                    else
+                    {
+                        l_entry->ifa_addr = (struct sockaddr *)l_addr;
+                    }
+                }
+                else if(l_rta->rta_type == IFA_LOCAL)
+                {
+                    if(l_entry->ifa_addr)
+                    {
+                        l_entry->ifa_dstaddr = l_entry->ifa_addr;
+                    }
+                    l_entry->ifa_addr = (struct sockaddr *)l_addr;
+                }
+                else
+                {
+                    l_entry->ifa_broadaddr = (struct sockaddr *)l_addr;
+                }
+                l_addr += NLMSG_ALIGN(l_addrLen);
+                break;
+            }
+            case IFA_LABEL:
+                strncpy(l_name, l_rtaData, l_rtaDataSize);
+                l_name[l_rtaDataSize] = '\0';
+                l_entry->ifa_name = l_name;
+                break;
+            default:
+                break;
+        }
+    }
+
+    if(l_entry->ifa_addr && (l_entry->ifa_addr->sa_family == AF_INET || l_entry->ifa_addr->sa_family == AF_INET6))
+    {
+        unsigned l_maxPrefix = (l_entry->ifa_addr->sa_family == AF_INET ? 32 : 128);
+        unsigned l_prefix = (l_info->ifa_prefixlen > l_maxPrefix ? l_maxPrefix : l_info->ifa_prefixlen);
+        char l_mask[16] = {0};
+        unsigned i;
+        for(i=0; i<(l_prefix/8); ++i)
+        {
+            l_mask[i] = 0xff;
+        }
+        if(l_prefix % 8)
+        {
+            l_mask[i] = 0xff << (8 - (l_prefix % 8));
+        }
+
+        makeSockaddr(l_entry->ifa_addr->sa_family, (struct sockaddr *)l_addr, l_mask, l_maxPrefix / 8);
+        l_entry->ifa_netmask = (struct sockaddr *)l_addr;
+    }
+
+    addToEnd(p_resultList, l_entry);
+    return 0;
+}
+
+static int interpretLinks(int p_socket, NetlinkList *p_netlinkList, struct ifaddrs **p_resultList)
+{
+
+    int l_numLinks = 0;
+    pid_t l_pid = getpid();
+    for(; p_netlinkList; p_netlinkList = p_netlinkList->m_next)
+    {
+        unsigned int l_nlsize = p_netlinkList->m_size;
+        struct nlmsghdr *l_hdr;
+        for(l_hdr = p_netlinkList->m_data; NLMSG_OK(l_hdr, l_nlsize); l_hdr = NLMSG_NEXT(l_hdr, l_nlsize))
+        {
+            if((pid_t)l_hdr->nlmsg_pid != l_pid || (int)l_hdr->nlmsg_seq != p_socket)
+            {
+                continue;
+            }
+
+            if(l_hdr->nlmsg_type == NLMSG_DONE)
+            {
+                break;
+            }
+
+            if(l_hdr->nlmsg_type == RTM_NEWLINK)
+            {
+                if(interpretLink(l_hdr, p_resultList) == -1)
+                {
+                    return -1;
+                }
+                ++l_numLinks;
+            }
+        }
+    }
+    return l_numLinks;
+}
+
+static int interpretAddrs(int p_socket, NetlinkList *p_netlinkList, struct ifaddrs **p_resultList, int p_numLinks)
+{
+    pid_t l_pid = getpid();
+    for(; p_netlinkList; p_netlinkList = p_netlinkList->m_next)
+    {
+        unsigned int l_nlsize = p_netlinkList->m_size;
+        struct nlmsghdr *l_hdr;
+        for(l_hdr = p_netlinkList->m_data; NLMSG_OK(l_hdr, l_nlsize); l_hdr = NLMSG_NEXT(l_hdr, l_nlsize))
+        {
+            if((pid_t)l_hdr->nlmsg_pid != l_pid || (int)l_hdr->nlmsg_seq != p_socket)
+            {
+                continue;
+            }
+
+            if(l_hdr->nlmsg_type == NLMSG_DONE)
+            {
+                break;
+            }
+
+            if(l_hdr->nlmsg_type == RTM_NEWADDR)
+            {
+                if (interpretAddr(l_hdr, p_resultList, p_numLinks) == -1)
+                {
+                    return -1;
+                }
+            }
+        }
+    }
+    return 0;
+}
+
+int getifaddrs(struct ifaddrs **ifap)
+{
+    int l_socket;
+    int l_result;
+    int l_numLinks;
+    NetlinkList *l_linkResults;
+    NetlinkList *l_addrResults;
+
+    if(!ifap)
+    {
+        return -1;
+    }
+    *ifap = NULL;
+
+    l_socket = netlink_socket();
+    if(l_socket < 0)
+    {
+        return -1;
+    }
+
+    l_linkResults = getResultList(l_socket, RTM_GETLINK);
+    if(!l_linkResults)
+    {
+        close(l_socket);
+        return -1;
+    }
+
+    l_addrResults = getResultList(l_socket, RTM_GETADDR);
+    if(!l_addrResults)
+    {
+        close(l_socket);
+        freeResultList(l_linkResults);
+        return -1;
+    }
+
+    l_result = 0;
+    l_numLinks = interpretLinks(l_socket, l_linkResults, ifap);
+    if(l_numLinks == -1 || interpretAddrs(l_socket, l_addrResults, ifap, l_numLinks) == -1)
+    {
+        l_result = -1;
+    }
+
+    freeResultList(l_linkResults);
+    freeResultList(l_addrResults);
+    close(l_socket);
+    return l_result;
+}
+
+void freeifaddrs(struct ifaddrs *ifa)
+{
+    struct ifaddrs *l_cur;
+    while(ifa)
+    {
+        l_cur = ifa;
+        ifa = ifa->ifa_next;
+        uv__free(l_cur);
+    }
+}
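This replacement getifaddrs()/freeifaddrs() pair is consumed the same way as the BSD originals; a minimal sketch (hypothetical list_interfaces helper, assuming the struct ifaddrs fields declared in android-ifaddrs.h):

/* Hypothetical caller: walk the interface list built over netlink above
 * and release it when done. */
#include <stdio.h>
#include "android-ifaddrs.h"

static void list_interfaces(void) {
  struct ifaddrs* addrs;
  struct ifaddrs* cur;

  if (getifaddrs(&addrs) != 0)
    return;  /* netlink socket or allocation failure */

  for (cur = addrs; cur != NULL; cur = cur->ifa_next) {
    if (cur->ifa_addr == NULL)
      continue;  /* a link-level entry may carry no address */
    printf("%s family=%d\n", cur->ifa_name, cur->ifa_addr->sa_family);
  }

  freeifaddrs(addrs);  /* frees every node allocated by getifaddrs() */
}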

+ 290 - 0
Utilities/cmlibuv/src/unix/async.c

@@ -0,0 +1,290 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/* This file contains both the uv__async internal infrastructure and the
+ * user-facing uv_async_t functions.
+ */
+
+#include "uv.h"
+#include "internal.h"
+#include "atomic-ops.h"
+
+#include <errno.h>
+#include <stdio.h>  /* snprintf() */
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+static void uv__async_event(uv_loop_t* loop,
+                            struct uv__async* w,
+                            unsigned int nevents);
+static int uv__async_eventfd(void);
+
+
+int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
+  int err;
+
+  err = uv__async_start(loop, &loop->async_watcher, uv__async_event);
+  if (err)
+    return err;
+
+  uv__handle_init(loop, (uv_handle_t*)handle, UV_ASYNC);
+  handle->async_cb = async_cb;
+  handle->pending = 0;
+
+  QUEUE_INSERT_TAIL(&loop->async_handles, &handle->queue);
+  uv__handle_start(handle);
+
+  return 0;
+}
+
+
+int uv_async_send(uv_async_t* handle) {
+  /* Do a cheap read first. */
+  if (ACCESS_ONCE(int, handle->pending) != 0)
+    return 0;
+
+  if (cmpxchgi(&handle->pending, 0, 1) == 0)
+    uv__async_send(&handle->loop->async_watcher);
+
+  return 0;
+}
+
+
+void uv__async_close(uv_async_t* handle) {
+  QUEUE_REMOVE(&handle->queue);
+  uv__handle_stop(handle);
+}
+
+
+static void uv__async_event(uv_loop_t* loop,
+                            struct uv__async* w,
+                            unsigned int nevents) {
+  QUEUE queue;
+  QUEUE* q;
+  uv_async_t* h;
+
+  QUEUE_MOVE(&loop->async_handles, &queue);
+  while (!QUEUE_EMPTY(&queue)) {
+    q = QUEUE_HEAD(&queue);
+    h = QUEUE_DATA(q, uv_async_t, queue);
+
+    QUEUE_REMOVE(q);
+    QUEUE_INSERT_TAIL(&loop->async_handles, q);
+
+    if (cmpxchgi(&h->pending, 1, 0) == 0)
+      continue;
+
+    if (h->async_cb == NULL)
+      continue;
+    h->async_cb(h);
+  }
+}
+
+
+static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
+  struct uv__async* wa;
+  char buf[1024];
+  unsigned n;
+  ssize_t r;
+
+  n = 0;
+  for (;;) {
+    r = read(w->fd, buf, sizeof(buf));
+
+    if (r > 0)
+      n += r;
+
+    if (r == sizeof(buf))
+      continue;
+
+    if (r != -1)
+      break;
+
+    if (errno == EAGAIN || errno == EWOULDBLOCK)
+      break;
+
+    if (errno == EINTR)
+      continue;
+
+    abort();
+  }
+
+  wa = container_of(w, struct uv__async, io_watcher);
+
+#if defined(__linux__)
+  if (wa->wfd == -1) {
+    uint64_t val;
+    assert(n == sizeof(val));
+    memcpy(&val, buf, sizeof(val));  /* Avoid alignment issues. */
+    wa->cb(loop, wa, val);
+    return;
+  }
+#endif
+
+  wa->cb(loop, wa, n);
+}
+
+
+void uv__async_send(struct uv__async* wa) {
+  const void* buf;
+  ssize_t len;
+  int fd;
+  int r;
+
+  buf = "";
+  len = 1;
+  fd = wa->wfd;
+
+#if defined(__linux__)
+  if (fd == -1) {
+    static const uint64_t val = 1;
+    buf = &val;
+    len = sizeof(val);
+    fd = wa->io_watcher.fd;  /* eventfd */
+  }
+#endif
+
+  do
+    r = write(fd, buf, len);
+  while (r == -1 && errno == EINTR);
+
+  if (r == len)
+    return;
+
+  if (r == -1)
+    if (errno == EAGAIN || errno == EWOULDBLOCK)
+      return;
+
+  abort();
+}
+
+
+void uv__async_init(struct uv__async* wa) {
+  wa->io_watcher.fd = -1;
+  wa->wfd = -1;
+}
+
+
+int uv__async_start(uv_loop_t* loop, struct uv__async* wa, uv__async_cb cb) {
+  int pipefd[2];
+  int err;
+
+  if (wa->io_watcher.fd != -1)
+    return 0;
+
+  err = uv__async_eventfd();
+  if (err >= 0) {
+    pipefd[0] = err;
+    pipefd[1] = -1;
+  }
+  else if (err == -ENOSYS) {
+    err = uv__make_pipe(pipefd, UV__F_NONBLOCK);
+#if defined(__linux__)
+    /* Save a file descriptor by opening one of the pipe descriptors as
+     * read/write through the procfs.  That file descriptor can then
+     * function as both ends of the pipe.
+     */
+    if (err == 0) {
+      char buf[32];
+      int fd;
+
+      snprintf(buf, sizeof(buf), "/proc/self/fd/%d", pipefd[0]);
+      fd = uv__open_cloexec(buf, O_RDWR);
+      if (fd >= 0) {
+        uv__close(pipefd[0]);
+        uv__close(pipefd[1]);
+        pipefd[0] = fd;
+        pipefd[1] = fd;
+      }
+    }
+#endif
+  }
+
+  if (err < 0)
+    return err;
+
+  uv__io_init(&wa->io_watcher, uv__async_io, pipefd[0]);
+  uv__io_start(loop, &wa->io_watcher, POLLIN);
+  wa->wfd = pipefd[1];
+  wa->cb = cb;
+
+  return 0;
+}
+
+
+void uv__async_stop(uv_loop_t* loop, struct uv__async* wa) {
+  if (wa->io_watcher.fd == -1)
+    return;
+
+  if (wa->wfd != -1) {
+    if (wa->wfd != wa->io_watcher.fd)
+      uv__close(wa->wfd);
+    wa->wfd = -1;
+  }
+
+  uv__io_stop(loop, &wa->io_watcher, POLLIN);
+  uv__close(wa->io_watcher.fd);
+  wa->io_watcher.fd = -1;
+}
+
+
+static int uv__async_eventfd() {
+#if defined(__linux__)
+  static int no_eventfd2;
+  static int no_eventfd;
+  int fd;
+
+  if (no_eventfd2)
+    goto skip_eventfd2;
+
+  fd = uv__eventfd2(0, UV__EFD_CLOEXEC | UV__EFD_NONBLOCK);
+  if (fd != -1)
+    return fd;
+
+  if (errno != ENOSYS)
+    return -errno;
+
+  no_eventfd2 = 1;
+
+skip_eventfd2:
+
+  if (no_eventfd)
+    goto skip_eventfd;
+
+  fd = uv__eventfd(0);
+  if (fd != -1) {
+    uv__cloexec(fd, 1);
+    uv__nonblock(fd, 1);
+    return fd;
+  }
+
+  if (errno != ENOSYS)
+    return -errno;
+
+  no_eventfd = 1;
+
+skip_eventfd:
+
+#endif
+
+  return -ENOSYS;
+}
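The user-facing half of this file reduces to uv_async_init() on the loop thread plus uv_async_send() from any thread; a minimal sketch of that hand-off (hypothetical on_wakeup/worker names, using libuv's public thread API):

/* Hypothetical sketch: wake the loop thread from a worker thread.
 * Callbacks are coalesced, so one on_wakeup() may cover several sends. */
#include "uv.h"

static uv_async_t wakeup;

static void on_wakeup(uv_async_t* handle) {
  /* Runs on the loop thread; close the handle so the loop can exit. */
  uv_close((uv_handle_t*) handle, NULL);
}

static void worker(void* arg) {
  (void) arg;
  uv_async_send(&wakeup);  /* the one call that is safe off the loop thread */
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_thread_t tid;

  uv_async_init(loop, &wakeup, on_wakeup);
  uv_thread_create(&tid, worker, NULL);
  uv_run(loop, UV_RUN_DEFAULT);
  uv_thread_join(&tid);
  return 0;
}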

+ 88 - 0
Utilities/cmlibuv/src/unix/atomic-ops.h

@@ -0,0 +1,88 @@
+/* Copyright (c) 2013, Ben Noordhuis <[email protected]>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef UV_ATOMIC_OPS_H_
+#define UV_ATOMIC_OPS_H_
+
+#include "internal.h"  /* UV_UNUSED */
+
+#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
+#include <atomic.h>
+#define __sync_val_compare_and_swap(p, o, n) atomic_cas_ptr(p, o, n)
+#endif
+
+UV_UNUSED(static int cmpxchgi(int* ptr, int oldval, int newval));
+UV_UNUSED(static long cmpxchgl(long* ptr, long oldval, long newval));
+UV_UNUSED(static void cpu_relax(void));
+
+/* Prefer hand-rolled assembly over the gcc builtins because the latter also
+ * issue full memory barriers.
+ */
+UV_UNUSED(static int cmpxchgi(int* ptr, int oldval, int newval)) {
+#if defined(__i386__) || defined(__x86_64__)
+  int out;
+  __asm__ __volatile__ ("lock; cmpxchg %2, %1;"
+                        : "=a" (out), "+m" (*(volatile int*) ptr)
+                        : "r" (newval), "0" (oldval)
+                        : "memory");
+  return out;
+#elif defined(_AIX) && defined(__xlC__)
+  const int out = (*(volatile int*) ptr);
+  __compare_and_swap(ptr, &oldval, newval);
+  return out;
+#elif defined(__MVS__)
+  return __plo_CS(ptr, (unsigned int*) ptr,
+                  oldval, (unsigned int*) &newval);
+#else
+  return __sync_val_compare_and_swap(ptr, oldval, newval);
+#endif
+}
+
+UV_UNUSED(static long cmpxchgl(long* ptr, long oldval, long newval)) {
+#if defined(__i386__) || defined(__x86_64__)
+  long out;
+  __asm__ __volatile__ ("lock; cmpxchg %2, %1;"
+                        : "=a" (out), "+m" (*(volatile long*) ptr)
+                        : "r" (newval), "0" (oldval)
+                        : "memory");
+  return out;
+#elif defined(_AIX) && defined(__xlC__)
+  const long out = (*(volatile int*) ptr);
+# if defined(__64BIT__)
+  __compare_and_swaplp(ptr, &oldval, newval);
+# else
+  __compare_and_swap(ptr, &oldval, newval);
+# endif /* if defined(__64BIT__) */
+  return out;
+#elif defined (__MVS__)
+# ifdef _LP64
+  return __plo_CSGR(ptr, (unsigned long long*) ptr,
+                    oldval, (unsigned long long*) &newval);
+# else
+  return __plo_CS(ptr, (unsigned int*) ptr,
+                  oldval, (unsigned int*) &newval);
+# endif
+#else
+  return __sync_val_compare_and_swap(ptr, oldval, newval);
+#endif
+}
+
+UV_UNUSED(static void cpu_relax(void)) {
+#if defined(__i386__) || defined(__x86_64__)
+  __asm__ __volatile__ ("rep; nop");  /* a.k.a. PAUSE */
+#endif
+}
+
+#endif  /* UV_ATOMIC_OPS_H_ */
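cmpxchgi() and cpu_relax() are the building blocks for the small lock-free state machines used elsewhere in this import (e.g. the pending flag in async.c). As an illustration only (not part of this header), a trivial test-and-set spinlock can be built from them:

/* Illustrative sketch: a minimal spinlock on top of the primitives above
 * (0 = unlocked, 1 = locked). cmpxchgi() returns the previous value. */
typedef struct { int lock; } tiny_spinlock_t;

static void tiny_spin_lock(tiny_spinlock_t* s) {
  while (cmpxchgi(&s->lock, 0, 1) != 0)
    cpu_relax();               /* spin politely until the holder releases */
}

static void tiny_spin_unlock(tiny_spinlock_t* s) {
  (void) cmpxchgi(&s->lock, 1, 0);  /* swap back to 0 to release */
}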

+ 1238 - 0
Utilities/cmlibuv/src/unix/core.c

@@ -0,0 +1,1238 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stddef.h> /* NULL */
+#include <stdio.h> /* printf */
+#include <stdlib.h>
+#include <string.h> /* strerror */
+#include <errno.h>
+#include <assert.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <limits.h> /* INT_MAX, PATH_MAX, IOV_MAX */
+#include <sys/uio.h> /* writev */
+#include <sys/resource.h> /* getrusage */
+#include <pwd.h>
+
+#ifdef __sun
+# include <sys/filio.h>
+# include <sys/types.h>
+# include <sys/wait.h>
+#endif
+
+#ifdef __APPLE__
+# include <mach-o/dyld.h> /* _NSGetExecutablePath */
+# include <sys/filio.h>
+# if defined(O_CLOEXEC)
+#  define UV__O_CLOEXEC O_CLOEXEC
+# endif
+#endif
+
+#if defined(__DragonFly__)      || \
+    defined(__FreeBSD__)        || \
+    defined(__FreeBSD_kernel__)
+# include <sys/sysctl.h>
+# include <sys/filio.h>
+# include <sys/wait.h>
+# define UV__O_CLOEXEC O_CLOEXEC
+# if defined(__FreeBSD__) && __FreeBSD__ >= 10
+#  define uv__accept4 accept4
+#  define UV__SOCK_NONBLOCK SOCK_NONBLOCK
+#  define UV__SOCK_CLOEXEC  SOCK_CLOEXEC
+# endif
+# if !defined(F_DUP2FD_CLOEXEC) && defined(_F_DUP2FD_CLOEXEC)
+#  define F_DUP2FD_CLOEXEC  _F_DUP2FD_CLOEXEC
+# endif
+#endif
+
+#if defined(__ANDROID_API__) && __ANDROID_API__ < 21
+# include <dlfcn.h>  /* for dlsym */
+#endif
+
+#if defined(__MVS__)
+#include <sys/ioctl.h>
+#endif
+
+static int uv__run_pending(uv_loop_t* loop);
+
+/* Verify that uv_buf_t is ABI-compatible with struct iovec. */
+STATIC_ASSERT(sizeof(uv_buf_t) == sizeof(struct iovec));
+STATIC_ASSERT(sizeof(((uv_buf_t*) 0)->base) ==
+              sizeof(((struct iovec*) 0)->iov_base));
+STATIC_ASSERT(sizeof(((uv_buf_t*) 0)->len) ==
+              sizeof(((struct iovec*) 0)->iov_len));
+STATIC_ASSERT(offsetof(uv_buf_t, base) == offsetof(struct iovec, iov_base));
+STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len));
+
+
+uint64_t uv_hrtime(void) {
+  return uv__hrtime(UV_CLOCK_PRECISE);
+}
+
+
+void uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
+  assert(!(handle->flags & (UV_CLOSING | UV_CLOSED)));
+
+  handle->flags |= UV_CLOSING;
+  handle->close_cb = close_cb;
+
+  switch (handle->type) {
+  case UV_NAMED_PIPE:
+    uv__pipe_close((uv_pipe_t*)handle);
+    break;
+
+  case UV_TTY:
+    uv__stream_close((uv_stream_t*)handle);
+    break;
+
+  case UV_TCP:
+    uv__tcp_close((uv_tcp_t*)handle);
+    break;
+
+  case UV_UDP:
+    uv__udp_close((uv_udp_t*)handle);
+    break;
+
+  case UV_PREPARE:
+    uv__prepare_close((uv_prepare_t*)handle);
+    break;
+
+  case UV_CHECK:
+    uv__check_close((uv_check_t*)handle);
+    break;
+
+  case UV_IDLE:
+    uv__idle_close((uv_idle_t*)handle);
+    break;
+
+  case UV_ASYNC:
+    uv__async_close((uv_async_t*)handle);
+    break;
+
+  case UV_TIMER:
+    uv__timer_close((uv_timer_t*)handle);
+    break;
+
+  case UV_PROCESS:
+    uv__process_close((uv_process_t*)handle);
+    break;
+
+  case UV_FS_EVENT:
+    uv__fs_event_close((uv_fs_event_t*)handle);
+    break;
+
+  case UV_POLL:
+    uv__poll_close((uv_poll_t*)handle);
+    break;
+
+  case UV_FS_POLL:
+    uv__fs_poll_close((uv_fs_poll_t*)handle);
+    break;
+
+  case UV_SIGNAL:
+    uv__signal_close((uv_signal_t*) handle);
+    /* Signal handles may not be closed immediately. The signal code will
+     * itself call uv__make_close_pending() whenever appropriate. */
+    return;
+
+  default:
+    assert(0);
+  }
+
+  uv__make_close_pending(handle);
+}
+
+int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value) {
+  int r;
+  int fd;
+  socklen_t len;
+
+  if (handle == NULL || value == NULL)
+    return -EINVAL;
+
+  if (handle->type == UV_TCP || handle->type == UV_NAMED_PIPE)
+    fd = uv__stream_fd((uv_stream_t*) handle);
+  else if (handle->type == UV_UDP)
+    fd = ((uv_udp_t *) handle)->io_watcher.fd;
+  else
+    return -ENOTSUP;
+
+  len = sizeof(*value);
+
+  if (*value == 0)
+    r = getsockopt(fd, SOL_SOCKET, optname, value, &len);
+  else
+    r = setsockopt(fd, SOL_SOCKET, optname, (const void*) value, len);
+
+  if (r < 0)
+    return -errno;
+
+  return 0;
+}
+
+void uv__make_close_pending(uv_handle_t* handle) {
+  assert(handle->flags & UV_CLOSING);
+  assert(!(handle->flags & UV_CLOSED));
+  handle->next_closing = handle->loop->closing_handles;
+  handle->loop->closing_handles = handle;
+}
+
+int uv__getiovmax(void) {
+#if defined(IOV_MAX)
+  return IOV_MAX;
+#elif defined(_SC_IOV_MAX)
+  static int iovmax = -1;
+  if (iovmax == -1) {
+    iovmax = sysconf(_SC_IOV_MAX);
+    /* On some embedded devices (e.g. arm-linux-uclibc based IP cameras),
+     * sysconf(_SC_IOV_MAX) cannot report the real limit: it returns -1
+     * with errno set to EINPROGRESS. Fall back to 1 in that case.
+     */
+    if (iovmax == -1) iovmax = 1;
+  }
+  return iovmax;
+#else
+  return 1024;
+#endif
+}
+
+
+static void uv__finish_close(uv_handle_t* handle) {
+  /* Note: while the handle is in the UV_CLOSING state now, it's still possible
+   * for it to be active in the sense that uv__is_active() returns true.
+   * A good example is when the user calls uv_shutdown(), immediately followed
+   * by uv_close(). The handle is considered active at this point because the
+   * completion of the shutdown req is still pending.
+   */
+  assert(handle->flags & UV_CLOSING);
+  assert(!(handle->flags & UV_CLOSED));
+  handle->flags |= UV_CLOSED;
+
+  switch (handle->type) {
+    case UV_PREPARE:
+    case UV_CHECK:
+    case UV_IDLE:
+    case UV_ASYNC:
+    case UV_TIMER:
+    case UV_PROCESS:
+    case UV_FS_EVENT:
+    case UV_FS_POLL:
+    case UV_POLL:
+    case UV_SIGNAL:
+      break;
+
+    case UV_NAMED_PIPE:
+    case UV_TCP:
+    case UV_TTY:
+      uv__stream_destroy((uv_stream_t*)handle);
+      break;
+
+    case UV_UDP:
+      uv__udp_finish_close((uv_udp_t*)handle);
+      break;
+
+    default:
+      assert(0);
+      break;
+  }
+
+  uv__handle_unref(handle);
+  QUEUE_REMOVE(&handle->handle_queue);
+
+  if (handle->close_cb) {
+    handle->close_cb(handle);
+  }
+}
+
+
+static void uv__run_closing_handles(uv_loop_t* loop) {
+  uv_handle_t* p;
+  uv_handle_t* q;
+
+  p = loop->closing_handles;
+  loop->closing_handles = NULL;
+
+  while (p) {
+    q = p->next_closing;
+    uv__finish_close(p);
+    p = q;
+  }
+}
+
+
+int uv_is_closing(const uv_handle_t* handle) {
+  return uv__is_closing(handle);
+}
+
+
+int uv_backend_fd(const uv_loop_t* loop) {
+  return loop->backend_fd;
+}
+
+
+int uv_backend_timeout(const uv_loop_t* loop) {
+  if (loop->stop_flag != 0)
+    return 0;
+
+  if (!uv__has_active_handles(loop) && !uv__has_active_reqs(loop))
+    return 0;
+
+  if (!QUEUE_EMPTY(&loop->idle_handles))
+    return 0;
+
+  if (!QUEUE_EMPTY(&loop->pending_queue))
+    return 0;
+
+  if (loop->closing_handles)
+    return 0;
+
+  return uv__next_timeout(loop);
+}
+
+
+static int uv__loop_alive(const uv_loop_t* loop) {
+  return uv__has_active_handles(loop) ||
+         uv__has_active_reqs(loop) ||
+         loop->closing_handles != NULL;
+}
+
+
+int uv_loop_alive(const uv_loop_t* loop) {
+    return uv__loop_alive(loop);
+}
+
+
+int uv_run(uv_loop_t* loop, uv_run_mode mode) {
+  int timeout;
+  int r;
+  int ran_pending;
+
+  r = uv__loop_alive(loop);
+  if (!r)
+    uv__update_time(loop);
+
+  while (r != 0 && loop->stop_flag == 0) {
+    uv__update_time(loop);
+    uv__run_timers(loop);
+    ran_pending = uv__run_pending(loop);
+    uv__run_idle(loop);
+    uv__run_prepare(loop);
+
+    timeout = 0;
+    if ((mode == UV_RUN_ONCE && !ran_pending) || mode == UV_RUN_DEFAULT)
+      timeout = uv_backend_timeout(loop);
+
+    uv__io_poll(loop, timeout);
+    uv__run_check(loop);
+    uv__run_closing_handles(loop);
+
+    if (mode == UV_RUN_ONCE) {
+      /* UV_RUN_ONCE implies forward progress: at least one callback must have
+       * been invoked when it returns. uv__io_poll() can return without doing
+       * I/O (meaning: no callbacks) when its timeout expires - which means we
+       * have pending timers that satisfy the forward progress constraint.
+       *
+       * UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from
+       * the check.
+       */
+      uv__update_time(loop);
+      uv__run_timers(loop);
+    }
+
+    r = uv__loop_alive(loop);
+    if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT)
+      break;
+  }
+
+  /* The if statement lets gcc compile it to a conditional store. Avoids
+   * dirtying a cache line.
+   */
+  if (loop->stop_flag != 0)
+    loop->stop_flag = 0;
+
+  return r;
+}
+
+
+void uv_update_time(uv_loop_t* loop) {
+  uv__update_time(loop);
+}
+
+
+int uv_is_active(const uv_handle_t* handle) {
+  return uv__is_active(handle);
+}
+
+
+/* Open a socket in non-blocking close-on-exec mode, atomically if possible. */
+int uv__socket(int domain, int type, int protocol) {
+  int sockfd;
+  int err;
+
+#if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC)
+  sockfd = socket(domain, type | SOCK_NONBLOCK | SOCK_CLOEXEC, protocol);
+  if (sockfd != -1)
+    return sockfd;
+
+  if (errno != EINVAL)
+    return -errno;
+#endif
+
+  sockfd = socket(domain, type, protocol);
+  if (sockfd == -1)
+    return -errno;
+
+  err = uv__nonblock(sockfd, 1);
+  if (err == 0)
+    err = uv__cloexec(sockfd, 1);
+
+  if (err) {
+    uv__close(sockfd);
+    return err;
+  }
+
+#if defined(SO_NOSIGPIPE)
+  {
+    int on = 1;
+    setsockopt(sockfd, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on));
+  }
+#endif
+
+  return sockfd;
+}
+
+/* Get a FILE* for a file opened in read-only, close-on-exec mode. */
+FILE* uv__open_file(const char* path) {
+  int fd;
+  FILE* fp;
+
+  fd = uv__open_cloexec(path, O_RDONLY);
+  if (fd < 0)
+    return NULL;
+
+  fp = fdopen(fd, "r");
+  if (fp == NULL)
+    uv__close(fd);
+
+  return fp;
+}
+
+
+int uv__accept(int sockfd) {
+  int peerfd;
+  int err;
+
+  assert(sockfd >= 0);
+
+  while (1) {
+#if defined(__linux__) || (defined(__FreeBSD__) && __FreeBSD__ >= 10)
+    static int no_accept4;
+
+    if (no_accept4)
+      goto skip;
+
+    peerfd = uv__accept4(sockfd,
+                         NULL,
+                         NULL,
+                         UV__SOCK_NONBLOCK|UV__SOCK_CLOEXEC);
+    if (peerfd != -1)
+      return peerfd;
+
+    if (errno == EINTR)
+      continue;
+
+    if (errno != ENOSYS)
+      return -errno;
+
+    no_accept4 = 1;
+skip:
+#endif
+
+    peerfd = accept(sockfd, NULL, NULL);
+    if (peerfd == -1) {
+      if (errno == EINTR)
+        continue;
+      return -errno;
+    }
+
+    err = uv__cloexec(peerfd, 1);
+    if (err == 0)
+      err = uv__nonblock(peerfd, 1);
+
+    if (err) {
+      uv__close(peerfd);
+      return err;
+    }
+
+    return peerfd;
+  }
+}
+
+
+int uv__close_nocheckstdio(int fd) {
+  int saved_errno;
+  int rc;
+
+  assert(fd > -1);  /* Catch uninitialized io_watcher.fd bugs. */
+
+  saved_errno = errno;
+  rc = close(fd);
+  if (rc == -1) {
+    rc = -errno;
+    if (rc == -EINTR || rc == -EINPROGRESS)
+      rc = 0;    /* The close is in progress, not an error. */
+    errno = saved_errno;
+  }
+
+  return rc;
+}
+
+
+int uv__close(int fd) {
+  assert(fd > STDERR_FILENO);  /* Catch stdio close bugs. */
+  return uv__close_nocheckstdio(fd);
+}
+
+
+int uv__nonblock_ioctl(int fd, int set) {
+  int r;
+
+  do
+    r = ioctl(fd, FIONBIO, &set);
+  while (r == -1 && errno == EINTR);
+
+  if (r)
+    return -errno;
+
+  return 0;
+}
+
+
+int uv__cloexec_ioctl(int fd, int set) {
+  int r;
+
+  do
+    r = ioctl(fd, set ? FIOCLEX : FIONCLEX);
+  while (r == -1 && errno == EINTR);
+
+  if (r)
+    return -errno;
+
+  return 0;
+}
+
+
+int uv__nonblock_fcntl(int fd, int set) {
+  int flags;
+  int r;
+
+  do
+    r = fcntl(fd, F_GETFL);
+  while (r == -1 && errno == EINTR);
+
+  if (r == -1)
+    return -errno;
+
+  /* Bail out now if already set/clear. */
+  if (!!(r & O_NONBLOCK) == !!set)
+    return 0;
+
+  if (set)
+    flags = r | O_NONBLOCK;
+  else
+    flags = r & ~O_NONBLOCK;
+
+  do
+    r = fcntl(fd, F_SETFL, flags);
+  while (r == -1 && errno == EINTR);
+
+  if (r)
+    return -errno;
+
+  return 0;
+}
+
+
+int uv__cloexec_fcntl(int fd, int set) {
+  int flags;
+  int r;
+
+  do
+    r = fcntl(fd, F_GETFD);
+  while (r == -1 && errno == EINTR);
+
+  if (r == -1)
+    return -errno;
+
+  /* Bail out now if already set/clear. */
+  if (!!(r & FD_CLOEXEC) == !!set)
+    return 0;
+
+  if (set)
+    flags = r | FD_CLOEXEC;
+  else
+    flags = r & ~FD_CLOEXEC;
+
+  do
+    r = fcntl(fd, F_SETFD, flags);
+  while (r == -1 && errno == EINTR);
+
+  if (r)
+    return -errno;
+
+  return 0;
+}
+
+
+/* This function is not execve-safe, there is a race window
+ * between the call to dup() and fcntl(FD_CLOEXEC).
+ */
+int uv__dup(int fd) {
+  int err;
+
+  fd = dup(fd);
+
+  if (fd == -1)
+    return -errno;
+
+  err = uv__cloexec(fd, 1);
+  if (err) {
+    uv__close(fd);
+    return err;
+  }
+
+  return fd;
+}
+
+
+ssize_t uv__recvmsg(int fd, struct msghdr* msg, int flags) {
+  struct cmsghdr* cmsg;
+  ssize_t rc;
+  int* pfd;
+  int* end;
+#if defined(__linux__)
+  static int no_msg_cmsg_cloexec;
+  if (no_msg_cmsg_cloexec == 0) {
+    rc = recvmsg(fd, msg, flags | 0x40000000);  /* MSG_CMSG_CLOEXEC */
+    if (rc != -1)
+      return rc;
+    if (errno != EINVAL)
+      return -errno;
+    rc = recvmsg(fd, msg, flags);
+    if (rc == -1)
+      return -errno;
+    no_msg_cmsg_cloexec = 1;
+  } else {
+    rc = recvmsg(fd, msg, flags);
+  }
+#else
+  rc = recvmsg(fd, msg, flags);
+#endif
+  if (rc == -1)
+    return -errno;
+  if (msg->msg_controllen == 0)
+    return rc;
+  for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg))
+    if (cmsg->cmsg_type == SCM_RIGHTS)
+      for (pfd = (int*) CMSG_DATA(cmsg),
+           end = (int*) ((char*) cmsg + cmsg->cmsg_len);
+           pfd < end;
+           pfd += 1)
+        uv__cloexec(*pfd, 1);
+  return rc;
+}
+
+
+int uv_cwd(char* buffer, size_t* size) {
+  if (buffer == NULL || size == NULL)
+    return -EINVAL;
+
+  if (getcwd(buffer, *size) == NULL)
+    return -errno;
+
+  *size = strlen(buffer);
+  if (*size > 1 && buffer[*size - 1] == '/') {
+    buffer[*size-1] = '\0';
+    (*size)--;
+  }
+
+  return 0;
+}
+
+
+int uv_chdir(const char* dir) {
+  if (chdir(dir))
+    return -errno;
+
+  return 0;
+}
+
+
+void uv_disable_stdio_inheritance(void) {
+  int fd;
+
+  /* Set the CLOEXEC flag on all open descriptors. Unconditionally try the
+   * first 16 file descriptors. After that, bail out after the first error.
+   */
+  for (fd = 0; ; fd++)
+    if (uv__cloexec(fd, 1) && fd > 15)
+      break;
+}
+
+
+int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) {
+  int fd_out;
+
+  switch (handle->type) {
+  case UV_TCP:
+  case UV_NAMED_PIPE:
+  case UV_TTY:
+    fd_out = uv__stream_fd((uv_stream_t*) handle);
+    break;
+
+  case UV_UDP:
+    fd_out = ((uv_udp_t *) handle)->io_watcher.fd;
+    break;
+
+  case UV_POLL:
+    fd_out = ((uv_poll_t *) handle)->io_watcher.fd;
+    break;
+
+  default:
+    return -EINVAL;
+  }
+
+  if (uv__is_closing(handle) || fd_out == -1)
+    return -EBADF;
+
+  *fd = fd_out;
+  return 0;
+}
+
+
+static int uv__run_pending(uv_loop_t* loop) {
+  QUEUE* q;
+  QUEUE pq;
+  uv__io_t* w;
+
+  if (QUEUE_EMPTY(&loop->pending_queue))
+    return 0;
+
+  QUEUE_MOVE(&loop->pending_queue, &pq);
+
+  while (!QUEUE_EMPTY(&pq)) {
+    q = QUEUE_HEAD(&pq);
+    QUEUE_REMOVE(q);
+    QUEUE_INIT(q);
+    w = QUEUE_DATA(q, uv__io_t, pending_queue);
+    w->cb(loop, w, POLLOUT);
+  }
+
+  return 1;
+}
+
+
+static unsigned int next_power_of_two(unsigned int val) {
+  val -= 1;
+  val |= val >> 1;
+  val |= val >> 2;
+  val |= val >> 4;
+  val |= val >> 8;
+  val |= val >> 16;
+  val += 1;
+  return val;
+}
+
+static void maybe_resize(uv_loop_t* loop, unsigned int len) {
+  uv__io_t** watchers;
+  void* fake_watcher_list;
+  void* fake_watcher_count;
+  unsigned int nwatchers;
+  unsigned int i;
+
+  if (len <= loop->nwatchers)
+    return;
+
+  /* Preserve fake watcher list and count at the end of the watchers */
+  if (loop->watchers != NULL) {
+    fake_watcher_list = loop->watchers[loop->nwatchers];
+    fake_watcher_count = loop->watchers[loop->nwatchers + 1];
+  } else {
+    fake_watcher_list = NULL;
+    fake_watcher_count = NULL;
+  }
+
+  nwatchers = next_power_of_two(len + 2) - 2;
+  watchers = uv__realloc(loop->watchers,
+                         (nwatchers + 2) * sizeof(loop->watchers[0]));
+
+  if (watchers == NULL)
+    abort();
+  for (i = loop->nwatchers; i < nwatchers; i++)
+    watchers[i] = NULL;
+  watchers[nwatchers] = fake_watcher_list;
+  watchers[nwatchers + 1] = fake_watcher_count;
+
+  loop->watchers = watchers;
+  loop->nwatchers = nwatchers;
+}
+
+
+void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) {
+  assert(cb != NULL);
+  assert(fd >= -1);
+  QUEUE_INIT(&w->pending_queue);
+  QUEUE_INIT(&w->watcher_queue);
+  w->cb = cb;
+  w->fd = fd;
+  w->events = 0;
+  w->pevents = 0;
+
+#if defined(UV_HAVE_KQUEUE)
+  w->rcount = 0;
+  w->wcount = 0;
+#endif /* defined(UV_HAVE_KQUEUE) */
+}
+
+
+void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
+  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP)));
+  assert(0 != events);
+  assert(w->fd >= 0);
+  assert(w->fd < INT_MAX);
+
+  w->pevents |= events;
+  maybe_resize(loop, w->fd + 1);
+
+#if !defined(__sun)
+  /* The event ports backend needs to rearm all file descriptors on each and
+   * every tick of the event loop but the other backends allow us to
+   * short-circuit here if the event mask is unchanged.
+   */
+  if (w->events == w->pevents) {
+    if (w->events == 0 && !QUEUE_EMPTY(&w->watcher_queue)) {
+      QUEUE_REMOVE(&w->watcher_queue);
+      QUEUE_INIT(&w->watcher_queue);
+    }
+    return;
+  }
+#endif
+
+  if (QUEUE_EMPTY(&w->watcher_queue))
+    QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
+
+  if (loop->watchers[w->fd] == NULL) {
+    loop->watchers[w->fd] = w;
+    loop->nfds++;
+  }
+}
+
+
+void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
+  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP)));
+  assert(0 != events);
+
+  if (w->fd == -1)
+    return;
+
+  assert(w->fd >= 0);
+
+  /* Happens when uv__io_stop() is called on a handle that was never started. */
+  if ((unsigned) w->fd >= loop->nwatchers)
+    return;
+
+  w->pevents &= ~events;
+
+  if (w->pevents == 0) {
+    QUEUE_REMOVE(&w->watcher_queue);
+    QUEUE_INIT(&w->watcher_queue);
+
+    if (loop->watchers[w->fd] != NULL) {
+      assert(loop->watchers[w->fd] == w);
+      assert(loop->nfds > 0);
+      loop->watchers[w->fd] = NULL;
+      loop->nfds--;
+      w->events = 0;
+    }
+  }
+  else if (QUEUE_EMPTY(&w->watcher_queue))
+    QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
+}
+
+
+void uv__io_close(uv_loop_t* loop, uv__io_t* w) {
+  uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP);
+  QUEUE_REMOVE(&w->pending_queue);
+
+  /* Remove stale events for this file descriptor */
+  uv__platform_invalidate_fd(loop, w->fd);
+}
+
+
+void uv__io_feed(uv_loop_t* loop, uv__io_t* w) {
+  if (QUEUE_EMPTY(&w->pending_queue))
+    QUEUE_INSERT_TAIL(&loop->pending_queue, &w->pending_queue);
+}
+
+
+int uv__io_active(const uv__io_t* w, unsigned int events) {
+  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP)));
+  assert(0 != events);
+  return 0 != (w->pevents & events);
+}
+
+
+int uv_getrusage(uv_rusage_t* rusage) {
+  struct rusage usage;
+
+  if (getrusage(RUSAGE_SELF, &usage))
+    return -errno;
+
+  rusage->ru_utime.tv_sec = usage.ru_utime.tv_sec;
+  rusage->ru_utime.tv_usec = usage.ru_utime.tv_usec;
+
+  rusage->ru_stime.tv_sec = usage.ru_stime.tv_sec;
+  rusage->ru_stime.tv_usec = usage.ru_stime.tv_usec;
+
+#if !defined(__MVS__)
+  rusage->ru_maxrss = usage.ru_maxrss;
+  rusage->ru_ixrss = usage.ru_ixrss;
+  rusage->ru_idrss = usage.ru_idrss;
+  rusage->ru_isrss = usage.ru_isrss;
+  rusage->ru_minflt = usage.ru_minflt;
+  rusage->ru_majflt = usage.ru_majflt;
+  rusage->ru_nswap = usage.ru_nswap;
+  rusage->ru_inblock = usage.ru_inblock;
+  rusage->ru_oublock = usage.ru_oublock;
+  rusage->ru_msgsnd = usage.ru_msgsnd;
+  rusage->ru_msgrcv = usage.ru_msgrcv;
+  rusage->ru_nsignals = usage.ru_nsignals;
+  rusage->ru_nvcsw = usage.ru_nvcsw;
+  rusage->ru_nivcsw = usage.ru_nivcsw;
+#endif
+
+  return 0;
+}
+
+
+int uv__open_cloexec(const char* path, int flags) {
+  int err;
+  int fd;
+
+#if defined(UV__O_CLOEXEC)
+  static int no_cloexec;
+
+  if (!no_cloexec) {
+    fd = open(path, flags | UV__O_CLOEXEC);
+    if (fd != -1)
+      return fd;
+
+    if (errno != EINVAL)
+      return -errno;
+
+    /* O_CLOEXEC not supported. */
+    no_cloexec = 1;
+  }
+#endif
+
+  fd = open(path, flags);
+  if (fd == -1)
+    return -errno;
+
+  err = uv__cloexec(fd, 1);
+  if (err) {
+    uv__close(fd);
+    return err;
+  }
+
+  return fd;
+}
+
+
+int uv__dup2_cloexec(int oldfd, int newfd) {
+  int r;
+#if defined(__FreeBSD__) && __FreeBSD__ >= 10
+  r = dup3(oldfd, newfd, O_CLOEXEC);
+  if (r == -1)
+    return -errno;
+  return r;
+#elif defined(__FreeBSD__) && defined(F_DUP2FD_CLOEXEC)
+  r = fcntl(oldfd, F_DUP2FD_CLOEXEC, newfd);
+  if (r != -1)
+    return r;
+  if (errno != EINVAL)
+    return -errno;
+  /* Fall through. */
+#elif defined(__linux__)
+  static int no_dup3;
+  if (!no_dup3) {
+    do
+      r = uv__dup3(oldfd, newfd, UV__O_CLOEXEC);
+    while (r == -1 && errno == EBUSY);
+    if (r != -1)
+      return r;
+    if (errno != ENOSYS)
+      return -errno;
+    /* Fall through. */
+    no_dup3 = 1;
+  }
+#endif
+  {
+    int err;
+    do
+      r = dup2(oldfd, newfd);
+#if defined(__linux__)
+    while (r == -1 && errno == EBUSY);
+#else
+    while (0);  /* Never retry. */
+#endif
+
+    if (r == -1)
+      return -errno;
+
+    err = uv__cloexec(newfd, 1);
+    if (err) {
+      uv__close(newfd);
+      return err;
+    }
+
+    return r;
+  }
+}
+
+
+int uv_os_homedir(char* buffer, size_t* size) {
+  uv_passwd_t pwd;
+  char* buf;
+  size_t len;
+  int r;
+
+  if (buffer == NULL || size == NULL || *size == 0)
+    return -EINVAL;
+
+  /* Check if the HOME environment variable is set first */
+  buf = getenv("HOME");
+
+  if (buf != NULL) {
+    len = strlen(buf);
+
+    if (len >= *size) {
+      *size = len + 1;
+      return -ENOBUFS;
+    }
+
+    memcpy(buffer, buf, len + 1);
+    *size = len;
+
+    return 0;
+  }
+
+  /* HOME is not set, so call uv__getpwuid_r() */
+  r = uv__getpwuid_r(&pwd);
+
+  if (r != 0) {
+    return r;
+  }
+
+  len = strlen(pwd.homedir);
+
+  if (len >= *size) {
+    *size = len + 1;
+    uv_os_free_passwd(&pwd);
+    return -ENOBUFS;
+  }
+
+  memcpy(buffer, pwd.homedir, len + 1);
+  *size = len;
+  uv_os_free_passwd(&pwd);
+
+  return 0;
+}
+
+
+int uv_os_tmpdir(char* buffer, size_t* size) {
+  const char* buf;
+  size_t len;
+
+  if (buffer == NULL || size == NULL || *size == 0)
+    return -EINVAL;
+
+#define CHECK_ENV_VAR(name)                                                   \
+  do {                                                                        \
+    buf = getenv(name);                                                       \
+    if (buf != NULL)                                                          \
+      goto return_buffer;                                                     \
+  }                                                                           \
+  while (0)
+
+  /* Check the TMPDIR, TMP, TEMP, and TEMPDIR environment variables in order */
+  CHECK_ENV_VAR("TMPDIR");
+  CHECK_ENV_VAR("TMP");
+  CHECK_ENV_VAR("TEMP");
+  CHECK_ENV_VAR("TEMPDIR");
+
+#undef CHECK_ENV_VAR
+
+  /* No temp environment variables defined */
+  #if defined(__ANDROID__)
+    buf = "/data/local/tmp";
+  #else
+    buf = "/tmp";
+  #endif
+
+return_buffer:
+  len = strlen(buf);
+
+  if (len >= *size) {
+    *size = len + 1;
+    return -ENOBUFS;
+  }
+
+  /* The returned directory should not have a trailing slash. */
+  if (len > 1 && buf[len - 1] == '/') {
+    len--;
+  }
+
+  memcpy(buffer, buf, len + 1);
+  buffer[len] = '\0';
+  *size = len;
+
+  return 0;
+}
+
+
+int uv__getpwuid_r(uv_passwd_t* pwd) {
+  struct passwd pw;
+  struct passwd* result;
+  char* buf;
+  uid_t uid;
+  size_t bufsize;
+  size_t name_size;
+  size_t homedir_size;
+  size_t shell_size;
+  long initsize;
+  int r;
+#if defined(__ANDROID_API__) && __ANDROID_API__ < 21
+  int (*getpwuid_r)(uid_t, struct passwd*, char*, size_t, struct passwd**);
+
+  getpwuid_r = dlsym(RTLD_DEFAULT, "getpwuid_r");
+  if (getpwuid_r == NULL)
+    return -ENOSYS;
+#endif
+
+  if (pwd == NULL)
+    return -EINVAL;
+
+  initsize = sysconf(_SC_GETPW_R_SIZE_MAX);
+
+  if (initsize <= 0)
+    bufsize = 4096;
+  else
+    bufsize = (size_t) initsize;
+
+  uid = geteuid();
+  buf = NULL;
+
+  for (;;) {
+    uv__free(buf);
+    buf = uv__malloc(bufsize);
+
+    if (buf == NULL)
+      return -ENOMEM;
+
+    r = getpwuid_r(uid, &pw, buf, bufsize, &result);
+
+    if (r != ERANGE)
+      break;
+
+    bufsize *= 2;
+  }
+
+  if (r != 0) {
+    uv__free(buf);
+    return -r;
+  }
+
+  if (result == NULL) {
+    uv__free(buf);
+    return -ENOENT;
+  }
+
+  /* Allocate memory for the username, shell, and home directory */
+  name_size = strlen(pw.pw_name) + 1;
+  homedir_size = strlen(pw.pw_dir) + 1;
+  shell_size = strlen(pw.pw_shell) + 1;
+  pwd->username = uv__malloc(name_size + homedir_size + shell_size);
+
+  if (pwd->username == NULL) {
+    uv__free(buf);
+    return -ENOMEM;
+  }
+
+  /* Copy the username */
+  memcpy(pwd->username, pw.pw_name, name_size);
+
+  /* Copy the home directory */
+  pwd->homedir = pwd->username + name_size;
+  memcpy(pwd->homedir, pw.pw_dir, homedir_size);
+
+  /* Copy the shell */
+  pwd->shell = pwd->homedir + homedir_size;
+  memcpy(pwd->shell, pw.pw_shell, shell_size);
+
+  /* Copy the uid and gid */
+  pwd->uid = pw.pw_uid;
+  pwd->gid = pw.pw_gid;
+
+  uv__free(buf);
+
+  return 0;
+}
+
+
+void uv_os_free_passwd(uv_passwd_t* pwd) {
+  if (pwd == NULL)
+    return;
+
+  /*
+    The memory for the name, shell, and homedir is allocated in a single
+    uv__malloc() call. The base pointer is stored in pwd->username, so
+    that is the field that needs to be freed.
+  */
+  uv__free(pwd->username);
+  pwd->username = NULL;
+  pwd->shell = NULL;
+  pwd->homedir = NULL;
+}
+
+
+int uv_os_get_passwd(uv_passwd_t* pwd) {
+  return uv__getpwuid_r(pwd);
+}

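For reference, a minimal sketch (not part of the imported sources) of the buffer/size convention used by uv_os_homedir() and uv_os_tmpdir() above: the caller passes the buffer capacity in *size, and on an -ENOBUFS error the required length, including the terminating nul, is written back.

#include <stdio.h>
#include "uv.h"

static int print_user_dirs(void) {
  char buf[1024];
  size_t len;
  int r;

  len = sizeof(buf);
  r = uv_os_homedir(buf, &len);
  if (r != 0)
    return r;  /* e.g. -ENOBUFS; len now holds the required size */
  printf("home: %s\n", buf);

  len = sizeof(buf);
  r = uv_os_tmpdir(buf, &len);
  if (r != 0)
    return r;
  printf("tmp:  %s\n", buf);
  return 0;
}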
+ 206 - 0
Utilities/cmlibuv/src/unix/darwin-proctitle.c

@@ -0,0 +1,206 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <dlfcn.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <TargetConditionals.h>
+
+#if !TARGET_OS_IPHONE
+# include <CoreFoundation/CoreFoundation.h>
+# include <ApplicationServices/ApplicationServices.h>
+#endif
+
+
+static int uv__pthread_setname_np(const char* name) {
+  int (*dynamic_pthread_setname_np)(const char* name);
+  char namebuf[64];  /* MAXTHREADNAMESIZE */
+  int err;
+
+  /* pthread_setname_np() first appeared in OS X 10.6 and iOS 3.2. */
+  *(void **)(&dynamic_pthread_setname_np) =
+      dlsym(RTLD_DEFAULT, "pthread_setname_np");
+
+  if (dynamic_pthread_setname_np == NULL)
+    return -ENOSYS;
+
+  strncpy(namebuf, name, sizeof(namebuf) - 1);
+  namebuf[sizeof(namebuf) - 1] = '\0';
+
+  err = dynamic_pthread_setname_np(namebuf);
+  if (err)
+    return -err;
+
+  return 0;
+}
+
+
+int uv__set_process_title(const char* title) {
+#if TARGET_OS_IPHONE
+  return uv__pthread_setname_np(title);
+#else
+  CFStringRef (*pCFStringCreateWithCString)(CFAllocatorRef,
+                                            const char*,
+                                            CFStringEncoding);
+  CFBundleRef (*pCFBundleGetBundleWithIdentifier)(CFStringRef);
+  void *(*pCFBundleGetDataPointerForName)(CFBundleRef, CFStringRef);
+  void *(*pCFBundleGetFunctionPointerForName)(CFBundleRef, CFStringRef);
+  CFTypeRef (*pLSGetCurrentApplicationASN)(void);
+  OSStatus (*pLSSetApplicationInformationItem)(int,
+                                               CFTypeRef,
+                                               CFStringRef,
+                                               CFStringRef,
+                                               CFDictionaryRef*);
+  void* application_services_handle;
+  void* core_foundation_handle;
+  CFBundleRef launch_services_bundle;
+  CFStringRef* display_name_key;
+  CFDictionaryRef (*pCFBundleGetInfoDictionary)(CFBundleRef);
+  CFBundleRef (*pCFBundleGetMainBundle)(void);
+  CFBundleRef hi_services_bundle;
+  OSStatus (*pSetApplicationIsDaemon)(int);
+  CFDictionaryRef (*pLSApplicationCheckIn)(int, CFDictionaryRef);
+  void (*pLSSetApplicationLaunchServicesServerConnectionStatus)(uint64_t,
+                                                                void*);
+  CFTypeRef asn;
+  int err;
+
+  err = -ENOENT;
+  application_services_handle = dlopen("/System/Library/Frameworks/"
+                                       "ApplicationServices.framework/"
+                                       "Versions/A/ApplicationServices",
+                                       RTLD_LAZY | RTLD_LOCAL);
+  core_foundation_handle = dlopen("/System/Library/Frameworks/"
+                                  "CoreFoundation.framework/"
+                                  "Versions/A/CoreFoundation",
+                                  RTLD_LAZY | RTLD_LOCAL);
+
+  if (application_services_handle == NULL || core_foundation_handle == NULL)
+    goto out;
+
+  *(void **)(&pCFStringCreateWithCString) =
+      dlsym(core_foundation_handle, "CFStringCreateWithCString");
+  *(void **)(&pCFBundleGetBundleWithIdentifier) =
+      dlsym(core_foundation_handle, "CFBundleGetBundleWithIdentifier");
+  *(void **)(&pCFBundleGetDataPointerForName) =
+      dlsym(core_foundation_handle, "CFBundleGetDataPointerForName");
+  *(void **)(&pCFBundleGetFunctionPointerForName) =
+      dlsym(core_foundation_handle, "CFBundleGetFunctionPointerForName");
+
+  if (pCFStringCreateWithCString == NULL ||
+      pCFBundleGetBundleWithIdentifier == NULL ||
+      pCFBundleGetDataPointerForName == NULL ||
+      pCFBundleGetFunctionPointerForName == NULL) {
+    goto out;
+  }
+
+#define S(s) pCFStringCreateWithCString(NULL, (s), kCFStringEncodingUTF8)
+
+  launch_services_bundle =
+      pCFBundleGetBundleWithIdentifier(S("com.apple.LaunchServices"));
+
+  if (launch_services_bundle == NULL)
+    goto out;
+
+  *(void **)(&pLSGetCurrentApplicationASN) =
+      pCFBundleGetFunctionPointerForName(launch_services_bundle,
+                                         S("_LSGetCurrentApplicationASN"));
+
+  if (pLSGetCurrentApplicationASN == NULL)
+    goto out;
+
+  *(void **)(&pLSSetApplicationInformationItem) =
+      pCFBundleGetFunctionPointerForName(launch_services_bundle,
+                                         S("_LSSetApplicationInformationItem"));
+
+  if (pLSSetApplicationInformationItem == NULL)
+    goto out;
+
+  display_name_key = pCFBundleGetDataPointerForName(launch_services_bundle,
+                                                    S("_kLSDisplayNameKey"));
+
+  if (display_name_key == NULL || *display_name_key == NULL)
+    goto out;
+
+  *(void **)(&pCFBundleGetInfoDictionary) = dlsym(core_foundation_handle,
+                                     "CFBundleGetInfoDictionary");
+  *(void **)(&pCFBundleGetMainBundle) = dlsym(core_foundation_handle,
+                                 "CFBundleGetMainBundle");
+  if (pCFBundleGetInfoDictionary == NULL || pCFBundleGetMainBundle == NULL)
+    goto out;
+
+  /* Black 10.9 magic, to remove (Not responding) mark in Activity Monitor */
+  hi_services_bundle =
+      pCFBundleGetBundleWithIdentifier(S("com.apple.HIServices"));
+  err = -ENOENT;
+  if (hi_services_bundle == NULL)
+    goto out;
+
+  *(void **)(&pSetApplicationIsDaemon) = pCFBundleGetFunctionPointerForName(
+      hi_services_bundle,
+      S("SetApplicationIsDaemon"));
+  *(void **)(&pLSApplicationCheckIn) = pCFBundleGetFunctionPointerForName(
+      launch_services_bundle,
+      S("_LSApplicationCheckIn"));
+  *(void **)(&pLSSetApplicationLaunchServicesServerConnectionStatus) =
+      pCFBundleGetFunctionPointerForName(
+          launch_services_bundle,
+          S("_LSSetApplicationLaunchServicesServerConnectionStatus"));
+  if (pSetApplicationIsDaemon == NULL ||
+      pLSApplicationCheckIn == NULL ||
+      pLSSetApplicationLaunchServicesServerConnectionStatus == NULL) {
+    goto out;
+  }
+
+  if (pSetApplicationIsDaemon(1) != noErr)
+    goto out;
+
+  pLSSetApplicationLaunchServicesServerConnectionStatus(0, NULL);
+
+  /* Check into process manager?! */
+  pLSApplicationCheckIn(-2,
+                        pCFBundleGetInfoDictionary(pCFBundleGetMainBundle()));
+
+  asn = pLSGetCurrentApplicationASN();
+
+  err = -EINVAL;
+  if (pLSSetApplicationInformationItem(-2,  /* Magic value. */
+                                       asn,
+                                       *display_name_key,
+                                       S(title),
+                                       NULL) != noErr) {
+    goto out;
+  }
+
+  uv__pthread_setname_np(title);  /* Don't care if it fails. */
+  err = 0;
+
+out:
+  if (core_foundation_handle != NULL)
+    dlclose(core_foundation_handle);
+
+  if (application_services_handle != NULL)
+    dlclose(application_services_handle);
+
+  return err;
+#endif  /* !TARGET_OS_IPHONE */
+}

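The file above resolves optional symbols at run time with dlsym() so the library still loads on OS releases that predate them. A sketch of that same pattern in isolation (illustrative only, not from the commit; the symbol name is the one used above):

#include <dlfcn.h>
#include <errno.h>

static int set_thread_name_if_available(const char* name) {
  int (*fn)(const char*);

  /* Look the function up at run time; it may not exist on older systems. */
  *(void **)(&fn) = dlsym(RTLD_DEFAULT, "pthread_setname_np");
  if (fn == NULL)
    return -ENOSYS;

  return -fn(name);  /* pthread_setname_np() returns an errno value directly */
}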
+ 335 - 0
Utilities/cmlibuv/src/unix/darwin.c

@@ -0,0 +1,335 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <assert.h>
+#include <stdint.h>
+#include <errno.h>
+
+#include <ifaddrs.h>
+#include <net/if.h>
+#include <net/if_dl.h>
+
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+#include <mach-o/dyld.h> /* _NSGetExecutablePath */
+#include <sys/resource.h>
+#include <sys/sysctl.h>
+#include <unistd.h>  /* sysconf */
+
+
+int uv__platform_loop_init(uv_loop_t* loop) {
+  loop->cf_state = NULL;
+
+  if (uv__kqueue_init(loop))
+    return -errno;
+
+  return 0;
+}
+
+
+void uv__platform_loop_delete(uv_loop_t* loop) {
+  uv__fsevents_loop_delete(loop);
+}
+
+
+uint64_t uv__hrtime(uv_clocktype_t type) {
+  static mach_timebase_info_data_t info;
+
+  if ((ACCESS_ONCE(uint32_t, info.numer) == 0 ||
+       ACCESS_ONCE(uint32_t, info.denom) == 0) &&
+      mach_timebase_info(&info) != KERN_SUCCESS)
+    abort();
+
+  return mach_absolute_time() * info.numer / info.denom;
+}
+
+
+int uv_exepath(char* buffer, size_t* size) {
+  /* realpath(exepath) may be > PATH_MAX so double it to be on the safe side. */
+  char abspath[PATH_MAX * 2 + 1];
+  char exepath[PATH_MAX + 1];
+  uint32_t exepath_size;
+  size_t abspath_size;
+
+  if (buffer == NULL || size == NULL || *size == 0)
+    return -EINVAL;
+
+  exepath_size = sizeof(exepath);
+  if (_NSGetExecutablePath(exepath, &exepath_size))
+    return -EIO;
+
+  if (realpath(exepath, abspath) != abspath)
+    return -errno;
+
+  abspath_size = strlen(abspath);
+  if (abspath_size == 0)
+    return -EIO;
+
+  *size -= 1;
+  if (*size > abspath_size)
+    *size = abspath_size;
+
+  memcpy(buffer, abspath, *size);
+  buffer[*size] = '\0';
+
+  return 0;
+}
+
+
+uint64_t uv_get_free_memory(void) {
+  vm_statistics_data_t info;
+  mach_msg_type_number_t count = sizeof(info) / sizeof(integer_t);
+
+  if (host_statistics(mach_host_self(), HOST_VM_INFO,
+                      (host_info_t)&info, &count) != KERN_SUCCESS) {
+    return -EINVAL;  /* FIXME(bnoordhuis) Translate error. */
+  }
+
+  return (uint64_t) info.free_count * sysconf(_SC_PAGESIZE);
+}
+
+
+uint64_t uv_get_total_memory(void) {
+  uint64_t info;
+  int which[] = {CTL_HW, HW_MEMSIZE};
+  size_t size = sizeof(info);
+
+  if (sysctl(which, 2, &info, &size, NULL, 0))
+    return -errno;
+
+  return (uint64_t) info;
+}
+
+
+void uv_loadavg(double avg[3]) {
+  struct loadavg info;
+  size_t size = sizeof(info);
+  int which[] = {CTL_VM, VM_LOADAVG};
+
+  if (sysctl(which, 2, &info, &size, NULL, 0) < 0) return;
+
+  avg[0] = (double) info.ldavg[0] / info.fscale;
+  avg[1] = (double) info.ldavg[1] / info.fscale;
+  avg[2] = (double) info.ldavg[2] / info.fscale;
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+  mach_msg_type_number_t count;
+  task_basic_info_data_t info;
+  kern_return_t err;
+
+  count = TASK_BASIC_INFO_COUNT;
+  err = task_info(mach_task_self(),
+                  TASK_BASIC_INFO,
+                  (task_info_t) &info,
+                  &count);
+  (void) &err;
+  /* task_info(TASK_BASIC_INFO) cannot really fail. Anything other than
+   * KERN_SUCCESS implies a libuv bug.
+   */
+  assert(err == KERN_SUCCESS);
+  *rss = info.resident_size;
+
+  return 0;
+}
+
+
+int uv_uptime(double* uptime) {
+  time_t now;
+  struct timeval info;
+  size_t size = sizeof(info);
+  static int which[] = {CTL_KERN, KERN_BOOTTIME};
+
+  if (sysctl(which, 2, &info, &size, NULL, 0))
+    return -errno;
+
+  now = time(NULL);
+  *uptime = now - info.tv_sec;
+
+  return 0;
+}
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+  unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK),
+               multiplier = ((uint64_t)1000L / ticks);
+  char model[512];
+  uint64_t cpuspeed;
+  size_t size;
+  unsigned int i;
+  natural_t numcpus;
+  mach_msg_type_number_t msg_type;
+  processor_cpu_load_info_data_t *info;
+  uv_cpu_info_t* cpu_info;
+
+  size = sizeof(model);
+  if (sysctlbyname("machdep.cpu.brand_string", &model, &size, NULL, 0) &&
+      sysctlbyname("hw.model", &model, &size, NULL, 0)) {
+    return -errno;
+  }
+
+  size = sizeof(cpuspeed);
+  if (sysctlbyname("hw.cpufrequency", &cpuspeed, &size, NULL, 0))
+    return -errno;
+
+  if (host_processor_info(mach_host_self(), PROCESSOR_CPU_LOAD_INFO, &numcpus,
+                          (processor_info_array_t*)&info,
+                          &msg_type) != KERN_SUCCESS) {
+    return -EINVAL;  /* FIXME(bnoordhuis) Translate error. */
+  }
+
+  *cpu_infos = uv__malloc(numcpus * sizeof(**cpu_infos));
+  if (!(*cpu_infos)) {
+    vm_deallocate(mach_task_self(), (vm_address_t)info, msg_type);
+    return -ENOMEM;
+  }
+
+  *count = numcpus;
+
+  for (i = 0; i < numcpus; i++) {
+    cpu_info = &(*cpu_infos)[i];
+
+    cpu_info->cpu_times.user = (uint64_t)(info[i].cpu_ticks[0]) * multiplier;
+    cpu_info->cpu_times.nice = (uint64_t)(info[i].cpu_ticks[3]) * multiplier;
+    cpu_info->cpu_times.sys = (uint64_t)(info[i].cpu_ticks[1]) * multiplier;
+    cpu_info->cpu_times.idle = (uint64_t)(info[i].cpu_ticks[2]) * multiplier;
+    cpu_info->cpu_times.irq = 0;
+
+    cpu_info->model = uv__strdup(model);
+    cpu_info->speed = cpuspeed/1000000;
+  }
+  vm_deallocate(mach_task_self(), (vm_address_t)info, msg_type);
+
+  return 0;
+}
+
+
+void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
+  int i;
+
+  for (i = 0; i < count; i++) {
+    uv__free(cpu_infos[i].model);
+  }
+
+  uv__free(cpu_infos);
+}
+
+
+int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
+  struct ifaddrs *addrs, *ent;
+  uv_interface_address_t* address;
+  int i;
+  struct sockaddr_dl *sa_addr;
+
+  if (getifaddrs(&addrs))
+    return -errno;
+
+  *count = 0;
+
+  /* Count the number of interfaces */
+  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+    if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) ||
+        (ent->ifa_addr == NULL) ||
+        (ent->ifa_addr->sa_family == AF_LINK)) {
+      continue;
+    }
+
+    (*count)++;
+  }
+
+  *addresses = uv__malloc(*count * sizeof(**addresses));
+  if (!(*addresses)) {
+    freeifaddrs(addrs);
+    return -ENOMEM;
+  }
+
+  address = *addresses;
+
+  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+    if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
+      continue;
+
+    if (ent->ifa_addr == NULL)
+      continue;
+
+    /*
+     * On Mac OS X getifaddrs returns information related to MAC addresses for
+     * various devices, such as FireWire, etc. These are not relevant here.
+     */
+    if (ent->ifa_addr->sa_family == AF_LINK)
+      continue;
+
+    address->name = uv__strdup(ent->ifa_name);
+
+    if (ent->ifa_addr->sa_family == AF_INET6) {
+      address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
+    } else {
+      address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
+    }
+
+    if (ent->ifa_netmask->sa_family == AF_INET6) {
+      address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
+    } else {
+      address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
+    }
+
+    address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK);
+
+    address++;
+  }
+
+  /* Fill in physical addresses for each interface */
+  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+    if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) ||
+        (ent->ifa_addr == NULL) ||
+        (ent->ifa_addr->sa_family != AF_LINK)) {
+      continue;
+    }
+
+    address = *addresses;
+
+    for (i = 0; i < (*count); i++) {
+      if (strcmp(address->name, ent->ifa_name) == 0) {
+        sa_addr = (struct sockaddr_dl*)(ent->ifa_addr);
+        memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr));
+      }
+      address++;
+    }
+  }
+
+  freeifaddrs(addrs);
+
+  return 0;
+}
+
+
+void uv_free_interface_addresses(uv_interface_address_t* addresses,
+  int count) {
+  int i;
+
+  for (i = 0; i < count; i++) {
+    uv__free(addresses[i].name);
+  }
+
+  uv__free(addresses);
+}

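As a usage note (a sketch, not part of the imported sources), uv_cpu_info() as implemented above hands ownership of the array to the caller, who must release it with uv_free_cpu_info():

#include <stdio.h>
#include "uv.h"

static void dump_cpus(void) {
  uv_cpu_info_t* cpus;
  int count;
  int i;

  if (uv_cpu_info(&cpus, &count) != 0)
    return;

  for (i = 0; i < count; i++)
    printf("cpu %d: %s @ %d MHz\n", i, cpus[i].model, cpus[i].speed);

  /* Frees each model string and the array itself. */
  uv_free_cpu_info(cpus, count);
}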
+ 80 - 0
Utilities/cmlibuv/src/unix/dl.c

@@ -0,0 +1,80 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <dlfcn.h>
+#include <errno.h>
+#include <string.h>
+#include <locale.h>
+
+static int uv__dlerror(uv_lib_t* lib);
+
+
+int uv_dlopen(const char* filename, uv_lib_t* lib) {
+  dlerror(); /* Reset error status. */
+  lib->errmsg = NULL;
+  lib->handle = dlopen(filename, RTLD_LAZY);
+  return lib->handle ? 0 : uv__dlerror(lib);
+}
+
+
+void uv_dlclose(uv_lib_t* lib) {
+  uv__free(lib->errmsg);
+  lib->errmsg = NULL;
+
+  if (lib->handle) {
+    /* Ignore errors. No good way to signal them without leaking memory. */
+    dlclose(lib->handle);
+    lib->handle = NULL;
+  }
+}
+
+
+int uv_dlsym(uv_lib_t* lib, const char* name, void** ptr) {
+  dlerror(); /* Reset error status. */
+  *ptr = dlsym(lib->handle, name);
+  return uv__dlerror(lib);
+}
+
+
+const char* uv_dlerror(const uv_lib_t* lib) {
+  return lib->errmsg ? lib->errmsg : "no error";
+}
+
+
+static int uv__dlerror(uv_lib_t* lib) {
+  const char* errmsg;
+
+  uv__free(lib->errmsg);
+
+  errmsg = dlerror();
+
+  if (errmsg) {
+    lib->errmsg = uv__strdup(errmsg);
+    return -1;
+  }
+  else {
+    lib->errmsg = NULL;
+    return 0;
+  }
+}

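For reference, a minimal sketch of the uv_dlopen()/uv_dlsym()/uv_dlerror()/uv_dlclose() API defined above (not part of the imported sources; "libm.so.6" and "cos" are placeholder names):

#include <stdio.h>
#include "uv.h"

static int call_cos(double x, double* out) {
  uv_lib_t lib;
  double (*fn)(double);

  if (uv_dlopen("libm.so.6", &lib) != 0) {
    fprintf(stderr, "dlopen: %s\n", uv_dlerror(&lib));
    return -1;
  }

  if (uv_dlsym(&lib, "cos", (void**) &fn) != 0) {
    fprintf(stderr, "dlsym: %s\n", uv_dlerror(&lib));
    uv_dlclose(&lib);
    return -1;
  }

  *out = fn(x);
  uv_dlclose(&lib);
  return 0;
}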
+ 460 - 0
Utilities/cmlibuv/src/unix/freebsd.c

@@ -0,0 +1,460 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <assert.h>
+#include <string.h>
+#include <errno.h>
+
+#include <ifaddrs.h>
+#include <net/if.h>
+#include <net/if_dl.h>
+
+#include <kvm.h>
+#include <paths.h>
+#include <sys/user.h>
+#include <sys/types.h>
+#include <sys/resource.h>
+#include <sys/sysctl.h>
+#include <vm/vm_param.h> /* VM_LOADAVG */
+#include <time.h>
+#include <stdlib.h>
+#include <unistd.h> /* sysconf */
+#include <fcntl.h>
+
+#undef NANOSEC
+#define NANOSEC ((uint64_t) 1e9)
+
+#ifndef CPUSTATES
+# define CPUSTATES 5U
+#endif
+#ifndef CP_USER
+# define CP_USER 0
+# define CP_NICE 1
+# define CP_SYS 2
+# define CP_IDLE 3
+# define CP_INTR 4
+#endif
+
+static char *process_title;
+
+
+int uv__platform_loop_init(uv_loop_t* loop) {
+  return uv__kqueue_init(loop);
+}
+
+
+void uv__platform_loop_delete(uv_loop_t* loop) {
+}
+
+
+uint64_t uv__hrtime(uv_clocktype_t type) {
+  struct timespec ts;
+  clock_gettime(CLOCK_MONOTONIC, &ts);
+  return (((uint64_t) ts.tv_sec) * NANOSEC + ts.tv_nsec);
+}
+
+
+#ifdef __DragonFly__
+int uv_exepath(char* buffer, size_t* size) {
+  char abspath[PATH_MAX * 2 + 1];
+  ssize_t abspath_size;
+
+  if (buffer == NULL || size == NULL || *size == 0)
+    return -EINVAL;
+
+  abspath_size = readlink("/proc/curproc/file", abspath, sizeof(abspath));
+  if (abspath_size < 0)
+    return -errno;
+
+  assert(abspath_size > 0);
+  *size -= 1;
+
+  if (*size > abspath_size)
+    *size = abspath_size;
+
+  memcpy(buffer, abspath, *size);
+  buffer[*size] = '\0';
+
+  return 0;
+}
+#else
+int uv_exepath(char* buffer, size_t* size) {
+  char abspath[PATH_MAX * 2 + 1];
+  int mib[4];
+  size_t abspath_size;
+
+  if (buffer == NULL || size == NULL || *size == 0)
+    return -EINVAL;
+
+  mib[0] = CTL_KERN;
+  mib[1] = KERN_PROC;
+  mib[2] = KERN_PROC_PATHNAME;
+  mib[3] = -1;
+
+  abspath_size = sizeof abspath;
+  if (sysctl(mib, 4, abspath, &abspath_size, NULL, 0))
+    return -errno;
+
+  assert(abspath_size > 0);
+  abspath_size -= 1;
+  *size -= 1;
+
+  if (*size > abspath_size)
+    *size = abspath_size;
+
+  memcpy(buffer, abspath, *size);
+  buffer[*size] = '\0';
+
+  return 0;
+}
+#endif
+
+uint64_t uv_get_free_memory(void) {
+  int freecount;
+  size_t size = sizeof(freecount);
+
+  if (sysctlbyname("vm.stats.vm.v_free_count", &freecount, &size, NULL, 0))
+    return -errno;
+
+  return (uint64_t) freecount * sysconf(_SC_PAGESIZE);
+}
+
+
+uint64_t uv_get_total_memory(void) {
+  unsigned long info;
+  int which[] = {CTL_HW, HW_PHYSMEM};
+
+  size_t size = sizeof(info);
+
+  if (sysctl(which, 2, &info, &size, NULL, 0))
+    return -errno;
+
+  return (uint64_t) info;
+}
+
+
+void uv_loadavg(double avg[3]) {
+  struct loadavg info;
+  size_t size = sizeof(info);
+  int which[] = {CTL_VM, VM_LOADAVG};
+
+  if (sysctl(which, 2, &info, &size, NULL, 0) < 0) return;
+
+  avg[0] = (double) info.ldavg[0] / info.fscale;
+  avg[1] = (double) info.ldavg[1] / info.fscale;
+  avg[2] = (double) info.ldavg[2] / info.fscale;
+}
+
+
+char** uv_setup_args(int argc, char** argv) {
+  process_title = argc ? uv__strdup(argv[0]) : NULL;
+  return argv;
+}
+
+
+int uv_set_process_title(const char* title) {
+  int oid[4];
+
+  uv__free(process_title);
+  process_title = uv__strdup(title);
+
+  oid[0] = CTL_KERN;
+  oid[1] = KERN_PROC;
+  oid[2] = KERN_PROC_ARGS;
+  oid[3] = getpid();
+
+  sysctl(oid,
+         ARRAY_SIZE(oid),
+         NULL,
+         NULL,
+         process_title,
+         strlen(process_title) + 1);
+
+  return 0;
+}
+
+
+int uv_get_process_title(char* buffer, size_t size) {
+  size_t len;
+
+  if (buffer == NULL || size == 0)
+    return -EINVAL;
+
+  if (process_title) {
+    len = strlen(process_title) + 1;
+
+    if (size < len)
+      return -ENOBUFS;
+
+    memcpy(buffer, process_title, len);  /* copies the terminating nul */
+  } else {
+    buffer[0] = '\0';
+  }
+
+  return 0;
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+  kvm_t *kd = NULL;
+  struct kinfo_proc *kinfo = NULL;
+  pid_t pid;
+  int nprocs;
+  size_t page_size = getpagesize();
+
+  pid = getpid();
+
+  kd = kvm_open(NULL, _PATH_DEVNULL, NULL, O_RDONLY, "kvm_open");
+  if (kd == NULL) goto error;
+
+  kinfo = kvm_getprocs(kd, KERN_PROC_PID, pid, &nprocs);
+  if (kinfo == NULL) goto error;
+
+#ifdef __DragonFly__
+  *rss = kinfo->kp_vm_rssize * page_size;
+#else
+  *rss = kinfo->ki_rssize * page_size;
+#endif
+
+  kvm_close(kd);
+
+  return 0;
+
+error:
+  if (kd) kvm_close(kd);
+  return -EPERM;
+}
+
+
+int uv_uptime(double* uptime) {
+  int r;
+  struct timespec sp;
+  r = clock_gettime(CLOCK_MONOTONIC, &sp);
+  if (r)
+    return -errno;
+
+  *uptime = sp.tv_sec;
+  return 0;
+}
+
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+  unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK),
+               multiplier = ((uint64_t)1000L / ticks), cpuspeed, maxcpus,
+               cur = 0;
+  uv_cpu_info_t* cpu_info;
+  const char* maxcpus_key;
+  const char* cptimes_key;
+  char model[512];
+  long* cp_times;
+  int numcpus;
+  size_t size;
+  int i;
+
+#if defined(__DragonFly__)
+  /* This is not quite correct but DragonFlyBSD doesn't seem to have anything
+   * comparable to kern.smp.maxcpus or kern.cp_times (kern.cp_time is a total,
+   * not per CPU). At least this stops uv_cpu_info() from failing completely.
+   */
+  maxcpus_key = "hw.ncpu";
+  cptimes_key = "kern.cp_time";
+#else
+  maxcpus_key = "kern.smp.maxcpus";
+  cptimes_key = "kern.cp_times";
+#endif
+
+  size = sizeof(model);
+  if (sysctlbyname("hw.model", &model, &size, NULL, 0))
+    return -errno;
+
+  size = sizeof(numcpus);
+  if (sysctlbyname("hw.ncpu", &numcpus, &size, NULL, 0))
+    return -errno;
+
+  *cpu_infos = uv__malloc(numcpus * sizeof(**cpu_infos));
+  if (!(*cpu_infos))
+    return -ENOMEM;
+
+  *count = numcpus;
+
+  size = sizeof(cpuspeed);
+  if (sysctlbyname("hw.clockrate", &cpuspeed, &size, NULL, 0)) {
+    uv__free(*cpu_infos);
+    return -errno;
+  }
+
+  /* kern.cp_times on FreeBSD i386 gives an array up to maxcpus instead of
+   * ncpu.
+   */
+  size = sizeof(maxcpus);
+  if (sysctlbyname(maxcpus_key, &maxcpus, &size, NULL, 0)) {
+    uv__free(*cpu_infos);
+    return -errno;
+  }
+
+  size = maxcpus * CPUSTATES * sizeof(long);
+
+  cp_times = uv__malloc(size);
+  if (cp_times == NULL) {
+    uv__free(*cpu_infos);
+    return -ENOMEM;
+  }
+
+  if (sysctlbyname(cptimes_key, cp_times, &size, NULL, 0)) {
+    uv__free(cp_times);
+    uv__free(*cpu_infos);
+    return -errno;
+  }
+
+  for (i = 0; i < numcpus; i++) {
+    cpu_info = &(*cpu_infos)[i];
+
+    cpu_info->cpu_times.user = (uint64_t)(cp_times[CP_USER+cur]) * multiplier;
+    cpu_info->cpu_times.nice = (uint64_t)(cp_times[CP_NICE+cur]) * multiplier;
+    cpu_info->cpu_times.sys = (uint64_t)(cp_times[CP_SYS+cur]) * multiplier;
+    cpu_info->cpu_times.idle = (uint64_t)(cp_times[CP_IDLE+cur]) * multiplier;
+    cpu_info->cpu_times.irq = (uint64_t)(cp_times[CP_INTR+cur]) * multiplier;
+
+    cpu_info->model = uv__strdup(model);
+    cpu_info->speed = cpuspeed;
+
+    cur+=CPUSTATES;
+  }
+
+  uv__free(cp_times);
+  return 0;
+}
+
+
+void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
+  int i;
+
+  for (i = 0; i < count; i++) {
+    uv__free(cpu_infos[i].model);
+  }
+
+  uv__free(cpu_infos);
+}
+
+
+int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
+  struct ifaddrs *addrs, *ent;
+  uv_interface_address_t* address;
+  int i;
+  struct sockaddr_dl *sa_addr;
+
+  if (getifaddrs(&addrs))
+    return -errno;
+
+  *count = 0;
+
+  /* Count the number of interfaces */
+  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+    if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) ||
+        (ent->ifa_addr == NULL) ||
+        (ent->ifa_addr->sa_family == AF_LINK)) {
+      continue;
+    }
+
+    (*count)++;
+  }
+
+  *addresses = uv__malloc(*count * sizeof(**addresses));
+  if (!(*addresses)) {
+    freeifaddrs(addrs);
+    return -ENOMEM;
+  }
+
+  address = *addresses;
+
+  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+    if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
+      continue;
+
+    if (ent->ifa_addr == NULL)
+      continue;
+
+    /*
+     * On FreeBSD getifaddrs returns information related to the raw underlying
+     * devices. We're not interested in this information yet.
+     */
+    if (ent->ifa_addr->sa_family == AF_LINK)
+      continue;
+
+    address->name = uv__strdup(ent->ifa_name);
+
+    if (ent->ifa_addr->sa_family == AF_INET6) {
+      address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
+    } else {
+      address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
+    }
+
+    if (ent->ifa_netmask->sa_family == AF_INET6) {
+      address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
+    } else {
+      address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
+    }
+
+    address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK);
+
+    address++;
+  }
+
+  /* Fill in physical addresses for each interface */
+  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+    if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) ||
+        (ent->ifa_addr == NULL) ||
+        (ent->ifa_addr->sa_family != AF_LINK)) {
+      continue;
+    }
+
+    address = *addresses;
+
+    for (i = 0; i < (*count); i++) {
+      if (strcmp(address->name, ent->ifa_name) == 0) {
+        sa_addr = (struct sockaddr_dl*)(ent->ifa_addr);
+        memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr));
+      }
+      address++;
+    }
+  }
+
+  freeifaddrs(addrs);
+
+  return 0;
+}
+
+
+void uv_free_interface_addresses(uv_interface_address_t* addresses,
+  int count) {
+  int i;
+
+  for (i = 0; i < count; i++) {
+    uv__free(addresses[i].name);
+  }
+
+  uv__free(addresses);
+}

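As with the CPU info API, uv_interface_addresses() (implemented above for both Darwin and FreeBSD) allocates an array that the caller must release with uv_free_interface_addresses(). A sketch of the pairing (not part of the imported sources):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include "uv.h"

static void dump_interfaces(void) {
  uv_interface_address_t* addrs;
  char ip[INET6_ADDRSTRLEN];
  int count;
  int i;

  if (uv_interface_addresses(&addrs, &count) != 0)
    return;

  for (i = 0; i < count; i++) {
    if (addrs[i].address.address4.sin_family == AF_INET6)
      uv_ip6_name(&addrs[i].address.address6, ip, sizeof(ip));
    else
      uv_ip4_name(&addrs[i].address.address4, ip, sizeof(ip));
    printf("%s%s: %s\n", addrs[i].name,
           addrs[i].is_internal ? " (internal)" : "", ip);
  }

  uv_free_interface_addresses(addrs, count);
}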
+ 1355 - 0
Utilities/cmlibuv/src/unix/fs.c

@@ -0,0 +1,1355 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/* Caveat emptor: this file deviates from the libuv convention of returning
+ * negated errno codes. Most uv_fs_*() functions map directly to the system
+ * call of the same name. For more complex wrappers, it's easier to just
+ * return -1 with errno set. The dispatcher in uv__fs_work() takes care of
+ * getting the errno to the right place (req->result or as the return value.)
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h> /* PATH_MAX */
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/uio.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <utime.h>
+#include <poll.h>
+
+#if defined(__DragonFly__)        ||                                      \
+    defined(__FreeBSD__)          ||                                      \
+    defined(__FreeBSD_kernel__)   ||                                      \
+    defined(__OpenBSD__)          ||                                      \
+    defined(__NetBSD__)
+# define HAVE_PREADV 1
+#else
+# define HAVE_PREADV 0
+#endif
+
+#if defined(__linux__) || defined(__sun)
+# include <sys/sendfile.h>
+#endif
+
+#define INIT(subtype)                                                         \
+  do {                                                                        \
+    req->type = UV_FS;                                                        \
+    if (cb != NULL)                                                           \
+      uv__req_init(loop, req, UV_FS);                                         \
+    req->fs_type = UV_FS_ ## subtype;                                         \
+    req->result = 0;                                                          \
+    req->ptr = NULL;                                                          \
+    req->loop = loop;                                                         \
+    req->path = NULL;                                                         \
+    req->new_path = NULL;                                                     \
+    req->cb = cb;                                                             \
+  }                                                                           \
+  while (0)
+
+#define PATH                                                                  \
+  do {                                                                        \
+    assert(path != NULL);                                                     \
+    if (cb == NULL) {                                                         \
+      req->path = path;                                                       \
+    } else {                                                                  \
+      req->path = uv__strdup(path);                                           \
+      if (req->path == NULL) {                                                \
+        uv__req_unregister(loop, req);                                        \
+        return -ENOMEM;                                                       \
+      }                                                                       \
+    }                                                                         \
+  }                                                                           \
+  while (0)
+
+#define PATH2                                                                 \
+  do {                                                                        \
+    if (cb == NULL) {                                                         \
+      req->path = path;                                                       \
+      req->new_path = new_path;                                               \
+    } else {                                                                  \
+      size_t path_len;                                                        \
+      size_t new_path_len;                                                    \
+      path_len = strlen(path) + 1;                                            \
+      new_path_len = strlen(new_path) + 1;                                    \
+      req->path = uv__malloc(path_len + new_path_len);                        \
+      if (req->path == NULL) {                                                \
+        uv__req_unregister(loop, req);                                        \
+        return -ENOMEM;                                                       \
+      }                                                                       \
+      req->new_path = req->path + path_len;                                   \
+      memcpy((void*) req->path, path, path_len);                              \
+      memcpy((void*) req->new_path, new_path, new_path_len);                  \
+    }                                                                         \
+  }                                                                           \
+  while (0)
+
+#define POST                                                                  \
+  do {                                                                        \
+    if (cb != NULL) {                                                         \
+      uv__work_submit(loop, &req->work_req, uv__fs_work, uv__fs_done);        \
+      return 0;                                                               \
+    }                                                                         \
+    else {                                                                    \
+      uv__fs_work(&req->work_req);                                            \
+      return req->result;                                                     \
+    }                                                                         \
+  }                                                                           \
+  while (0)
+
+
+static ssize_t uv__fs_fdatasync(uv_fs_t* req) {
+#if defined(__linux__) || defined(__sun) || defined(__NetBSD__)
+  return fdatasync(req->file);
+#elif defined(__APPLE__) && defined(SYS_fdatasync)
+  return syscall(SYS_fdatasync, req->file);
+#else
+  return fsync(req->file);
+#endif
+}
+
+
+static ssize_t uv__fs_futime(uv_fs_t* req) {
+#if defined(__linux__)
+  /* utimensat() has nanosecond resolution but we stick to microseconds
+   * for the sake of consistency with other platforms.
+   */
+  static int no_utimesat;
+  struct timespec ts[2];
+  struct timeval tv[2];
+  char path[sizeof("/proc/self/fd/") + 3 * sizeof(int)];
+  int r;
+
+  if (no_utimesat)
+    goto skip;
+
+  ts[0].tv_sec  = req->atime;
+  ts[0].tv_nsec = (uint64_t)(req->atime * 1000000) % 1000000 * 1000;
+  ts[1].tv_sec  = req->mtime;
+  ts[1].tv_nsec = (uint64_t)(req->mtime * 1000000) % 1000000 * 1000;
+
+  r = uv__utimesat(req->file, NULL, ts, 0);
+  if (r == 0)
+    return r;
+
+  if (errno != ENOSYS)
+    return r;
+
+  no_utimesat = 1;
+
+skip:
+
+  tv[0].tv_sec  = req->atime;
+  tv[0].tv_usec = (uint64_t)(req->atime * 1000000) % 1000000;
+  tv[1].tv_sec  = req->mtime;
+  tv[1].tv_usec = (uint64_t)(req->mtime * 1000000) % 1000000;
+  snprintf(path, sizeof(path), "/proc/self/fd/%d", (int) req->file);
+
+  r = utimes(path, tv);
+  if (r == 0)
+    return r;
+
+  switch (errno) {
+  case ENOENT:
+    if (fcntl(req->file, F_GETFL) == -1 && errno == EBADF)
+      break;
+    /* Fall through. */
+
+  case EACCES:
+  case ENOTDIR:
+    errno = ENOSYS;
+    break;
+  }
+
+  return r;
+
+#elif defined(__APPLE__)                                                      \
+    || defined(__DragonFly__)                                                 \
+    || defined(__FreeBSD__)                                                   \
+    || defined(__FreeBSD_kernel__)                                            \
+    || defined(__NetBSD__)                                                    \
+    || defined(__OpenBSD__)                                                   \
+    || defined(__sun)
+  struct timeval tv[2];
+  tv[0].tv_sec  = req->atime;
+  tv[0].tv_usec = (uint64_t)(req->atime * 1000000) % 1000000;
+  tv[1].tv_sec  = req->mtime;
+  tv[1].tv_usec = (uint64_t)(req->mtime * 1000000) % 1000000;
+# if defined(__sun)
+  return futimesat(req->file, NULL, tv);
+# else
+  return futimes(req->file, tv);
+# endif
+#elif defined(_AIX71)
+  struct timespec ts[2];
+  ts[0].tv_sec  = req->atime;
+  ts[0].tv_nsec = (uint64_t)(req->atime * 1000000) % 1000000 * 1000;
+  ts[1].tv_sec  = req->mtime;
+  ts[1].tv_nsec = (uint64_t)(req->mtime * 1000000) % 1000000 * 1000;
+  return futimens(req->file, ts);
+#elif defined(__MVS__)
+  attrib_t atr;
+  memset(&atr, 0, sizeof(atr));
+  atr.att_mtimechg = 1;
+  atr.att_atimechg = 1;
+  atr.att_mtime = req->mtime;
+  atr.att_atime = req->atime;
+  return __fchattr(req->file, &atr, sizeof(atr));
+#else
+  errno = ENOSYS;
+  return -1;
+#endif
+}
+
+
+static ssize_t uv__fs_mkdtemp(uv_fs_t* req) {
+  return mkdtemp((char*) req->path) ? 0 : -1;
+}
+
+
+static ssize_t uv__fs_open(uv_fs_t* req) {
+  static int no_cloexec_support;
+  int r;
+
+  /* Try O_CLOEXEC before entering locks */
+  if (no_cloexec_support == 0) {
+#ifdef O_CLOEXEC
+    r = open(req->path, req->flags | O_CLOEXEC, req->mode);
+    if (r >= 0)
+      return r;
+    if (errno != EINVAL)
+      return r;
+    no_cloexec_support = 1;
+#endif  /* O_CLOEXEC */
+  }
+
+  if (req->cb != NULL)
+    uv_rwlock_rdlock(&req->loop->cloexec_lock);
+
+  r = open(req->path, req->flags, req->mode);
+
+  /* In case of failure `uv__cloexec` will leave error in `errno`,
+   * so it is enough to just set `r` to `-1`.
+   */
+  if (r >= 0 && uv__cloexec(r, 1) != 0) {
+    r = uv__close(r);
+    if (r != 0)
+      abort();
+    r = -1;
+  }
+
+  if (req->cb != NULL)
+    uv_rwlock_rdunlock(&req->loop->cloexec_lock);
+
+  return r;
+}
+
+
+static ssize_t uv__fs_read(uv_fs_t* req) {
+#if defined(__linux__)
+  static int no_preadv;
+#endif
+  ssize_t result;
+
+#if defined(_AIX)
+  struct stat buf;
+  if(fstat(req->file, &buf))
+    return -1;
+  if(S_ISDIR(buf.st_mode)) {
+    errno = EISDIR;
+    return -1;
+  }
+#endif /* defined(_AIX) */
+  if (req->off < 0) {
+    if (req->nbufs == 1)
+      result = read(req->file, req->bufs[0].base, req->bufs[0].len);
+    else
+      result = readv(req->file, (struct iovec*) req->bufs, req->nbufs);
+  } else {
+    if (req->nbufs == 1) {
+      result = pread(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
+      goto done;
+    }
+
+#if HAVE_PREADV
+    result = preadv(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
+#else
+# if defined(__linux__)
+    if (no_preadv) retry:
+# endif
+    {
+      off_t nread;
+      size_t index;
+
+      nread = 0;
+      index = 0;
+      result = 1;
+      do {
+        if (req->bufs[index].len > 0) {
+          result = pread(req->file,
+                         req->bufs[index].base,
+                         req->bufs[index].len,
+                         req->off + nread);
+          if (result > 0)
+            nread += result;
+        }
+        index++;
+      } while (index < req->nbufs && result > 0);
+      if (nread > 0)
+        result = nread;
+    }
+# if defined(__linux__)
+    else {
+      result = uv__preadv(req->file,
+                          (struct iovec*)req->bufs,
+                          req->nbufs,
+                          req->off);
+      if (result == -1 && errno == ENOSYS) {
+        no_preadv = 1;
+        goto retry;
+      }
+    }
+# endif
+#endif
+  }
+
+done:
+  return result;
+}
+
+
+#if defined(__OpenBSD__) || (defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_8))
+static int uv__fs_scandir_filter(uv__dirent_t* dent) {
+#else
+static int uv__fs_scandir_filter(const uv__dirent_t* dent) {
+#endif
+  return strcmp(dent->d_name, ".") != 0 && strcmp(dent->d_name, "..") != 0;
+}
+
+
+static ssize_t uv__fs_scandir(uv_fs_t* req) {
+  uv__dirent_t **dents;
+  int saved_errno;
+  int n;
+
+  dents = NULL;
+  n = scandir(req->path, &dents, uv__fs_scandir_filter, alphasort);
+
+  /* NOTE: We will use nbufs as an index field */
+  req->nbufs = 0;
+
+  if (n == 0)
+    goto out; /* osx still needs to deallocate some memory */
+  else if (n == -1)
+    return n;
+
+  req->ptr = dents;
+
+  return n;
+
+out:
+  saved_errno = errno;
+  if (dents != NULL) {
+    int i;
+
+    /* Memory was allocated using the system allocator, so use free() here. */
+    for (i = 0; i < n; i++)
+      free(dents[i]);
+    free(dents);
+  }
+  errno = saved_errno;
+
+  req->ptr = NULL;
+
+  return n;
+}
+
+
+static ssize_t uv__fs_pathmax_size(const char* path) {
+  ssize_t pathmax;
+
+  pathmax = pathconf(path, _PC_PATH_MAX);
+
+  if (pathmax == -1) {
+#if defined(PATH_MAX)
+    return PATH_MAX;
+#else
+#error "PATH_MAX undefined in the current platform"
+#endif
+  }
+
+  return pathmax;
+}
+
+static ssize_t uv__fs_readlink(uv_fs_t* req) {
+  ssize_t len;
+  char* buf;
+
+  len = uv__fs_pathmax_size(req->path);
+  buf = uv__malloc(len + 1);
+
+  if (buf == NULL) {
+    errno = ENOMEM;
+    return -1;
+  }
+
+  len = readlink(req->path, buf, len);
+
+  if (len == -1) {
+    uv__free(buf);
+    return -1;
+  }
+
+  buf[len] = '\0';
+  req->ptr = buf;
+
+  return 0;
+}
+
+static ssize_t uv__fs_realpath(uv_fs_t* req) {
+  ssize_t len;
+  char* buf;
+
+  len = uv__fs_pathmax_size(req->path);
+  buf = uv__malloc(len + 1);
+
+  if (buf == NULL) {
+    errno = ENOMEM;
+    return -1;
+  }
+
+  if (realpath(req->path, buf) == NULL) {
+    uv__free(buf);
+    return -1;
+  }
+
+  req->ptr = buf;
+
+  return 0;
+}
+
+static ssize_t uv__fs_sendfile_emul(uv_fs_t* req) {
+  struct pollfd pfd;
+  int use_pread;
+  off_t offset;
+  ssize_t nsent;
+  ssize_t nread;
+  ssize_t nwritten;
+  size_t buflen;
+  size_t len;
+  ssize_t n;
+  int in_fd;
+  int out_fd;
+  char buf[8192];
+
+  len = req->bufsml[0].len;
+  in_fd = req->flags;
+  out_fd = req->file;
+  offset = req->off;
+  use_pread = 1;
+
+  /* Here are the rules regarding errors:
+   *
+   * 1. Read errors are reported only if nsent==0, otherwise we return nsent.
+   *    The user needs to know that some data has already been sent, to stop
+   *    them from sending it twice.
+   *
+   * 2. Write errors are always reported. Write errors are bad because they
+   *    mean data loss: we've read data but now we can't write it out.
+   *
+   * We try to use pread() and fall back to regular read() if the source fd
+   * doesn't support positional reads, for example when it's a pipe fd.
+   *
+   * If we get EAGAIN when writing to the target fd, we poll() on it until
+   * it becomes writable again.
+   *
+   * FIXME: If we get a write error when use_pread==1, it should be safe to
+   *        return the number of sent bytes instead of an error because pread()
+   *        is, in theory, idempotent. However, special files in /dev or /proc
+   *        may support pread() but not necessarily return the same data on
+   *        successive reads.
+   *
+   * FIXME: There is no way now to signal that we managed to send *some* data
+   *        before a write error.
+   */
+  for (nsent = 0; (size_t) nsent < len; ) {
+    buflen = len - nsent;
+
+    if (buflen > sizeof(buf))
+      buflen = sizeof(buf);
+
+    do
+      if (use_pread)
+        nread = pread(in_fd, buf, buflen, offset);
+      else
+        nread = read(in_fd, buf, buflen);
+    while (nread == -1 && errno == EINTR);
+
+    if (nread == 0)
+      goto out;
+
+    if (nread == -1) {
+      if (use_pread && nsent == 0 && (errno == EIO || errno == ESPIPE)) {
+        use_pread = 0;
+        continue;
+      }
+
+      if (nsent == 0)
+        nsent = -1;
+
+      goto out;
+    }
+
+    for (nwritten = 0; nwritten < nread; ) {
+      do
+        n = write(out_fd, buf + nwritten, nread - nwritten);
+      while (n == -1 && errno == EINTR);
+
+      if (n != -1) {
+        nwritten += n;
+        continue;
+      }
+
+      if (errno != EAGAIN && errno != EWOULDBLOCK) {
+        nsent = -1;
+        goto out;
+      }
+
+      pfd.fd = out_fd;
+      pfd.events = POLLOUT;
+      pfd.revents = 0;
+
+      do
+        n = poll(&pfd, 1, -1);
+      while (n == -1 && errno == EINTR);
+
+      if (n == -1 || (pfd.revents & ~POLLOUT) != 0) {
+        errno = EIO;
+        nsent = -1;
+        goto out;
+      }
+    }
+
+    offset += nread;
+    nsent += nread;
+  }
+
+out:
+  if (nsent != -1)
+    req->off = offset;
+
+  return nsent;
+}
+
+
+static ssize_t uv__fs_sendfile(uv_fs_t* req) {
+  int in_fd;
+  int out_fd;
+
+  in_fd = req->flags;
+  out_fd = req->file;
+
+#if defined(__linux__) || defined(__sun)
+  {
+    off_t off;
+    ssize_t r;
+
+    off = req->off;
+    r = sendfile(out_fd, in_fd, &off, req->bufsml[0].len);
+
+    /* sendfile() on SunOS returns EINVAL if the target fd is not a socket but
+     * it still writes out data. Fortunately, we can detect it by checking if
+     * the offset has been updated.
+     */
+    if (r != -1 || off > req->off) {
+      r = off - req->off;
+      req->off = off;
+      return r;
+    }
+
+    if (errno == EINVAL ||
+        errno == EIO ||
+        errno == ENOTSOCK ||
+        errno == EXDEV) {
+      errno = 0;
+      return uv__fs_sendfile_emul(req);
+    }
+
+    return -1;
+  }
+#elif defined(__APPLE__)           || \
+      defined(__DragonFly__)       || \
+      defined(__FreeBSD__)         || \
+      defined(__FreeBSD_kernel__)
+  {
+    off_t len;
+    ssize_t r;
+
+    /* sendfile() on FreeBSD and Darwin returns EAGAIN if the target fd is in
+     * non-blocking mode and not all data could be written. If a non-zero
+     * number of bytes have been sent, we don't consider it an error.
+     */
+
+#if defined(__FreeBSD__) || defined(__DragonFly__)
+    len = 0;
+    r = sendfile(in_fd, out_fd, req->off, req->bufsml[0].len, NULL, &len, 0);
+#elif defined(__FreeBSD_kernel__)
+    len = 0;
+    r = bsd_sendfile(in_fd,
+                     out_fd,
+                     req->off,
+                     req->bufsml[0].len,
+                     NULL,
+                     &len,
+                     0);
+#else
+    /* The darwin sendfile takes len as an input for the length to send,
+     * so make sure to initialize it with the caller's value. */
+    len = req->bufsml[0].len;
+    r = sendfile(in_fd, out_fd, req->off, &len, NULL, 0);
+#endif
+
+    /*
+     * The man page for sendfile(2) on DragonFly states that `len` contains
+     * a meaningful value ONLY in case of EAGAIN and EINTR.
+     * Nothing is said about its value in case of other errors, so better
+     * not to depend on the potentially wrong assumption that it was not
+     * modified by the syscall.
+     */
+    if (r == 0 || ((errno == EAGAIN || errno == EINTR) && len != 0)) {
+      req->off += len;
+      return (ssize_t) len;
+    }
+
+    if (errno == EINVAL ||
+        errno == EIO ||
+        errno == ENOTSOCK ||
+        errno == EXDEV) {
+      errno = 0;
+      return uv__fs_sendfile_emul(req);
+    }
+
+    return -1;
+  }
+#else
+  /* Squelch compiler warnings. */
+  (void) &in_fd;
+  (void) &out_fd;
+
+  return uv__fs_sendfile_emul(req);
+#endif
+}
+
+
+static ssize_t uv__fs_utime(uv_fs_t* req) {
+  struct utimbuf buf;
+  buf.actime = req->atime;
+  buf.modtime = req->mtime;
+  return utime(req->path, &buf); /* TODO use utimes() where available */
+}
+
+
+static ssize_t uv__fs_write(uv_fs_t* req) {
+#if defined(__linux__)
+  static int no_pwritev;
+#endif
+  ssize_t r;
+
+  /* Serialize writes on OS X; concurrent write() and pwrite() calls result in
+   * data loss. We can't use a per-file-descriptor lock because the descriptor
+   * may be a dup().
+   */
+#if defined(__APPLE__)
+  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
+
+  if (pthread_mutex_lock(&lock))
+    abort();
+#endif
+
+  if (req->off < 0) {
+    if (req->nbufs == 1)
+      r = write(req->file, req->bufs[0].base, req->bufs[0].len);
+    else
+      r = writev(req->file, (struct iovec*) req->bufs, req->nbufs);
+  } else {
+    if (req->nbufs == 1) {
+      r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
+      goto done;
+    }
+#if HAVE_PREADV
+    r = pwritev(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
+#else
+# if defined(__linux__)
+    if (no_pwritev) retry:
+# endif
+    {
+      off_t written;
+      size_t index;
+
+      written = 0;
+      index = 0;
+      r = 0;
+      do {
+        if (req->bufs[index].len > 0) {
+          r = pwrite(req->file,
+                     req->bufs[index].base,
+                     req->bufs[index].len,
+                     req->off + written);
+          if (r > 0)
+            written += r;
+        }
+        index++;
+      } while (index < req->nbufs && r >= 0);
+      if (written > 0)
+        r = written;
+    }
+# if defined(__linux__)
+    else {
+      r = uv__pwritev(req->file,
+                      (struct iovec*) req->bufs,
+                      req->nbufs,
+                      req->off);
+      if (r == -1 && errno == ENOSYS) {
+        no_pwritev = 1;
+        goto retry;
+      }
+    }
+# endif
+#endif
+  }
+
+done:
+#if defined(__APPLE__)
+  if (pthread_mutex_unlock(&lock))
+    abort();
+#endif
+
+  return r;
+}
+
+static void uv__to_stat(struct stat* src, uv_stat_t* dst) {
+  dst->st_dev = src->st_dev;
+  dst->st_mode = src->st_mode;
+  dst->st_nlink = src->st_nlink;
+  dst->st_uid = src->st_uid;
+  dst->st_gid = src->st_gid;
+  dst->st_rdev = src->st_rdev;
+  dst->st_ino = src->st_ino;
+  dst->st_size = src->st_size;
+  dst->st_blksize = src->st_blksize;
+  dst->st_blocks = src->st_blocks;
+
+#if defined(__APPLE__)
+  dst->st_atim.tv_sec = src->st_atimespec.tv_sec;
+  dst->st_atim.tv_nsec = src->st_atimespec.tv_nsec;
+  dst->st_mtim.tv_sec = src->st_mtimespec.tv_sec;
+  dst->st_mtim.tv_nsec = src->st_mtimespec.tv_nsec;
+  dst->st_ctim.tv_sec = src->st_ctimespec.tv_sec;
+  dst->st_ctim.tv_nsec = src->st_ctimespec.tv_nsec;
+  dst->st_birthtim.tv_sec = src->st_birthtimespec.tv_sec;
+  dst->st_birthtim.tv_nsec = src->st_birthtimespec.tv_nsec;
+  dst->st_flags = src->st_flags;
+  dst->st_gen = src->st_gen;
+#elif defined(__ANDROID__)
+  dst->st_atim.tv_sec = src->st_atime;
+  dst->st_atim.tv_nsec = src->st_atimensec;
+  dst->st_mtim.tv_sec = src->st_mtime;
+  dst->st_mtim.tv_nsec = src->st_mtimensec;
+  dst->st_ctim.tv_sec = src->st_ctime;
+  dst->st_ctim.tv_nsec = src->st_ctimensec;
+  dst->st_birthtim.tv_sec = src->st_ctime;
+  dst->st_birthtim.tv_nsec = src->st_ctimensec;
+  dst->st_flags = 0;
+  dst->st_gen = 0;
+#elif !defined(_AIX) && (       \
+    defined(_BSD_SOURCE)     || \
+    defined(_SVID_SOURCE)    || \
+    defined(_XOPEN_SOURCE)   || \
+    defined(_DEFAULT_SOURCE))
+  dst->st_atim.tv_sec = src->st_atim.tv_sec;
+  dst->st_atim.tv_nsec = src->st_atim.tv_nsec;
+  dst->st_mtim.tv_sec = src->st_mtim.tv_sec;
+  dst->st_mtim.tv_nsec = src->st_mtim.tv_nsec;
+  dst->st_ctim.tv_sec = src->st_ctim.tv_sec;
+  dst->st_ctim.tv_nsec = src->st_ctim.tv_nsec;
+# if defined(__DragonFly__)  || \
+     defined(__FreeBSD__)    || \
+     defined(__OpenBSD__)    || \
+     defined(__NetBSD__)
+  dst->st_birthtim.tv_sec = src->st_birthtim.tv_sec;
+  dst->st_birthtim.tv_nsec = src->st_birthtim.tv_nsec;
+  dst->st_flags = src->st_flags;
+  dst->st_gen = src->st_gen;
+# else
+  dst->st_birthtim.tv_sec = src->st_ctim.tv_sec;
+  dst->st_birthtim.tv_nsec = src->st_ctim.tv_nsec;
+  dst->st_flags = 0;
+  dst->st_gen = 0;
+# endif
+#else
+  dst->st_atim.tv_sec = src->st_atime;
+  dst->st_atim.tv_nsec = 0;
+  dst->st_mtim.tv_sec = src->st_mtime;
+  dst->st_mtim.tv_nsec = 0;
+  dst->st_ctim.tv_sec = src->st_ctime;
+  dst->st_ctim.tv_nsec = 0;
+  dst->st_birthtim.tv_sec = src->st_ctime;
+  dst->st_birthtim.tv_nsec = 0;
+  dst->st_flags = 0;
+  dst->st_gen = 0;
+#endif
+}
+
+
+static int uv__fs_stat(const char *path, uv_stat_t *buf) {
+  struct stat pbuf;
+  int ret;
+
+  ret = stat(path, &pbuf);
+  if (ret == 0)
+    uv__to_stat(&pbuf, buf);
+
+  return ret;
+}
+
+
+static int uv__fs_lstat(const char *path, uv_stat_t *buf) {
+  struct stat pbuf;
+  int ret;
+
+  ret = lstat(path, &pbuf);
+  if (ret == 0)
+    uv__to_stat(&pbuf, buf);
+
+  return ret;
+}
+
+
+static int uv__fs_fstat(int fd, uv_stat_t *buf) {
+  struct stat pbuf;
+  int ret;
+
+  ret = fstat(fd, &pbuf);
+  if (ret == 0)
+    uv__to_stat(&pbuf, buf);
+
+  return ret;
+}
+
+
+typedef ssize_t (*uv__fs_buf_iter_processor)(uv_fs_t* req);
+static ssize_t uv__fs_buf_iter(uv_fs_t* req, uv__fs_buf_iter_processor process) {
+  unsigned int iovmax;
+  unsigned int nbufs;
+  uv_buf_t* bufs;
+  ssize_t total;
+  ssize_t result;
+
+  iovmax = uv__getiovmax();
+  nbufs = req->nbufs;
+  bufs = req->bufs;
+  total = 0;
+
+  while (nbufs > 0) {
+    req->nbufs = nbufs;
+    if (req->nbufs > iovmax)
+      req->nbufs = iovmax;
+
+    result = process(req);
+    if (result <= 0) {
+      if (total == 0)
+        total = result;
+      break;
+    }
+
+    if (req->off >= 0)
+      req->off += result;
+
+    req->bufs += req->nbufs;
+    nbufs -= req->nbufs;
+    total += result;
+  }
+
+  if (errno == EINTR && total == -1)
+    return total;
+
+  if (bufs != req->bufsml)
+    uv__free(bufs);
+
+  req->bufs = NULL;
+  req->nbufs = 0;
+
+  return total;
+}
+
+
+static void uv__fs_work(struct uv__work* w) {
+  int retry_on_eintr;
+  uv_fs_t* req;
+  ssize_t r;
+
+  req = container_of(w, uv_fs_t, work_req);
+  retry_on_eintr = !(req->fs_type == UV_FS_CLOSE);
+
+  do {
+    errno = 0;
+
+#define X(type, action)                                                       \
+  case UV_FS_ ## type:                                                        \
+    r = action;                                                               \
+    break;
+
+    switch (req->fs_type) {
+    X(ACCESS, access(req->path, req->flags));
+    X(CHMOD, chmod(req->path, req->mode));
+    X(CHOWN, chown(req->path, req->uid, req->gid));
+    X(CLOSE, close(req->file));
+    X(FCHMOD, fchmod(req->file, req->mode));
+    X(FCHOWN, fchown(req->file, req->uid, req->gid));
+    X(FDATASYNC, uv__fs_fdatasync(req));
+    X(FSTAT, uv__fs_fstat(req->file, &req->statbuf));
+    X(FSYNC, fsync(req->file));
+    X(FTRUNCATE, ftruncate(req->file, req->off));
+    X(FUTIME, uv__fs_futime(req));
+    X(LSTAT, uv__fs_lstat(req->path, &req->statbuf));
+    X(LINK, link(req->path, req->new_path));
+    X(MKDIR, mkdir(req->path, req->mode));
+    X(MKDTEMP, uv__fs_mkdtemp(req));
+    X(OPEN, uv__fs_open(req));
+    X(READ, uv__fs_buf_iter(req, uv__fs_read));
+    X(SCANDIR, uv__fs_scandir(req));
+    X(READLINK, uv__fs_readlink(req));
+    X(REALPATH, uv__fs_realpath(req));
+    X(RENAME, rename(req->path, req->new_path));
+    X(RMDIR, rmdir(req->path));
+    X(SENDFILE, uv__fs_sendfile(req));
+    X(STAT, uv__fs_stat(req->path, &req->statbuf));
+    X(SYMLINK, symlink(req->path, req->new_path));
+    X(UNLINK, unlink(req->path));
+    X(UTIME, uv__fs_utime(req));
+    X(WRITE, uv__fs_buf_iter(req, uv__fs_write));
+    default: abort();
+    }
+#undef X
+  } while (r == -1 && errno == EINTR && retry_on_eintr);
+
+  if (r == -1)
+    req->result = -errno;
+  else
+    req->result = r;
+
+  if (r == 0 && (req->fs_type == UV_FS_STAT ||
+                 req->fs_type == UV_FS_FSTAT ||
+                 req->fs_type == UV_FS_LSTAT)) {
+    req->ptr = &req->statbuf;
+  }
+}
+
+
+static void uv__fs_done(struct uv__work* w, int status) {
+  uv_fs_t* req;
+
+  req = container_of(w, uv_fs_t, work_req);
+  uv__req_unregister(req->loop, req);
+
+  if (status == -ECANCELED) {
+    assert(req->result == 0);
+    req->result = -ECANCELED;
+  }
+
+  req->cb(req);
+}
+
+
+int uv_fs_access(uv_loop_t* loop,
+                 uv_fs_t* req,
+                 const char* path,
+                 int flags,
+                 uv_fs_cb cb) {
+  INIT(ACCESS);
+  PATH;
+  req->flags = flags;
+  POST;
+}
+
+
+int uv_fs_chmod(uv_loop_t* loop,
+                uv_fs_t* req,
+                const char* path,
+                int mode,
+                uv_fs_cb cb) {
+  INIT(CHMOD);
+  PATH;
+  req->mode = mode;
+  POST;
+}
+
+
+int uv_fs_chown(uv_loop_t* loop,
+                uv_fs_t* req,
+                const char* path,
+                uv_uid_t uid,
+                uv_gid_t gid,
+                uv_fs_cb cb) {
+  INIT(CHOWN);
+  PATH;
+  req->uid = uid;
+  req->gid = gid;
+  POST;
+}
+
+
+int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
+  INIT(CLOSE);
+  req->file = file;
+  POST;
+}
+
+
+int uv_fs_fchmod(uv_loop_t* loop,
+                 uv_fs_t* req,
+                 uv_file file,
+                 int mode,
+                 uv_fs_cb cb) {
+  INIT(FCHMOD);
+  req->file = file;
+  req->mode = mode;
+  POST;
+}
+
+
+int uv_fs_fchown(uv_loop_t* loop,
+                 uv_fs_t* req,
+                 uv_file file,
+                 uv_uid_t uid,
+                 uv_gid_t gid,
+                 uv_fs_cb cb) {
+  INIT(FCHOWN);
+  req->file = file;
+  req->uid = uid;
+  req->gid = gid;
+  POST;
+}
+
+
+int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
+  INIT(FDATASYNC);
+  req->file = file;
+  POST;
+}
+
+
+int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
+  INIT(FSTAT);
+  req->file = file;
+  POST;
+}
+
+
+int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
+  INIT(FSYNC);
+  req->file = file;
+  POST;
+}
+
+
+int uv_fs_ftruncate(uv_loop_t* loop,
+                    uv_fs_t* req,
+                    uv_file file,
+                    int64_t off,
+                    uv_fs_cb cb) {
+  INIT(FTRUNCATE);
+  req->file = file;
+  req->off = off;
+  POST;
+}
+
+
+int uv_fs_futime(uv_loop_t* loop,
+                 uv_fs_t* req,
+                 uv_file file,
+                 double atime,
+                 double mtime,
+                 uv_fs_cb cb) {
+  INIT(FUTIME);
+  req->file = file;
+  req->atime = atime;
+  req->mtime = mtime;
+  POST;
+}
+
+
+int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
+  INIT(LSTAT);
+  PATH;
+  POST;
+}
+
+
+int uv_fs_link(uv_loop_t* loop,
+               uv_fs_t* req,
+               const char* path,
+               const char* new_path,
+               uv_fs_cb cb) {
+  INIT(LINK);
+  PATH2;
+  POST;
+}
+
+
+int uv_fs_mkdir(uv_loop_t* loop,
+                uv_fs_t* req,
+                const char* path,
+                int mode,
+                uv_fs_cb cb) {
+  INIT(MKDIR);
+  PATH;
+  req->mode = mode;
+  POST;
+}
+
+
+int uv_fs_mkdtemp(uv_loop_t* loop,
+                  uv_fs_t* req,
+                  const char* tpl,
+                  uv_fs_cb cb) {
+  INIT(MKDTEMP);
+  req->path = uv__strdup(tpl);
+  if (req->path == NULL) {
+    if (cb != NULL)
+      uv__req_unregister(loop, req);
+    return -ENOMEM;
+  }
+  POST;
+}
+
+
+int uv_fs_open(uv_loop_t* loop,
+               uv_fs_t* req,
+               const char* path,
+               int flags,
+               int mode,
+               uv_fs_cb cb) {
+  INIT(OPEN);
+  PATH;
+  req->flags = flags;
+  req->mode = mode;
+  POST;
+}
+
+
+int uv_fs_read(uv_loop_t* loop,
+               uv_fs_t* req,
+               uv_file file,
+               const uv_buf_t bufs[],
+               unsigned int nbufs,
+               int64_t off,
+               uv_fs_cb cb) {
+  if (bufs == NULL || nbufs == 0)
+    return -EINVAL;
+
+  INIT(READ);
+  req->file = file;
+
+  req->nbufs = nbufs;
+  req->bufs = req->bufsml;
+  if (nbufs > ARRAY_SIZE(req->bufsml))
+    req->bufs = uv__malloc(nbufs * sizeof(*bufs));
+
+  if (req->bufs == NULL) {
+    if (cb != NULL)
+      uv__req_unregister(loop, req);
+    return -ENOMEM;
+  }
+
+  memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));
+
+  req->off = off;
+  POST;
+}
+
+
+int uv_fs_scandir(uv_loop_t* loop,
+                  uv_fs_t* req,
+                  const char* path,
+                  int flags,
+                  uv_fs_cb cb) {
+  INIT(SCANDIR);
+  PATH;
+  req->flags = flags;
+  POST;
+}
+
+
+int uv_fs_readlink(uv_loop_t* loop,
+                   uv_fs_t* req,
+                   const char* path,
+                   uv_fs_cb cb) {
+  INIT(READLINK);
+  PATH;
+  POST;
+}
+
+
+int uv_fs_realpath(uv_loop_t* loop,
+                   uv_fs_t* req,
+                   const char* path,
+                   uv_fs_cb cb) {
+  INIT(REALPATH);
+  PATH;
+  POST;
+}
+
+
+int uv_fs_rename(uv_loop_t* loop,
+                 uv_fs_t* req,
+                 const char* path,
+                 const char* new_path,
+                 uv_fs_cb cb) {
+  INIT(RENAME);
+  PATH2;
+  POST;
+}
+
+
+int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
+  INIT(RMDIR);
+  PATH;
+  POST;
+}
+
+
+int uv_fs_sendfile(uv_loop_t* loop,
+                   uv_fs_t* req,
+                   uv_file out_fd,
+                   uv_file in_fd,
+                   int64_t off,
+                   size_t len,
+                   uv_fs_cb cb) {
+  INIT(SENDFILE);
+  req->flags = in_fd; /* hack */
+  req->file = out_fd;
+  req->off = off;
+  req->bufsml[0].len = len;
+  POST;
+}
+
+
+int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
+  INIT(STAT);
+  PATH;
+  POST;
+}
+
+
+int uv_fs_symlink(uv_loop_t* loop,
+                  uv_fs_t* req,
+                  const char* path,
+                  const char* new_path,
+                  int flags,
+                  uv_fs_cb cb) {
+  INIT(SYMLINK);
+  PATH2;
+  req->flags = flags;
+  POST;
+}
+
+
+int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
+  INIT(UNLINK);
+  PATH;
+  POST;
+}
+
+
+int uv_fs_utime(uv_loop_t* loop,
+                uv_fs_t* req,
+                const char* path,
+                double atime,
+                double mtime,
+                uv_fs_cb cb) {
+  INIT(UTIME);
+  PATH;
+  req->atime = atime;
+  req->mtime = mtime;
+  POST;
+}
+
+
+int uv_fs_write(uv_loop_t* loop,
+                uv_fs_t* req,
+                uv_file file,
+                const uv_buf_t bufs[],
+                unsigned int nbufs,
+                int64_t off,
+                uv_fs_cb cb) {
+  if (bufs == NULL || nbufs == 0)
+    return -EINVAL;
+
+  INIT(WRITE);
+  req->file = file;
+
+  req->nbufs = nbufs;
+  req->bufs = req->bufsml;
+  if (nbufs > ARRAY_SIZE(req->bufsml))
+    req->bufs = uv__malloc(nbufs * sizeof(*bufs));
+
+  if (req->bufs == NULL) {
+    if (cb != NULL)
+      uv__req_unregister(loop, req);
+    return -ENOMEM;
+  }
+
+  memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));
+
+  req->off = off;
+  POST;
+}
+
+
+void uv_fs_req_cleanup(uv_fs_t* req) {
+  /* Only necessary for asynchronous requests, i.e., requests with a callback.
+   * Synchronous ones don't copy their arguments and have req->path and
+   * req->new_path pointing to user-owned memory.  UV_FS_MKDTEMP is the
+   * exception to the rule; it always allocates memory.
+   */
+  if (req->path != NULL && (req->cb != NULL || req->fs_type == UV_FS_MKDTEMP))
+    uv__free((void*) req->path);  /* Memory is shared with req->new_path. */
+
+  req->path = NULL;
+  req->new_path = NULL;
+
+  if (req->fs_type == UV_FS_SCANDIR && req->ptr != NULL)
+    uv__fs_scandir_cleanup(req);
+
+  if (req->ptr != &req->statbuf)
+    uv__free(req->ptr);
+  req->ptr = NULL;
+}
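
Each uv_fs_* entry point above follows the same INIT/PATH/POST pattern; as with the getaddrinfo and getnameinfo requests later in this change, passing a NULL callback runs the operation on the calling thread and returns the result (a negative errno value on failure) directly, with uv_fs_req_cleanup() releasing any request-owned memory afterwards. A minimal synchronous usage sketch of that API; uv_default_loop(), uv_buf_init() and the file name are illustrative and come from libuv's public headers rather than this file:

    #include <fcntl.h>
    #include <stdio.h>
    #include "uv.h"

    int main(void) {
      uv_fs_t req;
      char data[256];
      uv_buf_t buf = uv_buf_init(data, sizeof(data) - 1);

      /* cb == NULL: the work runs inline and the result (the new fd, or a
       * negative errno value) is returned directly. */
      int fd = uv_fs_open(uv_default_loop(), &req, "CMakeLists.txt",
                          O_RDONLY, 0, NULL);
      uv_fs_req_cleanup(&req);
      if (fd < 0)
        return 1;

      /* Offset -1 means "read from the current file position". */
      int n = uv_fs_read(uv_default_loop(), &req, fd, &buf, 1, -1, NULL);
      uv_fs_req_cleanup(&req);
      if (n > 0) {
        data[n] = '\0';
        printf("%s", data);
      }

      uv_fs_close(uv_default_loop(), &req, fd, NULL);
      uv_fs_req_cleanup(&req);
      return 0;
    }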

+ 904 - 0
Utilities/cmlibuv/src/unix/fsevents.c

@@ -0,0 +1,904 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#if TARGET_OS_IPHONE
+
+/* iOS (currently) doesn't provide the FSEvents-API (nor CoreServices) */
+
+int uv__fsevents_init(uv_fs_event_t* handle) {
+  return 0;
+}
+
+
+int uv__fsevents_close(uv_fs_event_t* handle) {
+  return 0;
+}
+
+
+void uv__fsevents_loop_delete(uv_loop_t* loop) {
+}
+
+#else /* TARGET_OS_IPHONE */
+
+#include <dlfcn.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <pthread.h>
+
+#include <CoreFoundation/CFRunLoop.h>
+#include <CoreServices/CoreServices.h>
+
+/* These are macros to avoid "initializer element is not constant" errors
+ * with old versions of gcc.
+ */
+#define kFSEventsModified (kFSEventStreamEventFlagItemFinderInfoMod |         \
+                           kFSEventStreamEventFlagItemModified |              \
+                           kFSEventStreamEventFlagItemInodeMetaMod |          \
+                           kFSEventStreamEventFlagItemChangeOwner |           \
+                           kFSEventStreamEventFlagItemXattrMod)
+
+#define kFSEventsRenamed  (kFSEventStreamEventFlagItemCreated |               \
+                           kFSEventStreamEventFlagItemRemoved |               \
+                           kFSEventStreamEventFlagItemRenamed)
+
+#define kFSEventsSystem   (kFSEventStreamEventFlagUserDropped |               \
+                           kFSEventStreamEventFlagKernelDropped |             \
+                           kFSEventStreamEventFlagEventIdsWrapped |           \
+                           kFSEventStreamEventFlagHistoryDone |               \
+                           kFSEventStreamEventFlagMount |                     \
+                           kFSEventStreamEventFlagUnmount |                   \
+                           kFSEventStreamEventFlagRootChanged)
+
+typedef struct uv__fsevents_event_s uv__fsevents_event_t;
+typedef struct uv__cf_loop_signal_s uv__cf_loop_signal_t;
+typedef struct uv__cf_loop_state_s uv__cf_loop_state_t;
+
+enum uv__cf_loop_signal_type_e {
+  kUVCFLoopSignalRegular,
+  kUVCFLoopSignalClosing
+};
+typedef enum uv__cf_loop_signal_type_e uv__cf_loop_signal_type_t;
+
+struct uv__cf_loop_signal_s {
+  QUEUE member;
+  uv_fs_event_t* handle;
+  uv__cf_loop_signal_type_t type;
+};
+
+struct uv__fsevents_event_s {
+  QUEUE member;
+  int events;
+  char path[1];
+};
+
+struct uv__cf_loop_state_s {
+  CFRunLoopRef loop;
+  CFRunLoopSourceRef signal_source;
+  int fsevent_need_reschedule;
+  FSEventStreamRef fsevent_stream;
+  uv_sem_t fsevent_sem;
+  uv_mutex_t fsevent_mutex;
+  void* fsevent_handles[2];
+  unsigned int fsevent_handle_count;
+};
+
+/* Forward declarations */
+static void uv__cf_loop_cb(void* arg);
+static void* uv__cf_loop_runner(void* arg);
+static int uv__cf_loop_signal(uv_loop_t* loop,
+                              uv_fs_event_t* handle,
+                              uv__cf_loop_signal_type_t type);
+
+/* Lazy-loaded by uv__fsevents_global_init(). */
+static CFArrayRef (*pCFArrayCreate)(CFAllocatorRef,
+                                    const void**,
+                                    CFIndex,
+                                    const CFArrayCallBacks*);
+static void (*pCFRelease)(CFTypeRef);
+static void (*pCFRunLoopAddSource)(CFRunLoopRef,
+                                   CFRunLoopSourceRef,
+                                   CFStringRef);
+static CFRunLoopRef (*pCFRunLoopGetCurrent)(void);
+static void (*pCFRunLoopRemoveSource)(CFRunLoopRef,
+                                      CFRunLoopSourceRef,
+                                      CFStringRef);
+static void (*pCFRunLoopRun)(void);
+static CFRunLoopSourceRef (*pCFRunLoopSourceCreate)(CFAllocatorRef,
+                                                    CFIndex,
+                                                    CFRunLoopSourceContext*);
+static void (*pCFRunLoopSourceSignal)(CFRunLoopSourceRef);
+static void (*pCFRunLoopStop)(CFRunLoopRef);
+static void (*pCFRunLoopWakeUp)(CFRunLoopRef);
+static CFStringRef (*pCFStringCreateWithFileSystemRepresentation)(
+    CFAllocatorRef,
+    const char*);
+static CFStringEncoding (*pCFStringGetSystemEncoding)(void);
+static CFStringRef (*pkCFRunLoopDefaultMode);
+static FSEventStreamRef (*pFSEventStreamCreate)(CFAllocatorRef,
+                                                FSEventStreamCallback,
+                                                FSEventStreamContext*,
+                                                CFArrayRef,
+                                                FSEventStreamEventId,
+                                                CFTimeInterval,
+                                                FSEventStreamCreateFlags);
+static void (*pFSEventStreamFlushSync)(FSEventStreamRef);
+static void (*pFSEventStreamInvalidate)(FSEventStreamRef);
+static void (*pFSEventStreamRelease)(FSEventStreamRef);
+static void (*pFSEventStreamScheduleWithRunLoop)(FSEventStreamRef,
+                                                 CFRunLoopRef,
+                                                 CFStringRef);
+static Boolean (*pFSEventStreamStart)(FSEventStreamRef);
+static void (*pFSEventStreamStop)(FSEventStreamRef);
+
+#define UV__FSEVENTS_PROCESS(handle, block)                                   \
+    do {                                                                      \
+      QUEUE events;                                                           \
+      QUEUE* q;                                                               \
+      uv__fsevents_event_t* event;                                            \
+      int err;                                                                \
+      uv_mutex_lock(&(handle)->cf_mutex);                                     \
+      /* Split-off all events and empty original queue */                     \
+      QUEUE_MOVE(&(handle)->cf_events, &events);                              \
+      /* Get error (if any) and zero original one */                          \
+      err = (handle)->cf_error;                                               \
+      (handle)->cf_error = 0;                                                 \
+      uv_mutex_unlock(&(handle)->cf_mutex);                                   \
+      /* Loop through events, deallocating each after processing */           \
+      while (!QUEUE_EMPTY(&events)) {                                         \
+        q = QUEUE_HEAD(&events);                                              \
+        event = QUEUE_DATA(q, uv__fsevents_event_t, member);                  \
+        QUEUE_REMOVE(q);                                                      \
+        /* NOTE: Checking uv__is_active() is required here, because the       \
+         * handle's callback may close the handle; invoking it afterwards     \
+         * would lead to incorrect behaviour. */                              \
+        if (!uv__is_closing((handle)) && uv__is_active((handle)))             \
+          block                                                               \
+        /* Free allocated data */                                             \
+        uv__free(event);                                                      \
+      }                                                                       \
+      if (err != 0 && !uv__is_closing((handle)) && uv__is_active((handle)))   \
+        (handle)->cb((handle), NULL, 0, err);                                 \
+    } while (0)
+
+
+/* Runs in UV loop's thread when there are events to report to the handle */
+static void uv__fsevents_cb(uv_async_t* cb) {
+  uv_fs_event_t* handle;
+
+  handle = cb->data;
+
+  UV__FSEVENTS_PROCESS(handle, {
+    handle->cb(handle, event->path[0] ? event->path : NULL, event->events, 0);
+  });
+}
+
+
+/* Runs in CF thread, pushes event into handle's event list */
+static void uv__fsevents_push_event(uv_fs_event_t* handle,
+                                    QUEUE* events,
+                                    int err) {
+  assert(events != NULL || err != 0);
+  uv_mutex_lock(&handle->cf_mutex);
+
+  /* Concatenate two queues */
+  if (events != NULL)
+    QUEUE_ADD(&handle->cf_events, events);
+
+  /* Propagate error */
+  if (err != 0)
+    handle->cf_error = err;
+  uv_mutex_unlock(&handle->cf_mutex);
+
+  uv_async_send(handle->cf_cb);
+}
+
+
+/* Runs in CF thread when there are events in the FSEventStream */
+static void uv__fsevents_event_cb(ConstFSEventStreamRef streamRef,
+                                  void* info,
+                                  size_t numEvents,
+                                  void* eventPaths,
+                                  const FSEventStreamEventFlags eventFlags[],
+                                  const FSEventStreamEventId eventIds[]) {
+  size_t i;
+  int len;
+  char** paths;
+  char* path;
+  char* pos;
+  uv_fs_event_t* handle;
+  QUEUE* q;
+  uv_loop_t* loop;
+  uv__cf_loop_state_t* state;
+  uv__fsevents_event_t* event;
+  QUEUE head;
+
+  loop = info;
+  state = loop->cf_state;
+  assert(state != NULL);
+  paths = eventPaths;
+
+  /* For each handle */
+  uv_mutex_lock(&state->fsevent_mutex);
+  QUEUE_FOREACH(q, &state->fsevent_handles) {
+    handle = QUEUE_DATA(q, uv_fs_event_t, cf_member);
+    QUEUE_INIT(&head);
+
+    /* Process and filter out events */
+    for (i = 0; i < numEvents; i++) {
+      /* Ignore system events */
+      if (eventFlags[i] & kFSEventsSystem)
+        continue;
+
+      path = paths[i];
+      len = strlen(path);
+
+      /* Filter out paths that are outside handle's request */
+      if (strncmp(path, handle->realpath, handle->realpath_len) != 0)
+        continue;
+
+      if (handle->realpath_len > 1 || *handle->realpath != '/') {
+        path += handle->realpath_len;
+        len -= handle->realpath_len;
+
+        /* Skip forward slash */
+        if (*path != '\0') {
+          path++;
+          len--;
+        }
+      }
+
+#ifdef MAC_OS_X_VERSION_10_7
+      /* Ignore events with path equal to directory itself */
+      if (len == 0)
+        continue;
+#endif /* MAC_OS_X_VERSION_10_7 */
+
+      /* Skip events from subdirectories unless UV_FS_EVENT_RECURSIVE is set */
+      if ((handle->cf_flags & UV_FS_EVENT_RECURSIVE) == 0 && *path != 0) {
+        pos = strchr(path + 1, '/');
+        if (pos != NULL)
+          continue;
+      }
+
+#ifndef MAC_OS_X_VERSION_10_7
+      path = "";
+      len = 0;
+#endif /* MAC_OS_X_VERSION_10_7 */
+
+      event = uv__malloc(sizeof(*event) + len);
+      if (event == NULL)
+        break;
+
+      memset(event, 0, sizeof(*event));
+      memcpy(event->path, path, len + 1);
+
+      if ((eventFlags[i] & kFSEventsModified) != 0 &&
+          (eventFlags[i] & kFSEventsRenamed) == 0)
+        event->events = UV_CHANGE;
+      else
+        event->events = UV_RENAME;
+
+      QUEUE_INSERT_TAIL(&head, &event->member);
+    }
+
+    if (!QUEUE_EMPTY(&head))
+      uv__fsevents_push_event(handle, &head, 0);
+  }
+  uv_mutex_unlock(&state->fsevent_mutex);
+}
+
+
+/* Runs in CF thread */
+static int uv__fsevents_create_stream(uv_loop_t* loop, CFArrayRef paths) {
+  uv__cf_loop_state_t* state;
+  FSEventStreamContext ctx;
+  FSEventStreamRef ref;
+  CFAbsoluteTime latency;
+  FSEventStreamCreateFlags flags;
+
+  /* Initialize context */
+  ctx.version = 0;
+  ctx.info = loop;
+  ctx.retain = NULL;
+  ctx.release = NULL;
+  ctx.copyDescription = NULL;
+
+  latency = 0.05;
+
+  /* Explanation of selected flags:
+   * 1. NoDefer - without this flag, events that happen continuously (i.e.
+   *    each event arrives less than `latency` after the previous one) are
+   *    deferred and passed to the callback only once they fill the whole
+   *    OS buffer, or once this continuous stream stops (i.e. the delay
+   *    between events grows beyond `latency`).
+   *    With this flag, the callback is invoked `latency` time after the
+   *    event.
+   * 2. FileEvents - fire the callback for file changes too (by default it
+   *    fires only for directory changes).
+   */
+  flags = kFSEventStreamCreateFlagNoDefer | kFSEventStreamCreateFlagFileEvents;
+
+  /*
+   * NOTE: It might sound like a good idea to remember last seen StreamEventId,
+   * but in reality one dir might have a last StreamEventId less than that of
+   * another dir being watched now, which would cause the FSEventStream API to
+   * report changes to files from the past.
+   */
+  ref = pFSEventStreamCreate(NULL,
+                             &uv__fsevents_event_cb,
+                             &ctx,
+                             paths,
+                             kFSEventStreamEventIdSinceNow,
+                             latency,
+                             flags);
+  assert(ref != NULL);
+
+  state = loop->cf_state;
+  pFSEventStreamScheduleWithRunLoop(ref,
+                                    state->loop,
+                                    *pkCFRunLoopDefaultMode);
+  if (!pFSEventStreamStart(ref)) {
+    pFSEventStreamInvalidate(ref);
+    pFSEventStreamRelease(ref);
+    return -EMFILE;
+  }
+
+  state->fsevent_stream = ref;
+  return 0;
+}
+
+
+/* Runs in CF thread */
+static void uv__fsevents_destroy_stream(uv_loop_t* loop) {
+  uv__cf_loop_state_t* state;
+
+  state = loop->cf_state;
+
+  if (state->fsevent_stream == NULL)
+    return;
+
+  /* Flush all accumulated events */
+  pFSEventStreamFlushSync(state->fsevent_stream);
+
+  /* Stop emitting events */
+  pFSEventStreamStop(state->fsevent_stream);
+
+  /* Release stream */
+  pFSEventStreamInvalidate(state->fsevent_stream);
+  pFSEventStreamRelease(state->fsevent_stream);
+  state->fsevent_stream = NULL;
+}
+
+
+/* Runs in CF thread when there are new fsevent handles to add to the stream */
+static void uv__fsevents_reschedule(uv_fs_event_t* handle,
+                                    uv__cf_loop_signal_type_t type) {
+  uv__cf_loop_state_t* state;
+  QUEUE* q;
+  uv_fs_event_t* curr;
+  CFArrayRef cf_paths;
+  CFStringRef* paths;
+  unsigned int i;
+  int err;
+  unsigned int path_count;
+
+  state = handle->loop->cf_state;
+  paths = NULL;
+  cf_paths = NULL;
+  err = 0;
+  /* NOTE: `i` is used in deallocation loop below */
+  i = 0;
+
+  /* Optimization to prevent O(n^2) time spent when starting to watch
+   * many files simultaneously
+   */
+  uv_mutex_lock(&state->fsevent_mutex);
+  if (state->fsevent_need_reschedule == 0) {
+    uv_mutex_unlock(&state->fsevent_mutex);
+    goto final;
+  }
+  state->fsevent_need_reschedule = 0;
+  uv_mutex_unlock(&state->fsevent_mutex);
+
+  /* Destroy previous FSEventStream */
+  uv__fsevents_destroy_stream(handle->loop);
+
+  /* Any failure below will be a memory failure */
+  err = -ENOMEM;
+
+  /* Create list of all watched paths */
+  uv_mutex_lock(&state->fsevent_mutex);
+  path_count = state->fsevent_handle_count;
+  if (path_count != 0) {
+    paths = uv__malloc(sizeof(*paths) * path_count);
+    if (paths == NULL) {
+      uv_mutex_unlock(&state->fsevent_mutex);
+      goto final;
+    }
+
+    q = &state->fsevent_handles;
+    for (; i < path_count; i++) {
+      q = QUEUE_NEXT(q);
+      assert(q != &state->fsevent_handles);
+      curr = QUEUE_DATA(q, uv_fs_event_t, cf_member);
+
+      assert(curr->realpath != NULL);
+      paths[i] =
+          pCFStringCreateWithFileSystemRepresentation(NULL, curr->realpath);
+      if (paths[i] == NULL) {
+        uv_mutex_unlock(&state->fsevent_mutex);
+        goto final;
+      }
+    }
+  }
+  uv_mutex_unlock(&state->fsevent_mutex);
+  err = 0;
+
+  if (path_count != 0) {
+    /* Create new FSEventStream */
+    cf_paths = pCFArrayCreate(NULL, (const void**) paths, path_count, NULL);
+    if (cf_paths == NULL) {
+      err = -ENOMEM;
+      goto final;
+    }
+    err = uv__fsevents_create_stream(handle->loop, cf_paths);
+  }
+
+final:
+  /* Deallocate all paths in case of failure */
+  if (err != 0) {
+    if (cf_paths == NULL) {
+      while (i != 0)
+        pCFRelease(paths[--i]);
+      uv__free(paths);
+    } else {
+      /* CFArray takes ownership of both strings and original C-array */
+      pCFRelease(cf_paths);
+    }
+
+    /* Broadcast error to all handles */
+    uv_mutex_lock(&state->fsevent_mutex);
+    QUEUE_FOREACH(q, &state->fsevent_handles) {
+      curr = QUEUE_DATA(q, uv_fs_event_t, cf_member);
+      uv__fsevents_push_event(curr, NULL, err);
+    }
+    uv_mutex_unlock(&state->fsevent_mutex);
+  }
+
+  /*
+   * The main thread will block until the handle is removed from the list,
+   * so we must tell it when we're ready.
+   *
+   * NOTE: This is coupled with `uv_sem_wait()` in `uv__fsevents_close`
+   */
+  if (type == kUVCFLoopSignalClosing)
+    uv_sem_post(&state->fsevent_sem);
+}
+
+
+static int uv__fsevents_global_init(void) {
+  static pthread_mutex_t global_init_mutex = PTHREAD_MUTEX_INITIALIZER;
+  static void* core_foundation_handle;
+  static void* core_services_handle;
+  int err;
+
+  err = 0;
+  pthread_mutex_lock(&global_init_mutex);
+  if (core_foundation_handle != NULL)
+    goto out;
+
+  /* The libraries are never unloaded because we currently don't have a good
+   * mechanism for keeping a reference count. It's unlikely to be an issue
+   * but if it ever becomes one, we can turn the dynamic library handles into
+   * per-event loop properties and have the dynamic linker keep track for us.
+   */
+  err = -ENOSYS;
+  core_foundation_handle = dlopen("/System/Library/Frameworks/"
+                                  "CoreFoundation.framework/"
+                                  "Versions/A/CoreFoundation",
+                                  RTLD_LAZY | RTLD_LOCAL);
+  if (core_foundation_handle == NULL)
+    goto out;
+
+  core_services_handle = dlopen("/System/Library/Frameworks/"
+                                "CoreServices.framework/"
+                                "Versions/A/CoreServices",
+                                RTLD_LAZY | RTLD_LOCAL);
+  if (core_services_handle == NULL)
+    goto out;
+
+  err = -ENOENT;
+#define V(handle, symbol)                                                     \
+  do {                                                                        \
+    *(void **)(&p ## symbol) = dlsym((handle), #symbol);                      \
+    if (p ## symbol == NULL)                                                  \
+      goto out;                                                               \
+  }                                                                           \
+  while (0)
+  V(core_foundation_handle, CFArrayCreate);
+  V(core_foundation_handle, CFRelease);
+  V(core_foundation_handle, CFRunLoopAddSource);
+  V(core_foundation_handle, CFRunLoopGetCurrent);
+  V(core_foundation_handle, CFRunLoopRemoveSource);
+  V(core_foundation_handle, CFRunLoopRun);
+  V(core_foundation_handle, CFRunLoopSourceCreate);
+  V(core_foundation_handle, CFRunLoopSourceSignal);
+  V(core_foundation_handle, CFRunLoopStop);
+  V(core_foundation_handle, CFRunLoopWakeUp);
+  V(core_foundation_handle, CFStringCreateWithFileSystemRepresentation);
+  V(core_foundation_handle, CFStringGetSystemEncoding);
+  V(core_foundation_handle, kCFRunLoopDefaultMode);
+  V(core_services_handle, FSEventStreamCreate);
+  V(core_services_handle, FSEventStreamFlushSync);
+  V(core_services_handle, FSEventStreamInvalidate);
+  V(core_services_handle, FSEventStreamRelease);
+  V(core_services_handle, FSEventStreamScheduleWithRunLoop);
+  V(core_services_handle, FSEventStreamStart);
+  V(core_services_handle, FSEventStreamStop);
+#undef V
+  err = 0;
+
+out:
+  if (err && core_services_handle != NULL) {
+    dlclose(core_services_handle);
+    core_services_handle = NULL;
+  }
+
+  if (err && core_foundation_handle != NULL) {
+    dlclose(core_foundation_handle);
+    core_foundation_handle = NULL;
+  }
+
+  pthread_mutex_unlock(&global_init_mutex);
+  return err;
+}
+
+
+/* Runs in UV loop */
+static int uv__fsevents_loop_init(uv_loop_t* loop) {
+  CFRunLoopSourceContext ctx;
+  uv__cf_loop_state_t* state;
+  pthread_attr_t attr_storage;
+  pthread_attr_t* attr;
+  int err;
+
+  if (loop->cf_state != NULL)
+    return 0;
+
+  err = uv__fsevents_global_init();
+  if (err)
+    return err;
+
+  state = uv__calloc(1, sizeof(*state));
+  if (state == NULL)
+    return -ENOMEM;
+
+  err = uv_mutex_init(&loop->cf_mutex);
+  if (err)
+    goto fail_mutex_init;
+
+  err = uv_sem_init(&loop->cf_sem, 0);
+  if (err)
+    goto fail_sem_init;
+
+  QUEUE_INIT(&loop->cf_signals);
+
+  err = uv_sem_init(&state->fsevent_sem, 0);
+  if (err)
+    goto fail_fsevent_sem_init;
+
+  err = uv_mutex_init(&state->fsevent_mutex);
+  if (err)
+    goto fail_fsevent_mutex_init;
+
+  QUEUE_INIT(&state->fsevent_handles);
+  state->fsevent_need_reschedule = 0;
+  state->fsevent_handle_count = 0;
+
+  memset(&ctx, 0, sizeof(ctx));
+  ctx.info = loop;
+  ctx.perform = uv__cf_loop_cb;
+  state->signal_source = pCFRunLoopSourceCreate(NULL, 0, &ctx);
+  if (state->signal_source == NULL) {
+    err = -ENOMEM;
+    goto fail_signal_source_create;
+  }
+
+  /* In the unlikely event that pthread_attr_init() fails, create the thread
+   * with the default stack size. We'll use a little more address space but
+   * that in itself is not a fatal error.
+   */
+  attr = &attr_storage;
+  if (pthread_attr_init(attr))
+    attr = NULL;
+
+  if (attr != NULL)
+    if (pthread_attr_setstacksize(attr, 4 * PTHREAD_STACK_MIN))
+      abort();
+
+  loop->cf_state = state;
+
+  /* uv_thread_t is an alias for pthread_t. */
+  err = -pthread_create(&loop->cf_thread, attr, uv__cf_loop_runner, loop);
+
+  if (attr != NULL)
+    pthread_attr_destroy(attr);
+
+  if (err)
+    goto fail_thread_create;
+
+  /* Synchronize threads */
+  uv_sem_wait(&loop->cf_sem);
+  return 0;
+
+fail_thread_create:
+  loop->cf_state = NULL;
+
+fail_signal_source_create:
+  uv_mutex_destroy(&state->fsevent_mutex);
+
+fail_fsevent_mutex_init:
+  uv_sem_destroy(&state->fsevent_sem);
+
+fail_fsevent_sem_init:
+  uv_sem_destroy(&loop->cf_sem);
+
+fail_sem_init:
+  uv_mutex_destroy(&loop->cf_mutex);
+
+fail_mutex_init:
+  uv__free(state);
+  return err;
+}
+
+
+/* Runs in UV loop */
+void uv__fsevents_loop_delete(uv_loop_t* loop) {
+  uv__cf_loop_signal_t* s;
+  uv__cf_loop_state_t* state;
+  QUEUE* q;
+
+  if (loop->cf_state == NULL)
+    return;
+
+  if (uv__cf_loop_signal(loop, NULL, kUVCFLoopSignalRegular) != 0)
+    abort();
+
+  uv_thread_join(&loop->cf_thread);
+  uv_sem_destroy(&loop->cf_sem);
+  uv_mutex_destroy(&loop->cf_mutex);
+
+  /* Free any remaining data */
+  while (!QUEUE_EMPTY(&loop->cf_signals)) {
+    q = QUEUE_HEAD(&loop->cf_signals);
+    s = QUEUE_DATA(q, uv__cf_loop_signal_t, member);
+    QUEUE_REMOVE(q);
+    uv__free(s);
+  }
+
+  /* Destroy state */
+  state = loop->cf_state;
+  uv_sem_destroy(&state->fsevent_sem);
+  uv_mutex_destroy(&state->fsevent_mutex);
+  pCFRelease(state->signal_source);
+  uv__free(state);
+  loop->cf_state = NULL;
+}
+
+
+/* Runs in CF thread. This is the CF loop's body */
+static void* uv__cf_loop_runner(void* arg) {
+  uv_loop_t* loop;
+  uv__cf_loop_state_t* state;
+
+  loop = arg;
+  state = loop->cf_state;
+  state->loop = pCFRunLoopGetCurrent();
+
+  pCFRunLoopAddSource(state->loop,
+                      state->signal_source,
+                      *pkCFRunLoopDefaultMode);
+
+  uv_sem_post(&loop->cf_sem);
+
+  pCFRunLoopRun();
+  pCFRunLoopRemoveSource(state->loop,
+                         state->signal_source,
+                         *pkCFRunLoopDefaultMode);
+
+  return NULL;
+}
+
+
+/* Runs in CF thread, executed after `uv__cf_loop_signal()` */
+static void uv__cf_loop_cb(void* arg) {
+  uv_loop_t* loop;
+  uv__cf_loop_state_t* state;
+  QUEUE* item;
+  QUEUE split_head;
+  uv__cf_loop_signal_t* s;
+
+  loop = arg;
+  state = loop->cf_state;
+
+  uv_mutex_lock(&loop->cf_mutex);
+  QUEUE_MOVE(&loop->cf_signals, &split_head);
+  uv_mutex_unlock(&loop->cf_mutex);
+
+  while (!QUEUE_EMPTY(&split_head)) {
+    item = QUEUE_HEAD(&split_head);
+    QUEUE_REMOVE(item);
+
+    s = QUEUE_DATA(item, uv__cf_loop_signal_t, member);
+
+    /* This was a termination signal */
+    if (s->handle == NULL)
+      pCFRunLoopStop(state->loop);
+    else
+      uv__fsevents_reschedule(s->handle, s->type);
+
+    uv__free(s);
+  }
+}
+
+
+/* Runs in UV loop to notify CF thread */
+int uv__cf_loop_signal(uv_loop_t* loop,
+                       uv_fs_event_t* handle,
+                       uv__cf_loop_signal_type_t type) {
+  uv__cf_loop_signal_t* item;
+  uv__cf_loop_state_t* state;
+
+  item = uv__malloc(sizeof(*item));
+  if (item == NULL)
+    return -ENOMEM;
+
+  item->handle = handle;
+  item->type = type;
+
+  uv_mutex_lock(&loop->cf_mutex);
+  QUEUE_INSERT_TAIL(&loop->cf_signals, &item->member);
+  uv_mutex_unlock(&loop->cf_mutex);
+
+  state = loop->cf_state;
+  assert(state != NULL);
+  pCFRunLoopSourceSignal(state->signal_source);
+  pCFRunLoopWakeUp(state->loop);
+
+  return 0;
+}
+
+
+/* Runs in UV loop to initialize handle */
+int uv__fsevents_init(uv_fs_event_t* handle) {
+  int err;
+  uv__cf_loop_state_t* state;
+
+  err = uv__fsevents_loop_init(handle->loop);
+  if (err)
+    return err;
+
+  /* Get absolute path to file */
+  handle->realpath = realpath(handle->path, NULL);
+  if (handle->realpath == NULL)
+    return -errno;
+  handle->realpath_len = strlen(handle->realpath);
+
+  /* Initialize event queue */
+  QUEUE_INIT(&handle->cf_events);
+  handle->cf_error = 0;
+
+  /*
+   * Events will arrive on another thread.
+   * Initialize the callback that hands them back to the event loop's thread.
+   */
+  handle->cf_cb = uv__malloc(sizeof(*handle->cf_cb));
+  if (handle->cf_cb == NULL) {
+    err = -ENOMEM;
+    goto fail_cf_cb_malloc;
+  }
+
+  handle->cf_cb->data = handle;
+  uv_async_init(handle->loop, handle->cf_cb, uv__fsevents_cb);
+  handle->cf_cb->flags |= UV__HANDLE_INTERNAL;
+  uv_unref((uv_handle_t*) handle->cf_cb);
+
+  err = uv_mutex_init(&handle->cf_mutex);
+  if (err)
+    goto fail_cf_mutex_init;
+
+  /* Insert handle into the list */
+  state = handle->loop->cf_state;
+  uv_mutex_lock(&state->fsevent_mutex);
+  QUEUE_INSERT_TAIL(&state->fsevent_handles, &handle->cf_member);
+  state->fsevent_handle_count++;
+  state->fsevent_need_reschedule = 1;
+  uv_mutex_unlock(&state->fsevent_mutex);
+
+  /* Reschedule FSEventStream */
+  assert(handle != NULL);
+  err = uv__cf_loop_signal(handle->loop, handle, kUVCFLoopSignalRegular);
+  if (err)
+    goto fail_loop_signal;
+
+  return 0;
+
+fail_loop_signal:
+  uv_mutex_destroy(&handle->cf_mutex);
+
+fail_cf_mutex_init:
+  uv__free(handle->cf_cb);
+  handle->cf_cb = NULL;
+
+fail_cf_cb_malloc:
+  uv__free(handle->realpath);
+  handle->realpath = NULL;
+  handle->realpath_len = 0;
+
+  return err;
+}
+
+
+/* Runs in UV loop to de-initialize handle */
+int uv__fsevents_close(uv_fs_event_t* handle) {
+  int err;
+  uv__cf_loop_state_t* state;
+
+  if (handle->cf_cb == NULL)
+    return -EINVAL;
+
+  /* Remove handle from the list */
+  state = handle->loop->cf_state;
+  uv_mutex_lock(&state->fsevent_mutex);
+  QUEUE_REMOVE(&handle->cf_member);
+  state->fsevent_handle_count--;
+  state->fsevent_need_reschedule = 1;
+  uv_mutex_unlock(&state->fsevent_mutex);
+
+  /* Reschedule FSEventStream */
+  assert(handle != NULL);
+  err = uv__cf_loop_signal(handle->loop, handle, kUVCFLoopSignalClosing);
+  if (err)
+    return -err;
+
+  /* Wait for deinitialization */
+  uv_sem_wait(&state->fsevent_sem);
+
+  uv_close((uv_handle_t*) handle->cf_cb, (uv_close_cb) uv__free);
+  handle->cf_cb = NULL;
+
+  /* Free data in queue */
+  UV__FSEVENTS_PROCESS(handle, {
+    /* NOP */
+  });
+
+  uv_mutex_destroy(&handle->cf_mutex);
+  uv__free(handle->realpath);
+  handle->realpath = NULL;
+  handle->realpath_len = 0;
+
+  return 0;
+}
+
+#endif /* TARGET_OS_IPHONE */
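
This file is only the CoreFoundation/FSEvents backend for macOS; the portable uv_fs_event_* wrappers that end up calling uv__fsevents_init() and uv__fsevents_close() live elsewhere in the library (the kqueue backend and public headers). A hedged sketch of how a watcher built on this backend is typically driven; uv_fs_event_init(), uv_fs_event_start() and uv_run() belong to libuv's public API, not to this file:

    #include <stdio.h>
    #include "uv.h"

    /* Invoked on the loop thread by uv__fsevents_cb() above once events
     * have been handed over from the CF thread. */
    static void on_change(uv_fs_event_t* handle,
                          const char* filename,
                          int events,
                          int status) {
      (void) handle;
      if (status < 0)
        return;
      printf("%s: %s%s\n",
             filename != NULL ? filename : "(watched root)",
             (events & UV_RENAME) ? "rename " : "",
             (events & UV_CHANGE) ? "change" : "");
    }

    int main(void) {
      uv_fs_event_t watcher;
      uv_fs_event_init(uv_default_loop(), &watcher);
      /* Pass UV_FS_EVENT_RECURSIVE to also receive events from
       * subdirectories (see the filtering in uv__fsevents_event_cb()). */
      uv_fs_event_start(&watcher, on_change, ".", 0);
      return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
    }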

+ 202 - 0
Utilities/cmlibuv/src/unix/getaddrinfo.c

@@ -0,0 +1,202 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/* Expose glibc-specific EAI_* error codes. Needs to be defined before we
+ * include any headers.
+ */
+#ifndef _GNU_SOURCE
+# define _GNU_SOURCE
+#endif
+
+#include "uv.h"
+#include "internal.h"
+
+#include <errno.h>
+#include <stddef.h> /* NULL */
+#include <stdlib.h>
+#include <string.h>
+
+/* EAI_* constants. */
+#include <netdb.h>
+
+
+int uv__getaddrinfo_translate_error(int sys_err) {
+  switch (sys_err) {
+  case 0: return 0;
+#if defined(EAI_ADDRFAMILY)
+  case EAI_ADDRFAMILY: return UV_EAI_ADDRFAMILY;
+#endif
+#if defined(EAI_AGAIN)
+  case EAI_AGAIN: return UV_EAI_AGAIN;
+#endif
+#if defined(EAI_BADFLAGS)
+  case EAI_BADFLAGS: return UV_EAI_BADFLAGS;
+#endif
+#if defined(EAI_BADHINTS)
+  case EAI_BADHINTS: return UV_EAI_BADHINTS;
+#endif
+#if defined(EAI_CANCELED)
+  case EAI_CANCELED: return UV_EAI_CANCELED;
+#endif
+#if defined(EAI_FAIL)
+  case EAI_FAIL: return UV_EAI_FAIL;
+#endif
+#if defined(EAI_FAMILY)
+  case EAI_FAMILY: return UV_EAI_FAMILY;
+#endif
+#if defined(EAI_MEMORY)
+  case EAI_MEMORY: return UV_EAI_MEMORY;
+#endif
+#if defined(EAI_NODATA)
+  case EAI_NODATA: return UV_EAI_NODATA;
+#endif
+#if defined(EAI_NONAME)
+# if !defined(EAI_NODATA) || EAI_NODATA != EAI_NONAME
+  case EAI_NONAME: return UV_EAI_NONAME;
+# endif
+#endif
+#if defined(EAI_OVERFLOW)
+  case EAI_OVERFLOW: return UV_EAI_OVERFLOW;
+#endif
+#if defined(EAI_PROTOCOL)
+  case EAI_PROTOCOL: return UV_EAI_PROTOCOL;
+#endif
+#if defined(EAI_SERVICE)
+  case EAI_SERVICE: return UV_EAI_SERVICE;
+#endif
+#if defined(EAI_SOCKTYPE)
+  case EAI_SOCKTYPE: return UV_EAI_SOCKTYPE;
+#endif
+#if defined(EAI_SYSTEM)
+  case EAI_SYSTEM: return -errno;
+#endif
+  }
+  assert(!"unknown EAI_* error code");
+  abort();
+  return 0;  /* Pacify compiler. */
+}
+
+
+static void uv__getaddrinfo_work(struct uv__work* w) {
+  uv_getaddrinfo_t* req;
+  int err;
+
+  req = container_of(w, uv_getaddrinfo_t, work_req);
+  err = getaddrinfo(req->hostname, req->service, req->hints, &req->addrinfo);
+  req->retcode = uv__getaddrinfo_translate_error(err);
+}
+
+
+static void uv__getaddrinfo_done(struct uv__work* w, int status) {
+  uv_getaddrinfo_t* req;
+
+  req = container_of(w, uv_getaddrinfo_t, work_req);
+  uv__req_unregister(req->loop, req);
+
+  /* See initialization in uv_getaddrinfo(). */
+  if (req->hints)
+    uv__free(req->hints);
+  else if (req->service)
+    uv__free(req->service);
+  else if (req->hostname)
+    uv__free(req->hostname);
+  else
+    assert(0);
+
+  req->hints = NULL;
+  req->service = NULL;
+  req->hostname = NULL;
+
+  if (status == -ECANCELED) {
+    assert(req->retcode == 0);
+    req->retcode = UV_EAI_CANCELED;
+  }
+
+  if (req->cb)
+    req->cb(req, req->retcode, req->addrinfo);
+}
+
+
+int uv_getaddrinfo(uv_loop_t* loop,
+                   uv_getaddrinfo_t* req,
+                   uv_getaddrinfo_cb cb,
+                   const char* hostname,
+                   const char* service,
+                   const struct addrinfo* hints) {
+  size_t hostname_len;
+  size_t service_len;
+  size_t hints_len;
+  size_t len;
+  char* buf;
+
+  if (req == NULL || (hostname == NULL && service == NULL))
+    return -EINVAL;
+
+  hostname_len = hostname ? strlen(hostname) + 1 : 0;
+  service_len = service ? strlen(service) + 1 : 0;
+  hints_len = hints ? sizeof(*hints) : 0;
+  buf = uv__malloc(hostname_len + service_len + hints_len);
+
+  if (buf == NULL)
+    return -ENOMEM;
+
+  uv__req_init(loop, req, UV_GETADDRINFO);
+  req->loop = loop;
+  req->cb = cb;
+  req->addrinfo = NULL;
+  req->hints = NULL;
+  req->service = NULL;
+  req->hostname = NULL;
+  req->retcode = 0;
+
+  /* Order matters; see uv__getaddrinfo_done(). */
+  len = 0;
+
+  if (hints) {
+    req->hints = memcpy(buf + len, hints, sizeof(*hints));
+    len += sizeof(*hints);
+  }
+
+  if (service) {
+    req->service = memcpy(buf + len, service, service_len);
+    len += service_len;
+  }
+
+  if (hostname)
+    req->hostname = memcpy(buf + len, hostname, hostname_len);
+
+  if (cb) {
+    uv__work_submit(loop,
+                    &req->work_req,
+                    uv__getaddrinfo_work,
+                    uv__getaddrinfo_done);
+    return 0;
+  } else {
+    uv__getaddrinfo_work(&req->work_req);
+    uv__getaddrinfo_done(&req->work_req, 0);
+    return req->retcode;
+  }
+}
+
+
+void uv_freeaddrinfo(struct addrinfo* ai) {
+  if (ai)
+    freeaddrinfo(ai);
+}
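
As the else branch at the end of uv_getaddrinfo() shows, a NULL callback makes the lookup run synchronously on the calling thread and return the translated EAI_* code directly, with the result left in req.addrinfo. A minimal sketch of that usage; uv_strerror() and uv_default_loop() come from libuv's public API rather than this file, and the host/service values are illustrative:

    #include <stdio.h>
    #include <string.h>
    #include "uv.h"

    int main(void) {
      uv_getaddrinfo_t req;
      struct addrinfo hints;
      struct addrinfo* ai;
      int r;

      memset(&hints, 0, sizeof(hints));
      hints.ai_family = AF_UNSPEC;
      hints.ai_socktype = SOCK_STREAM;

      /* cb == NULL: uv__getaddrinfo_work()/_done() run inline and the
       * translated error code is returned directly. */
      r = uv_getaddrinfo(uv_default_loop(), &req, NULL,
                         "localhost", "80", &hints);
      if (r != 0) {
        fprintf(stderr, "getaddrinfo: %s\n", uv_strerror(r));
        return 1;
      }

      for (ai = req.addrinfo; ai != NULL; ai = ai->ai_next)
        printf("family=%d socktype=%d\n", ai->ai_family, ai->ai_socktype);

      uv_freeaddrinfo(req.addrinfo);
      return 0;
    }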

+ 120 - 0
Utilities/cmlibuv/src/unix/getnameinfo.c

@@ -0,0 +1,120 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+*
+* Permission is hereby granted, free of charge, to any person obtaining a copy
+* of this software and associated documentation files (the "Software"), to
+* deal in the Software without restriction, including without limitation the
+* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+* sell copies of the Software, and to permit persons to whom the Software is
+* furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+* IN THE SOFTWARE.
+*/
+
+#include "uv.h"
+#include "internal.h"
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+
+static void uv__getnameinfo_work(struct uv__work* w) {
+  uv_getnameinfo_t* req;
+  int err;
+  socklen_t salen;
+
+  req = container_of(w, uv_getnameinfo_t, work_req);
+
+  if (req->storage.ss_family == AF_INET)
+    salen = sizeof(struct sockaddr_in);
+  else if (req->storage.ss_family == AF_INET6)
+    salen = sizeof(struct sockaddr_in6);
+  else
+    abort();
+
+  err = getnameinfo((struct sockaddr*) &req->storage,
+                    salen,
+                    req->host,
+                    sizeof(req->host),
+                    req->service,
+                    sizeof(req->service),
+                    req->flags);
+  req->retcode = uv__getaddrinfo_translate_error(err);
+}
+
+static void uv__getnameinfo_done(struct uv__work* w, int status) {
+  uv_getnameinfo_t* req;
+  char* host;
+  char* service;
+
+  req = container_of(w, uv_getnameinfo_t, work_req);
+  uv__req_unregister(req->loop, req);
+  host = service = NULL;
+
+  if (status == -ECANCELED) {
+    assert(req->retcode == 0);
+    req->retcode = UV_EAI_CANCELED;
+  } else if (req->retcode == 0) {
+    host = req->host;
+    service = req->service;
+  }
+
+  if (req->getnameinfo_cb)
+    req->getnameinfo_cb(req, req->retcode, host, service);
+}
+
+/*
+* Entry point for getnameinfo;
+* returns 0 if a callback will be made,
+* or an error code if validation fails.
+*/
+int uv_getnameinfo(uv_loop_t* loop,
+                   uv_getnameinfo_t* req,
+                   uv_getnameinfo_cb getnameinfo_cb,
+                   const struct sockaddr* addr,
+                   int flags) {
+  if (req == NULL || addr == NULL)
+    return UV_EINVAL;
+
+  if (addr->sa_family == AF_INET) {
+    memcpy(&req->storage,
+           addr,
+           sizeof(struct sockaddr_in));
+  } else if (addr->sa_family == AF_INET6) {
+    memcpy(&req->storage,
+           addr,
+           sizeof(struct sockaddr_in6));
+  } else {
+    return UV_EINVAL;
+  }
+
+  uv__req_init(loop, (uv_req_t*)req, UV_GETNAMEINFO);
+
+  req->getnameinfo_cb = getnameinfo_cb;
+  req->flags = flags;
+  req->type = UV_GETNAMEINFO;
+  req->loop = loop;
+  req->retcode = 0;
+
+  if (getnameinfo_cb) {
+    uv__work_submit(loop,
+                    &req->work_req,
+                    uv__getnameinfo_work,
+                    uv__getnameinfo_done);
+    return 0;
+  } else {
+    uv__getnameinfo_work(&req->work_req);
+    uv__getnameinfo_done(&req->work_req, 0);
+    return req->retcode;
+  }
+}
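
uv_getnameinfo() follows the same pattern: with a NULL callback the reverse lookup runs on the calling thread, the translated code is returned directly, and the results are left in req.host and req.service. A minimal sketch; uv_ip4_addr(), uv_strerror() and the address used here are illustrative and belong to libuv's public API rather than this file:

    #include <stdio.h>
    #include "uv.h"

    int main(void) {
      struct sockaddr_in addr;
      uv_getnameinfo_t req;
      int r;

      uv_ip4_addr("127.0.0.1", 80, &addr);

      /* getnameinfo_cb == NULL: the work and done callbacks run inline. */
      r = uv_getnameinfo(uv_default_loop(), &req, NULL,
                         (const struct sockaddr*) &addr, 0);
      if (r != 0) {
        fprintf(stderr, "getnameinfo: %s\n", uv_strerror(r));
        return 1;
      }

      printf("host=%s service=%s\n", req.host, req.service);
      return 0;
    }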

+ 322 - 0
Utilities/cmlibuv/src/unix/internal.h

@@ -0,0 +1,322 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_UNIX_INTERNAL_H_
+#define UV_UNIX_INTERNAL_H_
+
+#include "uv-common.h"
+
+#include <assert.h>
+#include <stdlib.h> /* abort */
+#include <string.h> /* strrchr */
+#include <fcntl.h>  /* O_CLOEXEC, maybe */
+#include <stdio.h>
+
+#if defined(__STRICT_ANSI__)
+# define inline __inline
+#endif
+
+#if defined(__linux__)
+# include "linux-syscalls.h"
+#endif /* __linux__ */
+
+#if defined(__sun)
+# include <sys/port.h>
+# include <port.h>
+#endif /* __sun */
+
+#if defined(_AIX)
+# define reqevents events
+# define rtnevents revents
+# include <sys/poll.h>
+#else
+# include <poll.h>
+#endif /* _AIX */
+
+#if defined(__ANDROID__)
+int uv__pthread_sigmask(int how, const sigset_t* set, sigset_t* oset);
+# ifdef pthread_sigmask
+# undef pthread_sigmask
+# endif
+# define pthread_sigmask(how, set, oldset) uv__pthread_sigmask(how, set, oldset)
+#endif
+
+#define ACCESS_ONCE(type, var)                                                \
+  (*(volatile type*) &(var))
+
+#define ROUND_UP(a, b)                                                        \
+  ((a) % (b) ? ((a) + (b)) - ((a) % (b)) : (a))
+
+#define UNREACHABLE()                                                         \
+  do {                                                                        \
+    assert(0 && "unreachable code");                                          \
+    abort();                                                                  \
+  }                                                                           \
+  while (0)
+
+#define SAVE_ERRNO(block)                                                     \
+  do {                                                                        \
+    int _saved_errno = errno;                                                 \
+    do { block; } while (0);                                                  \
+    errno = _saved_errno;                                                     \
+  }                                                                           \
+  while (0)
+
+/* The __clang__ and __INTEL_COMPILER checks are superfluous because they
+ * define __GNUC__. They are here to convey to you, dear reader, that these
+ * macros are enabled when compiling with clang or icc.
+ */
+#if defined(__clang__) ||                                                     \
+    defined(__GNUC__) ||                                                      \
+    defined(__INTEL_COMPILER) ||                                              \
+    defined(__SUNPRO_C)
+# define UV_DESTRUCTOR(declaration) __attribute__((destructor)) declaration
+# define UV_UNUSED(declaration)     __attribute__((unused)) declaration
+#else
+# define UV_DESTRUCTOR(declaration) declaration
+# define UV_UNUSED(declaration)     declaration
+#endif
+
+/* Leans on the fact that, on Linux, POLLRDHUP == EPOLLRDHUP. */
+#ifdef POLLRDHUP
+# define UV__POLLRDHUP POLLRDHUP
+#else
+# define UV__POLLRDHUP 0x2000
+#endif
+
+#if !defined(O_CLOEXEC) && defined(__FreeBSD__)
+/*
+ * It may be that we are just missing `__POSIX_VISIBLE >= 200809`.
+ * Try using a fixed constant value and give up if it doesn't work.
+ */
+# define O_CLOEXEC 0x00100000
+#endif
+
+typedef struct uv__stream_queued_fds_s uv__stream_queued_fds_t;
+
+/* handle flags */
+enum {
+  UV_CLOSING              = 0x01,   /* uv_close() called but not finished. */
+  UV_CLOSED               = 0x02,   /* close(2) finished. */
+  UV_STREAM_READING       = 0x04,   /* uv_read_start() called. */
+  UV_STREAM_SHUTTING      = 0x08,   /* uv_shutdown() called but not complete. */
+  UV_STREAM_SHUT          = 0x10,   /* Write side closed. */
+  UV_STREAM_READABLE      = 0x20,   /* The stream is readable */
+  UV_STREAM_WRITABLE      = 0x40,   /* The stream is writable */
+  UV_STREAM_BLOCKING      = 0x80,   /* Synchronous writes. */
+  UV_STREAM_READ_PARTIAL  = 0x100,  /* read(2) read less than requested. */
+  UV_STREAM_READ_EOF      = 0x200,  /* read(2) read EOF. */
+  UV_TCP_NODELAY          = 0x400,  /* Disable Nagle. */
+  UV_TCP_KEEPALIVE        = 0x800,  /* Turn on keep-alive. */
+  UV_TCP_SINGLE_ACCEPT    = 0x1000, /* Only accept() when idle. */
+  UV_HANDLE_IPV6          = 0x10000, /* Handle is bound to a IPv6 socket. */
+  UV_UDP_PROCESSING       = 0x20000, /* Handle is running the send callback queue. */
+  UV_HANDLE_BOUND         = 0x40000  /* Handle is bound to an address and port */
+};
+
+/* loop flags */
+enum {
+  UV_LOOP_BLOCK_SIGPROF = 1
+};
+
+typedef enum {
+  UV_CLOCK_PRECISE = 0,  /* Use the highest resolution clock available. */
+  UV_CLOCK_FAST = 1      /* Use the fastest clock with <= 1ms granularity. */
+} uv_clocktype_t;
+
+struct uv__stream_queued_fds_s {
+  unsigned int size;
+  unsigned int offset;
+  int fds[1];
+};
+
+
+#if defined(_AIX) || \
+    defined(__APPLE__) || \
+    defined(__DragonFly__) || \
+    defined(__FreeBSD__) || \
+    defined(__FreeBSD_kernel__) || \
+    defined(__linux__)
+#define uv__cloexec uv__cloexec_ioctl
+#define uv__nonblock uv__nonblock_ioctl
+#else
+#define uv__cloexec uv__cloexec_fcntl
+#define uv__nonblock uv__nonblock_fcntl
+#endif
+
+/* core */
+int uv__cloexec_ioctl(int fd, int set);
+int uv__cloexec_fcntl(int fd, int set);
+int uv__nonblock_ioctl(int fd, int set);
+int uv__nonblock_fcntl(int fd, int set);
+int uv__close(int fd);
+int uv__close_nocheckstdio(int fd);
+int uv__socket(int domain, int type, int protocol);
+int uv__dup(int fd);
+ssize_t uv__recvmsg(int fd, struct msghdr *msg, int flags);
+void uv__make_close_pending(uv_handle_t* handle);
+int uv__getiovmax(void);
+
+void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd);
+void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events);
+void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events);
+void uv__io_close(uv_loop_t* loop, uv__io_t* w);
+void uv__io_feed(uv_loop_t* loop, uv__io_t* w);
+int uv__io_active(const uv__io_t* w, unsigned int events);
+int uv__io_check_fd(uv_loop_t* loop, int fd);
+void uv__io_poll(uv_loop_t* loop, int timeout); /* in milliseconds or -1 */
+
+/* async */
+void uv__async_send(struct uv__async* wa);
+void uv__async_init(struct uv__async* wa);
+int uv__async_start(uv_loop_t* loop, struct uv__async* wa, uv__async_cb cb);
+void uv__async_stop(uv_loop_t* loop, struct uv__async* wa);
+
+/* loop */
+void uv__run_idle(uv_loop_t* loop);
+void uv__run_check(uv_loop_t* loop);
+void uv__run_prepare(uv_loop_t* loop);
+
+/* stream */
+void uv__stream_init(uv_loop_t* loop, uv_stream_t* stream,
+    uv_handle_type type);
+int uv__stream_open(uv_stream_t*, int fd, int flags);
+void uv__stream_destroy(uv_stream_t* stream);
+#if defined(__APPLE__)
+int uv__stream_try_select(uv_stream_t* stream, int* fd);
+#endif /* defined(__APPLE__) */
+void uv__server_io(uv_loop_t* loop, uv__io_t* w, unsigned int events);
+int uv__accept(int sockfd);
+int uv__dup2_cloexec(int oldfd, int newfd);
+int uv__open_cloexec(const char* path, int flags);
+
+/* tcp */
+int uv_tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb);
+int uv__tcp_nodelay(int fd, int on);
+int uv__tcp_keepalive(int fd, int on, unsigned int delay);
+
+/* pipe */
+int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb);
+
+/* timer */
+void uv__run_timers(uv_loop_t* loop);
+int uv__next_timeout(const uv_loop_t* loop);
+
+/* signal */
+void uv__signal_close(uv_signal_t* handle);
+void uv__signal_global_once_init(void);
+void uv__signal_loop_cleanup(uv_loop_t* loop);
+
+/* platform specific */
+uint64_t uv__hrtime(uv_clocktype_t type);
+int uv__kqueue_init(uv_loop_t* loop);
+int uv__platform_loop_init(uv_loop_t* loop);
+void uv__platform_loop_delete(uv_loop_t* loop);
+void uv__platform_invalidate_fd(uv_loop_t* loop, int fd);
+
+/* various */
+void uv__async_close(uv_async_t* handle);
+void uv__check_close(uv_check_t* handle);
+void uv__fs_event_close(uv_fs_event_t* handle);
+void uv__idle_close(uv_idle_t* handle);
+void uv__pipe_close(uv_pipe_t* handle);
+void uv__poll_close(uv_poll_t* handle);
+void uv__prepare_close(uv_prepare_t* handle);
+void uv__process_close(uv_process_t* handle);
+void uv__stream_close(uv_stream_t* handle);
+void uv__tcp_close(uv_tcp_t* handle);
+void uv__timer_close(uv_timer_t* handle);
+void uv__udp_close(uv_udp_t* handle);
+void uv__udp_finish_close(uv_udp_t* handle);
+uv_handle_type uv__handle_type(int fd);
+FILE* uv__open_file(const char* path);
+int uv__getpwuid_r(uv_passwd_t* pwd);
+
+
+#if defined(__APPLE__)
+int uv___stream_fd(const uv_stream_t* handle);
+#define uv__stream_fd(handle) (uv___stream_fd((const uv_stream_t*) (handle)))
+#else
+#define uv__stream_fd(handle) ((handle)->io_watcher.fd)
+#endif /* defined(__APPLE__) */
+
+#ifdef UV__O_NONBLOCK
+# define UV__F_NONBLOCK UV__O_NONBLOCK
+#else
+# define UV__F_NONBLOCK 1
+#endif
+
+int uv__make_socketpair(int fds[2], int flags);
+int uv__make_pipe(int fds[2], int flags);
+
+#if defined(__APPLE__)
+#include <AvailabilityMacros.h>
+
+int uv__fsevents_init(uv_fs_event_t* handle);
+int uv__fsevents_close(uv_fs_event_t* handle);
+void uv__fsevents_loop_delete(uv_loop_t* loop);
+
+/* OSX < 10.7 has no file events, polyfill them */
+#ifndef MAC_OS_X_VERSION_10_7
+
+static const int kFSEventStreamCreateFlagFileEvents = 0x00000010;
+static const int kFSEventStreamEventFlagItemCreated = 0x00000100;
+static const int kFSEventStreamEventFlagItemRemoved = 0x00000200;
+static const int kFSEventStreamEventFlagItemInodeMetaMod = 0x00000400;
+static const int kFSEventStreamEventFlagItemRenamed = 0x00000800;
+static const int kFSEventStreamEventFlagItemModified = 0x00001000;
+static const int kFSEventStreamEventFlagItemFinderInfoMod = 0x00002000;
+static const int kFSEventStreamEventFlagItemChangeOwner = 0x00004000;
+static const int kFSEventStreamEventFlagItemXattrMod = 0x00008000;
+static const int kFSEventStreamEventFlagItemIsFile = 0x00010000;
+static const int kFSEventStreamEventFlagItemIsDir = 0x00020000;
+static const int kFSEventStreamEventFlagItemIsSymlink = 0x00040000;
+
+#endif /* MAC_OS_X_VERSION_10_7 */
+
+#endif /* defined(__APPLE__) */
+
+UV_UNUSED(static void uv__req_init(uv_loop_t* loop,
+                                   uv_req_t* req,
+                                   uv_req_type type)) {
+  req->type = type;
+  uv__req_register(loop, req);
+}
+#define uv__req_init(loop, req, type) \
+  uv__req_init((loop), (uv_req_t*)(req), (type))
+
+UV_UNUSED(static void uv__update_time(uv_loop_t* loop)) {
+  /* Use a fast time source if available.  We only need millisecond precision.
+   */
+  loop->time = uv__hrtime(UV_CLOCK_FAST) / 1000000;
+}
+
+UV_UNUSED(static char* uv__basename_r(const char* path)) {
+  char* s;
+
+  s = strrchr(path, '/');
+  if (s == NULL)
+    return (char*) path;
+
+  return s + 1;
+}
+
+#endif /* UV_UNIX_INTERNAL_H_ */
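
An aside on the uv__req_init definition above (not part of the imported sources): it is a static helper immediately shadowed by a function-like macro of the same name that supplies the (uv_req_t*) cast, so callers can pass any concrete request type without casting at every call site. A standalone illustration of the same pattern, with hypothetical names:

    #include <stdio.h>

    struct base    { int type; };
    struct request { struct base base; int data; };  /* base is the first member */

    static void obj_init(struct base* b, int type) { b->type = type; }

    /* Shadow the helper with a macro that supplies the cast; the macro name is
     * not re-expanded inside its own replacement, so this calls the function
     * above. */
    #define obj_init(obj, type) obj_init((struct base*) (obj), (type))

    int main(void) {
      struct request r;
      obj_init(&r, 7);            /* expands to obj_init((struct base*) (&r), (7)) */
      printf("%d\n", r.base.type); /* prints 7 */
      return 0;
    }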

+ 463 - 0
Utilities/cmlibuv/src/unix/kqueue.c

@@ -0,0 +1,463 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include <sys/sysctl.h>
+#include <sys/types.h>
+#include <sys/event.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <time.h>
+
+static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags);
+
+
+int uv__kqueue_init(uv_loop_t* loop) {
+  loop->backend_fd = kqueue();
+  if (loop->backend_fd == -1)
+    return -errno;
+
+  uv__cloexec(loop->backend_fd, 1);
+
+  return 0;
+}
+
+
+int uv__io_check_fd(uv_loop_t* loop, int fd) {
+  struct kevent ev;
+  int rc;
+
+  rc = 0;
+  EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
+  if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
+    rc = -errno;
+
+  EV_SET(&ev, fd, EVFILT_READ, EV_DELETE, 0, 0, 0);
+  if (rc == 0)
+    if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
+      abort();
+
+  return rc;
+}
+
+
+void uv__io_poll(uv_loop_t* loop, int timeout) {
+  struct kevent events[1024];
+  struct kevent* ev;
+  struct timespec spec;
+  unsigned int nevents;
+  unsigned int revents;
+  QUEUE* q;
+  uv__io_t* w;
+  sigset_t* pset;
+  sigset_t set;
+  uint64_t base;
+  uint64_t diff;
+  int have_signals;
+  int filter;
+  int fflags;
+  int count;
+  int nfds;
+  int fd;
+  int op;
+  int i;
+
+  if (loop->nfds == 0) {
+    assert(QUEUE_EMPTY(&loop->watcher_queue));
+    return;
+  }
+
+  nevents = 0;
+
+  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
+    q = QUEUE_HEAD(&loop->watcher_queue);
+    QUEUE_REMOVE(q);
+    QUEUE_INIT(q);
+
+    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
+    assert(w->pevents != 0);
+    assert(w->fd >= 0);
+    assert(w->fd < (int) loop->nwatchers);
+
+    if ((w->events & POLLIN) == 0 && (w->pevents & POLLIN) != 0) {
+      filter = EVFILT_READ;
+      fflags = 0;
+      op = EV_ADD;
+
+      if (w->cb == uv__fs_event) {
+        filter = EVFILT_VNODE;
+        fflags = NOTE_ATTRIB | NOTE_WRITE  | NOTE_RENAME
+               | NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;
+        op = EV_ADD | EV_ONESHOT; /* Stop the event from firing repeatedly. */
+      }
+
+      EV_SET(events + nevents, w->fd, filter, op, fflags, 0, 0);
+
+      if (++nevents == ARRAY_SIZE(events)) {
+        if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
+          abort();
+        nevents = 0;
+      }
+    }
+
+    if ((w->events & POLLOUT) == 0 && (w->pevents & POLLOUT) != 0) {
+      EV_SET(events + nevents, w->fd, EVFILT_WRITE, EV_ADD, 0, 0, 0);
+
+      if (++nevents == ARRAY_SIZE(events)) {
+        if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
+          abort();
+        nevents = 0;
+      }
+    }
+
+    w->events = w->pevents;
+  }
+
+  pset = NULL;
+  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
+    pset = &set;
+    sigemptyset(pset);
+    sigaddset(pset, SIGPROF);
+  }
+
+  assert(timeout >= -1);
+  base = loop->time;
+  count = 48; /* Benchmarks suggest this gives the best throughput. */
+
+  for (;; nevents = 0) {
+    if (timeout != -1) {
+      spec.tv_sec = timeout / 1000;
+      spec.tv_nsec = (timeout % 1000) * 1000000;
+    }
+
+    if (pset != NULL)
+      pthread_sigmask(SIG_BLOCK, pset, NULL);
+
+    nfds = kevent(loop->backend_fd,
+                  events,
+                  nevents,
+                  events,
+                  ARRAY_SIZE(events),
+                  timeout == -1 ? NULL : &spec);
+
+    if (pset != NULL)
+      pthread_sigmask(SIG_UNBLOCK, pset, NULL);
+
+    /* Update loop->time unconditionally. It's tempting to skip the update when
+     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
+     * operating system didn't reschedule our process while in the syscall.
+     */
+    SAVE_ERRNO(uv__update_time(loop));
+
+    if (nfds == 0) {
+      assert(timeout != -1);
+      return;
+    }
+
+    if (nfds == -1) {
+      if (errno != EINTR)
+        abort();
+
+      if (timeout == 0)
+        return;
+
+      if (timeout == -1)
+        continue;
+
+      /* Interrupted by a signal. Update timeout and poll again. */
+      goto update_timeout;
+    }
+
+    have_signals = 0;
+    nevents = 0;
+
+    assert(loop->watchers != NULL);
+    loop->watchers[loop->nwatchers] = (void*) events;
+    loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
+    for (i = 0; i < nfds; i++) {
+      ev = events + i;
+      fd = ev->ident;
+      /* Skip invalidated events, see uv__platform_invalidate_fd */
+      if (fd == -1)
+        continue;
+      w = loop->watchers[fd];
+
+      if (w == NULL) {
+        /* File descriptor that we've stopped watching, disarm it. */
+        /* TODO batch up */
+        struct kevent events[1];
+
+        EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
+        if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
+          if (errno != EBADF && errno != ENOENT)
+            abort();
+
+        continue;
+      }
+
+      if (ev->filter == EVFILT_VNODE) {
+        assert(w->events == POLLIN);
+        assert(w->pevents == POLLIN);
+        w->cb(loop, w, ev->fflags); /* XXX always uv__fs_event() */
+        nevents++;
+        continue;
+      }
+
+      revents = 0;
+
+      if (ev->filter == EVFILT_READ) {
+        if (w->pevents & POLLIN) {
+          revents |= POLLIN;
+          w->rcount = ev->data;
+        } else {
+          /* TODO batch up */
+          struct kevent events[1];
+          EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
+          if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
+            if (errno != ENOENT)
+              abort();
+        }
+      }
+
+      if (ev->filter == EVFILT_WRITE) {
+        if (w->pevents & POLLOUT) {
+          revents |= POLLOUT;
+          w->wcount = ev->data;
+        } else {
+          /* TODO batch up */
+          struct kevent events[1];
+          EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
+          if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
+            if (errno != ENOENT)
+              abort();
+        }
+      }
+
+      if (ev->flags & EV_ERROR)
+        revents |= POLLERR;
+
+      if ((ev->flags & EV_EOF) && (w->pevents & UV__POLLRDHUP))
+        revents |= UV__POLLRDHUP;
+
+      if (revents == 0)
+        continue;
+
+      /* Run signal watchers last.  This also affects child process watchers
+       * because those are implemented in terms of signal watchers.
+       */
+      if (w == &loop->signal_io_watcher)
+        have_signals = 1;
+      else
+        w->cb(loop, w, revents);
+
+      nevents++;
+    }
+
+    if (have_signals != 0)
+      loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
+
+    loop->watchers[loop->nwatchers] = NULL;
+    loop->watchers[loop->nwatchers + 1] = NULL;
+
+    if (have_signals != 0)
+      return;  /* Event loop should cycle now so don't poll again. */
+
+    if (nevents != 0) {
+      if (nfds == ARRAY_SIZE(events) && --count != 0) {
+        /* Poll for more events but don't block this time. */
+        timeout = 0;
+        continue;
+      }
+      return;
+    }
+
+    if (timeout == 0)
+      return;
+
+    if (timeout == -1)
+      continue;
+
+update_timeout:
+    assert(timeout > 0);
+
+    diff = loop->time - base;
+    if (diff >= (uint64_t) timeout)
+      return;
+
+    timeout -= diff;
+  }
+}
+
+
+void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
+  struct kevent* events;
+  uintptr_t i;
+  uintptr_t nfds;
+
+  assert(loop->watchers != NULL);
+
+  events = (struct kevent*) loop->watchers[loop->nwatchers];
+  nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
+  if (events == NULL)
+    return;
+
+  /* Invalidate events with same file descriptor */
+  for (i = 0; i < nfds; i++)
+    if ((int) events[i].ident == fd)
+      events[i].ident = -1;
+}
+
+
+static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags) {
+  uv_fs_event_t* handle;
+  struct kevent ev;
+  int events;
+  const char* path;
+#if defined(F_GETPATH)
+  /* MAXPATHLEN == PATH_MAX but the former is what XNU calls it internally. */
+  char pathbuf[MAXPATHLEN];
+#endif
+
+  handle = container_of(w, uv_fs_event_t, event_watcher);
+
+  if (fflags & (NOTE_ATTRIB | NOTE_EXTEND))
+    events = UV_CHANGE;
+  else
+    events = UV_RENAME;
+
+  path = NULL;
+#if defined(F_GETPATH)
+  /* Also works when the file has been unlinked from the file system. Passing
+   * in the path when the file has been deleted is arguably a little strange
+   * but it's consistent with what the inotify backend does.
+   */
+  if (fcntl(handle->event_watcher.fd, F_GETPATH, pathbuf) == 0)
+    path = uv__basename_r(pathbuf);
+#endif
+  handle->cb(handle, path, events, 0);
+
+  if (handle->event_watcher.fd == -1)
+    return;
+
+  /* Watcher operates in one-shot mode, re-arm it. */
+  fflags = NOTE_ATTRIB | NOTE_WRITE  | NOTE_RENAME
+         | NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;
+
+  EV_SET(&ev, w->fd, EVFILT_VNODE, EV_ADD | EV_ONESHOT, fflags, 0, 0);
+
+  if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
+    abort();
+}
+
+
+int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
+  uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
+  return 0;
+}
+
+
+int uv_fs_event_start(uv_fs_event_t* handle,
+                      uv_fs_event_cb cb,
+                      const char* path,
+                      unsigned int flags) {
+#if defined(__APPLE__)
+  struct stat statbuf;
+#endif /* defined(__APPLE__) */
+  int fd;
+
+  if (uv__is_active(handle))
+    return -EINVAL;
+
+  /* TODO open asynchronously - but how do we report back errors? */
+  fd = open(path, O_RDONLY);
+  if (fd == -1)
+    return -errno;
+
+  uv__handle_start(handle);
+  uv__io_init(&handle->event_watcher, uv__fs_event, fd);
+  handle->path = uv__strdup(path);
+  handle->cb = cb;
+
+#if defined(__APPLE__)
+  /* Nullify field to perform checks later */
+  handle->cf_cb = NULL;
+  handle->realpath = NULL;
+  handle->realpath_len = 0;
+  handle->cf_flags = flags;
+
+  if (fstat(fd, &statbuf))
+    goto fallback;
+  /* FSEvents works only with directories */
+  if (!(statbuf.st_mode & S_IFDIR))
+    goto fallback;
+
+  /* The fallback fd is no longer needed */
+  uv__close(fd);
+  handle->event_watcher.fd = -1;
+
+  return uv__fsevents_init(handle);
+
+fallback:
+#endif /* defined(__APPLE__) */
+
+  uv__io_start(handle->loop, &handle->event_watcher, POLLIN);
+
+  return 0;
+}
+
+
+int uv_fs_event_stop(uv_fs_event_t* handle) {
+  if (!uv__is_active(handle))
+    return 0;
+
+  uv__handle_stop(handle);
+
+#if defined(__APPLE__)
+  if (uv__fsevents_close(handle))
+#endif /* defined(__APPLE__) */
+  {
+    uv__io_close(handle->loop, &handle->event_watcher);
+  }
+
+  uv__free(handle->path);
+  handle->path = NULL;
+
+  if (handle->event_watcher.fd != -1) {
+    /* When FSEvents is used, we don't use the event_watcher's fd under certain
+     * conditions (see uv_fs_event_start). */
+    uv__close(handle->event_watcher.fd);
+    handle->event_watcher.fd = -1;
+  }
+
+  return 0;
+}
+
+
+void uv__fs_event_close(uv_fs_event_t* handle) {
+  uv_fs_event_stop(handle);
+}
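
For orientation (not part of the imported sources): uv_fs_event_init/start/stop above back the public file-watching API. A minimal caller-side sketch, watching the current directory; error handling is omitted for brevity.

    #include <uv.h>
    #include <stdio.h>

    static void on_change(uv_fs_event_t* handle, const char* filename,
                          int events, int status) {
      if (status < 0)
        return;
      if (events & UV_RENAME)
        printf("renamed: %s\n", filename ? filename : "(unknown)");
      if (events & UV_CHANGE)
        printf("changed: %s\n", filename ? filename : "(unknown)");
    }

    int main(void) {
      uv_fs_event_t handle;
      uv_fs_event_init(uv_default_loop(), &handle);
      uv_fs_event_start(&handle, on_change, ".", 0);  /* watch the current dir */
      return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
    }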

+ 985 - 0
Utilities/cmlibuv/src/unix/linux-core.c

@@ -0,0 +1,985 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/* We lean on the fact that POLL{IN,OUT,ERR,HUP} correspond with their
+ * EPOLL* counterparts.  We use the POLL* variants in this file because that
+ * is what libuv uses elsewhere and it avoids a dependency on <sys/epoll.h>.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+
+#include <net/if.h>
+#include <sys/param.h>
+#include <sys/prctl.h>
+#include <sys/sysinfo.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <time.h>
+
+#define HAVE_IFADDRS_H 1
+
+#ifdef __UCLIBC__
+# if __UCLIBC_MAJOR__ < 0 && __UCLIBC_MINOR__ < 9 && __UCLIBC_SUBLEVEL__ < 32
+#  undef HAVE_IFADDRS_H
+# endif
+#endif
+
+#ifdef HAVE_IFADDRS_H
+# if defined(__ANDROID__)
+#  include "android-ifaddrs.h"
+# else
+#  include <ifaddrs.h>
+# endif
+# include <sys/socket.h>
+# include <net/ethernet.h>
+# include <netpacket/packet.h>
+#endif /* HAVE_IFADDRS_H */
+
+/* Available from 2.6.32 onwards. */
+#ifndef CLOCK_MONOTONIC_COARSE
+# define CLOCK_MONOTONIC_COARSE 6
+#endif
+
+/* This is rather annoying: CLOCK_BOOTTIME lives in <linux/time.h> but we can't
+ * include that file because it conflicts with <time.h>. We'll just have to
+ * define it ourselves.
+ */
+#ifndef CLOCK_BOOTTIME
+# define CLOCK_BOOTTIME 7
+#endif
+
+static int read_models(unsigned int numcpus, uv_cpu_info_t* ci);
+static int read_times(FILE* statfile_fp,
+                      unsigned int numcpus,
+                      uv_cpu_info_t* ci);
+static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci);
+static unsigned long read_cpufreq(unsigned int cpunum);
+
+
+int uv__platform_loop_init(uv_loop_t* loop) {
+  int fd;
+
+  fd = uv__epoll_create1(UV__EPOLL_CLOEXEC);
+
+  /* epoll_create1() can fail either because it's not implemented (old kernel)
+   * or because it doesn't understand the EPOLL_CLOEXEC flag.
+   */
+  if (fd == -1 && (errno == ENOSYS || errno == EINVAL)) {
+    fd = uv__epoll_create(256);
+
+    if (fd != -1)
+      uv__cloexec(fd, 1);
+  }
+
+  loop->backend_fd = fd;
+  loop->inotify_fd = -1;
+  loop->inotify_watchers = NULL;
+
+  if (fd == -1)
+    return -errno;
+
+  return 0;
+}
+
+
+void uv__platform_loop_delete(uv_loop_t* loop) {
+  if (loop->inotify_fd == -1) return;
+  uv__io_stop(loop, &loop->inotify_read_watcher, POLLIN);
+  uv__close(loop->inotify_fd);
+  loop->inotify_fd = -1;
+}
+
+
+void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
+  struct uv__epoll_event* events;
+  struct uv__epoll_event dummy;
+  uintptr_t i;
+  uintptr_t nfds;
+
+  assert(loop->watchers != NULL);
+
+  events = (struct uv__epoll_event*) loop->watchers[loop->nwatchers];
+  nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
+  if (events != NULL)
+    /* Invalidate events with same file descriptor */
+    for (i = 0; i < nfds; i++)
+      if ((int) events[i].data == fd)
+        events[i].data = -1;
+
+  /* Remove the file descriptor from the epoll.
+   * This avoids a problem where the same file description remains open
+   * in another process, causing repeated junk epoll events.
+   *
+   * We pass in a dummy epoll_event, to work around a bug in old kernels.
+   */
+  if (loop->backend_fd >= 0) {
+    /* Work around a bug in kernels 3.10 to 3.19 where passing a struct that
+     * has the EPOLLWAKEUP flag set generates spurious audit syslog warnings.
+     */
+    memset(&dummy, 0, sizeof(dummy));
+    uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_DEL, fd, &dummy);
+  }
+}
+
+
+int uv__io_check_fd(uv_loop_t* loop, int fd) {
+  struct uv__epoll_event e;
+  int rc;
+
+  e.events = POLLIN;
+  e.data = -1;
+
+  rc = 0;
+  if (uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_ADD, fd, &e))
+    if (errno != EEXIST)
+      rc = -errno;
+
+  if (rc == 0)
+    if (uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_DEL, fd, &e))
+      abort();
+
+  return rc;
+}
+
+
+void uv__io_poll(uv_loop_t* loop, int timeout) {
+  /* A bug in kernels < 2.6.37 makes timeouts larger than ~30 minutes
+   * effectively infinite on 32 bits architectures.  To avoid blocking
+   * indefinitely, we cap the timeout and poll again if necessary.
+   *
+   * Note that "30 minutes" is a simplification because it depends on
+   * the value of CONFIG_HZ.  The magic constant assumes CONFIG_HZ=1200,
+   * that being the largest value I have seen in the wild (and only once.)
+   */
+  static const int max_safe_timeout = 1789569;
+  static int no_epoll_pwait;
+  static int no_epoll_wait;
+  struct uv__epoll_event events[1024];
+  struct uv__epoll_event* pe;
+  struct uv__epoll_event e;
+  int real_timeout;
+  QUEUE* q;
+  uv__io_t* w;
+  sigset_t sigset;
+  uint64_t sigmask;
+  uint64_t base;
+  int have_signals;
+  int nevents;
+  int count;
+  int nfds;
+  int fd;
+  int op;
+  int i;
+
+  if (loop->nfds == 0) {
+    assert(QUEUE_EMPTY(&loop->watcher_queue));
+    return;
+  }
+
+  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
+    q = QUEUE_HEAD(&loop->watcher_queue);
+    QUEUE_REMOVE(q);
+    QUEUE_INIT(q);
+
+    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
+    assert(w->pevents != 0);
+    assert(w->fd >= 0);
+    assert(w->fd < (int) loop->nwatchers);
+
+    e.events = w->pevents;
+    e.data = w->fd;
+
+    if (w->events == 0)
+      op = UV__EPOLL_CTL_ADD;
+    else
+      op = UV__EPOLL_CTL_MOD;
+
+    /* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching
+     * events, skip the syscall and squelch the events after epoll_wait().
+     */
+    if (uv__epoll_ctl(loop->backend_fd, op, w->fd, &e)) {
+      if (errno != EEXIST)
+        abort();
+
+      assert(op == UV__EPOLL_CTL_ADD);
+
+      /* We've reactivated a file descriptor that's been watched before. */
+      if (uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_MOD, w->fd, &e))
+        abort();
+    }
+
+    w->events = w->pevents;
+  }
+
+  sigmask = 0;
+  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
+    sigemptyset(&sigset);
+    sigaddset(&sigset, SIGPROF);
+    sigmask |= 1 << (SIGPROF - 1);
+  }
+
+  assert(timeout >= -1);
+  base = loop->time;
+  count = 48; /* Benchmarks suggest this gives the best throughput. */
+  real_timeout = timeout;
+
+  for (;;) {
+    /* See the comment for max_safe_timeout for an explanation of why
+     * this is necessary.  Executive summary: kernel bug workaround.
+     */
+    if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout)
+      timeout = max_safe_timeout;
+
+    if (sigmask != 0 && no_epoll_pwait != 0)
+      if (pthread_sigmask(SIG_BLOCK, &sigset, NULL))
+        abort();
+
+    if (no_epoll_wait != 0 || (sigmask != 0 && no_epoll_pwait == 0)) {
+      nfds = uv__epoll_pwait(loop->backend_fd,
+                             events,
+                             ARRAY_SIZE(events),
+                             timeout,
+                             sigmask);
+      if (nfds == -1 && errno == ENOSYS)
+        no_epoll_pwait = 1;
+    } else {
+      nfds = uv__epoll_wait(loop->backend_fd,
+                            events,
+                            ARRAY_SIZE(events),
+                            timeout);
+      if (nfds == -1 && errno == ENOSYS)
+        no_epoll_wait = 1;
+    }
+
+    if (sigmask != 0 && no_epoll_pwait != 0)
+      if (pthread_sigmask(SIG_UNBLOCK, &sigset, NULL))
+        abort();
+
+    /* Update loop->time unconditionally. It's tempting to skip the update when
+     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
+     * operating system didn't reschedule our process while in the syscall.
+     */
+    SAVE_ERRNO(uv__update_time(loop));
+
+    if (nfds == 0) {
+      assert(timeout != -1);
+
+      if (timeout == 0)
+        return;
+
+      /* We may have been inside the system call for longer than |timeout|
+       * milliseconds so we need to update the timestamp to avoid drift.
+       */
+      goto update_timeout;
+    }
+
+    if (nfds == -1) {
+      if (errno == ENOSYS) {
+        /* epoll_wait() or epoll_pwait() failed, try the other system call. */
+        assert(no_epoll_wait == 0 || no_epoll_pwait == 0);
+        continue;
+      }
+
+      if (errno != EINTR)
+        abort();
+
+      if (timeout == -1)
+        continue;
+
+      if (timeout == 0)
+        return;
+
+      /* Interrupted by a signal. Update timeout and poll again. */
+      goto update_timeout;
+    }
+
+    have_signals = 0;
+    nevents = 0;
+
+    assert(loop->watchers != NULL);
+    loop->watchers[loop->nwatchers] = (void*) events;
+    loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
+    for (i = 0; i < nfds; i++) {
+      pe = events + i;
+      fd = pe->data;
+
+      /* Skip invalidated events, see uv__platform_invalidate_fd */
+      if (fd == -1)
+        continue;
+
+      assert(fd >= 0);
+      assert((unsigned) fd < loop->nwatchers);
+
+      w = loop->watchers[fd];
+
+      if (w == NULL) {
+        /* File descriptor that we've stopped watching, disarm it.
+         *
+         * Ignore all errors because we may be racing with another thread
+         * when the file descriptor is closed.
+         */
+        uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_DEL, fd, pe);
+        continue;
+      }
+
+      /* Give users only events they're interested in. Prevents spurious
+       * callbacks when a previous callback invocation in this loop has stopped
+       * the current watcher. Also filters out events that the user has not
+       * requested us to watch.
+       */
+      pe->events &= w->pevents | POLLERR | POLLHUP;
+
+      /* Work around an epoll quirk where it sometimes reports just the
+       * EPOLLERR or EPOLLHUP event.  In order to force the event loop to
+       * move forward, we merge in the read/write events that the watcher
+       * is interested in; uv__read() and uv__write() will then deal with
+       * the error or hangup in the usual fashion.
+       *
+       * Note to self: happens when epoll reports EPOLLIN|EPOLLHUP, the user
+       * reads the available data, calls uv_read_stop(), then sometime later
+       * calls uv_read_start() again.  By then, libuv has forgotten about the
+       * hangup and the kernel won't report EPOLLIN again because there's
+       * nothing left to read.  If anything, libuv is to blame here.  The
+       * current hack is just a quick bandaid; to properly fix it, libuv
+       * needs to remember the error/hangup event.  We should get that for
+       * free when we switch over to edge-triggered I/O.
+       */
+      if (pe->events == POLLERR || pe->events == POLLHUP)
+        pe->events |= w->pevents & (POLLIN | POLLOUT);
+
+      if (pe->events != 0) {
+        /* Run signal watchers last.  This also affects child process watchers
+         * because those are implemented in terms of signal watchers.
+         */
+        if (w == &loop->signal_io_watcher)
+          have_signals = 1;
+        else
+          w->cb(loop, w, pe->events);
+
+        nevents++;
+      }
+    }
+
+    if (have_signals != 0)
+      loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
+
+    loop->watchers[loop->nwatchers] = NULL;
+    loop->watchers[loop->nwatchers + 1] = NULL;
+
+    if (have_signals != 0)
+      return;  /* Event loop should cycle now so don't poll again. */
+
+    if (nevents != 0) {
+      if (nfds == ARRAY_SIZE(events) && --count != 0) {
+        /* Poll for more events but don't block this time. */
+        timeout = 0;
+        continue;
+      }
+      return;
+    }
+
+    if (timeout == 0)
+      return;
+
+    if (timeout == -1)
+      continue;
+
+update_timeout:
+    assert(timeout > 0);
+
+    real_timeout -= (loop->time - base);
+    if (real_timeout <= 0)
+      return;
+
+    timeout = real_timeout;
+  }
+}
+
+
+uint64_t uv__hrtime(uv_clocktype_t type) {
+  static clock_t fast_clock_id = -1;
+  struct timespec t;
+  clock_t clock_id;
+
+  /* Prefer CLOCK_MONOTONIC_COARSE if available but only when it has
+   * millisecond granularity or better.  CLOCK_MONOTONIC_COARSE is
+   * serviced entirely from the vDSO, whereas CLOCK_MONOTONIC may
+   * decide to make a costly system call.
+   */
+  /* TODO(bnoordhuis) Use CLOCK_MONOTONIC_COARSE for UV_CLOCK_PRECISE
+   * when it has microsecond granularity or better (unlikely).
+   */
+  if (type == UV_CLOCK_FAST && fast_clock_id == -1) {
+    if (clock_getres(CLOCK_MONOTONIC_COARSE, &t) == 0 &&
+        t.tv_nsec <= 1 * 1000 * 1000) {
+      fast_clock_id = CLOCK_MONOTONIC_COARSE;
+    } else {
+      fast_clock_id = CLOCK_MONOTONIC;
+    }
+  }
+
+  clock_id = CLOCK_MONOTONIC;
+  if (type == UV_CLOCK_FAST)
+    clock_id = fast_clock_id;
+
+  if (clock_gettime(clock_id, &t))
+    return 0;  /* Not really possible. */
+
+  return t.tv_sec * (uint64_t) 1e9 + t.tv_nsec;
+}
+
+
+void uv_loadavg(double avg[3]) {
+  struct sysinfo info;
+
+  if (sysinfo(&info) < 0) return;
+
+  avg[0] = (double) info.loads[0] / 65536.0;
+  avg[1] = (double) info.loads[1] / 65536.0;
+  avg[2] = (double) info.loads[2] / 65536.0;
+}
+
+
+int uv_exepath(char* buffer, size_t* size) {
+  ssize_t n;
+
+  if (buffer == NULL || size == NULL || *size == 0)
+    return -EINVAL;
+
+  n = *size - 1;
+  if (n > 0)
+    n = readlink("/proc/self/exe", buffer, n);
+
+  if (n == -1)
+    return -errno;
+
+  buffer[n] = '\0';
+  *size = n;
+
+  return 0;
+}
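
As an aside (not part of the imported sources), a minimal usage sketch of the uv_exepath() API implemented above; on success *size is updated to the length written.

    #include <uv.h>
    #include <stdio.h>

    int main(void) {
      char path[4096];
      size_t size = sizeof(path);
      if (uv_exepath(path, &size) == 0)
        printf("%.*s\n", (int) size, path);  /* path to the running executable */
      return 0;
    }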
+
+
+uint64_t uv_get_free_memory(void) {
+  struct sysinfo info;
+
+  if (sysinfo(&info) == 0)
+    return (uint64_t) info.freeram * info.mem_unit;
+  return 0;
+}
+
+
+uint64_t uv_get_total_memory(void) {
+  struct sysinfo info;
+
+  if (sysinfo(&info) == 0)
+    return (uint64_t) info.totalram * info.mem_unit;
+  return 0;
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+  char buf[1024];
+  const char* s;
+  ssize_t n;
+  long val;
+  int fd;
+  int i;
+
+  do
+    fd = open("/proc/self/stat", O_RDONLY);
+  while (fd == -1 && errno == EINTR);
+
+  if (fd == -1)
+    return -errno;
+
+  do
+    n = read(fd, buf, sizeof(buf) - 1);
+  while (n == -1 && errno == EINTR);
+
+  uv__close(fd);
+  if (n == -1)
+    return -errno;
+  buf[n] = '\0';
+
+  s = strchr(buf, ' ');
+  if (s == NULL)
+    goto err;
+
+  s += 1;
+  if (*s != '(')
+    goto err;
+
+  s = strchr(s, ')');
+  if (s == NULL)
+    goto err;
+
+  for (i = 1; i <= 22; i++) {
+    s = strchr(s + 1, ' ');
+    if (s == NULL)
+      goto err;
+  }
+
+  errno = 0;
+  val = strtol(s, NULL, 10);
+  if (errno != 0)
+    goto err;
+  if (val < 0)
+    goto err;
+
+  *rss = val * getpagesize();
+  return 0;
+
+err:
+  return -EINVAL;
+}
+
+
+int uv_uptime(double* uptime) {
+  static volatile int no_clock_boottime;
+  struct timespec now;
+  int r;
+
+  /* Try CLOCK_BOOTTIME first, fall back to CLOCK_MONOTONIC if not available
+   * (pre-2.6.39 kernels). CLOCK_MONOTONIC doesn't increase when the system
+   * is suspended.
+   */
+  if (no_clock_boottime) {
+    retry: r = clock_gettime(CLOCK_MONOTONIC, &now);
+  }
+  else if ((r = clock_gettime(CLOCK_BOOTTIME, &now)) && errno == EINVAL) {
+    no_clock_boottime = 1;
+    goto retry;
+  }
+
+  if (r)
+    return -errno;
+
+  *uptime = now.tv_sec;
+  return 0;
+}
+
+
+static int uv__cpu_num(FILE* statfile_fp, unsigned int* numcpus) {
+  unsigned int num;
+  char buf[1024];
+
+  if (!fgets(buf, sizeof(buf), statfile_fp))
+    return -EIO;
+
+  num = 0;
+  while (fgets(buf, sizeof(buf), statfile_fp)) {
+    if (strncmp(buf, "cpu", 3))
+      break;
+    num++;
+  }
+
+  if (num == 0)
+    return -EIO;
+
+  *numcpus = num;
+  return 0;
+}
+
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+  unsigned int numcpus;
+  uv_cpu_info_t* ci;
+  int err;
+  FILE* statfile_fp;
+
+  *cpu_infos = NULL;
+  *count = 0;
+
+  statfile_fp = uv__open_file("/proc/stat");
+  if (statfile_fp == NULL)
+    return -errno;
+
+  err = uv__cpu_num(statfile_fp, &numcpus);
+  if (err < 0)
+    goto out;
+
+  err = -ENOMEM;
+  ci = uv__calloc(numcpus, sizeof(*ci));
+  if (ci == NULL)
+    goto out;
+
+  err = read_models(numcpus, ci);
+  if (err == 0)
+    err = read_times(statfile_fp, numcpus, ci);
+
+  if (err) {
+    uv_free_cpu_info(ci, numcpus);
+    goto out;
+  }
+
+  /* read_models() on x86 also reads the CPU speed from /proc/cpuinfo.
+   * We don't check for errors here. Worst case, the field is left zero.
+   */
+  if (ci[0].speed == 0)
+    read_speeds(numcpus, ci);
+
+  *cpu_infos = ci;
+  *count = numcpus;
+  err = 0;
+
+out:
+
+  if (fclose(statfile_fp))
+    if (errno != EINTR && errno != EINPROGRESS)
+      abort();
+
+  return err;
+}
+
+
+static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci) {
+  unsigned int num;
+
+  for (num = 0; num < numcpus; num++)
+    ci[num].speed = read_cpufreq(num) / 1000;
+}
+
+
+/* Also reads the CPU frequency on x86. The other architectures only have
+ * a BogoMIPS field, which may not be very accurate.
+ *
+ * Note: Simply returns on error, uv_cpu_info() takes care of the cleanup.
+ */
+static int read_models(unsigned int numcpus, uv_cpu_info_t* ci) {
+  static const char model_marker[] = "model name\t: ";
+  static const char speed_marker[] = "cpu MHz\t\t: ";
+  const char* inferred_model;
+  unsigned int model_idx;
+  unsigned int speed_idx;
+  char buf[1024];
+  char* model;
+  FILE* fp;
+
+  /* Most are unused on non-ARM, non-MIPS and non-x86 architectures. */
+  (void) &model_marker;
+  (void) &speed_marker;
+  (void) &speed_idx;
+  (void) &model;
+  (void) &buf;
+  (void) &fp;
+
+  model_idx = 0;
+  speed_idx = 0;
+
+#if defined(__arm__) || \
+    defined(__i386__) || \
+    defined(__mips__) || \
+    defined(__x86_64__)
+  fp = uv__open_file("/proc/cpuinfo");
+  if (fp == NULL)
+    return -errno;
+
+  while (fgets(buf, sizeof(buf), fp)) {
+    if (model_idx < numcpus) {
+      if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
+        model = buf + sizeof(model_marker) - 1;
+        model = uv__strndup(model, strlen(model) - 1);  /* Strip newline. */
+        if (model == NULL) {
+          fclose(fp);
+          return -ENOMEM;
+        }
+        ci[model_idx++].model = model;
+        continue;
+      }
+    }
+#if defined(__arm__) || defined(__mips__)
+    if (model_idx < numcpus) {
+#if defined(__arm__)
+      /* Fallback for pre-3.8 kernels. */
+      static const char model_marker[] = "Processor\t: ";
+#else	/* defined(__mips__) */
+      static const char model_marker[] = "cpu model\t\t: ";
+#endif
+      if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
+        model = buf + sizeof(model_marker) - 1;
+        model = uv__strndup(model, strlen(model) - 1);  /* Strip newline. */
+        if (model == NULL) {
+          fclose(fp);
+          return -ENOMEM;
+        }
+        ci[model_idx++].model = model;
+        continue;
+      }
+    }
+#else  /* !__arm__ && !__mips__ */
+    if (speed_idx < numcpus) {
+      if (strncmp(buf, speed_marker, sizeof(speed_marker) - 1) == 0) {
+        ci[speed_idx++].speed = atoi(buf + sizeof(speed_marker) - 1);
+        continue;
+      }
+    }
+#endif  /* __arm__ || __mips__ */
+  }
+
+  fclose(fp);
+#endif  /* __arm__ || __i386__ || __mips__ || __x86_64__ */
+
+  /* Now we want to make sure that all the models contain *something* because
+   * it's not safe to leave them as null. Copy the last entry unless there
+   * isn't one, in which case we simply put "unknown" into everything.
+   */
+  inferred_model = "unknown";
+  if (model_idx > 0)
+    inferred_model = ci[model_idx - 1].model;
+
+  while (model_idx < numcpus) {
+    model = uv__strndup(inferred_model, strlen(inferred_model));
+    if (model == NULL)
+      return -ENOMEM;
+    ci[model_idx++].model = model;
+  }
+
+  return 0;
+}
+
+
+static int read_times(FILE* statfile_fp,
+                      unsigned int numcpus,
+                      uv_cpu_info_t* ci) {
+  unsigned long clock_ticks;
+  struct uv_cpu_times_s ts;
+  unsigned long user;
+  unsigned long nice;
+  unsigned long sys;
+  unsigned long idle;
+  unsigned long dummy;
+  unsigned long irq;
+  unsigned int num;
+  unsigned int len;
+  char buf[1024];
+
+  clock_ticks = sysconf(_SC_CLK_TCK);
+  assert(clock_ticks != (unsigned long) -1);
+  assert(clock_ticks != 0);
+
+  rewind(statfile_fp);
+
+  if (!fgets(buf, sizeof(buf), statfile_fp))
+    abort();
+
+  num = 0;
+
+  while (fgets(buf, sizeof(buf), statfile_fp)) {
+    if (num >= numcpus)
+      break;
+
+    if (strncmp(buf, "cpu", 3))
+      break;
+
+    /* skip "cpu<num> " marker */
+    {
+      unsigned int n;
+      int r = sscanf(buf, "cpu%u ", &n);
+      assert(r == 1);
+      (void) r;  /* silence build warning */
+      for (len = sizeof("cpu0"); n /= 10; len++);
+    }
+
+    /* Line contains user, nice, system, idle, iowait, irq, softirq, steal,
+     * guest, guest_nice but we're only interested in the first four + irq.
+     *
+     * Don't use %*s to skip fields or %ll to read straight into the uint64_t
+     * fields, they're not allowed in C89 mode.
+     */
+    if (6 != sscanf(buf + len,
+                    "%lu %lu %lu %lu %lu %lu",
+                    &user,
+                    &nice,
+                    &sys,
+                    &idle,
+                    &dummy,
+                    &irq))
+      abort();
+
+    ts.user = clock_ticks * user;
+    ts.nice = clock_ticks * nice;
+    ts.sys  = clock_ticks * sys;
+    ts.idle = clock_ticks * idle;
+    ts.irq  = clock_ticks * irq;
+    ci[num++].cpu_times = ts;
+  }
+  assert(num == numcpus);
+
+  return 0;
+}
+
+
+static unsigned long read_cpufreq(unsigned int cpunum) {
+  unsigned long val;
+  char buf[1024];
+  FILE* fp;
+
+  snprintf(buf,
+           sizeof(buf),
+           "/sys/devices/system/cpu/cpu%u/cpufreq/scaling_cur_freq",
+           cpunum);
+
+  fp = uv__open_file(buf);
+  if (fp == NULL)
+    return 0;
+
+  if (fscanf(fp, "%lu", &val) != 1)
+    val = 0;
+
+  fclose(fp);
+
+  return val;
+}
+
+
+void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
+  int i;
+
+  for (i = 0; i < count; i++) {
+    uv__free(cpu_infos[i].model);
+  }
+
+  uv__free(cpu_infos);
+}
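
For orientation (not part of the imported sources), a minimal caller-side sketch of uv_cpu_info()/uv_free_cpu_info() as implemented above; error handling is kept to a single check.

    #include <uv.h>
    #include <stdio.h>

    int main(void) {
      uv_cpu_info_t* cpus;
      int count;
      int i;

      if (uv_cpu_info(&cpus, &count) != 0)
        return 1;
      for (i = 0; i < count; i++)
        printf("cpu %d: %s @ %d MHz\n", i, cpus[i].model, cpus[i].speed);
      uv_free_cpu_info(cpus, count);
      return 0;
    }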
+
+
+int uv_interface_addresses(uv_interface_address_t** addresses,
+  int* count) {
+#ifndef HAVE_IFADDRS_H
+  return -ENOSYS;
+#else
+  struct ifaddrs *addrs, *ent;
+  uv_interface_address_t* address;
+  int i;
+  struct sockaddr_ll *sll;
+
+  if (getifaddrs(&addrs))
+    return -errno;
+
+  *count = 0;
+  *addresses = NULL;
+
+  /* Count the number of interfaces */
+  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+    if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) ||
+        (ent->ifa_addr == NULL) ||
+        (ent->ifa_addr->sa_family == PF_PACKET)) {
+      continue;
+    }
+
+    (*count)++;
+  }
+
+  if (*count == 0)
+    return 0;
+
+  *addresses = uv__malloc(*count * sizeof(**addresses));
+  if (!(*addresses)) {
+    freeifaddrs(addrs);
+    return -ENOMEM;
+  }
+
+  address = *addresses;
+
+  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+    if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
+      continue;
+
+    if (ent->ifa_addr == NULL)
+      continue;
+
+    /*
+     * On Linux getifaddrs returns information related to the raw underlying
+     * devices. We're not interested in this information yet.
+     */
+    if (ent->ifa_addr->sa_family == PF_PACKET)
+      continue;
+
+    address->name = uv__strdup(ent->ifa_name);
+
+    if (ent->ifa_addr->sa_family == AF_INET6) {
+      address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
+    } else {
+      address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
+    }
+
+    if (ent->ifa_netmask->sa_family == AF_INET6) {
+      address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
+    } else {
+      address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
+    }
+
+    address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK);
+
+    address++;
+  }
+
+  /* Fill in physical addresses for each interface */
+  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+    if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) ||
+        (ent->ifa_addr == NULL) ||
+        (ent->ifa_addr->sa_family != PF_PACKET)) {
+      continue;
+    }
+
+    address = *addresses;
+
+    for (i = 0; i < (*count); i++) {
+      if (strcmp(address->name, ent->ifa_name) == 0) {
+        sll = (struct sockaddr_ll*)ent->ifa_addr;
+        memcpy(address->phys_addr, sll->sll_addr, sizeof(address->phys_addr));
+      }
+      address++;
+    }
+  }
+
+  freeifaddrs(addrs);
+
+  return 0;
+#endif
+}
+
+
+void uv_free_interface_addresses(uv_interface_address_t* addresses,
+  int count) {
+  int i;
+
+  for (i = 0; i < count; i++) {
+    uv__free(addresses[i].name);
+  }
+
+  uv__free(addresses);
+}
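
As an aside (not part of the imported sources), a minimal sketch of enumerating interfaces via the uv_interface_addresses() API implemented above, formatting each address with the public uv_ip4_name()/uv_ip6_name() helpers.

    #include <uv.h>
    #include <stdio.h>

    int main(void) {
      uv_interface_address_t* ifs;
      int count;
      int i;
      char buf[64];

      if (uv_interface_addresses(&ifs, &count) != 0)
        return 1;
      for (i = 0; i < count; i++) {
        if (ifs[i].address.address4.sin_family == AF_INET)
          uv_ip4_name(&ifs[i].address.address4, buf, sizeof(buf));
        else
          uv_ip6_name(&ifs[i].address.address6, buf, sizeof(buf));
        printf("%s%s %s\n", ifs[i].name,
               ifs[i].is_internal ? " (internal)" : "", buf);
      }
      uv_free_interface_addresses(ifs, count);
      return 0;
    }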
+
+
+void uv__set_process_title(const char* title) {
+#if defined(PR_SET_NAME)
+  prctl(PR_SET_NAME, title);  /* Only copies first 16 characters. */
+#endif
+}

+ 285 - 0
Utilities/cmlibuv/src/unix/linux-inotify.c

@@ -0,0 +1,285 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "tree.h"
+#include "internal.h"
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+
+#include <sys/types.h>
+#include <unistd.h>
+
+struct watcher_list {
+  RB_ENTRY(watcher_list) entry;
+  QUEUE watchers;
+  int iterating;
+  char* path;
+  int wd;
+};
+
+struct watcher_root {
+  struct watcher_list* rbh_root;
+};
+#define CAST(p) ((struct watcher_root*)(p))
+
+
+static int compare_watchers(const struct watcher_list* a,
+                            const struct watcher_list* b) {
+  if (a->wd < b->wd) return -1;
+  if (a->wd > b->wd) return 1;
+  return 0;
+}
+
+
+RB_GENERATE_STATIC(watcher_root, watcher_list, entry, compare_watchers)
+
+
+static void uv__inotify_read(uv_loop_t* loop,
+                             uv__io_t* w,
+                             unsigned int revents);
+
+
+static int new_inotify_fd(void) {
+  int err;
+  int fd;
+
+  fd = uv__inotify_init1(UV__IN_NONBLOCK | UV__IN_CLOEXEC);
+  if (fd != -1)
+    return fd;
+
+  if (errno != ENOSYS)
+    return -errno;
+
+  fd = uv__inotify_init();
+  if (fd == -1)
+    return -errno;
+
+  err = uv__cloexec(fd, 1);
+  if (err == 0)
+    err = uv__nonblock(fd, 1);
+
+  if (err) {
+    uv__close(fd);
+    return err;
+  }
+
+  return fd;
+}
+
+
+static int init_inotify(uv_loop_t* loop) {
+  int err;
+
+  if (loop->inotify_fd != -1)
+    return 0;
+
+  err = new_inotify_fd();
+  if (err < 0)
+    return err;
+
+  loop->inotify_fd = err;
+  uv__io_init(&loop->inotify_read_watcher, uv__inotify_read, loop->inotify_fd);
+  uv__io_start(loop, &loop->inotify_read_watcher, POLLIN);
+
+  return 0;
+}
+
+
+static struct watcher_list* find_watcher(uv_loop_t* loop, int wd) {
+  struct watcher_list w;
+  w.wd = wd;
+  return RB_FIND(watcher_root, CAST(&loop->inotify_watchers), &w);
+}
+
+static void maybe_free_watcher_list(struct watcher_list* w, uv_loop_t* loop) {
+  /* if the watcher_list->watchers is being iterated over, we can't free it. */
+  if ((!w->iterating) && QUEUE_EMPTY(&w->watchers)) {
+    /* No watchers left for this path. Clean up. */
+    RB_REMOVE(watcher_root, CAST(&loop->inotify_watchers), w);
+    uv__inotify_rm_watch(loop->inotify_fd, w->wd);
+    uv__free(w);
+  }
+}
+
+static void uv__inotify_read(uv_loop_t* loop,
+                             uv__io_t* dummy,
+                             unsigned int events) {
+  const struct uv__inotify_event* e;
+  struct watcher_list* w;
+  uv_fs_event_t* h;
+  QUEUE queue;
+  QUEUE* q;
+  const char* path;
+  ssize_t size;
+  const char *p;
+  /* needs to be large enough for sizeof(inotify_event) + strlen(path) */
+  char buf[4096];
+
+  while (1) {
+    do
+      size = read(loop->inotify_fd, buf, sizeof(buf));
+    while (size == -1 && errno == EINTR);
+
+    if (size == -1) {
+      assert(errno == EAGAIN || errno == EWOULDBLOCK);
+      break;
+    }
+
+    assert(size > 0); /* pre-2.6.21 thing, size=0 == read buffer too small */
+
+    /* Now we have one or more inotify_event structs. */
+    for (p = buf; p < buf + size; p += sizeof(*e) + e->len) {
+      e = (const struct uv__inotify_event*)p;
+
+      events = 0;
+      if (e->mask & (UV__IN_ATTRIB|UV__IN_MODIFY))
+        events |= UV_CHANGE;
+      if (e->mask & ~(UV__IN_ATTRIB|UV__IN_MODIFY))
+        events |= UV_RENAME;
+
+      w = find_watcher(loop, e->wd);
+      if (w == NULL)
+        continue; /* Stale event, no watchers left. */
+
+      /* inotify does not return the filename when monitoring a single file
+       * for modifications. Repurpose the filename for API compatibility.
+       * I'm not convinced this is a good thing, maybe it should go.
+       */
+      path = e->len ? (const char*) (e + 1) : uv__basename_r(w->path);
+
+      /* We're about to iterate over the queue and call the user's callbacks.
+       * What can go wrong? A callback could call uv_fs_event_stop() and the
+       * queue can change under our feet. So we use the QUEUE_MOVE() trick to
+       * iterate over the queue safely, and we don't free the watcher_list
+       * until we're done iterating.
+       *
+       * First, tell uv_fs_event_stop() (which could be called from a user's
+       * callback) not to free the watcher_list.
+       */
+      w->iterating = 1;
+      QUEUE_MOVE(&w->watchers, &queue);
+      while (!QUEUE_EMPTY(&queue)) {
+        q = QUEUE_HEAD(&queue);
+        h = QUEUE_DATA(q, uv_fs_event_t, watchers);
+
+        QUEUE_REMOVE(q);
+        QUEUE_INSERT_TAIL(&w->watchers, q);
+
+        h->cb(h, path, events, 0);
+      }
+      /* done iterating, time to (maybe) free empty watcher_list */
+      w->iterating = 0;
+      maybe_free_watcher_list(w, loop);
+    }
+  }
+}
+
+
+int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
+  uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
+  return 0;
+}
+
+
+int uv_fs_event_start(uv_fs_event_t* handle,
+                      uv_fs_event_cb cb,
+                      const char* path,
+                      unsigned int flags) {
+  struct watcher_list* w;
+  int events;
+  int err;
+  int wd;
+
+  if (uv__is_active(handle))
+    return -EINVAL;
+
+  err = init_inotify(handle->loop);
+  if (err)
+    return err;
+
+  events = UV__IN_ATTRIB
+         | UV__IN_CREATE
+         | UV__IN_MODIFY
+         | UV__IN_DELETE
+         | UV__IN_DELETE_SELF
+         | UV__IN_MOVE_SELF
+         | UV__IN_MOVED_FROM
+         | UV__IN_MOVED_TO;
+
+  wd = uv__inotify_add_watch(handle->loop->inotify_fd, path, events);
+  if (wd == -1)
+    return -errno;
+
+  w = find_watcher(handle->loop, wd);
+  if (w)
+    goto no_insert;
+
+  w = uv__malloc(sizeof(*w) + strlen(path) + 1);
+  if (w == NULL)
+    return -ENOMEM;
+
+  w->wd = wd;
+  w->path = strcpy((char*)(w + 1), path);
+  QUEUE_INIT(&w->watchers);
+  w->iterating = 0;
+  RB_INSERT(watcher_root, CAST(&handle->loop->inotify_watchers), w);
+
+no_insert:
+  uv__handle_start(handle);
+  QUEUE_INSERT_TAIL(&w->watchers, &handle->watchers);
+  handle->path = w->path;
+  handle->cb = cb;
+  handle->wd = wd;
+
+  return 0;
+}
+
+
+int uv_fs_event_stop(uv_fs_event_t* handle) {
+  struct watcher_list* w;
+
+  if (!uv__is_active(handle))
+    return 0;
+
+  w = find_watcher(handle->loop, handle->wd);
+  assert(w != NULL);
+
+  handle->wd = -1;
+  handle->path = NULL;
+  uv__handle_stop(handle);
+  QUEUE_REMOVE(&handle->watchers);
+
+  maybe_free_watcher_list(w, handle->loop);
+
+  return 0;
+}
+
+
+void uv__fs_event_close(uv_fs_event_t* handle) {
+  uv_fs_event_stop(handle);
+}

+ 471 - 0
Utilities/cmlibuv/src/unix/linux-syscalls.c

@@ -0,0 +1,471 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "linux-syscalls.h"
+#include <unistd.h>
+#include <signal.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <errno.h>
+
+#if defined(__has_feature)
+# if __has_feature(memory_sanitizer)
+#  define MSAN_ACTIVE 1
+#  include <sanitizer/msan_interface.h>
+# endif
+#endif
+
+#if defined(__i386__)
+# ifndef __NR_socketcall
+#  define __NR_socketcall 102
+# endif
+#endif
+
+#if defined(__arm__)
+# if defined(__thumb__) || defined(__ARM_EABI__)
+#  define UV_SYSCALL_BASE 0
+# else
+#  define UV_SYSCALL_BASE 0x900000
+# endif
+#endif /* __arm__ */
+
+#ifndef __NR_accept4
+# if defined(__x86_64__)
+#  define __NR_accept4 288
+# elif defined(__i386__)
+   /* Nothing. Handled through socketcall(). */
+# elif defined(__arm__)
+#  define __NR_accept4 (UV_SYSCALL_BASE + 366)
+# endif
+#endif /* __NR_accept4 */
+
+#ifndef __NR_eventfd
+# if defined(__x86_64__)
+#  define __NR_eventfd 284
+# elif defined(__i386__)
+#  define __NR_eventfd 323
+# elif defined(__arm__)
+#  define __NR_eventfd (UV_SYSCALL_BASE + 351)
+# endif
+#endif /* __NR_eventfd */
+
+#ifndef __NR_eventfd2
+# if defined(__x86_64__)
+#  define __NR_eventfd2 290
+# elif defined(__i386__)
+#  define __NR_eventfd2 328
+# elif defined(__arm__)
+#  define __NR_eventfd2 (UV_SYSCALL_BASE + 356)
+# endif
+#endif /* __NR_eventfd2 */
+
+#ifndef __NR_epoll_create
+# if defined(__x86_64__)
+#  define __NR_epoll_create 213
+# elif defined(__i386__)
+#  define __NR_epoll_create 254
+# elif defined(__arm__)
+#  define __NR_epoll_create (UV_SYSCALL_BASE + 250)
+# endif
+#endif /* __NR_epoll_create */
+
+#ifndef __NR_epoll_create1
+# if defined(__x86_64__)
+#  define __NR_epoll_create1 291
+# elif defined(__i386__)
+#  define __NR_epoll_create1 329
+# elif defined(__arm__)
+#  define __NR_epoll_create1 (UV_SYSCALL_BASE + 357)
+# endif
+#endif /* __NR_epoll_create1 */
+
+#ifndef __NR_epoll_ctl
+# if defined(__x86_64__)
+#  define __NR_epoll_ctl 233 /* used to be 214 */
+# elif defined(__i386__)
+#  define __NR_epoll_ctl 255
+# elif defined(__arm__)
+#  define __NR_epoll_ctl (UV_SYSCALL_BASE + 251)
+# endif
+#endif /* __NR_epoll_ctl */
+
+#ifndef __NR_epoll_wait
+# if defined(__x86_64__)
+#  define __NR_epoll_wait 232 /* used to be 215 */
+# elif defined(__i386__)
+#  define __NR_epoll_wait 256
+# elif defined(__arm__)
+#  define __NR_epoll_wait (UV_SYSCALL_BASE + 252)
+# endif
+#endif /* __NR_epoll_wait */
+
+#ifndef __NR_epoll_pwait
+# if defined(__x86_64__)
+#  define __NR_epoll_pwait 281
+# elif defined(__i386__)
+#  define __NR_epoll_pwait 319
+# elif defined(__arm__)
+#  define __NR_epoll_pwait (UV_SYSCALL_BASE + 346)
+# endif
+#endif /* __NR_epoll_pwait */
+
+#ifndef __NR_inotify_init
+# if defined(__x86_64__)
+#  define __NR_inotify_init 253
+# elif defined(__i386__)
+#  define __NR_inotify_init 291
+# elif defined(__arm__)
+#  define __NR_inotify_init (UV_SYSCALL_BASE + 316)
+# endif
+#endif /* __NR_inotify_init */
+
+#ifndef __NR_inotify_init1
+# if defined(__x86_64__)
+#  define __NR_inotify_init1 294
+# elif defined(__i386__)
+#  define __NR_inotify_init1 332
+# elif defined(__arm__)
+#  define __NR_inotify_init1 (UV_SYSCALL_BASE + 360)
+# endif
+#endif /* __NR_inotify_init1 */
+
+#ifndef __NR_inotify_add_watch
+# if defined(__x86_64__)
+#  define __NR_inotify_add_watch 254
+# elif defined(__i386__)
+#  define __NR_inotify_add_watch 292
+# elif defined(__arm__)
+#  define __NR_inotify_add_watch (UV_SYSCALL_BASE + 317)
+# endif
+#endif /* __NR_inotify_add_watch */
+
+#ifndef __NR_inotify_rm_watch
+# if defined(__x86_64__)
+#  define __NR_inotify_rm_watch 255
+# elif defined(__i386__)
+#  define __NR_inotify_rm_watch 293
+# elif defined(__arm__)
+#  define __NR_inotify_rm_watch (UV_SYSCALL_BASE + 318)
+# endif
+#endif /* __NR_inotify_rm_watch */
+
+#ifndef __NR_pipe2
+# if defined(__x86_64__)
+#  define __NR_pipe2 293
+# elif defined(__i386__)
+#  define __NR_pipe2 331
+# elif defined(__arm__)
+#  define __NR_pipe2 (UV_SYSCALL_BASE + 359)
+# endif
+#endif /* __NR_pipe2 */
+
+#ifndef __NR_recvmmsg
+# if defined(__x86_64__)
+#  define __NR_recvmmsg 299
+# elif defined(__i386__)
+#  define __NR_recvmmsg 337
+# elif defined(__arm__)
+#  define __NR_recvmmsg (UV_SYSCALL_BASE + 365)
+# endif
+#endif /* __NR_recvmmsg */
+
+#ifndef __NR_sendmmsg
+# if defined(__x86_64__)
+#  define __NR_sendmmsg 307
+# elif defined(__i386__)
+#  define __NR_sendmmsg 345
+# elif defined(__arm__)
+#  define __NR_sendmmsg (UV_SYSCALL_BASE + 374)
+# endif
+#endif /* __NR_sendmmsg */
+
+#ifndef __NR_utimensat
+# if defined(__x86_64__)
+#  define __NR_utimensat 280
+# elif defined(__i386__)
+#  define __NR_utimensat 320
+# elif defined(__arm__)
+#  define __NR_utimensat (UV_SYSCALL_BASE + 348)
+# endif
+#endif /* __NR_utimensat */
+
+#ifndef __NR_preadv
+# if defined(__x86_64__)
+#  define __NR_preadv 295
+# elif defined(__i386__)
+#  define __NR_preadv 333
+# elif defined(__arm__)
+#  define __NR_preadv (UV_SYSCALL_BASE + 361)
+# endif
+#endif /* __NR_preadv */
+
+#ifndef __NR_pwritev
+# if defined(__x86_64__)
+#  define __NR_pwritev 296
+# elif defined(__i386__)
+#  define __NR_pwritev 334
+# elif defined(__arm__)
+#  define __NR_pwritev (UV_SYSCALL_BASE + 362)
+# endif
+#endif /* __NR_pwritev */
+
+#ifndef __NR_dup3
+# if defined(__x86_64__)
+#  define __NR_dup3 292
+# elif defined(__i386__)
+#  define __NR_dup3 330
+# elif defined(__arm__)
+#  define __NR_dup3 (UV_SYSCALL_BASE + 358)
+# endif
+#endif /* __NR_dup3 */
+
+
+int uv__accept4(int fd, struct sockaddr* addr, socklen_t* addrlen, int flags) {
+#if defined(__i386__)
+  unsigned long args[4];
+  int r;
+
+  args[0] = (unsigned long) fd;
+  args[1] = (unsigned long) addr;
+  args[2] = (unsigned long) addrlen;
+  args[3] = (unsigned long) flags;
+
+  r = syscall(__NR_socketcall, 18 /* SYS_ACCEPT4 */, args);
+
+  /* socketcall() raises EINVAL when SYS_ACCEPT4 is not supported but so does
+   * a bad flags argument. Try to distinguish between the two cases.
+   */
+  if (r == -1)
+    if (errno == EINVAL)
+      if ((flags & ~(UV__SOCK_CLOEXEC|UV__SOCK_NONBLOCK)) == 0)
+        errno = ENOSYS;
+
+  return r;
+#elif defined(__NR_accept4)
+  return syscall(__NR_accept4, fd, addr, addrlen, flags);
+#else
+  return errno = ENOSYS, -1;
+#endif
+}
+
+
+int uv__eventfd(unsigned int count) {
+#if defined(__NR_eventfd)
+  return syscall(__NR_eventfd, count);
+#else
+  return errno = ENOSYS, -1;
+#endif
+}
+
+
+int uv__eventfd2(unsigned int count, int flags) {
+#if defined(__NR_eventfd2)
+  return syscall(__NR_eventfd2, count, flags);
+#else
+  return errno = ENOSYS, -1;
+#endif
+}
+
+
+int uv__epoll_create(int size) {
+#if defined(__NR_epoll_create)
+  return syscall(__NR_epoll_create, size);
+#else
+  return errno = ENOSYS, -1;
+#endif
+}
+
+
+int uv__epoll_create1(int flags) {
+#if defined(__NR_epoll_create1)
+  return syscall(__NR_epoll_create1, flags);
+#else
+  return errno = ENOSYS, -1;
+#endif
+}
+
+
+int uv__epoll_ctl(int epfd, int op, int fd, struct uv__epoll_event* events) {
+#if defined(__NR_epoll_ctl)
+  return syscall(__NR_epoll_ctl, epfd, op, fd, events);
+#else
+  return errno = ENOSYS, -1;
+#endif
+}
+
+
+int uv__epoll_wait(int epfd,
+                   struct uv__epoll_event* events,
+                   int nevents,
+                   int timeout) {
+#if defined(__NR_epoll_wait)
+  int result;
+  result = syscall(__NR_epoll_wait, epfd, events, nevents, timeout);
+#if MSAN_ACTIVE
+  if (result > 0)
+    __msan_unpoison(events, sizeof(events[0]) * result);
+#endif
+  return result;
+#else
+  return errno = ENOSYS, -1;
+#endif
+}
+
+
+int uv__epoll_pwait(int epfd,
+                    struct uv__epoll_event* events,
+                    int nevents,
+                    int timeout,
+                    uint64_t sigmask) {
+#if defined(__NR_epoll_pwait)
+  int result;
+  result = syscall(__NR_epoll_pwait,
+                   epfd,
+                   events,
+                   nevents,
+                   timeout,
+                   &sigmask,
+                   sizeof(sigmask));
+#if MSAN_ACTIVE
+  if (result > 0)
+    __msan_unpoison(events, sizeof(events[0]) * result);
+#endif
+  return result;
+#else
+  return errno = ENOSYS, -1;
+#endif
+}
+
+
+int uv__inotify_init(void) {
+#if defined(__NR_inotify_init)
+  return syscall(__NR_inotify_init);
+#else
+  return errno = ENOSYS, -1;
+#endif
+}
+
+
+int uv__inotify_init1(int flags) {
+#if defined(__NR_inotify_init1)
+  return syscall(__NR_inotify_init1, flags);
+#else
+  return errno = ENOSYS, -1;
+#endif
+}
+
+
+int uv__inotify_add_watch(int fd, const char* path, uint32_t mask) {
+#if defined(__NR_inotify_add_watch)
+  return syscall(__NR_inotify_add_watch, fd, path, mask);
+#else
+  return errno = ENOSYS, -1;
+#endif
+}
+
+
+int uv__inotify_rm_watch(int fd, int32_t wd) {
+#if defined(__NR_inotify_rm_watch)
+  return syscall(__NR_inotify_rm_watch, fd, wd);
+#else
+  return errno = ENOSYS, -1;
+#endif
+}
+
+
+int uv__pipe2(int pipefd[2], int flags) {
+#if defined(__NR_pipe2)
+  int result;
+  result = syscall(__NR_pipe2, pipefd, flags);
+#if MSAN_ACTIVE
+  if (!result)
+    __msan_unpoison(pipefd, sizeof(int[2]));
+#endif
+  return result;
+#else
+  return errno = ENOSYS, -1;
+#endif
+}
+
+
+int uv__sendmmsg(int fd,
+                 struct uv__mmsghdr* mmsg,
+                 unsigned int vlen,
+                 unsigned int flags) {
+#if defined(__NR_sendmmsg)
+  return syscall(__NR_sendmmsg, fd, mmsg, vlen, flags);
+#else
+  return errno = ENOSYS, -1;
+#endif
+}
+
+
+int uv__recvmmsg(int fd,
+                 struct uv__mmsghdr* mmsg,
+                 unsigned int vlen,
+                 unsigned int flags,
+                 struct timespec* timeout) {
+#if defined(__NR_recvmmsg)
+  return syscall(__NR_recvmmsg, fd, mmsg, vlen, flags, timeout);
+#else
+  return errno = ENOSYS, -1;
+#endif
+}
+
+
+int uv__utimesat(int dirfd,
+                 const char* path,
+                 const struct timespec times[2],
+                 int flags)
+{
+#if defined(__NR_utimensat)
+  return syscall(__NR_utimensat, dirfd, path, times, flags);
+#else
+  return errno = ENOSYS, -1;
+#endif
+}
+
+
+ssize_t uv__preadv(int fd, const struct iovec *iov, int iovcnt, int64_t offset) {
+#if defined(__NR_preadv)
+  return syscall(__NR_preadv, fd, iov, iovcnt, (long)offset, (long)(offset >> 32));
+#else
+  return errno = ENOSYS, -1;
+#endif
+}
+
+
+ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt, int64_t offset) {
+#if defined(__NR_pwritev)
+  return syscall(__NR_pwritev, fd, iov, iovcnt, (long)offset, (long)(offset >> 32));
+#else
+  return errno = ENOSYS, -1;
+#endif
+}
+
+
+int uv__dup3(int oldfd, int newfd, int flags) {
+#if defined(__NR_dup3)
+  return syscall(__NR_dup3, oldfd, newfd, flags);
+#else
+  return errno = ENOSYS, -1;
+#endif
+}
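Every wrapper above degrades to returning -1 with errno set to ENOSYS when the syscall number is unknown for the target architecture, so callers can probe for kernel support at run time. A minimal sketch of such a probe, assuming the declarations from linux-syscalls.h (the make_pipe helper and its fallback path are illustrative, not the strategy libuv itself uses):

    #include "linux-syscalls.h"
    #include <errno.h>
    #include <fcntl.h>
    #include <unistd.h>

    /* Prefer pipe2(O_CLOEXEC); fall back to pipe() + fcntl() on old kernels. */
    static int make_pipe(int fds[2]) {
      if (uv__pipe2(fds, UV__O_CLOEXEC) == 0)
        return 0;
      if (errno != ENOSYS)
        return -errno;                 /* pipe2 exists but failed for real */

      if (pipe(fds))
        return -errno;
      fcntl(fds[0], F_SETFD, FD_CLOEXEC);
      fcntl(fds[1], F_SETFD, FD_CLOEXEC);
      return 0;
    }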

+ 151 - 0
Utilities/cmlibuv/src/unix/linux-syscalls.h

@@ -0,0 +1,151 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_LINUX_SYSCALL_H_
+#define UV_LINUX_SYSCALL_H_
+
+#undef  _GNU_SOURCE
+#define _GNU_SOURCE
+
+#include <stdint.h>
+#include <signal.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/socket.h>
+
+#if defined(__alpha__)
+# define UV__O_CLOEXEC        0x200000
+#elif defined(__hppa__)
+# define UV__O_CLOEXEC        0x200000
+#elif defined(__sparc__)
+# define UV__O_CLOEXEC        0x400000
+#else
+# define UV__O_CLOEXEC        0x80000
+#endif
+
+#if defined(__alpha__)
+# define UV__O_NONBLOCK       0x4
+#elif defined(__hppa__)
+# define UV__O_NONBLOCK       O_NONBLOCK
+#elif defined(__mips__)
+# define UV__O_NONBLOCK       0x80
+#elif defined(__sparc__)
+# define UV__O_NONBLOCK       0x4000
+#else
+# define UV__O_NONBLOCK       0x800
+#endif
+
+#define UV__EFD_CLOEXEC       UV__O_CLOEXEC
+#define UV__EFD_NONBLOCK      UV__O_NONBLOCK
+
+#define UV__IN_CLOEXEC        UV__O_CLOEXEC
+#define UV__IN_NONBLOCK       UV__O_NONBLOCK
+
+#define UV__SOCK_CLOEXEC      UV__O_CLOEXEC
+#if defined(SOCK_NONBLOCK)
+# define UV__SOCK_NONBLOCK    SOCK_NONBLOCK
+#else
+# define UV__SOCK_NONBLOCK    UV__O_NONBLOCK
+#endif
+
+/* epoll flags */
+#define UV__EPOLL_CLOEXEC     UV__O_CLOEXEC
+#define UV__EPOLL_CTL_ADD     1
+#define UV__EPOLL_CTL_DEL     2
+#define UV__EPOLL_CTL_MOD     3
+
+/* inotify flags */
+#define UV__IN_ACCESS         0x001
+#define UV__IN_MODIFY         0x002
+#define UV__IN_ATTRIB         0x004
+#define UV__IN_CLOSE_WRITE    0x008
+#define UV__IN_CLOSE_NOWRITE  0x010
+#define UV__IN_OPEN           0x020
+#define UV__IN_MOVED_FROM     0x040
+#define UV__IN_MOVED_TO       0x080
+#define UV__IN_CREATE         0x100
+#define UV__IN_DELETE         0x200
+#define UV__IN_DELETE_SELF    0x400
+#define UV__IN_MOVE_SELF      0x800
+
+#if defined(__x86_64__)
+struct uv__epoll_event {
+  uint32_t events;
+  uint64_t data;
+} __attribute__((packed));
+#else
+struct uv__epoll_event {
+  uint32_t events;
+  uint64_t data;
+};
+#endif
+
+struct uv__inotify_event {
+  int32_t wd;
+  uint32_t mask;
+  uint32_t cookie;
+  uint32_t len;
+  /* char name[0]; */
+};
+
+struct uv__mmsghdr {
+  struct msghdr msg_hdr;
+  unsigned int msg_len;
+};
+
+int uv__accept4(int fd, struct sockaddr* addr, socklen_t* addrlen, int flags);
+int uv__eventfd(unsigned int count);
+int uv__epoll_create(int size);
+int uv__epoll_create1(int flags);
+int uv__epoll_ctl(int epfd, int op, int fd, struct uv__epoll_event *ev);
+int uv__epoll_wait(int epfd,
+                   struct uv__epoll_event* events,
+                   int nevents,
+                   int timeout);
+int uv__epoll_pwait(int epfd,
+                    struct uv__epoll_event* events,
+                    int nevents,
+                    int timeout,
+                    uint64_t sigmask);
+int uv__eventfd2(unsigned int count, int flags);
+int uv__inotify_init(void);
+int uv__inotify_init1(int flags);
+int uv__inotify_add_watch(int fd, const char* path, uint32_t mask);
+int uv__inotify_rm_watch(int fd, int32_t wd);
+int uv__pipe2(int pipefd[2], int flags);
+int uv__recvmmsg(int fd,
+                 struct uv__mmsghdr* mmsg,
+                 unsigned int vlen,
+                 unsigned int flags,
+                 struct timespec* timeout);
+int uv__sendmmsg(int fd,
+                 struct uv__mmsghdr* mmsg,
+                 unsigned int vlen,
+                 unsigned int flags);
+int uv__utimesat(int dirfd,
+                 const char* path,
+                 const struct timespec times[2],
+                 int flags);
+ssize_t uv__preadv(int fd, const struct iovec *iov, int iovcnt, int64_t offset);
+ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt, int64_t offset);
+int uv__dup3(int oldfd, int newfd, int flags);
+
+#endif /* UV_LINUX_SYSCALL_H_ */
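The wrappers declared above call the kernel directly through syscall(2), so libuv does not depend on the C library exposing epoll, eventfd, or inotify. A small sketch of how the epoll pieces fit together (poll_once and on_readable are illustrative names; the 0x001 literal is Linux's EPOLLIN value, which this header does not name):

    #include "linux-syscalls.h"
    #include <errno.h>
    #include <unistd.h>

    /* Register one fd and run a single wait with the wrappers above. */
    static int poll_once(int fd, void (*on_readable)(int fd)) {
      struct uv__epoll_event ev;
      struct uv__epoll_event out[64];
      int epfd, n, i;

      epfd = uv__epoll_create1(UV__EPOLL_CLOEXEC);
      if (epfd == -1)
        epfd = uv__epoll_create(64);          /* older kernels: size is a hint */
      if (epfd == -1)
        return -errno;

      ev.events = 0x001;                      /* EPOLLIN on Linux */
      ev.data = (uint64_t) fd;
      if (uv__epoll_ctl(epfd, UV__EPOLL_CTL_ADD, fd, &ev)) {
        close(epfd);
        return -errno;
      }

      n = uv__epoll_wait(epfd, out, 64, 1000);  /* timeout in milliseconds */
      for (i = 0; i < n; i++)
        on_readable((int) out[i].data);

      close(epfd);
      return n < 0 ? -errno : 0;
    }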

+ 68 - 0
Utilities/cmlibuv/src/unix/loop-watcher.c

@@ -0,0 +1,68 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#define UV_LOOP_WATCHER_DEFINE(name, type)                                    \
+  int uv_##name##_init(uv_loop_t* loop, uv_##name##_t* handle) {              \
+    uv__handle_init(loop, (uv_handle_t*)handle, UV_##type);                   \
+    handle->name##_cb = NULL;                                                 \
+    return 0;                                                                 \
+  }                                                                           \
+                                                                              \
+  int uv_##name##_start(uv_##name##_t* handle, uv_##name##_cb cb) {           \
+    if (uv__is_active(handle)) return 0;                                      \
+    if (cb == NULL) return -EINVAL;                                           \
+    QUEUE_INSERT_HEAD(&handle->loop->name##_handles, &handle->queue);         \
+    handle->name##_cb = cb;                                                   \
+    uv__handle_start(handle);                                                 \
+    return 0;                                                                 \
+  }                                                                           \
+                                                                              \
+  int uv_##name##_stop(uv_##name##_t* handle) {                               \
+    if (!uv__is_active(handle)) return 0;                                     \
+    QUEUE_REMOVE(&handle->queue);                                             \
+    uv__handle_stop(handle);                                                  \
+    return 0;                                                                 \
+  }                                                                           \
+                                                                              \
+  void uv__run_##name(uv_loop_t* loop) {                                      \
+    uv_##name##_t* h;                                                         \
+    QUEUE queue;                                                              \
+    QUEUE* q;                                                                 \
+    QUEUE_MOVE(&loop->name##_handles, &queue);                                \
+    while (!QUEUE_EMPTY(&queue)) {                                            \
+      q = QUEUE_HEAD(&queue);                                                 \
+      h = QUEUE_DATA(q, uv_##name##_t, queue);                                \
+      QUEUE_REMOVE(q);                                                        \
+      QUEUE_INSERT_TAIL(&loop->name##_handles, q);                            \
+      h->name##_cb(h);                                                        \
+    }                                                                         \
+  }                                                                           \
+                                                                              \
+  void uv__##name##_close(uv_##name##_t* handle) {                            \
+    uv_##name##_stop(handle);                                                 \
+  }
+
+UV_LOOP_WATCHER_DEFINE(prepare, PREPARE)
+UV_LOOP_WATCHER_DEFINE(check, CHECK)
+UV_LOOP_WATCHER_DEFINE(idle, IDLE)
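UV_LOOP_WATCHER_DEFINE stamps out identical init/start/stop/run/close functions for the prepare, check, and idle handle types, so uv_idle_init, uv_idle_start, and friends all share this one implementation. A minimal sketch of the resulting public API in use (the callback and counter are illustrative):

    #include <stdio.h>
    #include <uv.h>

    static int counter = 0;

    /* Runs once per loop iteration until we have done enough "work". */
    static void on_idle(uv_idle_t* handle) {
      if (++counter >= 3) {
        printf("done after %d idle callbacks\n", counter);
        uv_idle_stop(handle);          /* no active handles left: loop exits */
      }
    }

    int main(void) {
      uv_idle_t idler;
      uv_idle_init(uv_default_loop(), &idler);
      uv_idle_start(&idler, on_idle);
      uv_run(uv_default_loop(), UV_RUN_DEFAULT);
      return uv_loop_close(uv_default_loop());
    }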

+ 159 - 0
Utilities/cmlibuv/src/unix/loop.c

@@ -0,0 +1,159 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "tree.h"
+#include "internal.h"
+#include "heap-inl.h"
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+int uv_loop_init(uv_loop_t* loop) {
+  void* saved_data;
+  int err;
+
+  uv__signal_global_once_init();
+
+  saved_data = loop->data;
+  memset(loop, 0, sizeof(*loop));
+  loop->data = saved_data;
+
+  heap_init((struct heap*) &loop->timer_heap);
+  QUEUE_INIT(&loop->wq);
+  QUEUE_INIT(&loop->active_reqs);
+  QUEUE_INIT(&loop->idle_handles);
+  QUEUE_INIT(&loop->async_handles);
+  QUEUE_INIT(&loop->check_handles);
+  QUEUE_INIT(&loop->prepare_handles);
+  QUEUE_INIT(&loop->handle_queue);
+
+  loop->nfds = 0;
+  loop->watchers = NULL;
+  loop->nwatchers = 0;
+  QUEUE_INIT(&loop->pending_queue);
+  QUEUE_INIT(&loop->watcher_queue);
+
+  loop->closing_handles = NULL;
+  uv__update_time(loop);
+  uv__async_init(&loop->async_watcher);
+  loop->signal_pipefd[0] = -1;
+  loop->signal_pipefd[1] = -1;
+  loop->backend_fd = -1;
+  loop->emfile_fd = -1;
+
+  loop->timer_counter = 0;
+  loop->stop_flag = 0;
+
+  err = uv__platform_loop_init(loop);
+  if (err)
+    return err;
+
+  err = uv_signal_init(loop, &loop->child_watcher);
+  if (err)
+    goto fail_signal_init;
+
+  uv__handle_unref(&loop->child_watcher);
+  loop->child_watcher.flags |= UV__HANDLE_INTERNAL;
+  QUEUE_INIT(&loop->process_handles);
+
+  err = uv_rwlock_init(&loop->cloexec_lock);
+  if (err)
+    goto fail_rwlock_init;
+
+  err = uv_mutex_init(&loop->wq_mutex);
+  if (err)
+    goto fail_mutex_init;
+
+  err = uv_async_init(loop, &loop->wq_async, uv__work_done);
+  if (err)
+    goto fail_async_init;
+
+  uv__handle_unref(&loop->wq_async);
+  loop->wq_async.flags |= UV__HANDLE_INTERNAL;
+
+  return 0;
+
+fail_async_init:
+  uv_mutex_destroy(&loop->wq_mutex);
+
+fail_mutex_init:
+  uv_rwlock_destroy(&loop->cloexec_lock);
+
+fail_rwlock_init:
+  uv__signal_loop_cleanup(loop);
+
+fail_signal_init:
+  uv__platform_loop_delete(loop);
+
+  return err;
+}
+
+
+void uv__loop_close(uv_loop_t* loop) {
+  uv__signal_loop_cleanup(loop);
+  uv__platform_loop_delete(loop);
+  uv__async_stop(loop, &loop->async_watcher);
+
+  if (loop->emfile_fd != -1) {
+    uv__close(loop->emfile_fd);
+    loop->emfile_fd = -1;
+  }
+
+  if (loop->backend_fd != -1) {
+    uv__close(loop->backend_fd);
+    loop->backend_fd = -1;
+  }
+
+  uv_mutex_lock(&loop->wq_mutex);
+  assert(QUEUE_EMPTY(&loop->wq) && "thread pool work queue not empty!");
+  assert(!uv__has_active_reqs(loop));
+  uv_mutex_unlock(&loop->wq_mutex);
+  uv_mutex_destroy(&loop->wq_mutex);
+
+  /*
+   * All thread pool work has finished at this point, so it is safe
+   * to destroy the rwlock now.
+   */
+  uv_rwlock_destroy(&loop->cloexec_lock);
+
+#if 0
+  assert(QUEUE_EMPTY(&loop->pending_queue));
+  assert(QUEUE_EMPTY(&loop->watcher_queue));
+  assert(loop->nfds == 0);
+#endif
+
+  uv__free(loop->watchers);
+  loop->watchers = NULL;
+  loop->nwatchers = 0;
+}
+
+
+int uv__loop_configure(uv_loop_t* loop, uv_loop_option option, va_list ap) {
+  if (option != UV_LOOP_BLOCK_SIGNAL)
+    return UV_ENOSYS;
+
+  if (va_arg(ap, int) != SIGPROF)
+    return UV_EINVAL;
+
+  loop->flags |= UV_LOOP_BLOCK_SIGPROF;
+  return 0;
+}
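uv_loop_init builds a loop from scratch (timer heap, handle queues, signal watcher, thread-pool plumbing) and unwinds in reverse order on failure, while uv__loop_configure only accepts UV_LOOP_BLOCK_SIGNAL with SIGPROF. A minimal sketch of a caller-owned loop driven through this API (run_loop is an illustrative name):

    #include <signal.h>
    #include <uv.h>

    int run_loop(void) {
      uv_loop_t loop;
      int err;

      err = uv_loop_init(&loop);
      if (err)
        return err;

      /* Optional; only UV_LOOP_BLOCK_SIGNAL with SIGPROF is supported here. */
      uv_loop_configure(&loop, UV_LOOP_BLOCK_SIGNAL, SIGPROF);

      uv_run(&loop, UV_RUN_DEFAULT);   /* returns once no handles remain active */
      return uv_loop_close(&loop);     /* fails with UV_EBUSY if handles remain */
    }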

+ 380 - 0
Utilities/cmlibuv/src/unix/netbsd.c

@@ -0,0 +1,380 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <assert.h>
+#include <string.h>
+#include <errno.h>
+
+#include <kvm.h>
+#include <paths.h>
+#include <ifaddrs.h>
+#include <unistd.h>
+#include <time.h>
+#include <stdlib.h>
+#include <fcntl.h>
+
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <sys/resource.h>
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#include <uvm/uvm_extern.h>
+
+#include <unistd.h>
+#include <time.h>
+
+#undef NANOSEC
+#define NANOSEC ((uint64_t) 1e9)
+
+static char *process_title;
+
+
+int uv__platform_loop_init(uv_loop_t* loop) {
+  return uv__kqueue_init(loop);
+}
+
+
+void uv__platform_loop_delete(uv_loop_t* loop) {
+}
+
+
+uint64_t uv__hrtime(uv_clocktype_t type) {
+  struct timespec ts;
+  clock_gettime(CLOCK_MONOTONIC, &ts);
+  return (((uint64_t) ts.tv_sec) * NANOSEC + ts.tv_nsec);
+}
+
+
+void uv_loadavg(double avg[3]) {
+  struct loadavg info;
+  size_t size = sizeof(info);
+  int which[] = {CTL_VM, VM_LOADAVG};
+
+  if (sysctl(which, 2, &info, &size, NULL, 0) == -1) return;
+
+  avg[0] = (double) info.ldavg[0] / info.fscale;
+  avg[1] = (double) info.ldavg[1] / info.fscale;
+  avg[2] = (double) info.ldavg[2] / info.fscale;
+}
+
+
+int uv_exepath(char* buffer, size_t* size) {
+  int mib[4];
+  size_t cb;
+  pid_t mypid;
+
+  if (buffer == NULL || size == NULL || *size == 0)
+    return -EINVAL;
+
+  mypid = getpid();
+  mib[0] = CTL_KERN;
+  mib[1] = KERN_PROC_ARGS;
+  mib[2] = mypid;
+  mib[3] = KERN_PROC_ARGV;
+
+  cb = *size;
+  if (sysctl(mib, 4, buffer, &cb, NULL, 0))
+    return -errno;
+  *size = strlen(buffer);
+
+  return 0;
+}
+
+
+uint64_t uv_get_free_memory(void) {
+  struct uvmexp info;
+  size_t size = sizeof(info);
+  int which[] = {CTL_VM, VM_UVMEXP};
+
+  if (sysctl(which, 2, &info, &size, NULL, 0))
+    return -errno;
+
+  return (uint64_t) info.free * sysconf(_SC_PAGESIZE);
+}
+
+
+uint64_t uv_get_total_memory(void) {
+#if defined(HW_PHYSMEM64)
+  uint64_t info;
+  int which[] = {CTL_HW, HW_PHYSMEM64};
+#else
+  unsigned int info;
+  int which[] = {CTL_HW, HW_PHYSMEM};
+#endif
+  size_t size = sizeof(info);
+
+  if (sysctl(which, 2, &info, &size, NULL, 0))
+    return -errno;
+
+  return (uint64_t) info;
+}
+
+
+char** uv_setup_args(int argc, char** argv) {
+  process_title = argc ? uv__strdup(argv[0]) : NULL;
+  return argv;
+}
+
+
+int uv_set_process_title(const char* title) {
+  if (process_title) uv__free(process_title);
+
+  process_title = uv__strdup(title);
+  setproctitle("%s", title);
+
+  return 0;
+}
+
+
+int uv_get_process_title(char* buffer, size_t size) {
+  size_t len;
+
+  if (buffer == NULL || size == 0)
+    return -EINVAL;
+
+  if (process_title) {
+    len = strlen(process_title) + 1;
+
+    if (size < len)
+      return -ENOBUFS;
+
+    /* The copy includes the terminating '\0'. */
+    memcpy(buffer, process_title, len);
+  } else {
+    /* No title recorded yet; return an empty string. */
+    buffer[0] = '\0';
+  }
+
+  return 0;
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+  kvm_t *kd = NULL;
+  struct kinfo_proc2 *kinfo = NULL;
+  pid_t pid;
+  int nprocs;
+  int max_size = sizeof(struct kinfo_proc2);
+  int page_size;
+
+  page_size = getpagesize();
+  pid = getpid();
+
+  kd = kvm_open(NULL, NULL, NULL, KVM_NO_FILES, "kvm_open");
+
+  if (kd == NULL) goto error;
+
+  kinfo = kvm_getproc2(kd, KERN_PROC_PID, pid, max_size, &nprocs);
+  if (kinfo == NULL) goto error;
+
+  *rss = kinfo->p_vm_rssize * page_size;
+
+  kvm_close(kd);
+
+  return 0;
+
+error:
+  if (kd) kvm_close(kd);
+  return -EPERM;
+}
+
+
+int uv_uptime(double* uptime) {
+  time_t now;
+  struct timeval info;
+  size_t size = sizeof(info);
+  static int which[] = {CTL_KERN, KERN_BOOTTIME};
+
+  if (sysctl(which, 2, &info, &size, NULL, 0))
+    return -errno;
+
+  now = time(NULL);
+
+  *uptime = (double)(now - info.tv_sec);
+  return 0;
+}
+
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+  unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK);
+  unsigned int multiplier = ((uint64_t)1000L / ticks);
+  unsigned int cur = 0;
+  uv_cpu_info_t* cpu_info;
+  u_int64_t* cp_times;
+  char model[512];
+  u_int64_t cpuspeed;
+  int numcpus;
+  size_t size;
+  int i;
+
+  size = sizeof(model);
+  if (sysctlbyname("machdep.cpu_brand", &model, &size, NULL, 0) &&
+      sysctlbyname("hw.model", &model, &size, NULL, 0)) {
+    return -errno;
+  }
+
+  size = sizeof(numcpus);
+  if (sysctlbyname("hw.ncpu", &numcpus, &size, NULL, 0))
+    return -errno;
+  *count = numcpus;
+
+  /* Only i386 and amd64 have machdep.tsc_freq */
+  size = sizeof(cpuspeed);
+  if (sysctlbyname("machdep.tsc_freq", &cpuspeed, &size, NULL, 0))
+    cpuspeed = 0;
+
+  size = numcpus * CPUSTATES * sizeof(*cp_times);
+  cp_times = uv__malloc(size);
+  if (cp_times == NULL)
+    return -ENOMEM;
+
+  if (sysctlbyname("kern.cp_time", cp_times, &size, NULL, 0))
+    return -errno;
+
+  *cpu_infos = uv__malloc(numcpus * sizeof(**cpu_infos));
+  if (!(*cpu_infos)) {
+    uv__free(cp_times);
+    uv__free(*cpu_infos);
+    return -ENOMEM;
+  }
+
+  for (i = 0; i < numcpus; i++) {
+    cpu_info = &(*cpu_infos)[i];
+    cpu_info->cpu_times.user = (uint64_t)(cp_times[CP_USER+cur]) * multiplier;
+    cpu_info->cpu_times.nice = (uint64_t)(cp_times[CP_NICE+cur]) * multiplier;
+    cpu_info->cpu_times.sys = (uint64_t)(cp_times[CP_SYS+cur]) * multiplier;
+    cpu_info->cpu_times.idle = (uint64_t)(cp_times[CP_IDLE+cur]) * multiplier;
+    cpu_info->cpu_times.irq = (uint64_t)(cp_times[CP_INTR+cur]) * multiplier;
+    cpu_info->model = uv__strdup(model);
+    cpu_info->speed = (int)(cpuspeed/(uint64_t) 1e6);
+    cur += CPUSTATES;
+  }
+  uv__free(cp_times);
+  return 0;
+}
+
+
+void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
+  int i;
+
+  for (i = 0; i < count; i++) {
+    uv__free(cpu_infos[i].model);
+  }
+
+  uv__free(cpu_infos);
+}
+
+
+int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
+  struct ifaddrs *addrs, *ent;
+  uv_interface_address_t* address;
+  int i;
+  struct sockaddr_dl *sa_addr;
+
+  if (getifaddrs(&addrs))
+    return -errno;
+
+  *count = 0;
+
+  /* Count the number of interfaces */
+  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+    if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) ||
+        (ent->ifa_addr == NULL) ||
+        (ent->ifa_addr->sa_family != PF_INET)) {
+      continue;
+    }
+    (*count)++;
+  }
+
+  *addresses = uv__malloc(*count * sizeof(**addresses));
+
+  if (!(*addresses)) {
+    freeifaddrs(addrs);
+    return -ENOMEM;
+  }
+
+  address = *addresses;
+
+  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+    if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
+      continue;
+
+    if (ent->ifa_addr == NULL)
+      continue;
+
+    if (ent->ifa_addr->sa_family != PF_INET)
+      continue;
+
+    address->name = uv__strdup(ent->ifa_name);
+
+    if (ent->ifa_addr->sa_family == AF_INET6) {
+      address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
+    } else {
+      address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
+    }
+
+    if (ent->ifa_netmask->sa_family == AF_INET6) {
+      address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
+    } else {
+      address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
+    }
+
+    address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK);
+
+    address++;
+  }
+
+  /* Fill in physical addresses for each interface */
+  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+    if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) ||
+        (ent->ifa_addr == NULL) ||
+        (ent->ifa_addr->sa_family != AF_LINK)) {
+      continue;
+    }
+
+    address = *addresses;
+
+    for (i = 0; i < (*count); i++) {
+      if (strcmp(address->name, ent->ifa_name) == 0) {
+        sa_addr = (struct sockaddr_dl*)(ent->ifa_addr);
+        memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr));
+      }
+      address++;
+    }
+  }
+
+  freeifaddrs(addrs);
+
+  return 0;
+}
+
+
+void uv_free_interface_addresses(uv_interface_address_t* addresses, int count) {
+  int i;
+
+  for (i = 0; i < count; i++) {
+    uv__free(addresses[i].name);
+  }
+
+  uv__free(addresses);
+}
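The uv_interface_addresses implementation above walks getifaddrs(3) twice: once to collect IPv4 addresses and netmasks, then again to copy each interface's link-layer (AF_LINK) address into phys_addr by matching names. From the caller's side the result is consumed through the portable API; a minimal sketch (list_interfaces is an illustrative name):

    #include <stdio.h>
    #include <sys/socket.h>
    #include <uv.h>

    /* Print the IPv4 address of every external, running interface. */
    static void list_interfaces(void) {
      uv_interface_address_t* info;
      int count, i;
      char buf[64];

      if (uv_interface_addresses(&info, &count))
        return;

      for (i = 0; i < count; i++) {
        if (info[i].is_internal)
          continue;
        if (info[i].address.address4.sin_family == AF_INET) {
          uv_ip4_name(&info[i].address.address4, buf, sizeof(buf));
          printf("%s: %s\n", info[i].name, buf);
        }
      }

      uv_free_interface_addresses(info, count);
    }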

+ 396 - 0
Utilities/cmlibuv/src/unix/openbsd.c

@@ -0,0 +1,396 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/resource.h>
+#include <sys/sched.h>
+#include <sys/time.h>
+#include <sys/sysctl.h>
+
+#include <ifaddrs.h>
+#include <net/if.h>
+#include <net/if_dl.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <kvm.h>
+#include <paths.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#undef NANOSEC
+#define NANOSEC ((uint64_t) 1e9)
+
+
+static char *process_title;
+
+
+int uv__platform_loop_init(uv_loop_t* loop) {
+  return uv__kqueue_init(loop);
+}
+
+
+void uv__platform_loop_delete(uv_loop_t* loop) {
+}
+
+
+uint64_t uv__hrtime(uv_clocktype_t type) {
+  struct timespec ts;
+  clock_gettime(CLOCK_MONOTONIC, &ts);
+  return (((uint64_t) ts.tv_sec) * NANOSEC + ts.tv_nsec);
+}
+
+
+void uv_loadavg(double avg[3]) {
+  struct loadavg info;
+  size_t size = sizeof(info);
+  int which[] = {CTL_VM, VM_LOADAVG};
+
+  if (sysctl(which, 2, &info, &size, NULL, 0) < 0) return;
+
+  avg[0] = (double) info.ldavg[0] / info.fscale;
+  avg[1] = (double) info.ldavg[1] / info.fscale;
+  avg[2] = (double) info.ldavg[2] / info.fscale;
+}
+
+
+int uv_exepath(char* buffer, size_t* size) {
+  int mib[4];
+  char **argsbuf = NULL;
+  char **argsbuf_tmp;
+  size_t argsbuf_size = 100U;
+  size_t exepath_size;
+  pid_t mypid;
+  int err;
+
+  if (buffer == NULL || size == NULL || *size == 0)
+    return -EINVAL;
+
+  mypid = getpid();
+  for (;;) {
+    err = -ENOMEM;
+    argsbuf_tmp = uv__realloc(argsbuf, argsbuf_size);
+    if (argsbuf_tmp == NULL)
+      goto out;
+    argsbuf = argsbuf_tmp;
+    mib[0] = CTL_KERN;
+    mib[1] = KERN_PROC_ARGS;
+    mib[2] = mypid;
+    mib[3] = KERN_PROC_ARGV;
+    if (sysctl(mib, 4, argsbuf, &argsbuf_size, NULL, 0) == 0) {
+      break;
+    }
+    if (errno != ENOMEM) {
+      err = -errno;
+      goto out;
+    }
+    argsbuf_size *= 2U;
+  }
+
+  if (argsbuf[0] == NULL) {
+    err = -EINVAL;  /* FIXME(bnoordhuis) More appropriate error. */
+    goto out;
+  }
+
+  *size -= 1;
+  exepath_size = strlen(argsbuf[0]);
+  if (*size > exepath_size)
+    *size = exepath_size;
+
+  memcpy(buffer, argsbuf[0], *size);
+  buffer[*size] = '\0';
+  err = 0;
+
+out:
+  uv__free(argsbuf);
+
+  return err;
+}
+
+
+uint64_t uv_get_free_memory(void) {
+  struct uvmexp info;
+  size_t size = sizeof(info);
+  int which[] = {CTL_VM, VM_UVMEXP};
+
+  if (sysctl(which, 2, &info, &size, NULL, 0))
+    return -errno;
+
+  return (uint64_t) info.free * sysconf(_SC_PAGESIZE);
+}
+
+
+uint64_t uv_get_total_memory(void) {
+  uint64_t info;
+  int which[] = {CTL_HW, HW_PHYSMEM64};
+  size_t size = sizeof(info);
+
+  if (sysctl(which, 2, &info, &size, NULL, 0))
+    return -errno;
+
+  return (uint64_t) info;
+}
+
+
+char** uv_setup_args(int argc, char** argv) {
+  process_title = argc ? uv__strdup(argv[0]) : NULL;
+  return argv;
+}
+
+
+int uv_set_process_title(const char* title) {
+  uv__free(process_title);
+  process_title = uv__strdup(title);
+  setproctitle("%s", title);
+  return 0;
+}
+
+
+int uv_get_process_title(char* buffer, size_t size) {
+  size_t len;
+
+  if (buffer == NULL || size == 0)
+    return -EINVAL;
+
+  if (process_title) {
+    len = strlen(process_title) + 1;
+
+    if (size < len)
+      return -ENOBUFS;
+
+    /* The copy includes the terminating '\0'. */
+    memcpy(buffer, process_title, len);
+  } else {
+    /* No title recorded yet; return an empty string. */
+    buffer[0] = '\0';
+  }
+
+  return 0;
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+  struct kinfo_proc kinfo;
+  size_t page_size = getpagesize();
+  size_t size = sizeof(struct kinfo_proc);
+  int mib[6];
+
+  mib[0] = CTL_KERN;
+  mib[1] = KERN_PROC;
+  mib[2] = KERN_PROC_PID;
+  mib[3] = getpid();
+  mib[4] = sizeof(struct kinfo_proc);
+  mib[5] = 1;
+
+  if (sysctl(mib, 6, &kinfo, &size, NULL, 0) < 0)
+    return -errno;
+
+  *rss = kinfo.p_vm_rssize * page_size;
+  return 0;
+}
+
+
+int uv_uptime(double* uptime) {
+  time_t now;
+  struct timeval info;
+  size_t size = sizeof(info);
+  static int which[] = {CTL_KERN, KERN_BOOTTIME};
+
+  if (sysctl(which, 2, &info, &size, NULL, 0))
+    return -errno;
+
+  now = time(NULL);
+
+  *uptime = (double)(now - info.tv_sec);
+  return 0;
+}
+
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+  unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK),
+               multiplier = ((uint64_t)1000L / ticks), cpuspeed;
+  uint64_t info[CPUSTATES];
+  char model[512];
+  int numcpus = 1;
+  int which[] = {CTL_HW,HW_MODEL,0};
+  size_t size;
+  int i;
+  uv_cpu_info_t* cpu_info;
+
+  size = sizeof(model);
+  if (sysctl(which, 2, &model, &size, NULL, 0))
+    return -errno;
+
+  which[1] = HW_NCPU;
+  size = sizeof(numcpus);
+  if (sysctl(which, 2, &numcpus, &size, NULL, 0))
+    return -errno;
+
+  *cpu_infos = uv__malloc(numcpus * sizeof(**cpu_infos));
+  if (!(*cpu_infos))
+    return -ENOMEM;
+
+  *count = numcpus;
+
+  which[1] = HW_CPUSPEED;
+  size = sizeof(cpuspeed);
+  if (sysctl(which, 2, &cpuspeed, &size, NULL, 0)) {
+    uv__free(*cpu_infos);
+    return -errno;
+  }
+
+  size = sizeof(info);
+  which[0] = CTL_KERN;
+  which[1] = KERN_CPTIME2;
+  for (i = 0; i < numcpus; i++) {
+    which[2] = i;
+    size = sizeof(info);
+    if (sysctl(which, 3, &info, &size, NULL, 0)) {
+      uv__free(*cpu_infos);
+      return -errno;
+    }
+
+    cpu_info = &(*cpu_infos)[i];
+
+    cpu_info->cpu_times.user = (uint64_t)(info[CP_USER]) * multiplier;
+    cpu_info->cpu_times.nice = (uint64_t)(info[CP_NICE]) * multiplier;
+    cpu_info->cpu_times.sys = (uint64_t)(info[CP_SYS]) * multiplier;
+    cpu_info->cpu_times.idle = (uint64_t)(info[CP_IDLE]) * multiplier;
+    cpu_info->cpu_times.irq = (uint64_t)(info[CP_INTR]) * multiplier;
+
+    cpu_info->model = uv__strdup(model);
+    cpu_info->speed = cpuspeed;
+  }
+
+  return 0;
+}
+
+
+void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
+  int i;
+
+  for (i = 0; i < count; i++) {
+    uv__free(cpu_infos[i].model);
+  }
+
+  uv__free(cpu_infos);
+}
+
+
+int uv_interface_addresses(uv_interface_address_t** addresses,
+  int* count) {
+  struct ifaddrs *addrs, *ent;
+  uv_interface_address_t* address;
+  int i;
+  struct sockaddr_dl *sa_addr;
+
+  if (getifaddrs(&addrs) != 0)
+    return -errno;
+
+  *count = 0;
+
+  /* Count the number of interfaces */
+  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+    if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) ||
+        (ent->ifa_addr == NULL) ||
+        (ent->ifa_addr->sa_family != PF_INET)) {
+      continue;
+    }
+    (*count)++;
+  }
+
+  *addresses = uv__malloc(*count * sizeof(**addresses));
+
+  if (!(*addresses)) {
+    freeifaddrs(addrs);
+    return -ENOMEM;
+  }
+
+  address = *addresses;
+
+  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+    if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
+      continue;
+
+    if (ent->ifa_addr == NULL)
+      continue;
+
+    if (ent->ifa_addr->sa_family != PF_INET)
+      continue;
+
+    address->name = uv__strdup(ent->ifa_name);
+
+    if (ent->ifa_addr->sa_family == AF_INET6) {
+      address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
+    } else {
+      address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
+    }
+
+    if (ent->ifa_netmask->sa_family == AF_INET6) {
+      address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
+    } else {
+      address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
+    }
+
+    address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK);
+
+    address++;
+  }
+
+  /* Fill in physical addresses for each interface */
+  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+    if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) ||
+        (ent->ifa_addr == NULL) ||
+        (ent->ifa_addr->sa_family != AF_LINK)) {
+      continue;
+    }
+
+    address = *addresses;
+
+    for (i = 0; i < (*count); i++) {
+      if (strcmp(address->name, ent->ifa_name) == 0) {
+        sa_addr = (struct sockaddr_dl*)(ent->ifa_addr);
+        memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr));
+      }
+      address++;
+    }
+  }
+
+  freeifaddrs(addrs);
+
+  return 0;
+}
+
+
+void uv_free_interface_addresses(uv_interface_address_t* addresses,
+  int count) {
+  int i;
+
+  for (i = 0; i < count; i++) {
+    uv__free(addresses[i].name);
+  }
+
+  uv__free(addresses);
+}
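uv_cpu_info on OpenBSD reads the per-CPU KERN_CPTIME2 counters and scales them by 1000 / sysconf(_SC_CLK_TCK), so the cpu_times fields come back in milliseconds, with speed in MHz from HW_CPUSPEED. A minimal sketch of consuming that data through the portable API (print_cpus is an illustrative name):

    #include <stdio.h>
    #include <uv.h>

    /* Report each CPU's model, clock speed, and time spent in user mode. */
    static void print_cpus(void) {
      uv_cpu_info_t* cpus;
      int count, i;

      if (uv_cpu_info(&cpus, &count))
        return;

      for (i = 0; i < count; i++)
        printf("cpu%d: %s @ %d MHz, user time %llu ms\n",
               i, cpus[i].model, cpus[i].speed,
               (unsigned long long) cpus[i].cpu_times.user);

      uv_free_cpu_info(cpus, count);
    }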

+ 42 - 0
Utilities/cmlibuv/src/unix/os390.c

@@ -0,0 +1,42 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "internal.h"
+
+int uv__io_check_fd(uv_loop_t* loop, int fd) {
+  struct pollfd p[1];
+  int rv;
+
+  p[0].fd = fd;
+  p[0].events = POLLIN;
+
+  do
+    rv = poll(p, 1, 0);
+  while (rv == -1 && errno == EINTR);
+
+  if (rv == -1)
+    abort();
+
+  if (p[0].revents & POLLNVAL)
+    return -1;
+
+  return 0;
+}

+ 298 - 0
Utilities/cmlibuv/src/unix/pipe.c

@@ -0,0 +1,298 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <assert.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/un.h>
+#include <unistd.h>
+#include <stdlib.h>
+
+
+int uv_pipe_init(uv_loop_t* loop, uv_pipe_t* handle, int ipc) {
+  uv__stream_init(loop, (uv_stream_t*)handle, UV_NAMED_PIPE);
+  handle->shutdown_req = NULL;
+  handle->connect_req = NULL;
+  handle->pipe_fname = NULL;
+  handle->ipc = ipc;
+  return 0;
+}
+
+
+int uv_pipe_bind(uv_pipe_t* handle, const char* name) {
+  struct sockaddr_un saddr;
+  const char* pipe_fname;
+  int sockfd;
+  int err;
+
+  pipe_fname = NULL;
+  sockfd = -1;
+
+  /* Already bound? */
+  if (uv__stream_fd(handle) >= 0)
+    return -EINVAL;
+
+  /* Make a copy of the file name, it outlives this function's scope. */
+  pipe_fname = uv__strdup(name);
+  if (pipe_fname == NULL)
+    return -ENOMEM;
+
+  /* We've got a copy, don't touch the original any more. */
+  name = NULL;
+
+  err = uv__socket(AF_UNIX, SOCK_STREAM, 0);
+  if (err < 0)
+    goto err_socket;
+  sockfd = err;
+
+  memset(&saddr, 0, sizeof saddr);
+  strncpy(saddr.sun_path, pipe_fname, sizeof(saddr.sun_path) - 1);
+  saddr.sun_path[sizeof(saddr.sun_path) - 1] = '\0';
+  saddr.sun_family = AF_UNIX;
+
+  if (bind(sockfd, (struct sockaddr*)&saddr, sizeof saddr)) {
+    err = -errno;
+    /* Convert ENOENT to EACCES for compatibility with Windows. */
+    if (err == -ENOENT)
+      err = -EACCES;
+    goto err_bind;
+  }
+
+  /* Success. */
+  handle->flags |= UV_HANDLE_BOUND;
+  handle->pipe_fname = pipe_fname; /* Is a strdup'ed copy. */
+  handle->io_watcher.fd = sockfd;
+  return 0;
+
+err_bind:
+  uv__close(sockfd);
+
+err_socket:
+  uv__free((void*)pipe_fname);
+  return err;
+}
+
+
+int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb) {
+  if (uv__stream_fd(handle) == -1)
+    return -EINVAL;
+
+#if defined(__MVS__)
+  /* On zOS, backlog=0 has undefined behaviour */
+  if (backlog == 0)
+    backlog = 1;
+  else if (backlog < 0)
+    backlog = SOMAXCONN;
+#endif
+
+  if (listen(uv__stream_fd(handle), backlog))
+    return -errno;
+
+  handle->connection_cb = cb;
+  handle->io_watcher.cb = uv__server_io;
+  uv__io_start(handle->loop, &handle->io_watcher, POLLIN);
+  return 0;
+}
+
+
+void uv__pipe_close(uv_pipe_t* handle) {
+  if (handle->pipe_fname) {
+    /*
+     * Unlink the file system entity before closing the file descriptor.
+     * Doing it the other way around introduces a race where our process
+     * unlinks a socket with the same name that's just been created by
+     * another thread or process.
+     */
+    unlink(handle->pipe_fname);
+    uv__free((void*)handle->pipe_fname);
+    handle->pipe_fname = NULL;
+  }
+
+  uv__stream_close((uv_stream_t*)handle);
+}
+
+
+int uv_pipe_open(uv_pipe_t* handle, uv_file fd) {
+  int err;
+
+  err = uv__nonblock(fd, 1);
+  if (err)
+    return err;
+
+#if defined(__APPLE__)
+  err = uv__stream_try_select((uv_stream_t*) handle, &fd);
+  if (err)
+    return err;
+#endif /* defined(__APPLE__) */
+
+  return uv__stream_open((uv_stream_t*)handle,
+                         fd,
+                         UV_STREAM_READABLE | UV_STREAM_WRITABLE);
+}
+
+
+void uv_pipe_connect(uv_connect_t* req,
+                    uv_pipe_t* handle,
+                    const char* name,
+                    uv_connect_cb cb) {
+  struct sockaddr_un saddr;
+  int new_sock;
+  int err;
+  int r;
+
+  new_sock = (uv__stream_fd(handle) == -1);
+
+  if (new_sock) {
+    err = uv__socket(AF_UNIX, SOCK_STREAM, 0);
+    if (err < 0)
+      goto out;
+    handle->io_watcher.fd = err;
+  }
+
+  memset(&saddr, 0, sizeof saddr);
+  strncpy(saddr.sun_path, name, sizeof(saddr.sun_path) - 1);
+  saddr.sun_path[sizeof(saddr.sun_path) - 1] = '\0';
+  saddr.sun_family = AF_UNIX;
+
+  do {
+    r = connect(uv__stream_fd(handle),
+                (struct sockaddr*)&saddr, sizeof saddr);
+  }
+  while (r == -1 && errno == EINTR);
+
+  if (r == -1 && errno != EINPROGRESS) {
+    err = -errno;
+    goto out;
+  }
+
+  err = 0;
+  if (new_sock) {
+    err = uv__stream_open((uv_stream_t*)handle,
+                          uv__stream_fd(handle),
+                          UV_STREAM_READABLE | UV_STREAM_WRITABLE);
+  }
+
+  if (err == 0)
+    uv__io_start(handle->loop, &handle->io_watcher, POLLIN | POLLOUT);
+
+out:
+  handle->delayed_error = err;
+  handle->connect_req = req;
+
+  uv__req_init(handle->loop, req, UV_CONNECT);
+  req->handle = (uv_stream_t*)handle;
+  req->cb = cb;
+  QUEUE_INIT(&req->queue);
+
+  /* Force callback to run on next tick in case of error. */
+  if (err)
+    uv__io_feed(handle->loop, &handle->io_watcher);
+
+}
+
+
+typedef int (*uv__peersockfunc)(int, struct sockaddr*, socklen_t*);
+
+
+static int uv__pipe_getsockpeername(const uv_pipe_t* handle,
+                                    uv__peersockfunc func,
+                                    char* buffer,
+                                    size_t* size) {
+  struct sockaddr_un sa;
+  socklen_t addrlen;
+  int err;
+
+  addrlen = sizeof(sa);
+  memset(&sa, 0, addrlen);
+  err = func(uv__stream_fd(handle), (struct sockaddr*) &sa, &addrlen);
+  if (err < 0) {
+    *size = 0;
+    return -errno;
+  }
+
+#if defined(__linux__)
+  if (sa.sun_path[0] == 0)
+    /* Linux abstract namespace */
+    addrlen -= offsetof(struct sockaddr_un, sun_path);
+  else
+#endif
+    addrlen = strlen(sa.sun_path);
+
+
+  if (addrlen >= *size) {
+    *size = addrlen + 1;
+    return UV_ENOBUFS;
+  }
+
+  memcpy(buffer, sa.sun_path, addrlen);
+  *size = addrlen;
+
+  /* only null-terminate if it's not an abstract socket */
+  if (buffer[0] != '\0')
+    buffer[addrlen] = '\0';
+
+  return 0;
+}
+
+
+int uv_pipe_getsockname(const uv_pipe_t* handle, char* buffer, size_t* size) {
+  return uv__pipe_getsockpeername(handle, getsockname, buffer, size);
+}
+
+
+int uv_pipe_getpeername(const uv_pipe_t* handle, char* buffer, size_t* size) {
+  return uv__pipe_getsockpeername(handle, getpeername, buffer, size);
+}
+
+
+void uv_pipe_pending_instances(uv_pipe_t* handle, int count) {
+}
+
+
+int uv_pipe_pending_count(uv_pipe_t* handle) {
+  uv__stream_queued_fds_t* queued_fds;
+
+  if (!handle->ipc)
+    return 0;
+
+  if (handle->accepted_fd == -1)
+    return 0;
+
+  if (handle->queued_fds == NULL)
+    return 1;
+
+  queued_fds = handle->queued_fds;
+  return queued_fds->offset + 1;
+}
+
+
+uv_handle_type uv_pipe_pending_type(uv_pipe_t* handle) {
+  if (!handle->ipc)
+    return UV_UNKNOWN_HANDLE;
+
+  if (handle->accepted_fd == -1)
+    return UV_UNKNOWN_HANDLE;
+  else
+    return uv__handle_type(handle->accepted_fd);
+}
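uv_pipe_t wraps an AF_UNIX stream socket: uv_pipe_bind makes a private copy of the path, binds, and records the name so uv__pipe_close can unlink it later; listening and connecting then go through the generic stream machinery. A minimal sketch of a pipe server built on this API (the socket path and callback are illustrative):

    #include <stdio.h>
    #include <uv.h>

    static void on_connection(uv_stream_t* server, int status) {
      if (status < 0)
        return;
      printf("client connected\n");
      /* A real server would uv_accept() into a fresh uv_pipe_t here. */
    }

    int main(void) {
      uv_pipe_t server;
      int err;

      uv_pipe_init(uv_default_loop(), &server, 0 /* no fd passing */);

      err = uv_pipe_bind(&server, "/tmp/example.sock");  /* illustrative path */
      if (err == 0)
        err = uv_listen((uv_stream_t*) &server, 128, on_connection);
      if (err) {
        fprintf(stderr, "pipe server error: %s\n", uv_strerror(err));
        return 1;
      }

      return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
    }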

+ 130 - 0
Utilities/cmlibuv/src/unix/poll.c

@@ -0,0 +1,130 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <unistd.h>
+#include <assert.h>
+#include <errno.h>
+
+
+static void uv__poll_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
+  uv_poll_t* handle;
+  int pevents;
+
+  handle = container_of(w, uv_poll_t, io_watcher);
+
+  if (events & POLLERR) {
+    uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP);
+    uv__handle_stop(handle);
+    handle->poll_cb(handle, -EBADF, 0);
+    return;
+  }
+
+  pevents = 0;
+  if (events & POLLIN)
+    pevents |= UV_READABLE;
+  if (events & POLLOUT)
+    pevents |= UV_WRITABLE;
+  if (events & UV__POLLRDHUP)
+    pevents |= UV_DISCONNECT;
+
+  handle->poll_cb(handle, 0, pevents);
+}
+
+
+int uv_poll_init(uv_loop_t* loop, uv_poll_t* handle, int fd) {
+  int err;
+
+  err = uv__io_check_fd(loop, fd);
+  if (err)
+    return err;
+
+  /* If ioctl(FIONBIO) reports ENOTTY, try fcntl(F_GETFL) + fcntl(F_SETFL).
+   * Workaround for e.g. kqueue fds not supporting ioctls.
+   */
+  err = uv__nonblock(fd, 1);
+  if (err == -ENOTTY)
+    if (uv__nonblock == uv__nonblock_ioctl)
+      err = uv__nonblock_fcntl(fd, 1);
+
+  if (err)
+    return err;
+
+  uv__handle_init(loop, (uv_handle_t*) handle, UV_POLL);
+  uv__io_init(&handle->io_watcher, uv__poll_io, fd);
+  handle->poll_cb = NULL;
+  return 0;
+}
+
+
+int uv_poll_init_socket(uv_loop_t* loop, uv_poll_t* handle,
+    uv_os_sock_t socket) {
+  return uv_poll_init(loop, handle, socket);
+}
+
+
+static void uv__poll_stop(uv_poll_t* handle) {
+  uv__io_stop(handle->loop,
+              &handle->io_watcher,
+              POLLIN | POLLOUT | UV__POLLRDHUP);
+  uv__handle_stop(handle);
+}
+
+
+int uv_poll_stop(uv_poll_t* handle) {
+  assert(!(handle->flags & (UV_CLOSING | UV_CLOSED)));
+  uv__poll_stop(handle);
+  return 0;
+}
+
+
+int uv_poll_start(uv_poll_t* handle, int pevents, uv_poll_cb poll_cb) {
+  int events;
+
+  assert((pevents & ~(UV_READABLE | UV_WRITABLE | UV_DISCONNECT)) == 0);
+  assert(!(handle->flags & (UV_CLOSING | UV_CLOSED)));
+
+  uv__poll_stop(handle);
+
+  if (pevents == 0)
+    return 0;
+
+  events = 0;
+  if (pevents & UV_READABLE)
+    events |= POLLIN;
+  if (pevents & UV_WRITABLE)
+    events |= POLLOUT;
+  if (pevents & UV_DISCONNECT)
+    events |= UV__POLLRDHUP;
+
+  uv__io_start(handle->loop, &handle->io_watcher, events);
+  uv__handle_start(handle);
+  handle->poll_cb = poll_cb;
+
+  return 0;
+}
+
+
+void uv__poll_close(uv_poll_t* handle) {
+  uv__poll_stop(handle);
+}
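
poll.c translates between the public UV_READABLE/UV_WRITABLE/UV_DISCONNECT flags and the POLLIN/POLLOUT/POLLRDHUP events watched by the loop's io watcher. For reference, a minimal sketch of the public uv_poll API it implements (illustrative only, not part of the imported sources; the fd and callback are placeholders):

/* Illustrative sketch -- not part of the imported libuv sources. */
#include <stdio.h>
#include <uv.h>

static void on_poll(uv_poll_t* handle, int status, int events) {
  if (status < 0) {
    fprintf(stderr, "poll error: %s\n", uv_strerror(status));
    uv_poll_stop(handle);
    return;
  }
  if (events & UV_READABLE)
    printf("fd is readable\n");
  if (events & UV_WRITABLE)
    printf("fd is writable\n");
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_poll_t watcher;
  int fd = 0;  /* placeholder: stdin; any pollable fd works */

  uv_poll_init(loop, &watcher, fd);
  uv_poll_start(&watcher, UV_READABLE, on_poll);
  return uv_run(loop, UV_RUN_DEFAULT);
}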

+ 563 - 0
Utilities/cmlibuv/src/unix/process.c

@@ -0,0 +1,563 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <errno.h>
+
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <poll.h>
+
+#if defined(__APPLE__) && !TARGET_OS_IPHONE
+# include <crt_externs.h>
+# define environ (*_NSGetEnviron())
+#else
+extern char **environ;
+#endif
+
+#if defined(__linux__) || defined(__GLIBC__)
+# include <grp.h>
+#endif
+
+
+static void uv__chld(uv_signal_t* handle, int signum) {
+  uv_process_t* process;
+  uv_loop_t* loop;
+  int exit_status;
+  int term_signal;
+  int status;
+  pid_t pid;
+  QUEUE pending;
+  QUEUE* q;
+  QUEUE* h;
+
+  assert(signum == SIGCHLD);
+
+  QUEUE_INIT(&pending);
+  loop = handle->loop;
+
+  h = &loop->process_handles;
+  q = QUEUE_HEAD(h);
+  while (q != h) {
+    process = QUEUE_DATA(q, uv_process_t, queue);
+    q = QUEUE_NEXT(q);
+
+    do
+      pid = waitpid(process->pid, &status, WNOHANG);
+    while (pid == -1 && errno == EINTR);
+
+    if (pid == 0)
+      continue;
+
+    if (pid == -1) {
+      if (errno != ECHILD)
+        abort();
+      continue;
+    }
+
+    process->status = status;
+    QUEUE_REMOVE(&process->queue);
+    QUEUE_INSERT_TAIL(&pending, &process->queue);
+  }
+
+  h = &pending;
+  q = QUEUE_HEAD(h);
+  while (q != h) {
+    process = QUEUE_DATA(q, uv_process_t, queue);
+    q = QUEUE_NEXT(q);
+
+    QUEUE_REMOVE(&process->queue);
+    QUEUE_INIT(&process->queue);
+    uv__handle_stop(process);
+
+    if (process->exit_cb == NULL)
+      continue;
+
+    exit_status = 0;
+    if (WIFEXITED(process->status))
+      exit_status = WEXITSTATUS(process->status);
+
+    term_signal = 0;
+    if (WIFSIGNALED(process->status))
+      term_signal = WTERMSIG(process->status);
+
+    process->exit_cb(process, exit_status, term_signal);
+  }
+  assert(QUEUE_EMPTY(&pending));
+}
+
+
+int uv__make_socketpair(int fds[2], int flags) {
+#if defined(__linux__)
+  static int no_cloexec;
+
+  if (no_cloexec)
+    goto skip;
+
+  if (socketpair(AF_UNIX, SOCK_STREAM | UV__SOCK_CLOEXEC | flags, 0, fds) == 0)
+    return 0;
+
+  /* Retry on EINVAL, it means SOCK_CLOEXEC is not supported.
+   * Anything else is a genuine error.
+   */
+  if (errno != EINVAL)
+    return -errno;
+
+  no_cloexec = 1;
+
+skip:
+#endif
+
+  if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds))
+    return -errno;
+
+  uv__cloexec(fds[0], 1);
+  uv__cloexec(fds[1], 1);
+
+  if (flags & UV__F_NONBLOCK) {
+    uv__nonblock(fds[0], 1);
+    uv__nonblock(fds[1], 1);
+  }
+
+  return 0;
+}
+
+
+int uv__make_pipe(int fds[2], int flags) {
+#if defined(__linux__)
+  static int no_pipe2;
+
+  if (no_pipe2)
+    goto skip;
+
+  if (uv__pipe2(fds, flags | UV__O_CLOEXEC) == 0)
+    return 0;
+
+  if (errno != ENOSYS)
+    return -errno;
+
+  no_pipe2 = 1;
+
+skip:
+#endif
+
+  if (pipe(fds))
+    return -errno;
+
+  uv__cloexec(fds[0], 1);
+  uv__cloexec(fds[1], 1);
+
+  if (flags & UV__F_NONBLOCK) {
+    uv__nonblock(fds[0], 1);
+    uv__nonblock(fds[1], 1);
+  }
+
+  return 0;
+}
+
+
+/*
+ * Used for initializing the stdio streams described by options->stdio.
+ * Returns zero on success. See also the cleanup section in uv_spawn().
+ */
+static int uv__process_init_stdio(uv_stdio_container_t* container, int fds[2]) {
+  int mask;
+  int fd;
+
+  mask = UV_IGNORE | UV_CREATE_PIPE | UV_INHERIT_FD | UV_INHERIT_STREAM;
+
+  switch (container->flags & mask) {
+  case UV_IGNORE:
+    return 0;
+
+  case UV_CREATE_PIPE:
+    assert(container->data.stream != NULL);
+    if (container->data.stream->type != UV_NAMED_PIPE)
+      return -EINVAL;
+    else
+      return uv__make_socketpair(fds, 0);
+
+  case UV_INHERIT_FD:
+  case UV_INHERIT_STREAM:
+    if (container->flags & UV_INHERIT_FD)
+      fd = container->data.fd;
+    else
+      fd = uv__stream_fd(container->data.stream);
+
+    if (fd == -1)
+      return -EINVAL;
+
+    fds[1] = fd;
+    return 0;
+
+  default:
+    assert(0 && "Unexpected flags");
+    return -EINVAL;
+  }
+}
+
+
+static int uv__process_open_stream(uv_stdio_container_t* container,
+                                   int pipefds[2],
+                                   int writable) {
+  int flags;
+  int err;
+
+  if (!(container->flags & UV_CREATE_PIPE) || pipefds[0] < 0)
+    return 0;
+
+  err = uv__close(pipefds[1]);
+  if (err != 0)
+    abort();
+
+  pipefds[1] = -1;
+  uv__nonblock(pipefds[0], 1);
+
+  if (container->data.stream->type == UV_NAMED_PIPE &&
+      ((uv_pipe_t*)container->data.stream)->ipc)
+    flags = UV_STREAM_READABLE | UV_STREAM_WRITABLE;
+  else if (writable)
+    flags = UV_STREAM_WRITABLE;
+  else
+    flags = UV_STREAM_READABLE;
+
+  return uv__stream_open(container->data.stream, pipefds[0], flags);
+}
+
+
+static void uv__process_close_stream(uv_stdio_container_t* container) {
+  if (!(container->flags & UV_CREATE_PIPE)) return;
+  uv__stream_close((uv_stream_t*)container->data.stream);
+}
+
+
+static void uv__write_int(int fd, int val) {
+  ssize_t n;
+
+  do
+    n = write(fd, &val, sizeof(val));
+  while (n == -1 && errno == EINTR);
+
+  if (n == -1 && errno == EPIPE)
+    return; /* parent process has quit */
+
+  assert(n == sizeof(val));
+}
+
+
+#if !(defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH))
+/* execvp is marked __WATCHOS_PROHIBITED __TVOS_PROHIBITED, so must be
+ * avoided. Since this isn't called on those targets, the function
+ * doesn't even need to be defined for them.
+ */
+static void uv__process_child_init(const uv_process_options_t* options,
+                                   int stdio_count,
+                                   int (*pipes)[2],
+                                   int error_fd) {
+  int close_fd;
+  int use_fd;
+  int fd;
+
+  if (options->flags & UV_PROCESS_DETACHED)
+    setsid();
+
+  /* First duplicate low-numbered source fds to a slot above stdio_count,
+   * since it is not safe to use them in place: a later dup2() into a low
+   * target fd could replace them. Example: swapping stdout and stderr;
+   * without this, fd 2 (stderr) would be duplicated into fd 1, making both
+   * stdout and stderr go to the same fd, which was not the intention. */
+  for (fd = 0; fd < stdio_count; fd++) {
+    use_fd = pipes[fd][1];
+    if (use_fd < 0 || use_fd >= fd)
+      continue;
+    pipes[fd][1] = fcntl(use_fd, F_DUPFD, stdio_count);
+    if (pipes[fd][1] == -1) {
+      uv__write_int(error_fd, -errno);
+      _exit(127);
+    }
+  }
+
+  for (fd = 0; fd < stdio_count; fd++) {
+    close_fd = pipes[fd][0];
+    use_fd = pipes[fd][1];
+
+    if (use_fd < 0) {
+      if (fd >= 3)
+        continue;
+      else {
+        /* redirect stdin, stdout and stderr to /dev/null even if UV_IGNORE is
+         * set
+         */
+        use_fd = open("/dev/null", fd == 0 ? O_RDONLY : O_RDWR);
+        close_fd = use_fd;
+
+        if (use_fd == -1) {
+          uv__write_int(error_fd, -errno);
+          _exit(127);
+        }
+      }
+    }
+
+    if (fd == use_fd)
+      uv__cloexec(use_fd, 0);
+    else
+      fd = dup2(use_fd, fd);
+
+    if (fd == -1) {
+      uv__write_int(error_fd, -errno);
+      _exit(127);
+    }
+
+    if (fd <= 2)
+      uv__nonblock(fd, 0);
+
+    if (close_fd >= stdio_count)
+      uv__close(close_fd);
+  }
+
+  for (fd = 0; fd < stdio_count; fd++) {
+    use_fd = pipes[fd][1];
+
+    if (use_fd >= stdio_count)
+      uv__close(use_fd);
+  }
+
+  if (options->cwd != NULL && chdir(options->cwd)) {
+    uv__write_int(error_fd, -errno);
+    _exit(127);
+  }
+
+  if (options->flags & (UV_PROCESS_SETUID | UV_PROCESS_SETGID)) {
+    /* When dropping privileges from root, the `setgroups` call will
+     * remove any extraneous groups. If we don't call this, then
+     * even though our uid has dropped, we may still have groups
+     * that enable us to do super-user things. This will fail if we
+     * aren't root, so don't bother checking the return value, this
+     * is just done as an optimistic privilege dropping function.
+     */
+    SAVE_ERRNO(setgroups(0, NULL));
+  }
+
+  if ((options->flags & UV_PROCESS_SETGID) && setgid(options->gid)) {
+    uv__write_int(error_fd, -errno);
+    _exit(127);
+  }
+
+  if ((options->flags & UV_PROCESS_SETUID) && setuid(options->uid)) {
+    uv__write_int(error_fd, -errno);
+    _exit(127);
+  }
+
+  if (options->env != NULL) {
+    environ = options->env;
+  }
+
+  execvp(options->file, options->args);
+  uv__write_int(error_fd, -errno);
+  _exit(127);
+}
+#endif
+
+
+int uv_spawn(uv_loop_t* loop,
+             uv_process_t* process,
+             const uv_process_options_t* options) {
+#if defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH)
+  /* fork is marked __WATCHOS_PROHIBITED __TVOS_PROHIBITED. */
+  return -ENOSYS;
+#else
+  int signal_pipe[2] = { -1, -1 };
+  int (*pipes)[2];
+  int stdio_count;
+  ssize_t r;
+  pid_t pid;
+  int err;
+  int exec_errorno;
+  int i;
+  int status;
+
+  assert(options->file != NULL);
+  assert(!(options->flags & ~(UV_PROCESS_DETACHED |
+                              UV_PROCESS_SETGID |
+                              UV_PROCESS_SETUID |
+                              UV_PROCESS_WINDOWS_HIDE |
+                              UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS)));
+
+  uv__handle_init(loop, (uv_handle_t*)process, UV_PROCESS);
+  QUEUE_INIT(&process->queue);
+
+  stdio_count = options->stdio_count;
+  if (stdio_count < 3)
+    stdio_count = 3;
+
+  err = -ENOMEM;
+  pipes = uv__malloc(stdio_count * sizeof(*pipes));
+  if (pipes == NULL)
+    goto error;
+
+  for (i = 0; i < stdio_count; i++) {
+    pipes[i][0] = -1;
+    pipes[i][1] = -1;
+  }
+
+  for (i = 0; i < options->stdio_count; i++) {
+    err = uv__process_init_stdio(options->stdio + i, pipes[i]);
+    if (err)
+      goto error;
+  }
+
+  /* This pipe is used by the parent to wait until
+   * the child has called `execve()`. We need this
+   * to avoid the following race condition:
+   *
+   *    if ((pid = fork()) > 0) {
+   *      kill(pid, SIGTERM);
+   *    }
+   *    else if (pid == 0) {
+   *      execve("/bin/cat", argp, envp);
+   *    }
+   *
+   * The parent sends a signal immediately after forking.
+   * Since the child may not have called `execve()` yet,
+   * there is no telling what process receives the signal,
+   * our fork or /bin/cat.
+   *
+   * To avoid ambiguity, we create a pipe with both ends
+   * marked close-on-exec. Then, after the call to `fork()`,
+   * the parent polls the read end until it EOFs or errors with EPIPE.
+   */
+  err = uv__make_pipe(signal_pipe, 0);
+  if (err)
+    goto error;
+
+  uv_signal_start(&loop->child_watcher, uv__chld, SIGCHLD);
+
+  /* Acquire write lock to prevent opening new fds in worker threads */
+  uv_rwlock_wrlock(&loop->cloexec_lock);
+  pid = fork();
+
+  if (pid == -1) {
+    err = -errno;
+    uv_rwlock_wrunlock(&loop->cloexec_lock);
+    uv__close(signal_pipe[0]);
+    uv__close(signal_pipe[1]);
+    goto error;
+  }
+
+  if (pid == 0) {
+    uv__process_child_init(options, stdio_count, pipes, signal_pipe[1]);
+    abort();
+  }
+
+  /* Release lock in parent process */
+  uv_rwlock_wrunlock(&loop->cloexec_lock);
+  uv__close(signal_pipe[1]);
+
+  process->status = 0;
+  exec_errorno = 0;
+  do
+    r = read(signal_pipe[0], &exec_errorno, sizeof(exec_errorno));
+  while (r == -1 && errno == EINTR);
+
+  if (r == 0)
+    ; /* okay, EOF */
+  else if (r == sizeof(exec_errorno)) {
+    do
+      err = waitpid(pid, &status, 0); /* okay, read errorno */
+    while (err == -1 && errno == EINTR);
+    assert(err == pid);
+  } else if (r == -1 && errno == EPIPE) {
+    do
+      err = waitpid(pid, &status, 0); /* okay, got EPIPE */
+    while (err == -1 && errno == EINTR);
+    assert(err == pid);
+  } else
+    abort();
+
+  uv__close_nocheckstdio(signal_pipe[0]);
+
+  for (i = 0; i < options->stdio_count; i++) {
+    err = uv__process_open_stream(options->stdio + i, pipes[i], i == 0);
+    if (err == 0)
+      continue;
+
+    while (i--)
+      uv__process_close_stream(options->stdio + i);
+
+    goto error;
+  }
+
+  /* Only activate this handle if exec() happened successfully */
+  if (exec_errorno == 0) {
+    QUEUE_INSERT_TAIL(&loop->process_handles, &process->queue);
+    uv__handle_start(process);
+  }
+
+  process->pid = pid;
+  process->exit_cb = options->exit_cb;
+
+  uv__free(pipes);
+  return exec_errorno;
+
+error:
+  if (pipes != NULL) {
+    for (i = 0; i < stdio_count; i++) {
+      if (i < options->stdio_count)
+        if (options->stdio[i].flags & (UV_INHERIT_FD | UV_INHERIT_STREAM))
+          continue;
+      if (pipes[i][0] != -1)
+        uv__close_nocheckstdio(pipes[i][0]);
+      if (pipes[i][1] != -1)
+        uv__close_nocheckstdio(pipes[i][1]);
+    }
+    uv__free(pipes);
+  }
+
+  return err;
+#endif
+}
+
+
+int uv_process_kill(uv_process_t* process, int signum) {
+  return uv_kill(process->pid, signum);
+}
+
+
+int uv_kill(int pid, int signum) {
+  if (kill(pid, signum))
+    return -errno;
+  else
+    return 0;
+}
+
+
+void uv__process_close(uv_process_t* handle) {
+  QUEUE_REMOVE(&handle->queue);
+  uv__handle_stop(handle);
+  if (QUEUE_EMPTY(&handle->loop->process_handles))
+    uv_signal_stop(&handle->loop->child_watcher);
+}
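
uv_spawn() above forks, reports exec failures back over a close-on-exec pipe, and reaps children through the loop's SIGCHLD watcher. For reference, a minimal usage sketch (illustrative only, not part of the imported sources; the "ls" command and callback names are placeholders):

/* Illustrative sketch -- not part of the imported libuv sources. */
#include <stdio.h>
#include <string.h>
#include <uv.h>

static void on_exit_cb(uv_process_t* proc, int64_t exit_status, int term_signal) {
  printf("child exited: status=%lld signal=%d\n",
         (long long) exit_status, term_signal);
  uv_close((uv_handle_t*) proc, NULL);
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_process_t child;
  uv_process_options_t options;
  uv_stdio_container_t stdio[3];
  char* args[] = { "ls", "-l", NULL };  /* placeholder command */

  memset(&options, 0, sizeof(options));
  stdio[0].flags = UV_IGNORE;           /* child stdin: /dev/null */
  stdio[1].flags = UV_INHERIT_FD;       /* child stdout: our stdout */
  stdio[1].data.fd = 1;
  stdio[2].flags = UV_INHERIT_FD;       /* child stderr: our stderr */
  stdio[2].data.fd = 2;

  options.file = "ls";
  options.args = args;
  options.exit_cb = on_exit_cb;
  options.stdio = stdio;
  options.stdio_count = 3;

  int err = uv_spawn(loop, &child, &options);
  if (err != 0) {
    fprintf(stderr, "uv_spawn: %s\n", uv_strerror(err));
    return 1;
  }
  return uv_run(loop, UV_RUN_DEFAULT);
}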

+ 105 - 0
Utilities/cmlibuv/src/unix/proctitle.c

@@ -0,0 +1,105 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+extern void uv__set_process_title(const char* title);
+
+static void* args_mem;
+
+static struct {
+  char* str;
+  size_t len;
+} process_title;
+
+
+char** uv_setup_args(int argc, char** argv) {
+  char** new_argv;
+  size_t size;
+  char* s;
+  int i;
+
+  if (argc <= 0)
+    return argv;
+
+  /* Calculate how much memory we need for the argv strings. */
+  size = 0;
+  for (i = 0; i < argc; i++)
+    size += strlen(argv[i]) + 1;
+
+  process_title.str = argv[0];
+  process_title.len = argv[argc - 1] + strlen(argv[argc - 1]) - argv[0];
+  assert(process_title.len + 1 == size);  /* argv memory should be adjacent. */
+
+  /* Add space for the argv pointers. */
+  size += (argc + 1) * sizeof(char*);
+
+  new_argv = uv__malloc(size);
+  if (new_argv == NULL)
+    return argv;
+  args_mem = new_argv;
+
+  /* Copy over the strings and set up the pointer table. */
+  s = (char*) &new_argv[argc + 1];
+  for (i = 0; i < argc; i++) {
+    size = strlen(argv[i]) + 1;
+    memcpy(s, argv[i], size);
+    new_argv[i] = s;
+    s += size;
+  }
+  new_argv[i] = NULL;
+
+  return new_argv;
+}
+
+
+int uv_set_process_title(const char* title) {
+  if (process_title.len == 0)
+    return 0;
+
+  /* No need to terminate, byte after is always '\0'. */
+  strncpy(process_title.str, title, process_title.len);
+  uv__set_process_title(title);
+
+  return 0;
+}
+
+
+int uv_get_process_title(char* buffer, size_t size) {
+  if (buffer == NULL || size == 0)
+    return -EINVAL;
+  else if (size <= process_title.len)
+    return -ENOBUFS;
+
+  memcpy(buffer, process_title.str, process_title.len + 1);
+  buffer[process_title.len] = '\0';
+
+  return 0;
+}
+
+
+UV_DESTRUCTOR(static void free_args_mem(void)) {
+  uv__free(args_mem);  /* Keep valgrind happy. */
+  args_mem = NULL;
+}
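
uv_setup_args() copies argv into one contiguous block so that uv_set_process_title() can later overwrite the original argv storage in place. For reference, a minimal sketch of the intended call order (illustrative only, not part of the imported sources):

/* Illustrative sketch -- not part of the imported libuv sources. */
#include <stdio.h>
#include <uv.h>

int main(int argc, char** argv) {
  char title[64];

  /* Must be called first; it returns a safe copy of argv to keep using. */
  argv = uv_setup_args(argc, argv);

  uv_set_process_title("myapp: idle");  /* placeholder title */

  if (uv_get_process_title(title, sizeof(title)) == 0)
    printf("current title: %s\n", title);

  (void) argv;
  return 0;
}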

+ 120 - 0
Utilities/cmlibuv/src/unix/pthread-barrier.c

@@ -0,0 +1,120 @@
+/*
+Copyright (c) 2016, Kari Tristan Helgason <[email protected]>
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+#include "uv-common.h"
+#include "pthread-barrier.h"
+
+#include <stdlib.h>
+#include <assert.h>
+
+/* TODO: support barrier_attr */
+int pthread_barrier_init(pthread_barrier_t* barrier,
+                         const void* barrier_attr,
+                         unsigned count) {
+  int rc;
+  _uv_barrier* b;
+
+  if (barrier == NULL || count == 0)
+    return EINVAL;
+
+  if (barrier_attr != NULL)
+    return ENOTSUP;
+
+  b = uv__malloc(sizeof(*b));
+  if (b == NULL)
+    return ENOMEM;
+
+  b->in = 0;
+  b->out = 0;
+  b->threshold = count;
+
+  if ((rc = pthread_mutex_init(&b->mutex, NULL)) != 0)
+    goto error2;
+  if ((rc = pthread_cond_init(&b->cond, NULL)) != 0)
+    goto error;
+
+  barrier->b = b;
+  return 0;
+
+error:
+  pthread_mutex_destroy(&b->mutex);
+error2:
+  uv__free(b);
+  return rc;
+}
+
+int pthread_barrier_wait(pthread_barrier_t* barrier) {
+  int rc;
+  _uv_barrier* b;
+
+  if (barrier == NULL || barrier->b == NULL)
+    return EINVAL;
+
+  b = barrier->b;
+  /* Lock the mutex. */
+  if ((rc = pthread_mutex_lock(&b->mutex)) != 0)
+    return rc;
+
+  /* Increment the count. If this is the last thread to arrive (the one that
+     brings the count up to the threshold), wake the waiters, unlock the
+     mutex, then return PTHREAD_BARRIER_SERIAL_THREAD. */
+  if (++b->in == b->threshold) {
+    b->in = 0;
+    b->out = b->threshold - 1;
+    /* Do not wrap the signal call in assert(); the wakeup must still happen
+       when NDEBUG compiles assertions out. */
+    rc = pthread_cond_signal(&b->cond);
+    assert(rc == 0);
+
+    pthread_mutex_unlock(&b->mutex);
+    return PTHREAD_BARRIER_SERIAL_THREAD;
+  }
+  /* Otherwise, wait until the last arriving thread resets `in` to 0, then
+     return 0 to indicate this is not the serial thread. */
+  do {
+    if ((rc = pthread_cond_wait(&b->cond, &b->mutex)) != 0)
+      break;
+  } while (b->in != 0);
+
+  /* mark thread exit */
+  b->out--;
+  pthread_cond_signal(&b->cond);
+  pthread_mutex_unlock(&b->mutex);
+  return rc;
+}
+
+int pthread_barrier_destroy(pthread_barrier_t* barrier) {
+  int rc;
+  _uv_barrier* b;
+
+  if (barrier == NULL || barrier->b == NULL)
+    return EINVAL;
+
+  b = barrier->b;
+
+  if ((rc = pthread_mutex_lock(&b->mutex)) != 0)
+    return rc;
+
+  if (b->in > 0 || b->out > 0)
+    rc = EBUSY;
+
+  pthread_mutex_unlock(&b->mutex);
+
+  if (rc)
+    return rc;
+
+  pthread_cond_destroy(&b->cond);
+  pthread_mutex_destroy(&b->mutex);
+  uv__free(barrier->b);
+  barrier->b = NULL;
+  return 0;
+}
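
This file supplies a mutex-plus-condition-variable fallback for platforms whose pthreads lack barriers. For reference, a minimal sketch of the call pattern it emulates (illustrative only, not part of the imported sources; the worker function and thread count are placeholders):

/* Illustrative sketch -- not part of the imported libuv sources. */
#include <pthread.h>
#include <stdio.h>
#include "pthread-barrier.h"  /* fallback header; native pthreads provide this directly */

#define NUM_THREADS 4

static pthread_barrier_t barrier;

static void* worker(void* arg) {
  /* ... per-thread setup work would go here ... */
  if (pthread_barrier_wait(&barrier) == PTHREAD_BARRIER_SERIAL_THREAD)
    printf("last thread through the barrier\n");
  return arg;
}

int main(void) {
  pthread_t threads[NUM_THREADS];
  int i;

  pthread_barrier_init(&barrier, NULL, NUM_THREADS);
  for (i = 0; i < NUM_THREADS; i++)
    pthread_create(&threads[i], NULL, worker, NULL);
  for (i = 0; i < NUM_THREADS; i++)
    pthread_join(threads[i], NULL);
  pthread_barrier_destroy(&barrier);
  return 0;
}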

+ 56 - 0
Utilities/cmlibuv/src/unix/pthread-fixes.c

@@ -0,0 +1,56 @@
+/* Copyright (c) 2013, Sony Mobile Communications AB
+ * Copyright (c) 2012, Google Inc.
+   All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+     * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+       * Neither the name of Google Inc. nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/* Android versions < 4.1 have a broken pthread_sigmask. */
+#include <errno.h>
+#include <pthread.h>
+#include <signal.h>
+
+int uv__pthread_sigmask(int how, const sigset_t* set, sigset_t* oset) {
+  static int workaround;
+  int err;
+
+  if (workaround) {
+    return sigprocmask(how, set, oset);
+  } else {
+    err = pthread_sigmask(how, set, oset);
+    if (err) {
+      if (err == EINVAL && sigprocmask(how, set, oset) == 0) {
+        workaround = 1;
+        return 0;
+      } else {
+        return -1;
+      }
+    }
+  }
+
+  return 0;
+}

+ 467 - 0
Utilities/cmlibuv/src/unix/signal.c

@@ -0,0 +1,467 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <assert.h>
+#include <errno.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+
+typedef struct {
+  uv_signal_t* handle;
+  int signum;
+} uv__signal_msg_t;
+
+RB_HEAD(uv__signal_tree_s, uv_signal_s);
+
+
+static int uv__signal_unlock(void);
+static void uv__signal_event(uv_loop_t* loop, uv__io_t* w, unsigned int events);
+static int uv__signal_compare(uv_signal_t* w1, uv_signal_t* w2);
+static void uv__signal_stop(uv_signal_t* handle);
+
+
+static pthread_once_t uv__signal_global_init_guard = PTHREAD_ONCE_INIT;
+static struct uv__signal_tree_s uv__signal_tree =
+    RB_INITIALIZER(uv__signal_tree);
+static int uv__signal_lock_pipefd[2];
+
+
+RB_GENERATE_STATIC(uv__signal_tree_s,
+                   uv_signal_s, tree_entry,
+                   uv__signal_compare)
+
+
+static void uv__signal_global_init(void) {
+  if (uv__make_pipe(uv__signal_lock_pipefd, 0))
+    abort();
+
+  if (uv__signal_unlock())
+    abort();
+}
+
+
+void uv__signal_global_once_init(void) {
+  pthread_once(&uv__signal_global_init_guard, uv__signal_global_init);
+}
+
+
+
+static int uv__signal_lock(void) {
+  int r;
+  char data;
+
+  do {
+    r = read(uv__signal_lock_pipefd[0], &data, sizeof data);
+  } while (r < 0 && errno == EINTR);
+
+  return (r < 0) ? -1 : 0;
+}
+
+
+static int uv__signal_unlock(void) {
+  int r;
+  char data = 42;
+
+  do {
+    r = write(uv__signal_lock_pipefd[1], &data, sizeof data);
+  } while (r < 0 && errno == EINTR);
+
+  return (r < 0) ? -1 : 0;
+}
+
+
+static void uv__signal_block_and_lock(sigset_t* saved_sigmask) {
+  sigset_t new_mask;
+
+  if (sigfillset(&new_mask))
+    abort();
+
+  if (pthread_sigmask(SIG_SETMASK, &new_mask, saved_sigmask))
+    abort();
+
+  if (uv__signal_lock())
+    abort();
+}
+
+
+static void uv__signal_unlock_and_unblock(sigset_t* saved_sigmask) {
+  if (uv__signal_unlock())
+    abort();
+
+  if (pthread_sigmask(SIG_SETMASK, saved_sigmask, NULL))
+    abort();
+}
+
+
+static uv_signal_t* uv__signal_first_handle(int signum) {
+  /* This function must be called with the signal lock held. */
+  uv_signal_t lookup;
+  uv_signal_t* handle;
+
+  lookup.signum = signum;
+  lookup.loop = NULL;
+
+  handle = RB_NFIND(uv__signal_tree_s, &uv__signal_tree, &lookup);
+
+  if (handle != NULL && handle->signum == signum)
+    return handle;
+
+  return NULL;
+}
+
+
+static void uv__signal_handler(int signum) {
+  uv__signal_msg_t msg;
+  uv_signal_t* handle;
+  int saved_errno;
+
+  saved_errno = errno;
+  memset(&msg, 0, sizeof msg);
+
+  if (uv__signal_lock()) {
+    errno = saved_errno;
+    return;
+  }
+
+  for (handle = uv__signal_first_handle(signum);
+       handle != NULL && handle->signum == signum;
+       handle = RB_NEXT(uv__signal_tree_s, &uv__signal_tree, handle)) {
+    int r;
+
+    msg.signum = signum;
+    msg.handle = handle;
+
+    /* write() should be atomic for small data chunks, so the entire message
+     * should be written at once. In theory the pipe could become full, in
+     * which case the user is out of luck.
+     */
+    do {
+      r = write(handle->loop->signal_pipefd[1], &msg, sizeof msg);
+    } while (r == -1 && errno == EINTR);
+
+    assert(r == sizeof msg ||
+           (r == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)));
+
+    if (r != -1)
+      handle->caught_signals++;
+  }
+
+  uv__signal_unlock();
+  errno = saved_errno;
+}
+
+
+static int uv__signal_register_handler(int signum) {
+  /* When this function is called, the signal lock must be held. */
+  struct sigaction sa;
+
+  /* XXX use a separate signal stack? */
+  memset(&sa, 0, sizeof(sa));
+  if (sigfillset(&sa.sa_mask))
+    abort();
+  sa.sa_handler = uv__signal_handler;
+
+  /* XXX save old action so we can restore it later on? */
+  if (sigaction(signum, &sa, NULL))
+    return -errno;
+
+  return 0;
+}
+
+
+static void uv__signal_unregister_handler(int signum) {
+  /* When this function is called, the signal lock must be held. */
+  struct sigaction sa;
+
+  memset(&sa, 0, sizeof(sa));
+  sa.sa_handler = SIG_DFL;
+
+  /* sigaction can only fail with EINVAL or EFAULT; an attempt to deregister a
+   * signal implies that it was successfully registered earlier, so EINVAL
+   * should never happen.
+   */
+  if (sigaction(signum, &sa, NULL))
+    abort();
+}
+
+
+static int uv__signal_loop_once_init(uv_loop_t* loop) {
+  int err;
+
+  /* Return if already initialized. */
+  if (loop->signal_pipefd[0] != -1)
+    return 0;
+
+  err = uv__make_pipe(loop->signal_pipefd, UV__F_NONBLOCK);
+  if (err)
+    return err;
+
+  uv__io_init(&loop->signal_io_watcher,
+              uv__signal_event,
+              loop->signal_pipefd[0]);
+  uv__io_start(loop, &loop->signal_io_watcher, POLLIN);
+
+  return 0;
+}
+
+
+void uv__signal_loop_cleanup(uv_loop_t* loop) {
+  QUEUE* q;
+
+  /* Stop all the signal watchers that are still attached to this loop. This
+   * ensures that the (shared) signal tree doesn't contain any invalid
+   * entries, and that signal handlers are removed when appropriate.
+   * It's safe to use QUEUE_FOREACH here because the handles and the handle
+   * queue are not modified by uv__signal_stop().
+   */
+  QUEUE_FOREACH(q, &loop->handle_queue) {
+    uv_handle_t* handle = QUEUE_DATA(q, uv_handle_t, handle_queue);
+
+    if (handle->type == UV_SIGNAL)
+      uv__signal_stop((uv_signal_t*) handle);
+  }
+
+  if (loop->signal_pipefd[0] != -1) {
+    uv__close(loop->signal_pipefd[0]);
+    loop->signal_pipefd[0] = -1;
+  }
+
+  if (loop->signal_pipefd[1] != -1) {
+    uv__close(loop->signal_pipefd[1]);
+    loop->signal_pipefd[1] = -1;
+  }
+}
+
+
+int uv_signal_init(uv_loop_t* loop, uv_signal_t* handle) {
+  int err;
+
+  err = uv__signal_loop_once_init(loop);
+  if (err)
+    return err;
+
+  uv__handle_init(loop, (uv_handle_t*) handle, UV_SIGNAL);
+  handle->signum = 0;
+  handle->caught_signals = 0;
+  handle->dispatched_signals = 0;
+
+  return 0;
+}
+
+
+void uv__signal_close(uv_signal_t* handle) {
+
+  uv__signal_stop(handle);
+
+  /* If there are any caught signals "trapped" in the signal pipe, we can't
+   * call the close callback yet. Otherwise, add the handle to the finish_close
+   * queue.
+   */
+  if (handle->caught_signals == handle->dispatched_signals) {
+    uv__make_close_pending((uv_handle_t*) handle);
+  }
+}
+
+
+int uv_signal_start(uv_signal_t* handle, uv_signal_cb signal_cb, int signum) {
+  sigset_t saved_sigmask;
+  int err;
+
+  assert(!(handle->flags & (UV_CLOSING | UV_CLOSED)));
+
+  /* If the user supplies signum == 0, return an error right away. If the
+   * signum is otherwise invalid, uv__signal_register_handler() will find out
+   * eventually.
+   */
+  if (signum == 0)
+    return -EINVAL;
+
+  /* Short circuit: if the signal watcher is already watching {signum}, don't
+   * go through the process of deregistering and registering the handler.
+   * Additionally, this avoids pending signals getting lost in the small
+   * time frame during which handle->signum == 0.
+   */
+  if (signum == handle->signum) {
+    handle->signal_cb = signal_cb;
+    return 0;
+  }
+
+  /* If the signal handler was already active, stop it first. */
+  if (handle->signum != 0) {
+    uv__signal_stop(handle);
+  }
+
+  uv__signal_block_and_lock(&saved_sigmask);
+
+  /* If at this point there are no active signal watchers for this signum (in
+   * any of the loops), it's time to try and register a handler for it here.
+   */
+  if (uv__signal_first_handle(signum) == NULL) {
+    err = uv__signal_register_handler(signum);
+    if (err) {
+      /* Registering the signal handler failed. Must be an invalid signal. */
+      uv__signal_unlock_and_unblock(&saved_sigmask);
+      return err;
+    }
+  }
+
+  handle->signum = signum;
+  RB_INSERT(uv__signal_tree_s, &uv__signal_tree, handle);
+
+  uv__signal_unlock_and_unblock(&saved_sigmask);
+
+  handle->signal_cb = signal_cb;
+  uv__handle_start(handle);
+
+  return 0;
+}
+
+
+static void uv__signal_event(uv_loop_t* loop,
+                             uv__io_t* w,
+                             unsigned int events) {
+  uv__signal_msg_t* msg;
+  uv_signal_t* handle;
+  char buf[sizeof(uv__signal_msg_t) * 32];
+  size_t bytes, end, i;
+  int r;
+
+  bytes = 0;
+  end = 0;
+
+  do {
+    r = read(loop->signal_pipefd[0], buf + bytes, sizeof(buf) - bytes);
+
+    if (r == -1 && errno == EINTR)
+      continue;
+
+    if (r == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
+      /* If there are bytes in the buffer already (which really is extremely
+       * unlikely, if possible at all), we can't exit the function here. We'll
+       * spin until more bytes are read instead.
+       */
+      if (bytes > 0)
+        continue;
+
+      /* Otherwise, there was nothing there. */
+      return;
+    }
+
+    /* Other errors really should never happen. */
+    if (r == -1)
+      abort();
+
+    bytes += r;
+
+    /* `end` is rounded down to a multiple of sizeof(uv__signal_msg_t). */
+    end = (bytes / sizeof(uv__signal_msg_t)) * sizeof(uv__signal_msg_t);
+
+    for (i = 0; i < end; i += sizeof(uv__signal_msg_t)) {
+      msg = (uv__signal_msg_t*) (buf + i);
+      handle = msg->handle;
+
+      if (msg->signum == handle->signum) {
+        assert(!(handle->flags & UV_CLOSING));
+        handle->signal_cb(handle, handle->signum);
+      }
+
+      handle->dispatched_signals++;
+
+      /* If uv_close was called while there were caught signals that were not
+       * yet dispatched, the uv__finish_close was deferred. Make close pending
+       * now if this has happened.
+       */
+      if ((handle->flags & UV_CLOSING) &&
+          (handle->caught_signals == handle->dispatched_signals)) {
+        uv__make_close_pending((uv_handle_t*) handle);
+      }
+    }
+
+    bytes -= end;
+
+    /* If there are any "partial" messages left, move them to the start of
+     * the buffer and spin. This should not happen.
+     */
+    if (bytes) {
+      memmove(buf, buf + end, bytes);
+      continue;
+    }
+  } while (end == sizeof buf);
+}
+
+
+static int uv__signal_compare(uv_signal_t* w1, uv_signal_t* w2) {
+  /* Compare signums first so all watchers with the same signum end up
+   * adjacent.
+   */
+  if (w1->signum < w2->signum) return -1;
+  if (w1->signum > w2->signum) return 1;
+
+  /* Sort by loop pointer, so we can easily look up the first item after
+   * { .signum = x, .loop = NULL }.
+   */
+  if (w1->loop < w2->loop) return -1;
+  if (w1->loop > w2->loop) return 1;
+
+  if (w1 < w2) return -1;
+  if (w1 > w2) return 1;
+
+  return 0;
+}
+
+
+int uv_signal_stop(uv_signal_t* handle) {
+  assert(!(handle->flags & (UV_CLOSING | UV_CLOSED)));
+  uv__signal_stop(handle);
+  return 0;
+}
+
+
+static void uv__signal_stop(uv_signal_t* handle) {
+  uv_signal_t* removed_handle;
+  sigset_t saved_sigmask;
+
+  /* If the watcher wasn't started, this is a no-op. */
+  if (handle->signum == 0)
+    return;
+
+  uv__signal_block_and_lock(&saved_sigmask);
+
+  removed_handle = RB_REMOVE(uv__signal_tree_s, &uv__signal_tree, handle);
+  assert(removed_handle == handle);
+  (void) removed_handle;
+
+  /* Check if there are other active signal watchers observing this signal. If
+   * not, unregister the signal handler.
+   */
+  if (uv__signal_first_handle(handle->signum) == NULL)
+    uv__signal_unregister_handler(handle->signum);
+
+  uv__signal_unlock_and_unblock(&saved_sigmask);
+
+  handle->signum = 0;
+  uv__handle_stop(handle);
+}
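
signal.c funnels every caught signal through a self-pipe: uv__signal_handler() writes a small uv__signal_msg_t from signal context, and uv__signal_event() reads and dispatches it on the loop thread. For reference, a minimal sketch of the public API built on top of this (illustrative only, not part of the imported sources):

/* Illustrative sketch -- not part of the imported libuv sources. */
#include <signal.h>
#include <stdio.h>
#include <uv.h>

static void on_signal(uv_signal_t* handle, int signum) {
  printf("caught signal %d, stopping\n", signum);
  uv_signal_stop(handle);
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_signal_t sig;

  uv_signal_init(loop, &sig);
  uv_signal_start(&sig, on_signal, SIGINT);

  /* The loop keeps running until the watcher is stopped in the callback. */
  return uv_run(loop, UV_RUN_DEFAULT);
}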

+ 53 - 0
Utilities/cmlibuv/src/unix/spinlock.h

@@ -0,0 +1,53 @@
+/* Copyright (c) 2013, Ben Noordhuis <[email protected]>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef UV_SPINLOCK_H_
+#define UV_SPINLOCK_H_
+
+#include "internal.h"  /* ACCESS_ONCE, UV_UNUSED */
+#include "atomic-ops.h"
+
+#define UV_SPINLOCK_INITIALIZER { 0 }
+
+typedef struct {
+  int lock;
+} uv_spinlock_t;
+
+UV_UNUSED(static void uv_spinlock_init(uv_spinlock_t* spinlock));
+UV_UNUSED(static void uv_spinlock_lock(uv_spinlock_t* spinlock));
+UV_UNUSED(static void uv_spinlock_unlock(uv_spinlock_t* spinlock));
+UV_UNUSED(static int uv_spinlock_trylock(uv_spinlock_t* spinlock));
+
+UV_UNUSED(static void uv_spinlock_init(uv_spinlock_t* spinlock)) {
+  ACCESS_ONCE(int, spinlock->lock) = 0;
+}
+
+UV_UNUSED(static void uv_spinlock_lock(uv_spinlock_t* spinlock)) {
+  while (!uv_spinlock_trylock(spinlock)) cpu_relax();
+}
+
+UV_UNUSED(static void uv_spinlock_unlock(uv_spinlock_t* spinlock)) {
+  ACCESS_ONCE(int, spinlock->lock) = 0;
+}
+
+UV_UNUSED(static int uv_spinlock_trylock(uv_spinlock_t* spinlock)) {
+  /* TODO(bnoordhuis) Maybe change to a ticket lock to guarantee fair queueing.
+   * Not really critical until we have locks that are (frequently) contended
+   * for by several threads.
+   */
+  return 0 == cmpxchgi(&spinlock->lock, 0, 1);
+}
+
+#endif  /* UV_SPINLOCK_H_ */
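
spinlock.h builds a simple test-and-set lock from cmpxchgi() and cpu_relax(); it is internal to libuv, so only sources that can include this header may use it. A minimal sketch of the intended pattern (illustrative only; the protected counter is hypothetical):

/* Illustrative sketch -- internal API, usable only from within libuv sources
 * that can include "spinlock.h". */
#include "spinlock.h"

static uv_spinlock_t counter_lock = UV_SPINLOCK_INITIALIZER;
static unsigned long counter;  /* hypothetical shared state */

void bump_counter(void) {
  uv_spinlock_lock(&counter_lock);    /* spins with cpu_relax() until acquired */
  counter++;
  uv_spinlock_unlock(&counter_lock);  /* plain store of 0 releases the lock */
}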

+ 1638 - 0
Utilities/cmlibuv/src/unix/stream.c

@@ -0,0 +1,1638 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/uio.h>
+#include <sys/un.h>
+#include <unistd.h>
+#include <limits.h> /* IOV_MAX */
+
+#if defined(__APPLE__)
+# include <sys/event.h>
+# include <sys/time.h>
+# include <sys/select.h>
+
+/* Forward declaration */
+typedef struct uv__stream_select_s uv__stream_select_t;
+
+struct uv__stream_select_s {
+  uv_stream_t* stream;
+  uv_thread_t thread;
+  uv_sem_t close_sem;
+  uv_sem_t async_sem;
+  uv_async_t async;
+  int events;
+  int fake_fd;
+  int int_fd;
+  int fd;
+  fd_set* sread;
+  size_t sread_sz;
+  fd_set* swrite;
+  size_t swrite_sz;
+};
+#endif /* defined(__APPLE__) */
+
+static void uv__stream_connect(uv_stream_t*);
+static void uv__write(uv_stream_t* stream);
+static void uv__read(uv_stream_t* stream);
+static void uv__stream_io(uv_loop_t* loop, uv__io_t* w, unsigned int events);
+static void uv__write_callbacks(uv_stream_t* stream);
+static size_t uv__write_req_size(uv_write_t* req);
+
+
+void uv__stream_init(uv_loop_t* loop,
+                     uv_stream_t* stream,
+                     uv_handle_type type) {
+  int err;
+
+  uv__handle_init(loop, (uv_handle_t*)stream, type);
+  stream->read_cb = NULL;
+  stream->alloc_cb = NULL;
+  stream->close_cb = NULL;
+  stream->connection_cb = NULL;
+  stream->connect_req = NULL;
+  stream->shutdown_req = NULL;
+  stream->accepted_fd = -1;
+  stream->queued_fds = NULL;
+  stream->delayed_error = 0;
+  QUEUE_INIT(&stream->write_queue);
+  QUEUE_INIT(&stream->write_completed_queue);
+  stream->write_queue_size = 0;
+
+  if (loop->emfile_fd == -1) {
+    err = uv__open_cloexec("/dev/null", O_RDONLY);
+    if (err < 0)
+        /* In the rare case that "/dev/null" isn't mounted, open "/"
+         * instead.
+         */
+        err = uv__open_cloexec("/", O_RDONLY);
+    if (err >= 0)
+      loop->emfile_fd = err;
+  }
+
+#if defined(__APPLE__)
+  stream->select = NULL;
+#endif /* defined(__APPLE__) */
+
+  uv__io_init(&stream->io_watcher, uv__stream_io, -1);
+}
+
+
+static void uv__stream_osx_interrupt_select(uv_stream_t* stream) {
+#if defined(__APPLE__)
+  /* Notify select() thread about state change */
+  uv__stream_select_t* s;
+  int r;
+
+  s = stream->select;
+  if (s == NULL)
+    return;
+
+  /* Interrupt select() loop
+   * NOTE: fake_fd and int_fd are socketpair(), thus writing to one will
+   * emit read event on other side
+   */
+  do
+    r = write(s->fake_fd, "x", 1);
+  while (r == -1 && errno == EINTR);
+
+  assert(r == 1);
+#else  /* !defined(__APPLE__) */
+  /* No-op on any other platform */
+#endif  /* !defined(__APPLE__) */
+}
+
+
+#if defined(__APPLE__)
+static void uv__stream_osx_select(void* arg) {
+  uv_stream_t* stream;
+  uv__stream_select_t* s;
+  char buf[1024];
+  int events;
+  int fd;
+  int r;
+  int max_fd;
+
+  stream = arg;
+  s = stream->select;
+  fd = s->fd;
+
+  if (fd > s->int_fd)
+    max_fd = fd;
+  else
+    max_fd = s->int_fd;
+
+  while (1) {
+    /* Terminate on semaphore */
+    if (uv_sem_trywait(&s->close_sem) == 0)
+      break;
+
+    /* Watch fd using select(2) */
+    memset(s->sread, 0, s->sread_sz);
+    memset(s->swrite, 0, s->swrite_sz);
+
+    if (uv__io_active(&stream->io_watcher, POLLIN))
+      FD_SET(fd, s->sread);
+    if (uv__io_active(&stream->io_watcher, POLLOUT))
+      FD_SET(fd, s->swrite);
+    FD_SET(s->int_fd, s->sread);
+
+    /* Wait indefinitely for fd events */
+    r = select(max_fd + 1, s->sread, s->swrite, NULL, NULL);
+    if (r == -1) {
+      if (errno == EINTR)
+        continue;
+
+      /* XXX: Possible?! */
+      abort();
+    }
+
+    /* Ignore timeouts */
+    if (r == 0)
+      continue;
+
+    /* Empty socketpair's buffer in case of interruption */
+    if (FD_ISSET(s->int_fd, s->sread))
+      while (1) {
+        r = read(s->int_fd, buf, sizeof(buf));
+
+        if (r == sizeof(buf))
+          continue;
+
+        if (r != -1)
+          break;
+
+        if (errno == EAGAIN || errno == EWOULDBLOCK)
+          break;
+
+        if (errno == EINTR)
+          continue;
+
+        abort();
+      }
+
+    /* Handle events */
+    events = 0;
+    if (FD_ISSET(fd, s->sread))
+      events |= POLLIN;
+    if (FD_ISSET(fd, s->swrite))
+      events |= POLLOUT;
+
+    assert(events != 0 || FD_ISSET(s->int_fd, s->sread));
+    if (events != 0) {
+      ACCESS_ONCE(int, s->events) = events;
+
+      uv_async_send(&s->async);
+      uv_sem_wait(&s->async_sem);
+
+      /* Should be processed at this stage */
+      assert((s->events == 0) || (stream->flags & UV_CLOSING));
+    }
+  }
+}
+
+
+static void uv__stream_osx_select_cb(uv_async_t* handle) {
+  uv__stream_select_t* s;
+  uv_stream_t* stream;
+  int events;
+
+  s = container_of(handle, uv__stream_select_t, async);
+  stream = s->stream;
+
+  /* Get and reset stream's events */
+  events = s->events;
+  ACCESS_ONCE(int, s->events) = 0;
+
+  assert(events != 0);
+  assert(events == (events & (POLLIN | POLLOUT)));
+
+  /* Invoke callback on event-loop */
+  if ((events & POLLIN) && uv__io_active(&stream->io_watcher, POLLIN))
+    uv__stream_io(stream->loop, &stream->io_watcher, POLLIN);
+
+  if ((events & POLLOUT) && uv__io_active(&stream->io_watcher, POLLOUT))
+    uv__stream_io(stream->loop, &stream->io_watcher, POLLOUT);
+
+  if (stream->flags & UV_CLOSING)
+    return;
+
+  /* NOTE: It is important to do it here, otherwise `select()` might be called
+   * before the actual `uv__read()`, leading to the blocking syscall
+   */
+  uv_sem_post(&s->async_sem);
+}
+
+
+static void uv__stream_osx_cb_close(uv_handle_t* async) {
+  uv__stream_select_t* s;
+
+  s = container_of(async, uv__stream_select_t, async);
+  uv__free(s);
+}
+
+
+int uv__stream_try_select(uv_stream_t* stream, int* fd) {
+  /*
+   * kqueue doesn't work with some files from the /dev mount on OS X,
+   * so select(2) is run in a separate thread for those fds.
+   */
+
+  struct kevent filter[1];
+  struct kevent events[1];
+  struct timespec timeout;
+  uv__stream_select_t* s;
+  int fds[2];
+  int err;
+  int ret;
+  int kq;
+  int old_fd;
+  int max_fd;
+  size_t sread_sz;
+  size_t swrite_sz;
+
+  kq = kqueue();
+  if (kq == -1) {
+    perror("(libuv) kqueue()");
+    return -errno;
+  }
+
+  EV_SET(&filter[0], *fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, 0);
+
+  /* Use small timeout, because we only want to capture EINVALs */
+  timeout.tv_sec = 0;
+  timeout.tv_nsec = 1;
+
+  do
+    ret = kevent(kq, filter, 1, events, 1, &timeout);
+  while (ret == -1 && errno == EINTR);
+
+  uv__close(kq);
+
+  if (ret == -1)
+    return -errno;
+
+  if (ret == 0 || (events[0].flags & EV_ERROR) == 0 || events[0].data != EINVAL)
+    return 0;
+
+  /* At this point we definitely know that this fd won't work with kqueue */
+
+  /*
+   * Create fds for io watcher and to interrupt the select() loop.
+   * NOTE: do it ahead of malloc below to allocate enough space for fd_sets
+   */
+  if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds))
+    return -errno;
+
+  max_fd = *fd;
+  if (fds[1] > max_fd)
+    max_fd = fds[1];
+
+  sread_sz = ROUND_UP(max_fd + 1, sizeof(uint32_t) * NBBY) / NBBY;
+  swrite_sz = sread_sz;
+
+  s = uv__malloc(sizeof(*s) + sread_sz + swrite_sz);
+  if (s == NULL) {
+    err = -ENOMEM;
+    goto failed_malloc;
+  }
+
+  s->events = 0;
+  s->fd = *fd;
+  s->sread = (fd_set*) ((char*) s + sizeof(*s));
+  s->sread_sz = sread_sz;
+  s->swrite = (fd_set*) ((char*) s->sread + sread_sz);
+  s->swrite_sz = swrite_sz;
+
+  err = uv_async_init(stream->loop, &s->async, uv__stream_osx_select_cb);
+  if (err)
+    goto failed_async_init;
+
+  s->async.flags |= UV__HANDLE_INTERNAL;
+  uv__handle_unref(&s->async);
+
+  err = uv_sem_init(&s->close_sem, 0);
+  if (err != 0)
+    goto failed_close_sem_init;
+
+  err = uv_sem_init(&s->async_sem, 0);
+  if (err != 0)
+    goto failed_async_sem_init;
+
+  s->fake_fd = fds[0];
+  s->int_fd = fds[1];
+
+  old_fd = *fd;
+  s->stream = stream;
+  stream->select = s;
+  *fd = s->fake_fd;
+
+  err = uv_thread_create(&s->thread, uv__stream_osx_select, stream);
+  if (err != 0)
+    goto failed_thread_create;
+
+  return 0;
+
+failed_thread_create:
+  s->stream = NULL;
+  stream->select = NULL;
+  *fd = old_fd;
+
+  uv_sem_destroy(&s->async_sem);
+
+failed_async_sem_init:
+  uv_sem_destroy(&s->close_sem);
+
+failed_close_sem_init:
+  uv__close(fds[0]);
+  uv__close(fds[1]);
+  uv_close((uv_handle_t*) &s->async, uv__stream_osx_cb_close);
+  return err;
+
+failed_async_init:
+  uv__free(s);
+
+failed_malloc:
+  uv__close(fds[0]);
+  uv__close(fds[1]);
+
+  return err;
+}
+#endif /* defined(__APPLE__) */
+
+
+int uv__stream_open(uv_stream_t* stream, int fd, int flags) {
+#if defined(__APPLE__)
+  int enable;
+#endif
+
+  if (!(stream->io_watcher.fd == -1 || stream->io_watcher.fd == fd))
+    return -EBUSY;
+
+  assert(fd >= 0);
+  stream->flags |= flags;
+
+  if (stream->type == UV_TCP) {
+    if ((stream->flags & UV_TCP_NODELAY) && uv__tcp_nodelay(fd, 1))
+      return -errno;
+
+    /* TODO: Use the delay the user passed in. */
+    if ((stream->flags & UV_TCP_KEEPALIVE) && uv__tcp_keepalive(fd, 1, 60))
+      return -errno;
+  }
+
+#if defined(__APPLE__)
+  enable = 1;
+  if (setsockopt(fd, SOL_SOCKET, SO_OOBINLINE, &enable, sizeof(enable)) &&
+      errno != ENOTSOCK &&
+      errno != EINVAL) {
+    return -errno;
+  }
+#endif
+
+  stream->io_watcher.fd = fd;
+
+  return 0;
+}
+
+
+void uv__stream_flush_write_queue(uv_stream_t* stream, int error) {
+  uv_write_t* req;
+  QUEUE* q;
+  while (!QUEUE_EMPTY(&stream->write_queue)) {
+    q = QUEUE_HEAD(&stream->write_queue);
+    QUEUE_REMOVE(q);
+
+    req = QUEUE_DATA(q, uv_write_t, queue);
+    req->error = error;
+
+    QUEUE_INSERT_TAIL(&stream->write_completed_queue, &req->queue);
+  }
+}
+
+
+void uv__stream_destroy(uv_stream_t* stream) {
+  assert(!uv__io_active(&stream->io_watcher, POLLIN | POLLOUT));
+  assert(stream->flags & UV_CLOSED);
+
+  if (stream->connect_req) {
+    uv__req_unregister(stream->loop, stream->connect_req);
+    stream->connect_req->cb(stream->connect_req, -ECANCELED);
+    stream->connect_req = NULL;
+  }
+
+  uv__stream_flush_write_queue(stream, -ECANCELED);
+  uv__write_callbacks(stream);
+
+  if (stream->shutdown_req) {
+    /* The ECANCELED error code is a lie, the shutdown(2) syscall is a
+     * fait accompli at this point. Maybe we should revisit this in v0.11.
+     * A possible reason for leaving it unchanged is that it informs the
+     * callee that the handle has been destroyed.
+     */
+    uv__req_unregister(stream->loop, stream->shutdown_req);
+    stream->shutdown_req->cb(stream->shutdown_req, -ECANCELED);
+    stream->shutdown_req = NULL;
+  }
+
+  assert(stream->write_queue_size == 0);
+}
+
+
+/* Implements a best effort approach to mitigating accept() EMFILE errors.
+ * We have a spare file descriptor stashed away that we close to get below
+ * the EMFILE limit. Next, we accept all pending connections and close them
+ * immediately to signal the clients that we're overloaded - and we are, but
+ * we still keep on trucking.
+ *
+ * There is one caveat: it's not reliable in a multi-threaded environment.
+ * The file descriptor limit is per process. Our party trick fails if another
+ * thread opens a file or creates a socket in the time window between us
+ * calling close() and accept().
+ */
+static int uv__emfile_trick(uv_loop_t* loop, int accept_fd) {
+  int err;
+  int emfile_fd;
+
+  if (loop->emfile_fd == -1)
+    return -EMFILE;
+
+  uv__close(loop->emfile_fd);
+  loop->emfile_fd = -1;
+
+  do {
+    err = uv__accept(accept_fd);
+    if (err >= 0)
+      uv__close(err);
+  } while (err >= 0 || err == -EINTR);
+
+  emfile_fd = uv__open_cloexec("/", O_RDONLY);
+  if (emfile_fd >= 0)
+    loop->emfile_fd = emfile_fd;
+
+  return err;
+}
+
+
+#if defined(UV_HAVE_KQUEUE)
+# define UV_DEC_BACKLOG(w) w->rcount--;
+#else
+# define UV_DEC_BACKLOG(w) /* no-op */
+#endif /* defined(UV_HAVE_KQUEUE) */
+
+
+void uv__server_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
+  uv_stream_t* stream;
+  int err;
+
+  stream = container_of(w, uv_stream_t, io_watcher);
+  assert(events == POLLIN);
+  assert(stream->accepted_fd == -1);
+  assert(!(stream->flags & UV_CLOSING));
+
+  uv__io_start(stream->loop, &stream->io_watcher, POLLIN);
+
+  /* connection_cb can close the server socket while we're
+   * in the loop so check it on each iteration.
+   */
+  while (uv__stream_fd(stream) != -1) {
+    assert(stream->accepted_fd == -1);
+
+#if defined(UV_HAVE_KQUEUE)
+    if (w->rcount <= 0)
+      return;
+#endif /* defined(UV_HAVE_KQUEUE) */
+
+    err = uv__accept(uv__stream_fd(stream));
+    if (err < 0) {
+      if (err == -EAGAIN || err == -EWOULDBLOCK)
+        return;  /* Not an error. */
+
+      if (err == -ECONNABORTED)
+        continue;  /* Ignore. Nothing we can do about that. */
+
+      if (err == -EMFILE || err == -ENFILE) {
+        err = uv__emfile_trick(loop, uv__stream_fd(stream));
+        if (err == -EAGAIN || err == -EWOULDBLOCK)
+          break;
+      }
+
+      stream->connection_cb(stream, err);
+      continue;
+    }
+
+    UV_DEC_BACKLOG(w)
+    stream->accepted_fd = err;
+    stream->connection_cb(stream, 0);
+
+    if (stream->accepted_fd != -1) {
+      /* The user hasn't yet called uv_accept(). */
+      uv__io_stop(loop, &stream->io_watcher, POLLIN);
+      return;
+    }
+
+    if (stream->type == UV_TCP && (stream->flags & UV_TCP_SINGLE_ACCEPT)) {
+      /* Give other processes a chance to accept connections. */
+      struct timespec timeout = { 0, 1 };
+      nanosleep(&timeout, NULL);
+    }
+  }
+}
+
+
+#undef UV_DEC_BACKLOG
+
+
+int uv_accept(uv_stream_t* server, uv_stream_t* client) {
+  int err;
+
+  assert(server->loop == client->loop);
+
+  if (server->accepted_fd == -1)
+    return -EAGAIN;
+
+  switch (client->type) {
+    case UV_NAMED_PIPE:
+    case UV_TCP:
+      err = uv__stream_open(client,
+                            server->accepted_fd,
+                            UV_STREAM_READABLE | UV_STREAM_WRITABLE);
+      if (err) {
+        /* TODO handle error */
+        uv__close(server->accepted_fd);
+        goto done;
+      }
+      break;
+
+    case UV_UDP:
+      err = uv_udp_open((uv_udp_t*) client, server->accepted_fd);
+      if (err) {
+        uv__close(server->accepted_fd);
+        goto done;
+      }
+      break;
+
+    default:
+      return -EINVAL;
+  }
+
+  client->flags |= UV_HANDLE_BOUND;
+
+done:
+  /* Process queued fds */
+  if (server->queued_fds != NULL) {
+    uv__stream_queued_fds_t* queued_fds;
+
+    queued_fds = server->queued_fds;
+
+    /* Read first */
+    server->accepted_fd = queued_fds->fds[0];
+
+    /* All read, free */
+    assert(queued_fds->offset > 0);
+    if (--queued_fds->offset == 0) {
+      uv__free(queued_fds);
+      server->queued_fds = NULL;
+    } else {
+      /* Shift rest */
+      memmove(queued_fds->fds,
+              queued_fds->fds + 1,
+              queued_fds->offset * sizeof(*queued_fds->fds));
+    }
+  } else {
+    server->accepted_fd = -1;
+    if (err == 0)
+      uv__io_start(server->loop, &server->io_watcher, POLLIN);
+  }
+  return err;
+}
+
+
+int uv_listen(uv_stream_t* stream, int backlog, uv_connection_cb cb) {
+  int err;
+
+  switch (stream->type) {
+  case UV_TCP:
+    err = uv_tcp_listen((uv_tcp_t*)stream, backlog, cb);
+    break;
+
+  case UV_NAMED_PIPE:
+    err = uv_pipe_listen((uv_pipe_t*)stream, backlog, cb);
+    break;
+
+  default:
+    err = -EINVAL;
+  }
+
+  if (err == 0)
+    uv__handle_start(stream);
+
+  return err;
+}
+
+
+static void uv__drain(uv_stream_t* stream) {
+  uv_shutdown_t* req;
+  int err;
+
+  assert(QUEUE_EMPTY(&stream->write_queue));
+  uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT);
+  uv__stream_osx_interrupt_select(stream);
+
+  /* Shutdown? */
+  if ((stream->flags & UV_STREAM_SHUTTING) &&
+      !(stream->flags & UV_CLOSING) &&
+      !(stream->flags & UV_STREAM_SHUT)) {
+    assert(stream->shutdown_req);
+
+    req = stream->shutdown_req;
+    stream->shutdown_req = NULL;
+    stream->flags &= ~UV_STREAM_SHUTTING;
+    uv__req_unregister(stream->loop, req);
+
+    err = 0;
+    if (shutdown(uv__stream_fd(stream), SHUT_WR))
+      err = -errno;
+
+    if (err == 0)
+      stream->flags |= UV_STREAM_SHUT;
+
+    if (req->cb != NULL)
+      req->cb(req, err);
+  }
+}
+
+
+static size_t uv__write_req_size(uv_write_t* req) {
+  size_t size;
+
+  assert(req->bufs != NULL);
+  size = uv__count_bufs(req->bufs + req->write_index,
+                        req->nbufs - req->write_index);
+  assert(req->handle->write_queue_size >= size);
+
+  return size;
+}
+
+
+static void uv__write_req_finish(uv_write_t* req) {
+  uv_stream_t* stream = req->handle;
+
+  /* Pop the req off tcp->write_queue. */
+  QUEUE_REMOVE(&req->queue);
+
+  /* Only free when there was no error. On error, we touch up write_queue_size
+   * right before making the callback. The reason we don't do that right away
+   * is that a write_queue_size > 0 is our only way to signal to the user that
+   * they should stop writing - which they should if we got an error. Something
+   * to revisit in future revisions of the libuv API.
+   */
+  if (req->error == 0) {
+    if (req->bufs != req->bufsml)
+      uv__free(req->bufs);
+    req->bufs = NULL;
+  }
+
+  /* Add it to the write_completed_queue where it will have its
+   * callback called in the near future.
+   */
+  QUEUE_INSERT_TAIL(&stream->write_completed_queue, &req->queue);
+  uv__io_feed(stream->loop, &stream->io_watcher);
+}
+
+
+static int uv__handle_fd(uv_handle_t* handle) {
+  switch (handle->type) {
+    case UV_NAMED_PIPE:
+    case UV_TCP:
+      return ((uv_stream_t*) handle)->io_watcher.fd;
+
+    case UV_UDP:
+      return ((uv_udp_t*) handle)->io_watcher.fd;
+
+    default:
+      return -1;
+  }
+}
+
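+/* Write as much of the queued data as the socket will accept right now,
+ * sending any pending handle as SCM_RIGHTS ancillary data via sendmsg().
+ */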
+static void uv__write(uv_stream_t* stream) {
+  struct iovec* iov;
+  QUEUE* q;
+  uv_write_t* req;
+  int iovmax;
+  int iovcnt;
+  ssize_t n;
+
+start:
+
+  assert(uv__stream_fd(stream) >= 0);
+
+  if (QUEUE_EMPTY(&stream->write_queue))
+    return;
+
+  q = QUEUE_HEAD(&stream->write_queue);
+  req = QUEUE_DATA(q, uv_write_t, queue);
+  assert(req->handle == stream);
+
+  /*
+   * Cast to iovec. We had to have our own uv_buf_t instead of iovec
+   * because Windows's WSABUF is not an iovec.
+   */
+  assert(sizeof(uv_buf_t) == sizeof(struct iovec));
+  iov = (struct iovec*) &(req->bufs[req->write_index]);
+  iovcnt = req->nbufs - req->write_index;
+
+  iovmax = uv__getiovmax();
+
+  /* Limit iov count to avoid EINVALs from writev() */
+  if (iovcnt > iovmax)
+    iovcnt = iovmax;
+
+  /*
+   * Now do the actual writev. Note that we've been updating the pointers
+   * inside the iov each time we write. So there is no need to offset it.
+   */
+
+  if (req->send_handle) {
+    struct msghdr msg;
+    struct cmsghdr *cmsg;
+    int fd_to_send = uv__handle_fd((uv_handle_t*) req->send_handle);
+    char scratch[64] = {0};
+
+    assert(fd_to_send >= 0);
+
+    msg.msg_name = NULL;
+    msg.msg_namelen = 0;
+    msg.msg_iov = iov;
+    msg.msg_iovlen = iovcnt;
+    msg.msg_flags = 0;
+
+    msg.msg_control = (void*) scratch;
+    msg.msg_controllen = CMSG_SPACE(sizeof(fd_to_send));
+
+    cmsg = CMSG_FIRSTHDR(&msg);
+    cmsg->cmsg_level = SOL_SOCKET;
+    cmsg->cmsg_type = SCM_RIGHTS;
+    cmsg->cmsg_len = CMSG_LEN(sizeof(fd_to_send));
+
+    /* silence aliasing warning */
+    {
+      void* pv = CMSG_DATA(cmsg);
+      int* pi = pv;
+      *pi = fd_to_send;
+    }
+
+    do {
+      n = sendmsg(uv__stream_fd(stream), &msg, 0);
+    }
+#if defined(__APPLE__)
+    /*
+     * Due to a possible kernel bug at least in OS X 10.10 "Yosemite",
+     * EPROTOTYPE can be returned while trying to write to a socket that is
+     * shutting down. If we retry the write, we should get the expected EPIPE
+     * instead.
+     */
+    while (n == -1 && (errno == EINTR || errno == EPROTOTYPE));
+#else
+    while (n == -1 && errno == EINTR);
+#endif
+  } else {
+    do {
+      if (iovcnt == 1) {
+        n = write(uv__stream_fd(stream), iov[0].iov_base, iov[0].iov_len);
+      } else {
+        n = writev(uv__stream_fd(stream), iov, iovcnt);
+      }
+    }
+#if defined(__APPLE__)
+    /*
+     * Due to a possible kernel bug at least in OS X 10.10 "Yosemite",
+     * EPROTOTYPE can be returned while trying to write to a socket that is
+     * shutting down. If we retry the write, we should get the expected EPIPE
+     * instead.
+     */
+    while (n == -1 && (errno == EINTR || errno == EPROTOTYPE));
+#else
+    while (n == -1 && errno == EINTR);
+#endif
+  }
+
+  if (n < 0) {
+    if (errno != EAGAIN && errno != EWOULDBLOCK) {
+      /* Error */
+      req->error = -errno;
+      uv__write_req_finish(req);
+      uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT);
+      if (!uv__io_active(&stream->io_watcher, POLLIN))
+        uv__handle_stop(stream);
+      uv__stream_osx_interrupt_select(stream);
+      return;
+    } else if (stream->flags & UV_STREAM_BLOCKING) {
+      /* If this is a blocking stream, try again. */
+      goto start;
+    }
+  } else {
+    /* Successful write */
+
+    while (n >= 0) {
+      uv_buf_t* buf = &(req->bufs[req->write_index]);
+      size_t len = buf->len;
+
+      assert(req->write_index < req->nbufs);
+
+      if ((size_t)n < len) {
+        buf->base += n;
+        buf->len -= n;
+        stream->write_queue_size -= n;
+        n = 0;
+
+        /* There is more to write. */
+        if (stream->flags & UV_STREAM_BLOCKING) {
+          /*
+           * If we're blocking then we should not be enabling the write
+           * watcher - instead we need to try again.
+           */
+          goto start;
+        } else {
+          /* Break loop and ensure the watcher is pending. */
+          break;
+        }
+
+      } else {
+        /* Finished writing the buf at index req->write_index. */
+        req->write_index++;
+
+        assert((size_t)n >= len);
+        n -= len;
+
+        assert(stream->write_queue_size >= len);
+        stream->write_queue_size -= len;
+
+        if (req->write_index == req->nbufs) {
+          /* Then we're done! */
+          assert(n == 0);
+          uv__write_req_finish(req);
+          /* TODO: start trying to write the next request. */
+          return;
+        }
+      }
+    }
+  }
+
+  /* Either we've counted n down to zero or we've got EAGAIN. */
+  assert(n == 0 || n == -1);
+
+  /* Only non-blocking streams should use the write_watcher. */
+  assert(!(stream->flags & UV_STREAM_BLOCKING));
+
+  /* We're not done. */
+  uv__io_start(stream->loop, &stream->io_watcher, POLLOUT);
+
+  /* Notify select() thread about state change */
+  uv__stream_osx_interrupt_select(stream);
+}
+
+
+static void uv__write_callbacks(uv_stream_t* stream) {
+  uv_write_t* req;
+  QUEUE* q;
+
+  while (!QUEUE_EMPTY(&stream->write_completed_queue)) {
+    /* Pop a req off write_completed_queue. */
+    q = QUEUE_HEAD(&stream->write_completed_queue);
+    req = QUEUE_DATA(q, uv_write_t, queue);
+    QUEUE_REMOVE(q);
+    uv__req_unregister(stream->loop, req);
+
+    if (req->bufs != NULL) {
+      stream->write_queue_size -= uv__write_req_size(req);
+      if (req->bufs != req->bufsml)
+        uv__free(req->bufs);
+      req->bufs = NULL;
+    }
+
+    /* NOTE: call callback AFTER freeing the request data. */
+    if (req->cb)
+      req->cb(req, req->error);
+  }
+
+  assert(QUEUE_EMPTY(&stream->write_completed_queue));
+}
+
+
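+/* Determine which libuv handle type corresponds to an already-open fd by
+ * inspecting its socket family (getsockname) and type (SO_TYPE).
+ */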
+uv_handle_type uv__handle_type(int fd) {
+  struct sockaddr_storage ss;
+  socklen_t sslen;
+  socklen_t len;
+  int type;
+
+  memset(&ss, 0, sizeof(ss));
+  sslen = sizeof(ss);
+
+  if (getsockname(fd, (struct sockaddr*)&ss, &sslen))
+    return UV_UNKNOWN_HANDLE;
+
+  len = sizeof type;
+
+  if (getsockopt(fd, SOL_SOCKET, SO_TYPE, &type, &len))
+    return UV_UNKNOWN_HANDLE;
+
+  if (type == SOCK_STREAM) {
+#if defined(_AIX) || defined(__DragonFly__)
+    /* On AIX/DragonFly the getsockname call returns an empty sa structure
+     * for sockets of type AF_UNIX.  For all other types it will
+     * return a properly filled-in structure.
+     */
+    if (sslen == 0)
+      return UV_NAMED_PIPE;
+#endif
+    switch (ss.ss_family) {
+      case AF_UNIX:
+        return UV_NAMED_PIPE;
+      case AF_INET:
+      case AF_INET6:
+        return UV_TCP;
+      }
+  }
+
+  if (type == SOCK_DGRAM &&
+      (ss.ss_family == AF_INET || ss.ss_family == AF_INET6))
+    return UV_UDP;
+
+  return UV_UNKNOWN_HANDLE;
+}
+
+
+static void uv__stream_eof(uv_stream_t* stream, const uv_buf_t* buf) {
+  stream->flags |= UV_STREAM_READ_EOF;
+  uv__io_stop(stream->loop, &stream->io_watcher, POLLIN);
+  if (!uv__io_active(&stream->io_watcher, POLLOUT))
+    uv__handle_stop(stream);
+  uv__stream_osx_interrupt_select(stream);
+  stream->read_cb(stream, UV_EOF, buf);
+  stream->flags &= ~UV_STREAM_READING;
+}
+
+
+static int uv__stream_queue_fd(uv_stream_t* stream, int fd) {
+  uv__stream_queued_fds_t* queued_fds;
+  unsigned int queue_size;
+
+  queued_fds = stream->queued_fds;
+  if (queued_fds == NULL) {
+    queue_size = 8;
+    queued_fds = uv__malloc((queue_size - 1) * sizeof(*queued_fds->fds) +
+                            sizeof(*queued_fds));
+    if (queued_fds == NULL)
+      return -ENOMEM;
+    queued_fds->size = queue_size;
+    queued_fds->offset = 0;
+    stream->queued_fds = queued_fds;
+
+    /* Grow */
+  } else if (queued_fds->size == queued_fds->offset) {
+    queue_size = queued_fds->size + 8;
+    queued_fds = uv__realloc(queued_fds,
+                             (queue_size - 1) * sizeof(*queued_fds->fds) +
+                              sizeof(*queued_fds));
+
+    /*
+     * Allocation failure, report back.
+     * NOTE: if this is fatal, the queued fds are closed in uv__stream_close.
+     */
+    if (queued_fds == NULL)
+      return -ENOMEM;
+    queued_fds->size = queue_size;
+    stream->queued_fds = queued_fds;
+  }
+
+  /* Put fd in a queue */
+  queued_fds->fds[queued_fds->offset++] = fd;
+
+  return 0;
+}
+
+
+#define UV__CMSG_FD_COUNT 64
+#define UV__CMSG_FD_SIZE (UV__CMSG_FD_COUNT * sizeof(int))
+
+
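+/* Extract file descriptors passed as SCM_RIGHTS ancillary data from a
+ * received message.  One descriptor is stored in accepted_fd if that slot is
+ * free; the rest are queued via uv__stream_queue_fd().
+ */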
+static int uv__stream_recv_cmsg(uv_stream_t* stream, struct msghdr* msg) {
+  struct cmsghdr* cmsg;
+
+  for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) {
+    char* start;
+    char* end;
+    int err;
+    void* pv;
+    int* pi;
+    unsigned int i;
+    unsigned int count;
+
+    if (cmsg->cmsg_type != SCM_RIGHTS) {
+      fprintf(stderr, "ignoring non-SCM_RIGHTS ancillary data: %d\n",
+          cmsg->cmsg_type);
+      continue;
+    }
+
+    /* silence aliasing warning */
+    pv = CMSG_DATA(cmsg);
+    pi = pv;
+
+    /* Count available fds */
+    start = (char*) cmsg;
+    end = (char*) cmsg + cmsg->cmsg_len;
+    count = 0;
+    while (start + CMSG_LEN(count * sizeof(*pi)) < end)
+      count++;
+    assert(start + CMSG_LEN(count * sizeof(*pi)) == end);
+
+    for (i = 0; i < count; i++) {
+      /* Already has accepted fd, queue now */
+      if (stream->accepted_fd != -1) {
+        err = uv__stream_queue_fd(stream, pi[i]);
+        if (err != 0) {
+          /* Close rest */
+          for (; i < count; i++)
+            uv__close(pi[i]);
+          return err;
+        }
+      } else {
+        stream->accepted_fd = pi[i];
+      }
+    }
+  }
+
+  return 0;
+}
+
+
+#ifdef __clang__
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wgnu-folding-constant"
+#endif
+
+static void uv__read(uv_stream_t* stream) {
+  uv_buf_t buf;
+  ssize_t nread;
+  struct msghdr msg;
+  char cmsg_space[CMSG_SPACE(UV__CMSG_FD_SIZE)];
+  int count;
+  int err;
+  int is_ipc;
+
+  stream->flags &= ~UV_STREAM_READ_PARTIAL;
+
+  /* Prevent loop starvation when the data comes in as fast as (or faster than)
+   * we can read it. XXX Need to rearm fd if we switch to edge-triggered I/O.
+   */
+  count = 32;
+
+  is_ipc = stream->type == UV_NAMED_PIPE && ((uv_pipe_t*) stream)->ipc;
+
+  /* XXX: Maybe instead of having UV_STREAM_READING we just test if
+   * tcp->read_cb is NULL or not?
+   */
+  while (stream->read_cb
+      && (stream->flags & UV_STREAM_READING)
+      && (count-- > 0)) {
+    assert(stream->alloc_cb != NULL);
+
+    buf = uv_buf_init(NULL, 0);
+    stream->alloc_cb((uv_handle_t*)stream, 64 * 1024, &buf);
+    if (buf.base == NULL || buf.len == 0) {
+      /* User indicates it can't or won't handle the read. */
+      stream->read_cb(stream, UV_ENOBUFS, &buf);
+      return;
+    }
+
+    assert(buf.base != NULL);
+    assert(uv__stream_fd(stream) >= 0);
+
+    if (!is_ipc) {
+      do {
+        nread = read(uv__stream_fd(stream), buf.base, buf.len);
+      }
+      while (nread < 0 && errno == EINTR);
+    } else {
+      /* ipc uses recvmsg */
+      msg.msg_flags = 0;
+      msg.msg_iov = (struct iovec*) &buf;
+      msg.msg_iovlen = 1;
+      msg.msg_name = NULL;
+      msg.msg_namelen = 0;
+      /* Set up to receive a descriptor even if one isn't in the message */
+      msg.msg_controllen = sizeof(cmsg_space);
+      msg.msg_control = cmsg_space;
+
+      do {
+        nread = uv__recvmsg(uv__stream_fd(stream), &msg, 0);
+      }
+      while (nread < 0 && errno == EINTR);
+    }
+
+    if (nread < 0) {
+      /* Error */
+      if (errno == EAGAIN || errno == EWOULDBLOCK) {
+        /* Wait for the next one. */
+        if (stream->flags & UV_STREAM_READING) {
+          uv__io_start(stream->loop, &stream->io_watcher, POLLIN);
+          uv__stream_osx_interrupt_select(stream);
+        }
+        stream->read_cb(stream, 0, &buf);
+      } else {
+        /* Error. User should call uv_close(). */
+        stream->read_cb(stream, -errno, &buf);
+        if (stream->flags & UV_STREAM_READING) {
+          stream->flags &= ~UV_STREAM_READING;
+          uv__io_stop(stream->loop, &stream->io_watcher, POLLIN);
+          if (!uv__io_active(&stream->io_watcher, POLLOUT))
+            uv__handle_stop(stream);
+          uv__stream_osx_interrupt_select(stream);
+        }
+      }
+      return;
+    } else if (nread == 0) {
+      uv__stream_eof(stream, &buf);
+      return;
+    } else {
+      /* Successful read */
+      ssize_t buflen = buf.len;
+
+      if (is_ipc) {
+        err = uv__stream_recv_cmsg(stream, &msg);
+        if (err != 0) {
+          stream->read_cb(stream, err, &buf);
+          return;
+        }
+      }
+      stream->read_cb(stream, nread, &buf);
+
+      /* Return if we didn't fill the buffer; there is no more data to read. */
+      if (nread < buflen) {
+        stream->flags |= UV_STREAM_READ_PARTIAL;
+        return;
+      }
+    }
+  }
+}
+
+
+#ifdef __clang__
+# pragma clang diagnostic pop
+#endif
+
+#undef UV__CMSG_FD_COUNT
+#undef UV__CMSG_FD_SIZE
+
+
+int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) {
+  assert((stream->type == UV_TCP || stream->type == UV_NAMED_PIPE) &&
+         "uv_shutdown (unix) only supports uv_handle_t right now");
+
+  if (!(stream->flags & UV_STREAM_WRITABLE) ||
+      stream->flags & UV_STREAM_SHUT ||
+      stream->flags & UV_STREAM_SHUTTING ||
+      stream->flags & UV_CLOSED ||
+      stream->flags & UV_CLOSING) {
+    return -ENOTCONN;
+  }
+
+  assert(uv__stream_fd(stream) >= 0);
+
+  /* Initialize request */
+  uv__req_init(stream->loop, req, UV_SHUTDOWN);
+  req->handle = stream;
+  req->cb = cb;
+  stream->shutdown_req = req;
+  stream->flags |= UV_STREAM_SHUTTING;
+
+  uv__io_start(stream->loop, &stream->io_watcher, POLLOUT);
+  uv__stream_osx_interrupt_select(stream);
+
+  return 0;
+}
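+
+/* Illustrative usage sketch (not part of libuv; on_shutdown is an example
+ * name).  The callback runs once all queued writes have drained and the
+ * write side of the stream has been shut down:
+ *
+ *   static void on_shutdown(uv_shutdown_t* req, int status) {
+ *     free(req);
+ *   }
+ *
+ *   uv_shutdown_t* req = malloc(sizeof(*req));
+ *   uv_shutdown(req, (uv_stream_t*) &tcp, on_shutdown);
+ */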
+
+
+static void uv__stream_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
+  uv_stream_t* stream;
+
+  stream = container_of(w, uv_stream_t, io_watcher);
+
+  assert(stream->type == UV_TCP ||
+         stream->type == UV_NAMED_PIPE ||
+         stream->type == UV_TTY);
+  assert(!(stream->flags & UV_CLOSING));
+
+  if (stream->connect_req) {
+    uv__stream_connect(stream);
+    return;
+  }
+
+  assert(uv__stream_fd(stream) >= 0);
+
+  /* Ignore POLLHUP here. Even if it's set, there may still be data to read. */
+  if (events & (POLLIN | POLLERR | POLLHUP))
+    uv__read(stream);
+
+  if (uv__stream_fd(stream) == -1)
+    return;  /* read_cb closed stream. */
+
+  /* Short-circuit iff POLLHUP is set, the user is still interested in read
+   * events and uv__read() reported a partial read but not EOF. If the EOF
+   * flag is set, uv__read() called read_cb with err=UV_EOF and we don't
+   * have to do anything. If the partial read flag is not set, we can't
+   * report the EOF yet because there is still data to read.
+   */
+  if ((events & POLLHUP) &&
+      (stream->flags & UV_STREAM_READING) &&
+      (stream->flags & UV_STREAM_READ_PARTIAL) &&
+      !(stream->flags & UV_STREAM_READ_EOF)) {
+    uv_buf_t buf = { NULL, 0 };
+    uv__stream_eof(stream, &buf);
+  }
+
+  if (uv__stream_fd(stream) == -1)
+    return;  /* read_cb closed stream. */
+
+  if (events & (POLLOUT | POLLERR | POLLHUP)) {
+    uv__write(stream);
+    uv__write_callbacks(stream);
+
+    /* Write queue drained. */
+    if (QUEUE_EMPTY(&stream->write_queue))
+      uv__drain(stream);
+  }
+}
+
+
+/**
+ * We get called here directly following a call to connect(2).
+ * In order to determine whether we've errored out or succeeded, we must
+ * call getsockopt.
+ */
+static void uv__stream_connect(uv_stream_t* stream) {
+  int error;
+  uv_connect_t* req = stream->connect_req;
+  socklen_t errorsize = sizeof(int);
+
+  assert(stream->type == UV_TCP || stream->type == UV_NAMED_PIPE);
+  assert(req);
+
+  if (stream->delayed_error) {
+    /* To smooth over the differences between unixes, errors that were
+     * reported synchronously on the first connect call can be delayed
+     * until the next tick--which is now.
+     */
+    error = stream->delayed_error;
+    stream->delayed_error = 0;
+  } else {
+    /* Normal situation: we need to get the socket error from the kernel. */
+    assert(uv__stream_fd(stream) >= 0);
+    getsockopt(uv__stream_fd(stream),
+               SOL_SOCKET,
+               SO_ERROR,
+               &error,
+               &errorsize);
+    error = -error;
+  }
+
+  if (error == -EINPROGRESS)
+    return;
+
+  stream->connect_req = NULL;
+  uv__req_unregister(stream->loop, req);
+
+  if (error < 0 || QUEUE_EMPTY(&stream->write_queue)) {
+    uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT);
+  }
+
+  if (req->cb)
+    req->cb(req, error);
+
+  if (uv__stream_fd(stream) == -1)
+    return;
+
+  if (error < 0) {
+    uv__stream_flush_write_queue(stream, -ECANCELED);
+    uv__write_callbacks(stream);
+  }
+}
+
+
+int uv_write2(uv_write_t* req,
+              uv_stream_t* stream,
+              const uv_buf_t bufs[],
+              unsigned int nbufs,
+              uv_stream_t* send_handle,
+              uv_write_cb cb) {
+  int empty_queue;
+
+  assert(nbufs > 0);
+  assert((stream->type == UV_TCP ||
+          stream->type == UV_NAMED_PIPE ||
+          stream->type == UV_TTY) &&
+         "uv_write (unix) does not yet support other types of streams");
+
+  if (uv__stream_fd(stream) < 0)
+    return -EBADF;
+
+  if (send_handle) {
+    if (stream->type != UV_NAMED_PIPE || !((uv_pipe_t*)stream)->ipc)
+      return -EINVAL;
+
+    /* XXX We abuse uv_write2() to send over UDP handles to child processes.
+     * Don't call uv__stream_fd() on those handles, it's a macro that on OS X
+     * evaluates to a function that operates on a uv_stream_t with a couple of
+     * OS X specific fields. On other Unices it does (handle)->io_watcher.fd,
+     * which works but only by accident.
+     */
+    if (uv__handle_fd((uv_handle_t*) send_handle) < 0)
+      return -EBADF;
+  }
+
+  /* It's legal for write_queue_size > 0 even when the write_queue is empty;
+   * it means there are error-state requests in the write_completed_queue that
+   * will touch up write_queue_size later, see also uv__write_req_finish().
+   * We could check that write_queue is empty instead but that implies making
+   * a write() syscall when we know that the handle is in error mode.
+   */
+  empty_queue = (stream->write_queue_size == 0);
+
+  /* Initialize the req */
+  uv__req_init(stream->loop, req, UV_WRITE);
+  req->cb = cb;
+  req->handle = stream;
+  req->error = 0;
+  req->send_handle = send_handle;
+  QUEUE_INIT(&req->queue);
+
+  req->bufs = req->bufsml;
+  if (nbufs > ARRAY_SIZE(req->bufsml))
+    req->bufs = uv__malloc(nbufs * sizeof(bufs[0]));
+
+  if (req->bufs == NULL)
+    return -ENOMEM;
+
+  memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0]));
+  req->nbufs = nbufs;
+  req->write_index = 0;
+  stream->write_queue_size += uv__count_bufs(bufs, nbufs);
+
+  /* Append the request to write_queue. */
+  QUEUE_INSERT_TAIL(&stream->write_queue, &req->queue);
+
+  /* If the queue was empty when this function began, we should attempt to
+   * do the write immediately. Otherwise start the write_watcher and wait
+   * for the fd to become writable.
+   */
+  if (stream->connect_req) {
+    /* Still connecting, do nothing. */
+  }
+  else if (empty_queue) {
+    uv__write(stream);
+  }
+  else {
+    /*
+     * Blocking streams should never have anything in the queue.
+     * If this assert fires then somehow the blocking stream isn't being
+     * sufficiently flushed in uv__write.
+     */
+    assert(!(stream->flags & UV_STREAM_BLOCKING));
+    uv__io_start(stream->loop, &stream->io_watcher, POLLOUT);
+    uv__stream_osx_interrupt_select(stream);
+  }
+
+  return 0;
+}
+
+
+/* The buffers to be written must remain valid until the callback is called.
+ * This is not required for the uv_buf_t array.
+ */
+int uv_write(uv_write_t* req,
+             uv_stream_t* handle,
+             const uv_buf_t bufs[],
+             unsigned int nbufs,
+             uv_write_cb cb) {
+  return uv_write2(req, handle, bufs, nbufs, NULL, cb);
+}
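+
+/* Illustrative usage sketch (not part of libuv; on_write and msg are example
+ * names).  The bytes each uv_buf_t points at must stay valid until the
+ * callback fires; the uv_buf_t array itself may live on the stack:
+ *
+ *   static void on_write(uv_write_t* req, int status) {
+ *     free(req);
+ *   }
+ *
+ *   static const char msg[] = "hello";
+ *   uv_write_t* req = malloc(sizeof(*req));
+ *   uv_buf_t buf = uv_buf_init((char*) msg, sizeof(msg) - 1);
+ *   uv_write(req, (uv_stream_t*) &tcp, &buf, 1, on_write);
+ */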
+
+
+void uv_try_write_cb(uv_write_t* req, int status) {
+  /* Should not be called */
+  abort();
+}
+
+
+int uv_try_write(uv_stream_t* stream,
+                 const uv_buf_t bufs[],
+                 unsigned int nbufs) {
+  int r;
+  int has_pollout;
+  size_t written;
+  size_t req_size;
+  uv_write_t req;
+
+  /* Connecting or already writing some data */
+  if (stream->connect_req != NULL || stream->write_queue_size != 0)
+    return -EAGAIN;
+
+  has_pollout = uv__io_active(&stream->io_watcher, POLLOUT);
+
+  r = uv_write(&req, stream, bufs, nbufs, uv_try_write_cb);
+  if (r != 0)
+    return r;
+
+  /* Remove not written bytes from write queue size */
+  written = uv__count_bufs(bufs, nbufs);
+  if (req.bufs != NULL)
+    req_size = uv__write_req_size(&req);
+  else
+    req_size = 0;
+  written -= req_size;
+  stream->write_queue_size -= req_size;
+
+  /* Unqueue request, regardless of immediateness */
+  QUEUE_REMOVE(&req.queue);
+  uv__req_unregister(stream->loop, &req);
+  if (req.bufs != req.bufsml)
+    uv__free(req.bufs);
+  req.bufs = NULL;
+
+  /* Do not poll for writable if we weren't before calling this. */
+  if (!has_pollout) {
+    uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT);
+    uv__stream_osx_interrupt_select(stream);
+  }
+
+  if (written == 0 && req_size != 0)
+    return -EAGAIN;
+  else
+    return written;
+}
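+
+/* Illustrative usage sketch (not part of libuv).  uv_try_write() attempts a
+ * synchronous, non-queuing write and returns the number of bytes written or
+ * a negative error code; a common pattern is to fall back to uv_write() when
+ * it cannot make progress or writes only part of the data:
+ *
+ *   int n = uv_try_write(stream, &buf, 1);
+ *   if (n < 0)
+ *     uv_write(req, stream, &buf, 1, on_write);
+ *   else if ((size_t) n < buf.len)
+ *     ...queue the remaining buf.len - n bytes with uv_write()...
+ */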
+
+
+int uv_read_start(uv_stream_t* stream,
+                  uv_alloc_cb alloc_cb,
+                  uv_read_cb read_cb) {
+  assert(stream->type == UV_TCP || stream->type == UV_NAMED_PIPE ||
+      stream->type == UV_TTY);
+
+  if (stream->flags & UV_CLOSING)
+    return -EINVAL;
+
+  /* The UV_STREAM_READING flag is independent of the state of the tcp - it
+   * just expresses the desired state of the user.
+   */
+  stream->flags |= UV_STREAM_READING;
+
+  /* TODO: try to do the read inline? */
+  /* TODO: keep track of tcp state. If we've gotten a EOF then we should
+   * not start the IO watcher.
+   */
+  assert(uv__stream_fd(stream) >= 0);
+  assert(alloc_cb);
+
+  stream->read_cb = read_cb;
+  stream->alloc_cb = alloc_cb;
+
+  uv__io_start(stream->loop, &stream->io_watcher, POLLIN);
+  uv__handle_start(stream);
+  uv__stream_osx_interrupt_select(stream);
+
+  return 0;
+}
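+
+/* Illustrative usage sketch (not part of libuv; on_alloc and on_read are
+ * example names) showing the callback pair expected by uv_read_start():
+ *
+ *   static void on_alloc(uv_handle_t* h, size_t suggested, uv_buf_t* buf) {
+ *     buf->base = malloc(suggested);
+ *     buf->len = buf->base != NULL ? suggested : 0;
+ *   }
+ *
+ *   static void on_read(uv_stream_t* s, ssize_t nread, const uv_buf_t* buf) {
+ *     if (nread > 0)
+ *       ...consume nread bytes from buf->base...
+ *     else if (nread < 0)
+ *       uv_close((uv_handle_t*) s, NULL);   (UV_EOF or a read error)
+ *     free(buf->base);
+ *   }
+ *
+ *   uv_read_start(stream, on_alloc, on_read);
+ */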
+
+
+int uv_read_stop(uv_stream_t* stream) {
+  if (!(stream->flags & UV_STREAM_READING))
+    return 0;
+
+  stream->flags &= ~UV_STREAM_READING;
+  uv__io_stop(stream->loop, &stream->io_watcher, POLLIN);
+  if (!uv__io_active(&stream->io_watcher, POLLOUT))
+    uv__handle_stop(stream);
+  uv__stream_osx_interrupt_select(stream);
+
+  stream->read_cb = NULL;
+  stream->alloc_cb = NULL;
+  return 0;
+}
+
+
+int uv_is_readable(const uv_stream_t* stream) {
+  return !!(stream->flags & UV_STREAM_READABLE);
+}
+
+
+int uv_is_writable(const uv_stream_t* stream) {
+  return !!(stream->flags & UV_STREAM_WRITABLE);
+}
+
+
+#if defined(__APPLE__)
+int uv___stream_fd(const uv_stream_t* handle) {
+  const uv__stream_select_t* s;
+
+  assert(handle->type == UV_TCP ||
+         handle->type == UV_TTY ||
+         handle->type == UV_NAMED_PIPE);
+
+  s = handle->select;
+  if (s != NULL)
+    return s->fd;
+
+  return handle->io_watcher.fd;
+}
+#endif /* defined(__APPLE__) */
+
+
+void uv__stream_close(uv_stream_t* handle) {
+  unsigned int i;
+  uv__stream_queued_fds_t* queued_fds;
+
+#if defined(__APPLE__)
+  /* Terminate select loop first */
+  if (handle->select != NULL) {
+    uv__stream_select_t* s;
+
+    s = handle->select;
+
+    uv_sem_post(&s->close_sem);
+    uv_sem_post(&s->async_sem);
+    uv__stream_osx_interrupt_select(handle);
+    uv_thread_join(&s->thread);
+    uv_sem_destroy(&s->close_sem);
+    uv_sem_destroy(&s->async_sem);
+    uv__close(s->fake_fd);
+    uv__close(s->int_fd);
+    uv_close((uv_handle_t*) &s->async, uv__stream_osx_cb_close);
+
+    handle->select = NULL;
+  }
+#endif /* defined(__APPLE__) */
+
+  uv__io_close(handle->loop, &handle->io_watcher);
+  uv_read_stop(handle);
+  uv__handle_stop(handle);
+
+  if (handle->io_watcher.fd != -1) {
+    /* Don't close stdio file descriptors.  Nothing good comes from it. */
+    if (handle->io_watcher.fd > STDERR_FILENO)
+      uv__close(handle->io_watcher.fd);
+    handle->io_watcher.fd = -1;
+  }
+
+  if (handle->accepted_fd != -1) {
+    uv__close(handle->accepted_fd);
+    handle->accepted_fd = -1;
+  }
+
+  /* Close all queued fds */
+  if (handle->queued_fds != NULL) {
+    queued_fds = handle->queued_fds;
+    for (i = 0; i < queued_fds->offset; i++)
+      uv__close(queued_fds->fds[i]);
+    uv__free(handle->queued_fds);
+    handle->queued_fds = NULL;
+  }
+
+  assert(!uv__io_active(&handle->io_watcher, POLLIN | POLLOUT));
+}
+
+
+int uv_stream_set_blocking(uv_stream_t* handle, int blocking) {
+  /* Don't need to check the file descriptor, uv__nonblock()
+   * will fail with EBADF if it's not valid.
+   */
+  return uv__nonblock(uv__stream_fd(handle), !blocking);
+}

+ 821 - 0
Utilities/cmlibuv/src/unix/sunos.c

@@ -0,0 +1,821 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+
+#ifndef SUNOS_NO_IFADDRS
+# include <ifaddrs.h>
+#endif
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/if_arp.h>
+#include <sys/sockio.h>
+
+#include <sys/loadavg.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include <kstat.h>
+#include <fcntl.h>
+
+#include <sys/port.h>
+#include <port.h>
+
+#define PORT_FIRED 0x69
+#define PORT_UNUSED 0x0
+#define PORT_LOADED 0x99
+#define PORT_DELETED -1
+
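+/* <procfs.h> is not compatible with the 64-bit file offset environment on
+ * 32-bit builds, so temporarily drop _FILE_OFFSET_BITS around the include
+ * and restore it afterwards.
+ */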
+#if (!defined(_LP64)) && (_FILE_OFFSET_BITS - 0 == 64)
+#define PROCFS_FILE_OFFSET_BITS_HACK 1
+#undef _FILE_OFFSET_BITS
+#else
+#define PROCFS_FILE_OFFSET_BITS_HACK 0
+#endif
+
+#include <procfs.h>
+
+#if (PROCFS_FILE_OFFSET_BITS_HACK - 0 == 1)
+#define _FILE_OFFSET_BITS 64
+#endif
+
+
+int uv__platform_loop_init(uv_loop_t* loop) {
+  int err;
+  int fd;
+
+  loop->fs_fd = -1;
+  loop->backend_fd = -1;
+
+  fd = port_create();
+  if (fd == -1)
+    return -errno;
+
+  err = uv__cloexec(fd, 1);
+  if (err) {
+    uv__close(fd);
+    return err;
+  }
+  loop->backend_fd = fd;
+
+  return 0;
+}
+
+
+void uv__platform_loop_delete(uv_loop_t* loop) {
+  if (loop->fs_fd != -1) {
+    uv__close(loop->fs_fd);
+    loop->fs_fd = -1;
+  }
+
+  if (loop->backend_fd != -1) {
+    uv__close(loop->backend_fd);
+    loop->backend_fd = -1;
+  }
+}
+
+
+void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
+  struct port_event* events;
+  uintptr_t i;
+  uintptr_t nfds;
+
+  assert(loop->watchers != NULL);
+
+  events = (struct port_event*) loop->watchers[loop->nwatchers];
+  nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
+  if (events == NULL)
+    return;
+
+  /* Invalidate events with same file descriptor */
+  for (i = 0; i < nfds; i++)
+    if ((int) events[i].portev_object == fd)
+      events[i].portev_object = -1;
+}
+
+
+int uv__io_check_fd(uv_loop_t* loop, int fd) {
+  if (port_associate(loop->backend_fd, PORT_SOURCE_FD, fd, POLLIN, 0))
+    return -errno;
+
+  if (port_dissociate(loop->backend_fd, PORT_SOURCE_FD, fd))
+    abort();
+
+  return 0;
+}
+
+
+void uv__io_poll(uv_loop_t* loop, int timeout) {
+  struct port_event events[1024];
+  struct port_event* pe;
+  struct timespec spec;
+  QUEUE* q;
+  uv__io_t* w;
+  sigset_t* pset;
+  sigset_t set;
+  uint64_t base;
+  uint64_t diff;
+  unsigned int nfds;
+  unsigned int i;
+  int saved_errno;
+  int have_signals;
+  int nevents;
+  int count;
+  int err;
+  int fd;
+
+  if (loop->nfds == 0) {
+    assert(QUEUE_EMPTY(&loop->watcher_queue));
+    return;
+  }
+
+  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
+    q = QUEUE_HEAD(&loop->watcher_queue);
+    QUEUE_REMOVE(q);
+    QUEUE_INIT(q);
+
+    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
+    assert(w->pevents != 0);
+
+    if (port_associate(loop->backend_fd, PORT_SOURCE_FD, w->fd, w->pevents, 0))
+      abort();
+
+    w->events = w->pevents;
+  }
+
+  pset = NULL;
+  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
+    pset = &set;
+    sigemptyset(pset);
+    sigaddset(pset, SIGPROF);
+  }
+
+  assert(timeout >= -1);
+  base = loop->time;
+  count = 48; /* Benchmarks suggest this gives the best throughput. */
+
+  for (;;) {
+    if (timeout != -1) {
+      spec.tv_sec = timeout / 1000;
+      spec.tv_nsec = (timeout % 1000) * 1000000;
+    }
+
+    /* Work around a kernel bug where nfds is not updated. */
+    events[0].portev_source = 0;
+
+    nfds = 1;
+    saved_errno = 0;
+
+    if (pset != NULL)
+      pthread_sigmask(SIG_BLOCK, pset, NULL);
+
+    err = port_getn(loop->backend_fd,
+                    events,
+                    ARRAY_SIZE(events),
+                    &nfds,
+                    timeout == -1 ? NULL : &spec);
+
+    if (pset != NULL)
+      pthread_sigmask(SIG_UNBLOCK, pset, NULL);
+
+    if (err) {
+      /* Work around another kernel bug: port_getn() may return events even
+       * on error.
+       */
+      if (errno == EINTR || errno == ETIME)
+        saved_errno = errno;
+      else
+        abort();
+    }
+
+    /* Update loop->time unconditionally. It's tempting to skip the update when
+     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
+     * operating system didn't reschedule our process while in the syscall.
+     */
+    SAVE_ERRNO(uv__update_time(loop));
+
+    if (events[0].portev_source == 0) {
+      if (timeout == 0)
+        return;
+
+      if (timeout == -1)
+        continue;
+
+      goto update_timeout;
+    }
+
+    if (nfds == 0) {
+      assert(timeout != -1);
+      return;
+    }
+
+    have_signals = 0;
+    nevents = 0;
+
+    assert(loop->watchers != NULL);
+    loop->watchers[loop->nwatchers] = (void*) events;
+    loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
+    for (i = 0; i < nfds; i++) {
+      pe = events + i;
+      fd = pe->portev_object;
+
+      /* Skip invalidated events, see uv__platform_invalidate_fd */
+      if (fd == -1)
+        continue;
+
+      assert(fd >= 0);
+      assert((unsigned) fd < loop->nwatchers);
+
+      w = loop->watchers[fd];
+
+      /* File descriptor that we've stopped watching, ignore. */
+      if (w == NULL)
+        continue;
+
+      /* Run signal watchers last.  This also affects child process watchers
+       * because those are implemented in terms of signal watchers.
+       */
+      if (w == &loop->signal_io_watcher)
+        have_signals = 1;
+      else
+        w->cb(loop, w, pe->portev_events);
+
+      nevents++;
+
+      if (w != loop->watchers[fd])
+        continue;  /* Disabled by callback. */
+
+      /* Event ports operate in oneshot mode; rearm on the next run. */
+      if (w->pevents != 0 && QUEUE_EMPTY(&w->watcher_queue))
+        QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
+    }
+
+    if (have_signals != 0)
+      loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
+
+    loop->watchers[loop->nwatchers] = NULL;
+    loop->watchers[loop->nwatchers + 1] = NULL;
+
+    if (have_signals != 0)
+      return;  /* Event loop should cycle now so don't poll again. */
+
+    if (nevents != 0) {
+      if (nfds == ARRAY_SIZE(events) && --count != 0) {
+        /* Poll for more events but don't block this time. */
+        timeout = 0;
+        continue;
+      }
+      return;
+    }
+
+    if (saved_errno == ETIME) {
+      assert(timeout != -1);
+      return;
+    }
+
+    if (timeout == 0)
+      return;
+
+    if (timeout == -1)
+      continue;
+
+update_timeout:
+    assert(timeout > 0);
+
+    diff = loop->time - base;
+    if (diff >= (uint64_t) timeout)
+      return;
+
+    timeout -= diff;
+  }
+}
+
+
+uint64_t uv__hrtime(uv_clocktype_t type) {
+  return gethrtime();
+}
+
+
+/*
+ * We could use a static buffer for the path manipulations that we need outside
+ * of the function, but this function could be called by multiple consumers and
+ * we don't want to potentially create a race condition in the use of snprintf.
+ */
+int uv_exepath(char* buffer, size_t* size) {
+  ssize_t res;
+  char buf[128];
+
+  if (buffer == NULL || size == NULL || *size == 0)
+    return -EINVAL;
+
+  snprintf(buf, sizeof(buf), "/proc/%lu/path/a.out", (unsigned long) getpid());
+
+  res = *size - 1;
+  if (res > 0)
+    res = readlink(buf, buffer, res);
+
+  if (res == -1)
+    return -errno;
+
+  buffer[res] = '\0';
+  *size = res;
+  return 0;
+}
+
+
+uint64_t uv_get_free_memory(void) {
+  return (uint64_t) sysconf(_SC_PAGESIZE) * sysconf(_SC_AVPHYS_PAGES);
+}
+
+
+uint64_t uv_get_total_memory(void) {
+  return (uint64_t) sysconf(_SC_PAGESIZE) * sysconf(_SC_PHYS_PAGES);
+}
+
+
+void uv_loadavg(double avg[3]) {
+  (void) getloadavg(avg, 3);
+}
+
+
+#if defined(PORT_SOURCE_FILE)
+
+static int uv__fs_event_rearm(uv_fs_event_t *handle) {
+  if (handle->fd == -1)
+    return -EBADF;
+
+  if (port_associate(handle->loop->fs_fd,
+                     PORT_SOURCE_FILE,
+                     (uintptr_t) &handle->fo,
+                     FILE_ATTRIB | FILE_MODIFIED,
+                     handle) == -1) {
+    return -errno;
+  }
+  handle->fd = PORT_LOADED;
+
+  return 0;
+}
+
+
+static void uv__fs_event_read(uv_loop_t* loop,
+                              uv__io_t* w,
+                              unsigned int revents) {
+  uv_fs_event_t *handle = NULL;
+  timespec_t timeout;
+  port_event_t pe;
+  int events;
+  int r;
+
+  (void) w;
+  (void) revents;
+
+  do {
+    uint_t n = 1;
+
+    /*
+     * Note that our use of port_getn() here (and not port_get()) is deliberate:
+     * there is a bug in event ports (Sun bug 6456558) whereby a zeroed timeout
+     * causes port_get() to return success instead of ETIME when there aren't
+     * actually any events (!); by using port_getn() in lieu of port_get(),
+     * we can at least work around the bug by checking for zero returned events
+     * and treating it as we would ETIME.
+     */
+    do {
+      memset(&timeout, 0, sizeof timeout);
+      r = port_getn(loop->fs_fd, &pe, 1, &n, &timeout);
+    }
+    while (r == -1 && errno == EINTR);
+
+    if ((r == -1 && errno == ETIME) || n == 0)
+      break;
+
+    handle = (uv_fs_event_t*) pe.portev_user;
+    assert((r == 0) && "unexpected port_get() error");
+
+    events = 0;
+    if (pe.portev_events & (FILE_ATTRIB | FILE_MODIFIED))
+      events |= UV_CHANGE;
+    if (pe.portev_events & ~(FILE_ATTRIB | FILE_MODIFIED))
+      events |= UV_RENAME;
+    assert(events != 0);
+    handle->fd = PORT_FIRED;
+    handle->cb(handle, NULL, events, 0);
+
+    if (handle->fd != PORT_DELETED) {
+      r = uv__fs_event_rearm(handle);
+      if (r != 0)
+        handle->cb(handle, NULL, 0, r);
+    }
+  }
+  while (handle->fd != PORT_DELETED);
+}
+
+
+int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
+  uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
+  return 0;
+}
+
+
+int uv_fs_event_start(uv_fs_event_t* handle,
+                      uv_fs_event_cb cb,
+                      const char* path,
+                      unsigned int flags) {
+  int portfd;
+  int first_run;
+  int err;
+
+  if (uv__is_active(handle))
+    return -EINVAL;
+
+  first_run = 0;
+  if (handle->loop->fs_fd == -1) {
+    portfd = port_create();
+    if (portfd == -1)
+      return -errno;
+    handle->loop->fs_fd = portfd;
+    first_run = 1;
+  }
+
+  uv__handle_start(handle);
+  handle->path = uv__strdup(path);
+  handle->fd = PORT_UNUSED;
+  handle->cb = cb;
+
+  memset(&handle->fo, 0, sizeof handle->fo);
+  handle->fo.fo_name = handle->path;
+  err = uv__fs_event_rearm(handle);
+  if (err != 0)
+    return err;
+
+  if (first_run) {
+    uv__io_init(&handle->loop->fs_event_watcher, uv__fs_event_read, portfd);
+    uv__io_start(handle->loop, &handle->loop->fs_event_watcher, POLLIN);
+  }
+
+  return 0;
+}
+
+
+int uv_fs_event_stop(uv_fs_event_t* handle) {
+  if (!uv__is_active(handle))
+    return 0;
+
+  if (handle->fd == PORT_FIRED || handle->fd == PORT_LOADED) {
+    port_dissociate(handle->loop->fs_fd,
+                    PORT_SOURCE_FILE,
+                    (uintptr_t) &handle->fo);
+  }
+
+  handle->fd = PORT_DELETED;
+  uv__free(handle->path);
+  handle->path = NULL;
+  handle->fo.fo_name = NULL;
+  uv__handle_stop(handle);
+
+  return 0;
+}
+
+void uv__fs_event_close(uv_fs_event_t* handle) {
+  uv_fs_event_stop(handle);
+}
+
+#else /* !defined(PORT_SOURCE_FILE) */
+
+int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
+  return -ENOSYS;
+}
+
+
+int uv_fs_event_start(uv_fs_event_t* handle,
+                      uv_fs_event_cb cb,
+                      const char* filename,
+                      unsigned int flags) {
+  return -ENOSYS;
+}
+
+
+int uv_fs_event_stop(uv_fs_event_t* handle) {
+  return -ENOSYS;
+}
+
+
+void uv__fs_event_close(uv_fs_event_t* handle) {
+  UNREACHABLE();
+}
+
+#endif /* defined(PORT_SOURCE_FILE) */
+
+
+char** uv_setup_args(int argc, char** argv) {
+  return argv;
+}
+
+
+int uv_set_process_title(const char* title) {
+  return 0;
+}
+
+
+int uv_get_process_title(char* buffer, size_t size) {
+  if (buffer == NULL || size == 0)
+    return -EINVAL;
+
+  buffer[0] = '\0';
+  return 0;
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+  psinfo_t psinfo;
+  int err;
+  int fd;
+
+  fd = open("/proc/self/psinfo", O_RDONLY);
+  if (fd == -1)
+    return -errno;
+
+  /* FIXME(bnoordhuis) Handle EINTR. */
+  err = -EINVAL;
+  if (read(fd, &psinfo, sizeof(psinfo)) == sizeof(psinfo)) {
+    *rss = (size_t)psinfo.pr_rssize * 1024;
+    err = 0;
+  }
+  uv__close(fd);
+
+  return err;
+}
+
+
+int uv_uptime(double* uptime) {
+  kstat_ctl_t   *kc;
+  kstat_t       *ksp;
+  kstat_named_t *knp;
+
+  long hz = sysconf(_SC_CLK_TCK);
+
+  kc = kstat_open();
+  if (kc == NULL)
+    return -EPERM;
+
+  ksp = kstat_lookup(kc, (char*) "unix", 0, (char*) "system_misc");
+  if (kstat_read(kc, ksp, NULL) == -1) {
+    *uptime = -1;
+  } else {
+    knp = (kstat_named_t*)  kstat_data_lookup(ksp, (char*) "clk_intr");
+    *uptime = knp->value.ul / hz;
+  }
+  kstat_close(kc);
+
+  return 0;
+}
+
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+  int           lookup_instance;
+  kstat_ctl_t   *kc;
+  kstat_t       *ksp;
+  kstat_named_t *knp;
+  uv_cpu_info_t* cpu_info;
+
+  kc = kstat_open();
+  if (kc == NULL)
+    return -EPERM;
+
+  /* Get count of cpus */
+  lookup_instance = 0;
+  while ((ksp = kstat_lookup(kc, (char*) "cpu_info", lookup_instance, NULL))) {
+    lookup_instance++;
+  }
+
+  *cpu_infos = uv__malloc(lookup_instance * sizeof(**cpu_infos));
+  if (!(*cpu_infos)) {
+    kstat_close(kc);
+    return -ENOMEM;
+  }
+
+  *count = lookup_instance;
+
+  cpu_info = *cpu_infos;
+  lookup_instance = 0;
+  while ((ksp = kstat_lookup(kc, (char*) "cpu_info", lookup_instance, NULL))) {
+    if (kstat_read(kc, ksp, NULL) == -1) {
+      cpu_info->speed = 0;
+      cpu_info->model = NULL;
+    } else {
+      knp = kstat_data_lookup(ksp, (char*) "clock_MHz");
+      assert(knp->data_type == KSTAT_DATA_INT32 ||
+             knp->data_type == KSTAT_DATA_INT64);
+      cpu_info->speed = (knp->data_type == KSTAT_DATA_INT32) ? knp->value.i32
+                                                             : knp->value.i64;
+
+      knp = kstat_data_lookup(ksp, (char*) "brand");
+      assert(knp->data_type == KSTAT_DATA_STRING);
+      cpu_info->model = uv__strdup(KSTAT_NAMED_STR_PTR(knp));
+    }
+
+    lookup_instance++;
+    cpu_info++;
+  }
+
+  cpu_info = *cpu_infos;
+  lookup_instance = 0;
+  for (;;) {
+    ksp = kstat_lookup(kc, (char*) "cpu", lookup_instance, (char*) "sys");
+
+    if (ksp == NULL)
+      break;
+
+    if (kstat_read(kc, ksp, NULL) == -1) {
+      cpu_info->cpu_times.user = 0;
+      cpu_info->cpu_times.nice = 0;
+      cpu_info->cpu_times.sys = 0;
+      cpu_info->cpu_times.idle = 0;
+      cpu_info->cpu_times.irq = 0;
+    } else {
+      knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_user");
+      assert(knp->data_type == KSTAT_DATA_UINT64);
+      cpu_info->cpu_times.user = knp->value.ui64;
+
+      knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_kernel");
+      assert(knp->data_type == KSTAT_DATA_UINT64);
+      cpu_info->cpu_times.sys = knp->value.ui64;
+
+      knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_idle");
+      assert(knp->data_type == KSTAT_DATA_UINT64);
+      cpu_info->cpu_times.idle = knp->value.ui64;
+
+      knp = kstat_data_lookup(ksp, (char*) "intr");
+      assert(knp->data_type == KSTAT_DATA_UINT64);
+      cpu_info->cpu_times.irq = knp->value.ui64;
+      cpu_info->cpu_times.nice = 0;
+    }
+
+    lookup_instance++;
+    cpu_info++;
+  }
+
+  kstat_close(kc);
+
+  return 0;
+}
+
+
+void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
+  int i;
+
+  for (i = 0; i < count; i++) {
+    uv__free(cpu_infos[i].model);
+  }
+
+  uv__free(cpu_infos);
+}
+
+/*
+ * Inspired By:
+ * https://blogs.oracle.com/paulie/entry/retrieving_mac_address_in_solaris
+ * http://www.pauliesworld.org/project/getmac.c
+ */
+static int uv__set_phys_addr(uv_interface_address_t* address,
+                             struct ifaddrs* ent) {
+
+  struct sockaddr_dl* sa_addr;
+  int sockfd;
+  int i;
+  struct arpreq arpreq;
+
+  /* This appears to only work as root */
+  sa_addr = (struct sockaddr_dl*)(ent->ifa_addr);
+  memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr));
+  for (i = 0; i < sizeof(address->phys_addr); i++) {
+    if (address->phys_addr[i] != 0)
+      return 0;
+  }
+  memset(&arpreq, 0, sizeof(arpreq));
+  if (address->address.address4.sin_family == AF_INET) {
+    struct sockaddr_in* sin = ((struct sockaddr_in*)&arpreq.arp_pa);
+    sin->sin_addr.s_addr = address->address.address4.sin_addr.s_addr;
+  } else if (address->address.address4.sin_family == AF_INET6) {
+    struct sockaddr_in6* sin = ((struct sockaddr_in6*)&arpreq.arp_pa);
+    memcpy(sin->sin6_addr.s6_addr,
+           address->address.address6.sin6_addr.s6_addr,
+           sizeof(address->address.address6.sin6_addr.s6_addr));
+  } else {
+    return 0;
+  }
+
+  sockfd = socket(AF_INET, SOCK_DGRAM, 0);
+  if (sockfd < 0)
+    return -errno;
+
+  if (ioctl(sockfd, SIOCGARP, (char*)&arpreq) == -1) {
+    uv__close(sockfd);
+    return -errno;
+  }
+  memcpy(address->phys_addr, arpreq.arp_ha.sa_data, sizeof(address->phys_addr));
+  uv__close(sockfd);
+  return 0;
+}
+
+int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
+#ifdef SUNOS_NO_IFADDRS
+  return -ENOSYS;
+#else
+  uv_interface_address_t* address;
+  struct ifaddrs* addrs;
+  struct ifaddrs* ent;
+  int i;
+
+  if (getifaddrs(&addrs))
+    return -errno;
+
+  *count = 0;
+
+  /* Count the number of interfaces */
+  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+    if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) ||
+        (ent->ifa_addr == NULL) ||
+        (ent->ifa_addr->sa_family == PF_PACKET)) {
+      continue;
+    }
+
+    (*count)++;
+  }
+
+  *addresses = uv__malloc(*count * sizeof(**addresses));
+  if (!(*addresses)) {
+    freeifaddrs(addrs);
+    return -ENOMEM;
+  }
+
+  address = *addresses;
+
+  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+    if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
+      continue;
+
+    if (ent->ifa_addr == NULL)
+      continue;
+
+    address->name = uv__strdup(ent->ifa_name);
+
+    if (ent->ifa_addr->sa_family == AF_INET6) {
+      address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
+    } else {
+      address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
+    }
+
+    if (ent->ifa_netmask->sa_family == AF_INET6) {
+      address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
+    } else {
+      address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
+    }
+
+    address->is_internal = !!((ent->ifa_flags & IFF_PRIVATE) ||
+                           (ent->ifa_flags & IFF_LOOPBACK));
+
+    uv__set_phys_addr(address, ent);
+    address++;
+  }
+
+  freeifaddrs(addrs);
+
+  return 0;
+#endif  /* SUNOS_NO_IFADDRS */
+}
+
+
+void uv_free_interface_addresses(uv_interface_address_t* addresses,
+  int count) {
+  int i;
+
+  for (i = 0; i < count; i++) {
+    uv__free(addresses[i].name);
+  }
+
+  uv__free(addresses);
+}

+ 395 - 0
Utilities/cmlibuv/src/unix/tcp.c

@@ -0,0 +1,395 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <assert.h>
+#include <errno.h>
+
+
+static int maybe_new_socket(uv_tcp_t* handle, int domain, int flags) {
+  int sockfd;
+  int err;
+
+  if (domain == AF_UNSPEC || uv__stream_fd(handle) != -1) {
+    handle->flags |= flags;
+    return 0;
+  }
+
+  err = uv__socket(domain, SOCK_STREAM, 0);
+  if (err < 0)
+    return err;
+  sockfd = err;
+
+  err = uv__stream_open((uv_stream_t*) handle, sockfd, flags);
+  if (err) {
+    uv__close(sockfd);
+    return err;
+  }
+
+  return 0;
+}
+
+
+int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* tcp, unsigned int flags) {
+  int domain;
+
+  /* Use the lower 8 bits for the domain */
+  domain = flags & 0xFF;
+  if (domain != AF_INET && domain != AF_INET6 && domain != AF_UNSPEC)
+    return -EINVAL;
+
+  if (flags & ~0xFF)
+    return -EINVAL;
+
+  uv__stream_init(loop, (uv_stream_t*)tcp, UV_TCP);
+
+  /* If anything fails beyond this point we need to remove the handle from
+   * the handle queue, since it was added by uv__handle_init in
+   * uv__stream_init.
+   */
+
+  if (domain != AF_UNSPEC) {
+    int err = maybe_new_socket(tcp, domain, 0);
+    if (err) {
+      QUEUE_REMOVE(&tcp->handle_queue);
+      return err;
+    }
+  }
+
+  return 0;
+}
+
+
+int uv_tcp_init(uv_loop_t* loop, uv_tcp_t* tcp) {
+  return uv_tcp_init_ex(loop, tcp, AF_UNSPEC);
+}
+
+
+int uv__tcp_bind(uv_tcp_t* tcp,
+                 const struct sockaddr* addr,
+                 unsigned int addrlen,
+                 unsigned int flags) {
+  int err;
+  int on;
+
+  /* Cannot set IPv6-only mode on non-IPv6 socket. */
+  if ((flags & UV_TCP_IPV6ONLY) && addr->sa_family != AF_INET6)
+    return -EINVAL;
+
+  err = maybe_new_socket(tcp,
+                         addr->sa_family,
+                         UV_STREAM_READABLE | UV_STREAM_WRITABLE);
+  if (err)
+    return err;
+
+  on = 1;
+  if (setsockopt(tcp->io_watcher.fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)))
+    return -errno;
+
+#ifdef IPV6_V6ONLY
+  if (addr->sa_family == AF_INET6) {
+    on = (flags & UV_TCP_IPV6ONLY) != 0;
+    if (setsockopt(tcp->io_watcher.fd,
+                   IPPROTO_IPV6,
+                   IPV6_V6ONLY,
+                   &on,
+                   sizeof on) == -1) {
+#if defined(__MVS__)
+      if (errno == EOPNOTSUPP)
+        return -EINVAL;
+#endif
+      return -errno;
+    }
+  }
+#endif
+
+  errno = 0;
+  if (bind(tcp->io_watcher.fd, addr, addrlen) && errno != EADDRINUSE) {
+    if (errno == EAFNOSUPPORT)
+      /* OSX, other BSDs and SunOS fail with EAFNOSUPPORT when binding a
+       * socket created with AF_INET to an AF_INET6 address or vice versa. */
+      return -EINVAL;
+    return -errno;
+  }
+  tcp->delayed_error = -errno;
+
+  tcp->flags |= UV_HANDLE_BOUND;
+  if (addr->sa_family == AF_INET6)
+    tcp->flags |= UV_HANDLE_IPV6;
+
+  return 0;
+}
+
+
+int uv__tcp_connect(uv_connect_t* req,
+                    uv_tcp_t* handle,
+                    const struct sockaddr* addr,
+                    unsigned int addrlen,
+                    uv_connect_cb cb) {
+  int err;
+  int r;
+
+  assert(handle->type == UV_TCP);
+
+  if (handle->connect_req != NULL)
+    return -EALREADY;  /* FIXME(bnoordhuis) -EINVAL or maybe -EBUSY. */
+
+  err = maybe_new_socket(handle,
+                         addr->sa_family,
+                         UV_STREAM_READABLE | UV_STREAM_WRITABLE);
+  if (err)
+    return err;
+
+  handle->delayed_error = 0;
+
+  do {
+    errno = 0;
+    r = connect(uv__stream_fd(handle), addr, addrlen);
+  } while (r == -1 && errno == EINTR);
+
+  /* We check both the return value and errno, because in rare cases
+   * connect() returns -1 while errno is 0 (for example, on Android 4.3,
+   * OnePlus phone A0001_12_150227) even though the TCP three-way handshake
+   * has actually completed.
+   */
+  if (r == -1 && errno != 0) {
+    if (errno == EINPROGRESS)
+      ; /* not an error */
+    else if (errno == ECONNREFUSED)
+    /* If we get ECONNREFUSED, wait until the next tick to report the
+     * error. Solaris wants to report immediately--other unixes want to
+     * wait.
+     */
+      handle->delayed_error = -errno;
+    else
+      return -errno;
+  }
+
+  uv__req_init(handle->loop, req, UV_CONNECT);
+  req->cb = cb;
+  req->handle = (uv_stream_t*) handle;
+  QUEUE_INIT(&req->queue);
+  handle->connect_req = req;
+
+  uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
+
+  if (handle->delayed_error)
+    uv__io_feed(handle->loop, &handle->io_watcher);
+
+  return 0;
+}
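+
+/* Illustrative usage sketch (not part of libuv; on_connect is an example
+ * name).  The public uv_tcp_connect() wrapper, defined elsewhere in libuv,
+ * forwards to this function:
+ *
+ *   static void on_connect(uv_connect_t* req, int status) {
+ *     if (status == 0)
+ *       uv_read_start(req->handle, on_alloc, on_read);
+ *   }
+ *
+ *   struct sockaddr_in addr;
+ *   uv_tcp_init(loop, &tcp);
+ *   uv_ip4_addr("127.0.0.1", 7000, &addr);
+ *   uv_tcp_connect(&req, &tcp, (const struct sockaddr*) &addr, on_connect);
+ */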
+
+
+int uv_tcp_open(uv_tcp_t* handle, uv_os_sock_t sock) {
+  int err;
+
+  err = uv__nonblock(sock, 1);
+  if (err)
+    return err;
+
+  return uv__stream_open((uv_stream_t*)handle,
+                         sock,
+                         UV_STREAM_READABLE | UV_STREAM_WRITABLE);
+}
+
+
+int uv_tcp_getsockname(const uv_tcp_t* handle,
+                       struct sockaddr* name,
+                       int* namelen) {
+  socklen_t socklen;
+
+  if (handle->delayed_error)
+    return handle->delayed_error;
+
+  if (uv__stream_fd(handle) < 0)
+    return -EINVAL;  /* FIXME(bnoordhuis) -EBADF */
+
+  /* sizeof(socklen_t) != sizeof(int) on some systems. */
+  socklen = (socklen_t) *namelen;
+
+  if (getsockname(uv__stream_fd(handle), name, &socklen))
+    return -errno;
+
+  *namelen = (int) socklen;
+  return 0;
+}
+
+
+int uv_tcp_getpeername(const uv_tcp_t* handle,
+                       struct sockaddr* name,
+                       int* namelen) {
+  socklen_t socklen;
+
+  if (handle->delayed_error)
+    return handle->delayed_error;
+
+  if (uv__stream_fd(handle) < 0)
+    return -EINVAL;  /* FIXME(bnoordhuis) -EBADF */
+
+  /* sizeof(socklen_t) != sizeof(int) on some systems. */
+  socklen = (socklen_t) *namelen;
+
+  if (getpeername(uv__stream_fd(handle), name, &socklen))
+    return -errno;
+
+  *namelen = (int) socklen;
+  return 0;
+}
+
+
+int uv_tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb) {
+  static int single_accept = -1;
+  int err;
+
+  if (tcp->delayed_error)
+    return tcp->delayed_error;
+
+  if (single_accept == -1) {
+    const char* val = getenv("UV_TCP_SINGLE_ACCEPT");
+    single_accept = (val != NULL && atoi(val) != 0);  /* Off by default. */
+  }
+
+  if (single_accept)
+    tcp->flags |= UV_TCP_SINGLE_ACCEPT;
+
+  err = maybe_new_socket(tcp, AF_INET, UV_STREAM_READABLE);
+  if (err)
+    return err;
+
+#ifdef __MVS__
+  /* On z/OS the listen call does not bind automatically if the socket is
+   * unbound, so the socket must be bound to an arbitrary port manually
+   * before listening.
+   */
+
+  if (!(tcp->flags & UV_HANDLE_BOUND)) {
+    struct sockaddr_storage saddr;
+    socklen_t slen  = sizeof(saddr);
+    memset(&saddr, 0, sizeof(saddr));
+
+    if (getsockname(tcp->io_watcher.fd, (struct sockaddr*) &saddr, &slen))
+      return -errno;
+
+    if (bind(tcp->io_watcher.fd, (struct sockaddr*) &saddr, slen))
+      return -errno;
+
+    tcp->flags |= UV_HANDLE_BOUND;
+  }
+#endif
+
+  if (listen(tcp->io_watcher.fd, backlog))
+    return -errno;
+
+  tcp->connection_cb = cb;
+  tcp->flags |= UV_HANDLE_BOUND;
+
+  /* Start listening for connections. */
+  tcp->io_watcher.cb = uv__server_io;
+  uv__io_start(tcp->loop, &tcp->io_watcher, POLLIN);
+
+  return 0;
+}
+
+
+int uv__tcp_nodelay(int fd, int on) {
+  if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on)))
+    return -errno;
+  return 0;
+}
+
+
+int uv__tcp_keepalive(int fd, int on, unsigned int delay) {
+  if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)))
+    return -errno;
+
+#ifdef TCP_KEEPIDLE
+  if (on && setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &delay, sizeof(delay)))
+    return -errno;
+#endif
+
+  /* Solaris/SmartOS, if you don't support keep-alive,
+   * then don't advertise it in your system headers...
+   */
+  /* FIXME(bnoordhuis) That's possibly because sizeof(delay) should be 1. */
+#if defined(TCP_KEEPALIVE) && !defined(__sun)
+  if (on && setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &delay, sizeof(delay)))
+    return -errno;
+#endif
+
+  return 0;
+}
+
+
+int uv_tcp_nodelay(uv_tcp_t* handle, int on) {
+  int err;
+
+  if (uv__stream_fd(handle) != -1) {
+    err = uv__tcp_nodelay(uv__stream_fd(handle), on);
+    if (err)
+      return err;
+  }
+
+  if (on)
+    handle->flags |= UV_TCP_NODELAY;
+  else
+    handle->flags &= ~UV_TCP_NODELAY;
+
+  return 0;
+}
+
+
+int uv_tcp_keepalive(uv_tcp_t* handle, int on, unsigned int delay) {
+  int err;
+
+  if (uv__stream_fd(handle) != -1) {
+    err = uv__tcp_keepalive(uv__stream_fd(handle), on, delay);
+    if (err)
+      return err;
+  }
+
+  if (on)
+    handle->flags |= UV_TCP_KEEPALIVE;
+  else
+    handle->flags &= ~UV_TCP_KEEPALIVE;
+
+  /* TODO Store delay if uv__stream_fd(handle) == -1 but don't want to enlarge
+   *      uv_tcp_t with an int that's almost never used...
+   */
+
+  return 0;
+}
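+
+/* Illustrative usage sketch (not part of libuv):
+ *
+ *   uv_tcp_nodelay(&tcp, 1);         (disable Nagle's algorithm)
+ *   uv_tcp_keepalive(&tcp, 1, 60);   (enable keepalive, 60 second delay)
+ */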
+
+
+int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) {
+  if (enable)
+    handle->flags &= ~UV_TCP_SINGLE_ACCEPT;
+  else
+    handle->flags |= UV_TCP_SINGLE_ACCEPT;
+  return 0;
+}
+
+
+void uv__tcp_close(uv_tcp_t* handle) {
+  uv__stream_close((uv_stream_t*)handle);
+}
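
For context, a minimal sketch (illustrative only, not part of this patch) of how a consumer of the public <uv.h> API might exercise the TCP option setters above; the port number and backlog are arbitrary placeholders.

  #include <uv.h>
  #include <stdio.h>

  static void on_connection(uv_stream_t* server, int status) {
    if (status < 0) {
      fprintf(stderr, "listen error: %s\n", uv_strerror(status));
      return;
    }
    /* A real server would uv_accept() into a fresh uv_tcp_t here. */
    printf("incoming connection\n");
  }

  int main(void) {
    uv_loop_t* loop = uv_default_loop();
    uv_tcp_t server;
    struct sockaddr_in addr;

    uv_tcp_init(loop, &server);
    uv_ip4_addr("0.0.0.0", 7000, &addr);
    uv_tcp_bind(&server, (const struct sockaddr*) &addr, 0);

    /* The options implemented above: disable Nagle, probe idle peers
     * after 60 seconds. */
    uv_tcp_nodelay(&server, 1);
    uv_tcp_keepalive(&server, 1, 60);

    /* uv_listen() dispatches to uv_tcp_listen() for TCP handles. */
    uv_listen((uv_stream_t*) &server, 128, on_connection);

    return uv_run(loop, UV_RUN_DEFAULT);
  }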

+ 605 - 0
Utilities/cmlibuv/src/unix/thread.c

@@ -0,0 +1,605 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <pthread.h>
+#include <assert.h>
+#include <errno.h>
+
+#include <sys/time.h>
+#include <sys/resource.h>  /* getrlimit() */
+#include <unistd.h>  /* getpagesize() */
+
+#include <limits.h>
+
+#ifdef __MVS__
+#include <sys/ipc.h>
+#include <sys/sem.h>
+#endif
+
+#undef NANOSEC
+#define NANOSEC ((uint64_t) 1e9)
+
+struct thread_ctx {
+  void (*entry)(void* arg);
+  void* arg;
+};
+
+
+static void* uv__thread_start(void *arg)
+{
+  struct thread_ctx *ctx_p;
+  struct thread_ctx ctx;
+
+  ctx_p = arg;
+  ctx = *ctx_p;
+  uv__free(ctx_p);
+  ctx.entry(ctx.arg);
+
+  return 0;
+}
+
+
+int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) {
+  struct thread_ctx* ctx;
+  int err;
+  pthread_attr_t* attr;
+#if defined(__APPLE__)
+  pthread_attr_t attr_storage;
+  struct rlimit lim;
+#endif
+
+  ctx = uv__malloc(sizeof(*ctx));
+  if (ctx == NULL)
+    return UV_ENOMEM;
+
+  ctx->entry = entry;
+  ctx->arg = arg;
+
+  /* On OS X, threads other than the main thread are created with a reduced
+   * stack size by default; adjust it to RLIMIT_STACK.
+   */
+#if defined(__APPLE__)
+  if (getrlimit(RLIMIT_STACK, &lim))
+    abort();
+
+  attr = &attr_storage;
+  if (pthread_attr_init(attr))
+    abort();
+
+  if (lim.rlim_cur != RLIM_INFINITY) {
+    /* pthread_attr_setstacksize() expects page-aligned values. */
+    lim.rlim_cur -= lim.rlim_cur % (rlim_t) getpagesize();
+
+    if (lim.rlim_cur >= PTHREAD_STACK_MIN)
+      if (pthread_attr_setstacksize(attr, lim.rlim_cur))
+        abort();
+  }
+#else
+  attr = NULL;
+#endif
+
+  err = pthread_create(tid, attr, uv__thread_start, ctx);
+
+  if (attr != NULL)
+    pthread_attr_destroy(attr);
+
+  if (err)
+    uv__free(ctx);
+
+  return -err;
+}
+
+
+uv_thread_t uv_thread_self(void) {
+  return pthread_self();
+}
+
+int uv_thread_join(uv_thread_t *tid) {
+  return -pthread_join(*tid, NULL);
+}
+
+
+int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) {
+  return pthread_equal(*t1, *t2);
+}
+
+
+int uv_mutex_init(uv_mutex_t* mutex) {
+#if defined(NDEBUG) || !defined(PTHREAD_MUTEX_ERRORCHECK)
+  return -pthread_mutex_init(mutex, NULL);
+#else
+  pthread_mutexattr_t attr;
+  int err;
+
+  if (pthread_mutexattr_init(&attr))
+    abort();
+
+  if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK))
+    abort();
+
+  err = pthread_mutex_init(mutex, &attr);
+
+  if (pthread_mutexattr_destroy(&attr))
+    abort();
+
+  return -err;
+#endif
+}
+
+
+void uv_mutex_destroy(uv_mutex_t* mutex) {
+  if (pthread_mutex_destroy(mutex))
+    abort();
+}
+
+
+void uv_mutex_lock(uv_mutex_t* mutex) {
+  if (pthread_mutex_lock(mutex))
+    abort();
+}
+
+
+int uv_mutex_trylock(uv_mutex_t* mutex) {
+  int err;
+
+  err = pthread_mutex_trylock(mutex);
+  if (err) {
+    if (err != EBUSY && err != EAGAIN)
+      abort();
+    return -EBUSY;
+  }
+
+  return 0;
+}
+
+
+void uv_mutex_unlock(uv_mutex_t* mutex) {
+  if (pthread_mutex_unlock(mutex))
+    abort();
+}
+
+
+int uv_rwlock_init(uv_rwlock_t* rwlock) {
+  return -pthread_rwlock_init(rwlock, NULL);
+}
+
+
+void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
+  if (pthread_rwlock_destroy(rwlock))
+    abort();
+}
+
+
+void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
+  if (pthread_rwlock_rdlock(rwlock))
+    abort();
+}
+
+
+int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) {
+  int err;
+
+  err = pthread_rwlock_tryrdlock(rwlock);
+  if (err) {
+    if (err != EBUSY && err != EAGAIN)
+      abort();
+    return -EBUSY;
+  }
+
+  return 0;
+}
+
+
+void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
+  if (pthread_rwlock_unlock(rwlock))
+    abort();
+}
+
+
+void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
+  if (pthread_rwlock_wrlock(rwlock))
+    abort();
+}
+
+
+int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {
+  int err;
+
+  err = pthread_rwlock_trywrlock(rwlock);
+  if (err) {
+    if (err != EBUSY && err != EAGAIN)
+      abort();
+    return -EBUSY;
+  }
+
+  return 0;
+}
+
+
+void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
+  if (pthread_rwlock_unlock(rwlock))
+    abort();
+}
+
+
+void uv_once(uv_once_t* guard, void (*callback)(void)) {
+  if (pthread_once(guard, callback))
+    abort();
+}
+
+#if defined(__APPLE__) && defined(__MACH__)
+
+int uv_sem_init(uv_sem_t* sem, unsigned int value) {
+  kern_return_t err;
+
+  err = semaphore_create(mach_task_self(), sem, SYNC_POLICY_FIFO, value);
+  if (err == KERN_SUCCESS)
+    return 0;
+  if (err == KERN_INVALID_ARGUMENT)
+    return -EINVAL;
+  if (err == KERN_RESOURCE_SHORTAGE)
+    return -ENOMEM;
+
+  abort();
+  return -EINVAL;  /* Satisfy the compiler. */
+}
+
+
+void uv_sem_destroy(uv_sem_t* sem) {
+  if (semaphore_destroy(mach_task_self(), *sem))
+    abort();
+}
+
+
+void uv_sem_post(uv_sem_t* sem) {
+  if (semaphore_signal(*sem))
+    abort();
+}
+
+
+void uv_sem_wait(uv_sem_t* sem) {
+  int r;
+
+  do
+    r = semaphore_wait(*sem);
+  while (r == KERN_ABORTED);
+
+  if (r != KERN_SUCCESS)
+    abort();
+}
+
+
+int uv_sem_trywait(uv_sem_t* sem) {
+  mach_timespec_t interval;
+  kern_return_t err;
+
+  interval.tv_sec = 0;
+  interval.tv_nsec = 0;
+
+  err = semaphore_timedwait(*sem, interval);
+  if (err == KERN_SUCCESS)
+    return 0;
+  if (err == KERN_OPERATION_TIMED_OUT)
+    return -EAGAIN;
+
+  abort();
+  return -EINVAL;  /* Satisfy the compiler. */
+}
+
+#elif defined(__MVS__)
+
+int uv_sem_init(uv_sem_t* sem, unsigned int value) {
+  uv_sem_t semid;
+  struct sembuf buf;
+  int err;
+
+  buf.sem_num = 0;
+  buf.sem_op = value;
+  buf.sem_flg = 0;
+
+  semid = semget(IPC_PRIVATE, 1, S_IRUSR | S_IWUSR);
+  if (semid == -1)
+    return -errno;
+
+  if (-1 == semop(semid, &buf, 1)) {
+    err = errno;
+    if (-1 == semctl(*sem, 0, IPC_RMID))
+      abort();
+    return -err;
+  }
+
+  *sem = semid;
+  return 0;
+}
+
+void uv_sem_destroy(uv_sem_t* sem) {
+  if (-1 == semctl(*sem, 0, IPC_RMID))
+    abort();
+}
+
+void uv_sem_post(uv_sem_t* sem) {
+  struct sembuf buf;
+
+  buf.sem_num = 0;
+  buf.sem_op = 1;
+  buf.sem_flg = 0;
+
+  if (-1 == semop(*sem, &buf, 1))
+    abort();
+}
+
+void uv_sem_wait(uv_sem_t* sem) {
+  struct sembuf buf;
+  int op_status;
+
+  buf.sem_num = 0;
+  buf.sem_op = -1;
+  buf.sem_flg = 0;
+
+  do
+    op_status = semop(*sem, &buf, 1);
+  while (op_status == -1 && errno == EINTR);
+
+  if (op_status)
+    abort();
+}
+
+int uv_sem_trywait(uv_sem_t* sem) {
+  struct sembuf buf;
+  int op_status;
+
+  buf.sem_num = 0;
+  buf.sem_op = -1;
+  buf.sem_flg = IPC_NOWAIT;
+
+  do
+    op_status = semop(*sem, &buf, 1);
+  while (op_status == -1 && errno == EINTR);
+
+  if (op_status) {
+    if (errno == EAGAIN)
+      return -EAGAIN;
+    abort();
+  }
+
+  return 0;
+}
+
+#else /* !(defined(__APPLE__) && defined(__MACH__)) */
+
+int uv_sem_init(uv_sem_t* sem, unsigned int value) {
+  if (sem_init(sem, 0, value))
+    return -errno;
+  return 0;
+}
+
+
+void uv_sem_destroy(uv_sem_t* sem) {
+  if (sem_destroy(sem))
+    abort();
+}
+
+
+void uv_sem_post(uv_sem_t* sem) {
+  if (sem_post(sem))
+    abort();
+}
+
+
+void uv_sem_wait(uv_sem_t* sem) {
+  int r;
+
+  do
+    r = sem_wait(sem);
+  while (r == -1 && errno == EINTR);
+
+  if (r)
+    abort();
+}
+
+
+int uv_sem_trywait(uv_sem_t* sem) {
+  int r;
+
+  do
+    r = sem_trywait(sem);
+  while (r == -1 && errno == EINTR);
+
+  if (r) {
+    if (errno == EAGAIN)
+      return -EAGAIN;
+    abort();
+  }
+
+  return 0;
+}
+
+#endif /* defined(__APPLE__) && defined(__MACH__) */
+
+
+#if defined(__APPLE__) && defined(__MACH__) || defined(__MVS__)
+
+int uv_cond_init(uv_cond_t* cond) {
+  return -pthread_cond_init(cond, NULL);
+}
+
+#else /* !(defined(__APPLE__) && defined(__MACH__)) */
+
+int uv_cond_init(uv_cond_t* cond) {
+  pthread_condattr_t attr;
+  int err;
+
+  err = pthread_condattr_init(&attr);
+  if (err)
+    return -err;
+
+#if !(defined(__ANDROID__) && defined(HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC))
+  err = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
+  if (err)
+    goto error2;
+#endif
+
+  err = pthread_cond_init(cond, &attr);
+  if (err)
+    goto error2;
+
+  err = pthread_condattr_destroy(&attr);
+  if (err)
+    goto error;
+
+  return 0;
+
+error:
+  pthread_cond_destroy(cond);
+error2:
+  pthread_condattr_destroy(&attr);
+  return -err;
+}
+
+#endif /* defined(__APPLE__) && defined(__MACH__) */
+
+void uv_cond_destroy(uv_cond_t* cond) {
+#if defined(__APPLE__) && defined(__MACH__)
+  /* It has been reported that destroying condition variables that have been
+   * signalled but not waited on can sometimes result in application crashes.
+   * See https://codereview.chromium.org/1323293005.
+   */
+  pthread_mutex_t mutex;
+  struct timespec ts;
+  int err;
+
+  if (pthread_mutex_init(&mutex, NULL))
+    abort();
+
+  if (pthread_mutex_lock(&mutex))
+    abort();
+
+  ts.tv_sec = 0;
+  ts.tv_nsec = 1;
+
+  err = pthread_cond_timedwait_relative_np(cond, &mutex, &ts);
+  if (err != 0 && err != ETIMEDOUT)
+    abort();
+
+  if (pthread_mutex_unlock(&mutex))
+    abort();
+
+  if (pthread_mutex_destroy(&mutex))
+    abort();
+#endif /* defined(__APPLE__) && defined(__MACH__) */
+
+  if (pthread_cond_destroy(cond))
+    abort();
+}
+
+void uv_cond_signal(uv_cond_t* cond) {
+  if (pthread_cond_signal(cond))
+    abort();
+}
+
+void uv_cond_broadcast(uv_cond_t* cond) {
+  if (pthread_cond_broadcast(cond))
+    abort();
+}
+
+void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
+  if (pthread_cond_wait(cond, mutex))
+    abort();
+}
+
+
+int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
+  int r;
+  struct timespec ts;
+
+#if defined(__APPLE__) && defined(__MACH__)
+  ts.tv_sec = timeout / NANOSEC;
+  ts.tv_nsec = timeout % NANOSEC;
+  r = pthread_cond_timedwait_relative_np(cond, mutex, &ts);
+#else
+  timeout += uv__hrtime(UV_CLOCK_PRECISE);
+  ts.tv_sec = timeout / NANOSEC;
+  ts.tv_nsec = timeout % NANOSEC;
+#if defined(__ANDROID__) && defined(HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC)
+  /*
+   * The bionic pthread implementation doesn't support CLOCK_MONOTONIC,
+   * but has this alternative function instead.
+   */
+  r = pthread_cond_timedwait_monotonic_np(cond, mutex, &ts);
+#else
+  r = pthread_cond_timedwait(cond, mutex, &ts);
+#endif /* __ANDROID__ */
+#endif
+
+
+  if (r == 0)
+    return 0;
+
+  if (r == ETIMEDOUT)
+    return -ETIMEDOUT;
+
+  abort();
+  return -EINVAL;  /* Satisfy the compiler. */
+}
+
+
+int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
+  return -pthread_barrier_init(barrier, NULL, count);
+}
+
+
+void uv_barrier_destroy(uv_barrier_t* barrier) {
+  if (pthread_barrier_destroy(barrier))
+    abort();
+}
+
+
+int uv_barrier_wait(uv_barrier_t* barrier) {
+  int r = pthread_barrier_wait(barrier);
+  if (r && r != PTHREAD_BARRIER_SERIAL_THREAD)
+    abort();
+  return r == PTHREAD_BARRIER_SERIAL_THREAD;
+}
+
+
+int uv_key_create(uv_key_t* key) {
+  return -pthread_key_create(key, NULL);
+}
+
+
+void uv_key_delete(uv_key_t* key) {
+  if (pthread_key_delete(*key))
+    abort();
+}
+
+
+void* uv_key_get(uv_key_t* key) {
+  return pthread_getspecific(*key);
+}
+
+
+void uv_key_set(uv_key_t* key, void* value) {
+  if (pthread_setspecific(*key, value))
+    abort();
+}
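
A minimal sketch (illustrative only, not part of this patch) of the thread and condition-variable wrappers above, assuming the public <uv.h> API; note that uv_cond_timedwait() takes a relative timeout in nanoseconds, matching the NANOSEC arithmetic above.

  #include <uv.h>
  #include <stdint.h>
  #include <stdio.h>

  static uv_mutex_t mutex;
  static uv_cond_t cond;
  static int ready = 0;

  static void worker(void* arg) {
    (void) arg;
    uv_mutex_lock(&mutex);
    ready = 1;
    uv_cond_signal(&cond);
    uv_mutex_unlock(&mutex);
  }

  int main(void) {
    uv_thread_t tid;

    uv_mutex_init(&mutex);
    uv_cond_init(&cond);
    uv_thread_create(&tid, worker, NULL);

    uv_mutex_lock(&mutex);
    while (!ready) {
      /* Wait at most one second (relative timeout, in nanoseconds). */
      if (uv_cond_timedwait(&cond, &mutex, 1000000000ULL) == UV_ETIMEDOUT)
        break;
    }
    uv_mutex_unlock(&mutex);

    uv_thread_join(&tid);
    printf("ready = %d\n", ready);

    uv_cond_destroy(&cond);
    uv_mutex_destroy(&mutex);
    return 0;
  }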

+ 172 - 0
Utilities/cmlibuv/src/unix/timer.c

@@ -0,0 +1,172 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+#include "heap-inl.h"
+
+#include <assert.h>
+#include <limits.h>
+
+
+static int timer_less_than(const struct heap_node* ha,
+                           const struct heap_node* hb) {
+  const uv_timer_t* a;
+  const uv_timer_t* b;
+
+  a = container_of(ha, uv_timer_t, heap_node);
+  b = container_of(hb, uv_timer_t, heap_node);
+
+  if (a->timeout < b->timeout)
+    return 1;
+  if (b->timeout < a->timeout)
+    return 0;
+
+  /* Compare start_id when both have the same timeout. start_id is
+   * allocated with loop->timer_counter in uv_timer_start().
+   */
+  if (a->start_id < b->start_id)
+    return 1;
+  if (b->start_id < a->start_id)
+    return 0;
+
+  return 0;
+}
+
+
+int uv_timer_init(uv_loop_t* loop, uv_timer_t* handle) {
+  uv__handle_init(loop, (uv_handle_t*)handle, UV_TIMER);
+  handle->timer_cb = NULL;
+  handle->repeat = 0;
+  return 0;
+}
+
+
+int uv_timer_start(uv_timer_t* handle,
+                   uv_timer_cb cb,
+                   uint64_t timeout,
+                   uint64_t repeat) {
+  uint64_t clamped_timeout;
+
+  if (cb == NULL)
+    return -EINVAL;
+
+  if (uv__is_active(handle))
+    uv_timer_stop(handle);
+
+  clamped_timeout = handle->loop->time + timeout;
+  if (clamped_timeout < timeout)
+    clamped_timeout = (uint64_t) -1;
+
+  handle->timer_cb = cb;
+  handle->timeout = clamped_timeout;
+  handle->repeat = repeat;
+  /* start_id is the second key compared in timer_less_than() above. */
+  handle->start_id = handle->loop->timer_counter++;
+
+  heap_insert((struct heap*) &handle->loop->timer_heap,
+              (struct heap_node*) &handle->heap_node,
+              timer_less_than);
+  uv__handle_start(handle);
+
+  return 0;
+}
+
+
+int uv_timer_stop(uv_timer_t* handle) {
+  if (!uv__is_active(handle))
+    return 0;
+
+  heap_remove((struct heap*) &handle->loop->timer_heap,
+              (struct heap_node*) &handle->heap_node,
+              timer_less_than);
+  uv__handle_stop(handle);
+
+  return 0;
+}
+
+
+int uv_timer_again(uv_timer_t* handle) {
+  if (handle->timer_cb == NULL)
+    return -EINVAL;
+
+  if (handle->repeat) {
+    uv_timer_stop(handle);
+    uv_timer_start(handle, handle->timer_cb, handle->repeat, handle->repeat);
+  }
+
+  return 0;
+}
+
+
+void uv_timer_set_repeat(uv_timer_t* handle, uint64_t repeat) {
+  handle->repeat = repeat;
+}
+
+
+uint64_t uv_timer_get_repeat(const uv_timer_t* handle) {
+  return handle->repeat;
+}
+
+
+int uv__next_timeout(const uv_loop_t* loop) {
+  const struct heap_node* heap_node;
+  const uv_timer_t* handle;
+  uint64_t diff;
+
+  heap_node = heap_min((const struct heap*) &loop->timer_heap);
+  if (heap_node == NULL)
+    return -1; /* block indefinitely */
+
+  handle = container_of(heap_node, uv_timer_t, heap_node);
+  if (handle->timeout <= loop->time)
+    return 0;
+
+  diff = handle->timeout - loop->time;
+  if (diff > INT_MAX)
+    diff = INT_MAX;
+
+  return diff;
+}
+
+
+void uv__run_timers(uv_loop_t* loop) {
+  struct heap_node* heap_node;
+  uv_timer_t* handle;
+
+  for (;;) {
+    heap_node = heap_min((struct heap*) &loop->timer_heap);
+    if (heap_node == NULL)
+      break;
+
+    handle = container_of(heap_node, uv_timer_t, heap_node);
+    if (handle->timeout > loop->time)
+      break;
+
+    uv_timer_stop(handle);
+    uv_timer_again(handle);
+    handle->timer_cb(handle);
+  }
+}
+
+
+void uv__timer_close(uv_timer_t* handle) {
+  uv_timer_stop(handle);
+}
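
A short usage sketch (illustrative only, not part of this patch) for the timer implementation above, assuming the public <uv.h> API; timeout and repeat values are in milliseconds and are arbitrary here.

  #include <uv.h>
  #include <stdio.h>

  static int ticks = 0;

  static void on_tick(uv_timer_t* handle) {
    printf("tick %d\n", ++ticks);
    if (ticks == 3)
      uv_timer_stop(handle);  /* removes the handle from the timer heap */
  }

  int main(void) {
    uv_loop_t* loop = uv_default_loop();
    uv_timer_t timer;

    uv_timer_init(loop, &timer);
    /* First fire after 100 ms, then every 250 ms until stopped. */
    uv_timer_start(&timer, on_tick, 100, 250);

    return uv_run(loop, UV_RUN_DEFAULT);
  }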

+ 336 - 0
Utilities/cmlibuv/src/unix/tty.c

@@ -0,0 +1,336 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+#include "spinlock.h"
+
+#include <stdlib.h>
+#include <assert.h>
+#include <unistd.h>
+#include <termios.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+
+#if defined(__MVS__) && !defined(IMAXBEL)
+#define IMAXBEL 0
+#endif
+
+static int orig_termios_fd = -1;
+static struct termios orig_termios;
+static uv_spinlock_t termios_spinlock = UV_SPINLOCK_INITIALIZER;
+
+static int uv__tty_is_slave(const int fd) {
+  int result;
+#if defined(__linux__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
+  int dummy;
+
+  result = ioctl(fd, TIOCGPTN, &dummy) != 0;
+#elif defined(__APPLE__)
+  char dummy[256];
+
+  result = ioctl(fd, TIOCPTYGNAME, &dummy) != 0;
+#else
+  /* Fall back to ptsname(). */
+  result = ptsname(fd) == NULL;
+#endif
+  return result;
+}
+
+int uv_tty_init(uv_loop_t* loop, uv_tty_t* tty, int fd, int readable) {
+  uv_handle_type type;
+  int flags;
+  int newfd;
+  int r;
+  int saved_flags;
+  char path[256];
+
+  /* File descriptors that refer to files cannot be monitored with epoll.
+   * That restriction also applies to character devices like /dev/random
+   * (but obviously not /dev/tty.)
+   */
+  type = uv_guess_handle(fd);
+  if (type == UV_FILE || type == UV_UNKNOWN_HANDLE)
+    return -EINVAL;
+
+  flags = 0;
+  newfd = -1;
+
+  /* Reopen the file descriptor when it refers to a tty. This lets us put the
+   * tty in non-blocking mode without affecting other processes that share it
+   * with us.
+   *
+   * Example: `node | cat` - if we put our fd 0 in non-blocking mode, it also
+   * affects fd 1 of `cat` because both file descriptors refer to the same
+   * struct file in the kernel. When we reopen our fd 0, it points to a
+   * different struct file, hence changing its properties doesn't affect
+   * other processes.
+   */
+  if (type == UV_TTY) {
+    /* Reopening a pty in master mode won't work either because the reopened
+     * pty will be in slave mode (*BSD) or reopening will allocate a new
+     * master/slave pair (Linux). Therefore check if the fd points to a
+     * slave device.
+     */
+    if (uv__tty_is_slave(fd) && ttyname_r(fd, path, sizeof(path)) == 0)
+      r = uv__open_cloexec(path, O_RDWR);
+    else
+      r = -1;
+
+    if (r < 0) {
+      /* fallback to using blocking writes */
+      if (!readable)
+        flags |= UV_STREAM_BLOCKING;
+      goto skip;
+    }
+
+    newfd = r;
+
+    r = uv__dup2_cloexec(newfd, fd);
+    if (r < 0 && r != -EINVAL) {
+      /* EINVAL means newfd == fd which could conceivably happen if another
+       * thread called close(fd) between our calls to isatty() and open().
+       * That's a rather unlikely event but let's handle it anyway.
+       */
+      uv__close(newfd);
+      return r;
+    }
+
+    fd = newfd;
+  }
+
+#if defined(__APPLE__)
+  /* Save the fd flags in case we need to restore them due to an error. */
+  do
+    saved_flags = fcntl(fd, F_GETFL);
+  while (saved_flags == -1 && errno == EINTR);
+
+  if (saved_flags == -1) {
+    if (newfd != -1)
+      uv__close(newfd);
+    return -errno;
+  }
+#endif
+
+  /* Pacify the compiler. */
+  (void) &saved_flags;
+
+skip:
+  uv__stream_init(loop, (uv_stream_t*) tty, UV_TTY);
+
+  /* If anything fails beyond this point, we need to remove the handle from
+   * the handle queue, since it was added by uv__handle_init in uv__stream_init.
+   */
+
+  if (!(flags & UV_STREAM_BLOCKING))
+    uv__nonblock(fd, 1);
+
+#if defined(__APPLE__)
+  r = uv__stream_try_select((uv_stream_t*) tty, &fd);
+  if (r) {
+    int rc = r;
+    if (newfd != -1)
+      uv__close(newfd);
+    QUEUE_REMOVE(&tty->handle_queue);
+    do
+      r = fcntl(fd, F_SETFL, saved_flags);
+    while (r == -1 && errno == EINTR);
+    return rc;
+  }
+#endif
+
+  if (readable)
+    flags |= UV_STREAM_READABLE;
+  else
+    flags |= UV_STREAM_WRITABLE;
+
+  uv__stream_open((uv_stream_t*) tty, fd, flags);
+  tty->mode = UV_TTY_MODE_NORMAL;
+
+  return 0;
+}
+
+static void uv__tty_make_raw(struct termios* tio) {
+  assert(tio != NULL);
+
+#if defined __sun || defined __MVS__
+  /*
+   * This implementation of cfmakeraw for Solaris and derivatives is taken from
+   * http://www.perkin.org.uk/posts/solaris-portability-cfmakeraw.html.
+   */
+  tio->c_iflag &= ~(IMAXBEL | IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR |
+                    IGNCR | ICRNL | IXON);
+  tio->c_oflag &= ~OPOST;
+  tio->c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN);
+  tio->c_cflag &= ~(CSIZE | PARENB);
+  tio->c_cflag |= CS8;
+#else
+  cfmakeraw(tio);
+#endif /* defined(__sun) || defined(__MVS__) */
+}
+
+int uv_tty_set_mode(uv_tty_t* tty, uv_tty_mode_t mode) {
+  struct termios tmp;
+  int fd;
+
+  if (tty->mode == (int) mode)
+    return 0;
+
+  fd = uv__stream_fd(tty);
+  if (tty->mode == UV_TTY_MODE_NORMAL && mode != UV_TTY_MODE_NORMAL) {
+    if (tcgetattr(fd, &tty->orig_termios))
+      return -errno;
+
+    /* This is used for uv_tty_reset_mode() */
+    uv_spinlock_lock(&termios_spinlock);
+    if (orig_termios_fd == -1) {
+      orig_termios = tty->orig_termios;
+      orig_termios_fd = fd;
+    }
+    uv_spinlock_unlock(&termios_spinlock);
+  }
+
+  tmp = tty->orig_termios;
+  switch (mode) {
+    case UV_TTY_MODE_NORMAL:
+      break;
+    case UV_TTY_MODE_RAW:
+      tmp.c_iflag &= ~(BRKINT | ICRNL | INPCK | ISTRIP | IXON);
+      tmp.c_oflag |= (ONLCR);
+      tmp.c_cflag |= (CS8);
+      tmp.c_lflag &= ~(ECHO | ICANON | IEXTEN | ISIG);
+      tmp.c_cc[VMIN] = 1;
+      tmp.c_cc[VTIME] = 0;
+      break;
+    case UV_TTY_MODE_IO:
+      uv__tty_make_raw(&tmp);
+      break;
+  }
+
+  /* Apply changes after draining */
+  if (tcsetattr(fd, TCSADRAIN, &tmp))
+    return -errno;
+
+  tty->mode = mode;
+  return 0;
+}
+
+
+int uv_tty_get_winsize(uv_tty_t* tty, int* width, int* height) {
+  struct winsize ws;
+  int err;
+
+  do
+    err = ioctl(uv__stream_fd(tty), TIOCGWINSZ, &ws);
+  while (err == -1 && errno == EINTR);
+
+  if (err == -1)
+    return -errno;
+
+  *width = ws.ws_col;
+  *height = ws.ws_row;
+
+  return 0;
+}
+
+
+uv_handle_type uv_guess_handle(uv_file file) {
+  struct sockaddr sa;
+  struct stat s;
+  socklen_t len;
+  int type;
+
+  if (file < 0)
+    return UV_UNKNOWN_HANDLE;
+
+  if (isatty(file))
+    return UV_TTY;
+
+  if (fstat(file, &s))
+    return UV_UNKNOWN_HANDLE;
+
+  if (S_ISREG(s.st_mode))
+    return UV_FILE;
+
+  if (S_ISCHR(s.st_mode))
+    return UV_FILE;  /* XXX UV_NAMED_PIPE? */
+
+  if (S_ISFIFO(s.st_mode))
+    return UV_NAMED_PIPE;
+
+  if (!S_ISSOCK(s.st_mode))
+    return UV_UNKNOWN_HANDLE;
+
+  len = sizeof(type);
+  if (getsockopt(file, SOL_SOCKET, SO_TYPE, &type, &len))
+    return UV_UNKNOWN_HANDLE;
+
+  len = sizeof(sa);
+  if (getsockname(file, &sa, &len))
+    return UV_UNKNOWN_HANDLE;
+
+  if (type == SOCK_DGRAM)
+    if (sa.sa_family == AF_INET || sa.sa_family == AF_INET6)
+      return UV_UDP;
+
+  if (type == SOCK_STREAM) {
+#if defined(_AIX) || defined(__DragonFly__)
+    /* On AIX/DragonFly, getsockname() returns an empty sa structure for
+     * sockets of type AF_UNIX; for all other types it returns a properly
+     * filled-in structure.
+     */
+    if (len == 0)
+      return UV_NAMED_PIPE;
+#endif /* defined(_AIX) || defined(__DragonFly__) */
+
+    if (sa.sa_family == AF_INET || sa.sa_family == AF_INET6)
+      return UV_TCP;
+    if (sa.sa_family == AF_UNIX)
+      return UV_NAMED_PIPE;
+  }
+
+  return UV_UNKNOWN_HANDLE;
+}
+
+
+/* This function is async signal-safe, meaning that it's safe to call from
+ * inside a signal handler _unless_ execution was inside uv_tty_set_mode()'s
+ * critical section when the signal was raised.
+ */
+int uv_tty_reset_mode(void) {
+  int saved_errno;
+  int err;
+
+  saved_errno = errno;
+  if (!uv_spinlock_trylock(&termios_spinlock))
+    return -EBUSY;  /* In uv_tty_set_mode(). */
+
+  err = 0;
+  if (orig_termios_fd != -1)
+    if (tcsetattr(orig_termios_fd, TCSANOW, &orig_termios))
+      err = -errno;
+
+  uv_spinlock_unlock(&termios_spinlock);
+  errno = saved_errno;
+
+  return err;
+}
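
A minimal sketch (illustrative only, not part of this patch) of the tty functions above, assuming the public <uv.h> API; it switches stdin to raw mode and restores it before exiting.

  #include <uv.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void) {
    uv_loop_t* loop = uv_default_loop();
    uv_tty_t tty;

    if (uv_guess_handle(STDIN_FILENO) != UV_TTY) {
      fprintf(stderr, "stdin is not a tty\n");
      return 1;
    }

    uv_tty_init(loop, &tty, STDIN_FILENO, 1);  /* 1 == readable */
    uv_tty_set_mode(&tty, UV_TTY_MODE_RAW);

    /* ... a real program would uv_read_start() on the handle here ... */

    uv_tty_reset_mode();  /* restores the termios saved by uv_tty_set_mode() */
    uv_close((uv_handle_t*) &tty, NULL);
    return uv_run(loop, UV_RUN_DEFAULT);
  }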

+ 895 - 0
Utilities/cmlibuv/src/unix/udp.c

@@ -0,0 +1,895 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <assert.h>
+#include <string.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <unistd.h>
+#if defined(__MVS__)
+#include <xti.h>
+#endif
+
+#if defined(IPV6_JOIN_GROUP) && !defined(IPV6_ADD_MEMBERSHIP)
+# define IPV6_ADD_MEMBERSHIP IPV6_JOIN_GROUP
+#endif
+
+#if defined(IPV6_LEAVE_GROUP) && !defined(IPV6_DROP_MEMBERSHIP)
+# define IPV6_DROP_MEMBERSHIP IPV6_LEAVE_GROUP
+#endif
+
+
+static void uv__udp_run_completed(uv_udp_t* handle);
+static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents);
+static void uv__udp_recvmsg(uv_udp_t* handle);
+static void uv__udp_sendmsg(uv_udp_t* handle);
+static int uv__udp_maybe_deferred_bind(uv_udp_t* handle,
+                                       int domain,
+                                       unsigned int flags);
+
+
+void uv__udp_close(uv_udp_t* handle) {
+  uv__io_close(handle->loop, &handle->io_watcher);
+  uv__handle_stop(handle);
+
+  if (handle->io_watcher.fd != -1) {
+    uv__close(handle->io_watcher.fd);
+    handle->io_watcher.fd = -1;
+  }
+}
+
+
+void uv__udp_finish_close(uv_udp_t* handle) {
+  uv_udp_send_t* req;
+  QUEUE* q;
+
+  assert(!uv__io_active(&handle->io_watcher, POLLIN | POLLOUT));
+  assert(handle->io_watcher.fd == -1);
+
+  while (!QUEUE_EMPTY(&handle->write_queue)) {
+    q = QUEUE_HEAD(&handle->write_queue);
+    QUEUE_REMOVE(q);
+
+    req = QUEUE_DATA(q, uv_udp_send_t, queue);
+    req->status = -ECANCELED;
+    QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
+  }
+
+  uv__udp_run_completed(handle);
+
+  assert(handle->send_queue_size == 0);
+  assert(handle->send_queue_count == 0);
+
+  /* Now tear down the handle. */
+  handle->recv_cb = NULL;
+  handle->alloc_cb = NULL;
+  /* but _do not_ touch close_cb */
+}
+
+
+static void uv__udp_run_completed(uv_udp_t* handle) {
+  uv_udp_send_t* req;
+  QUEUE* q;
+
+  assert(!(handle->flags & UV_UDP_PROCESSING));
+  handle->flags |= UV_UDP_PROCESSING;
+
+  while (!QUEUE_EMPTY(&handle->write_completed_queue)) {
+    q = QUEUE_HEAD(&handle->write_completed_queue);
+    QUEUE_REMOVE(q);
+
+    req = QUEUE_DATA(q, uv_udp_send_t, queue);
+    uv__req_unregister(handle->loop, req);
+
+    handle->send_queue_size -= uv__count_bufs(req->bufs, req->nbufs);
+    handle->send_queue_count--;
+
+    if (req->bufs != req->bufsml)
+      uv__free(req->bufs);
+    req->bufs = NULL;
+
+    if (req->send_cb == NULL)
+      continue;
+
+    /* req->status >= 0 == bytes written
+     * req->status <  0 == errno
+     */
+    if (req->status >= 0)
+      req->send_cb(req, 0);
+    else
+      req->send_cb(req, req->status);
+  }
+
+  if (QUEUE_EMPTY(&handle->write_queue)) {
+    /* Pending queue and completion queue empty, stop watcher. */
+    uv__io_stop(handle->loop, &handle->io_watcher, POLLOUT);
+    if (!uv__io_active(&handle->io_watcher, POLLIN))
+      uv__handle_stop(handle);
+  }
+
+  handle->flags &= ~UV_UDP_PROCESSING;
+}
+
+
+static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents) {
+  uv_udp_t* handle;
+
+  handle = container_of(w, uv_udp_t, io_watcher);
+  assert(handle->type == UV_UDP);
+
+  if (revents & POLLIN)
+    uv__udp_recvmsg(handle);
+
+  if (revents & POLLOUT) {
+    uv__udp_sendmsg(handle);
+    uv__udp_run_completed(handle);
+  }
+}
+
+
+static void uv__udp_recvmsg(uv_udp_t* handle) {
+  struct sockaddr_storage peer;
+  struct msghdr h;
+  ssize_t nread;
+  uv_buf_t buf;
+  int flags;
+  int count;
+
+  assert(handle->recv_cb != NULL);
+  assert(handle->alloc_cb != NULL);
+
+  /* Prevent loop starvation when the data comes in as fast as (or faster than)
+   * we can read it. XXX Need to rearm fd if we switch to edge-triggered I/O.
+   */
+  count = 32;
+
+  memset(&h, 0, sizeof(h));
+  h.msg_name = &peer;
+
+  do {
+    buf = uv_buf_init(NULL, 0);
+    handle->alloc_cb((uv_handle_t*) handle, 64 * 1024, &buf);
+    if (buf.base == NULL || buf.len == 0) {
+      handle->recv_cb(handle, UV_ENOBUFS, &buf, NULL, 0);
+      return;
+    }
+    assert(buf.base != NULL);
+
+    h.msg_namelen = sizeof(peer);
+    h.msg_iov = (void*) &buf;
+    h.msg_iovlen = 1;
+
+    do {
+      nread = recvmsg(handle->io_watcher.fd, &h, 0);
+    }
+    while (nread == -1 && errno == EINTR);
+
+    if (nread == -1) {
+      if (errno == EAGAIN || errno == EWOULDBLOCK)
+        handle->recv_cb(handle, 0, &buf, NULL, 0);
+      else
+        handle->recv_cb(handle, -errno, &buf, NULL, 0);
+    }
+    else {
+      const struct sockaddr *addr;
+      if (h.msg_namelen == 0)
+        addr = NULL;
+      else
+        addr = (const struct sockaddr*) &peer;
+
+      flags = 0;
+      if (h.msg_flags & MSG_TRUNC)
+        flags |= UV_UDP_PARTIAL;
+
+      handle->recv_cb(handle, nread, &buf, addr, flags);
+    }
+  }
+  /* The recv_cb may decide to pause or close the handle. */
+  while (nread != -1
+      && count-- > 0
+      && handle->io_watcher.fd != -1
+      && handle->recv_cb != NULL);
+}
+
+
+static void uv__udp_sendmsg(uv_udp_t* handle) {
+  uv_udp_send_t* req;
+  QUEUE* q;
+  struct msghdr h;
+  ssize_t size;
+
+  while (!QUEUE_EMPTY(&handle->write_queue)) {
+    q = QUEUE_HEAD(&handle->write_queue);
+    assert(q != NULL);
+
+    req = QUEUE_DATA(q, uv_udp_send_t, queue);
+    assert(req != NULL);
+
+    memset(&h, 0, sizeof h);
+    h.msg_name = &req->addr;
+    h.msg_namelen = (req->addr.ss_family == AF_INET6 ?
+      sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in));
+    h.msg_iov = (struct iovec*) req->bufs;
+    h.msg_iovlen = req->nbufs;
+
+    do {
+      size = sendmsg(handle->io_watcher.fd, &h, 0);
+    } while (size == -1 && errno == EINTR);
+
+    if (size == -1 && (errno == EAGAIN || errno == EWOULDBLOCK))
+      break;
+
+    req->status = (size == -1 ? -errno : size);
+
+    /* Sending a datagram is an atomic operation: either all data
+     * is written or nothing is (and EMSGSIZE is raised). That is
+     * why we don't handle partial writes. Just pop the request
+     * off the write queue and onto the completed queue, done.
+     */
+    QUEUE_REMOVE(&req->queue);
+    QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
+    uv__io_feed(handle->loop, &handle->io_watcher);
+  }
+}
+
+
+/* On the BSDs, SO_REUSEPORT implies SO_REUSEADDR but with some additional
+ * refinements for programs that use multicast.
+ *
+ * Linux as of 3.9 has a SO_REUSEPORT socket option but with semantics that
+ * are different from the BSDs: it _shares_ the port rather than stealing it
+ * from the current listener.  While useful, it's not something we can emulate
+ * on other platforms, so we don't enable it.
+ */
+static int uv__set_reuse(int fd) {
+  int yes;
+
+#if defined(SO_REUSEPORT) && !defined(__linux__)
+  yes = 1;
+  if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)))
+    return -errno;
+#else
+  yes = 1;
+  if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)))
+    return -errno;
+#endif
+
+  return 0;
+}
+
+
+int uv__udp_bind(uv_udp_t* handle,
+                 const struct sockaddr* addr,
+                 unsigned int addrlen,
+                 unsigned int flags) {
+  int err;
+  int yes;
+  int fd;
+
+  /* Check for bad flags. */
+  if (flags & ~(UV_UDP_IPV6ONLY | UV_UDP_REUSEADDR))
+    return -EINVAL;
+
+  /* Cannot set IPv6-only mode on non-IPv6 socket. */
+  if ((flags & UV_UDP_IPV6ONLY) && addr->sa_family != AF_INET6)
+    return -EINVAL;
+
+  fd = handle->io_watcher.fd;
+  if (fd == -1) {
+    err = uv__socket(addr->sa_family, SOCK_DGRAM, 0);
+    if (err < 0)
+      return err;
+    fd = err;
+    handle->io_watcher.fd = fd;
+  }
+
+  if (flags & UV_UDP_REUSEADDR) {
+    err = uv__set_reuse(fd);
+    if (err)
+      goto out;
+  }
+
+  if (flags & UV_UDP_IPV6ONLY) {
+#ifdef IPV6_V6ONLY
+    yes = 1;
+    if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &yes, sizeof yes) == -1) {
+      err = -errno;
+      goto out;
+    }
+#else
+    err = -ENOTSUP;
+    goto out;
+#endif
+  }
+
+  if (bind(fd, addr, addrlen)) {
+    err = -errno;
+    if (errno == EAFNOSUPPORT)
+      /* OS X, other BSDs and SunOS fail with EAFNOSUPPORT when binding a
+       * socket created with AF_INET to an AF_INET6 address or vice versa. */
+      err = -EINVAL;
+    goto out;
+  }
+
+  if (addr->sa_family == AF_INET6)
+    handle->flags |= UV_HANDLE_IPV6;
+
+  handle->flags |= UV_HANDLE_BOUND;
+
+  return 0;
+
+out:
+  uv__close(handle->io_watcher.fd);
+  handle->io_watcher.fd = -1;
+  return err;
+}
+
+
+static int uv__udp_maybe_deferred_bind(uv_udp_t* handle,
+                                       int domain,
+                                       unsigned int flags) {
+  unsigned char taddr[sizeof(struct sockaddr_in6)];
+  socklen_t addrlen;
+
+  if (handle->io_watcher.fd != -1)
+    return 0;
+
+  switch (domain) {
+  case AF_INET:
+  {
+    struct sockaddr_in* addr = (void*)&taddr;
+    memset(addr, 0, sizeof *addr);
+    addr->sin_family = AF_INET;
+    addr->sin_addr.s_addr = INADDR_ANY;
+    addrlen = sizeof *addr;
+    break;
+  }
+  case AF_INET6:
+  {
+    struct sockaddr_in6* addr = (void*)&taddr;
+    memset(addr, 0, sizeof *addr);
+    addr->sin6_family = AF_INET6;
+    addr->sin6_addr = in6addr_any;
+    addrlen = sizeof *addr;
+    break;
+  }
+  default:
+    assert(0 && "unsupported address family");
+    abort();
+  }
+
+  return uv__udp_bind(handle, (const struct sockaddr*) &taddr, addrlen, flags);
+}
+
+
+int uv__udp_send(uv_udp_send_t* req,
+                 uv_udp_t* handle,
+                 const uv_buf_t bufs[],
+                 unsigned int nbufs,
+                 const struct sockaddr* addr,
+                 unsigned int addrlen,
+                 uv_udp_send_cb send_cb) {
+  int err;
+  int empty_queue;
+
+  assert(nbufs > 0);
+
+  err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
+  if (err)
+    return err;
+
+  /* It's legal for send_queue_count > 0 even when the write_queue is empty;
+   * it means there are error-state requests in the write_completed_queue that
+   * will touch up send_queue_size/count later.
+   */
+  empty_queue = (handle->send_queue_count == 0);
+
+  uv__req_init(handle->loop, req, UV_UDP_SEND);
+  assert(addrlen <= sizeof(req->addr));
+  memcpy(&req->addr, addr, addrlen);
+  req->send_cb = send_cb;
+  req->handle = handle;
+  req->nbufs = nbufs;
+
+  req->bufs = req->bufsml;
+  if (nbufs > ARRAY_SIZE(req->bufsml))
+    req->bufs = uv__malloc(nbufs * sizeof(bufs[0]));
+
+  if (req->bufs == NULL) {
+    uv__req_unregister(handle->loop, req);
+    return -ENOMEM;
+  }
+
+  memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0]));
+  handle->send_queue_size += uv__count_bufs(req->bufs, req->nbufs);
+  handle->send_queue_count++;
+  QUEUE_INSERT_TAIL(&handle->write_queue, &req->queue);
+  uv__handle_start(handle);
+
+  if (empty_queue && !(handle->flags & UV_UDP_PROCESSING)) {
+    uv__udp_sendmsg(handle);
+  } else {
+    uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
+  }
+
+  return 0;
+}
+
+
+int uv__udp_try_send(uv_udp_t* handle,
+                     const uv_buf_t bufs[],
+                     unsigned int nbufs,
+                     const struct sockaddr* addr,
+                     unsigned int addrlen) {
+  int err;
+  struct msghdr h;
+  ssize_t size;
+
+  assert(nbufs > 0);
+
+  /* already sending a message */
+  if (handle->send_queue_count != 0)
+    return -EAGAIN;
+
+  err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
+  if (err)
+    return err;
+
+  memset(&h, 0, sizeof h);
+  h.msg_name = (struct sockaddr*) addr;
+  h.msg_namelen = addrlen;
+  h.msg_iov = (struct iovec*) bufs;
+  h.msg_iovlen = nbufs;
+
+  do {
+    size = sendmsg(handle->io_watcher.fd, &h, 0);
+  } while (size == -1 && errno == EINTR);
+
+  if (size == -1) {
+    if (errno == EAGAIN || errno == EWOULDBLOCK)
+      return -EAGAIN;
+    else
+      return -errno;
+  }
+
+  return size;
+}
+
+
+static int uv__udp_set_membership4(uv_udp_t* handle,
+                                   const struct sockaddr_in* multicast_addr,
+                                   const char* interface_addr,
+                                   uv_membership membership) {
+  struct ip_mreq mreq;
+  int optname;
+  int err;
+
+  memset(&mreq, 0, sizeof mreq);
+
+  if (interface_addr) {
+    err = uv_inet_pton(AF_INET, interface_addr, &mreq.imr_interface.s_addr);
+    if (err)
+      return err;
+  } else {
+    mreq.imr_interface.s_addr = htonl(INADDR_ANY);
+  }
+
+  mreq.imr_multiaddr.s_addr = multicast_addr->sin_addr.s_addr;
+
+  switch (membership) {
+  case UV_JOIN_GROUP:
+    optname = IP_ADD_MEMBERSHIP;
+    break;
+  case UV_LEAVE_GROUP:
+    optname = IP_DROP_MEMBERSHIP;
+    break;
+  default:
+    return -EINVAL;
+  }
+
+  if (setsockopt(handle->io_watcher.fd,
+                 IPPROTO_IP,
+                 optname,
+                 &mreq,
+                 sizeof(mreq))) {
+#if defined(__MVS__)
+  if (errno == ENXIO)
+    return -ENODEV;
+#endif
+    return -errno;
+  }
+
+  return 0;
+}
+
+
+static int uv__udp_set_membership6(uv_udp_t* handle,
+                                   const struct sockaddr_in6* multicast_addr,
+                                   const char* interface_addr,
+                                   uv_membership membership) {
+  int optname;
+  struct ipv6_mreq mreq;
+  struct sockaddr_in6 addr6;
+
+  memset(&mreq, 0, sizeof mreq);
+
+  if (interface_addr) {
+    if (uv_ip6_addr(interface_addr, 0, &addr6))
+      return -EINVAL;
+    mreq.ipv6mr_interface = addr6.sin6_scope_id;
+  } else {
+    mreq.ipv6mr_interface = 0;
+  }
+
+  mreq.ipv6mr_multiaddr = multicast_addr->sin6_addr;
+
+  switch (membership) {
+  case UV_JOIN_GROUP:
+    optname = IPV6_ADD_MEMBERSHIP;
+    break;
+  case UV_LEAVE_GROUP:
+    optname = IPV6_DROP_MEMBERSHIP;
+    break;
+  default:
+    return -EINVAL;
+  }
+
+  if (setsockopt(handle->io_watcher.fd,
+                 IPPROTO_IPV6,
+                 optname,
+                 &mreq,
+                 sizeof(mreq))) {
+#if defined(__MVS__)
+  if (errno == ENXIO)
+    return -ENODEV;
+#endif
+    return -errno;
+  }
+
+  return 0;
+}
+
+
+int uv_udp_init_ex(uv_loop_t* loop, uv_udp_t* handle, unsigned int flags) {
+  int domain;
+  int err;
+  int fd;
+
+  /* Use the lower 8 bits for the domain */
+  domain = flags & 0xFF;
+  if (domain != AF_INET && domain != AF_INET6 && domain != AF_UNSPEC)
+    return -EINVAL;
+
+  if (flags & ~0xFF)
+    return -EINVAL;
+
+  if (domain != AF_UNSPEC) {
+    err = uv__socket(domain, SOCK_DGRAM, 0);
+    if (err < 0)
+      return err;
+    fd = err;
+  } else {
+    fd = -1;
+  }
+
+  uv__handle_init(loop, (uv_handle_t*)handle, UV_UDP);
+  handle->alloc_cb = NULL;
+  handle->recv_cb = NULL;
+  handle->send_queue_size = 0;
+  handle->send_queue_count = 0;
+  uv__io_init(&handle->io_watcher, uv__udp_io, fd);
+  QUEUE_INIT(&handle->write_queue);
+  QUEUE_INIT(&handle->write_completed_queue);
+  return 0;
+}
+
+
+int uv_udp_init(uv_loop_t* loop, uv_udp_t* handle) {
+  return uv_udp_init_ex(loop, handle, AF_UNSPEC);
+}
+
+
+int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock) {
+  int err;
+
+  /* Check for already active socket. */
+  if (handle->io_watcher.fd != -1)
+    return -EBUSY;
+
+  err = uv__nonblock(sock, 1);
+  if (err)
+    return err;
+
+  err = uv__set_reuse(sock);
+  if (err)
+    return err;
+
+  handle->io_watcher.fd = sock;
+  return 0;
+}
+
+
+int uv_udp_set_membership(uv_udp_t* handle,
+                          const char* multicast_addr,
+                          const char* interface_addr,
+                          uv_membership membership) {
+  int err;
+  struct sockaddr_in addr4;
+  struct sockaddr_in6 addr6;
+
+  if (uv_ip4_addr(multicast_addr, 0, &addr4) == 0) {
+    err = uv__udp_maybe_deferred_bind(handle, AF_INET, UV_UDP_REUSEADDR);
+    if (err)
+      return err;
+    return uv__udp_set_membership4(handle, &addr4, interface_addr, membership);
+  } else if (uv_ip6_addr(multicast_addr, 0, &addr6) == 0) {
+    err = uv__udp_maybe_deferred_bind(handle, AF_INET6, UV_UDP_REUSEADDR);
+    if (err)
+      return err;
+    return uv__udp_set_membership6(handle, &addr6, interface_addr, membership);
+  } else {
+    return -EINVAL;
+  }
+}
+
+static int uv__setsockopt(uv_udp_t* handle,
+                         int option4,
+                         int option6,
+                         const void* val,
+                         size_t size) {
+  int r;
+
+  if (handle->flags & UV_HANDLE_IPV6)
+    r = setsockopt(handle->io_watcher.fd,
+                   IPPROTO_IPV6,
+                   option6,
+                   val,
+                   size);
+  else
+    r = setsockopt(handle->io_watcher.fd,
+                   IPPROTO_IP,
+                   option4,
+                   val,
+                   size);
+  if (r)
+    return -errno;
+
+  return 0;
+}
+
+static int uv__setsockopt_maybe_char(uv_udp_t* handle,
+                                     int option4,
+                                     int option6,
+                                     int val) {
+#if defined(__sun) || defined(_AIX) || defined(__MVS__)
+  char arg = val;
+#elif defined(__OpenBSD__)
+  unsigned char arg = val;
+#else
+  int arg = val;
+#endif
+
+  if (val < 0 || val > 255)
+    return -EINVAL;
+
+  return uv__setsockopt(handle, option4, option6, &arg, sizeof(arg));
+}
+
+
+int uv_udp_set_broadcast(uv_udp_t* handle, int on) {
+  if (setsockopt(handle->io_watcher.fd,
+                 SOL_SOCKET,
+                 SO_BROADCAST,
+                 &on,
+                 sizeof(on))) {
+    return -errno;
+  }
+
+  return 0;
+}
+
+
+int uv_udp_set_ttl(uv_udp_t* handle, int ttl) {
+  if (ttl < 1 || ttl > 255)
+    return -EINVAL;
+
+#if defined(__MVS__)
+  if (!(handle->flags & UV_HANDLE_IPV6))
+    return -ENOTSUP;  /* z/OS does not support setting the TTL for IPv4. */
+#endif
+
+/*
+ * On Solaris and derivatives such as SmartOS, the length of socket options
+ * is sizeof(int) for IP_TTL and IPV6_UNICAST_HOPS,
+ * so hardcode the size of these options on this platform,
+ * and use the general uv__setsockopt_maybe_char call on other platforms.
+ */
+#if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
+    defined(__MVS__)
+
+  return uv__setsockopt(handle,
+                        IP_TTL,
+                        IPV6_UNICAST_HOPS,
+                        &ttl,
+                        sizeof(ttl));
+#endif /* defined(__sun) || defined(_AIX) || defined (__OpenBSD__) ||
+          defined(__MVS__) */
+
+  return uv__setsockopt_maybe_char(handle,
+                                   IP_TTL,
+                                   IPV6_UNICAST_HOPS,
+                                   ttl);
+}
+
+
+int uv_udp_set_multicast_ttl(uv_udp_t* handle, int ttl) {
+/*
+ * On Solaris and derivatives such as SmartOS, the length of socket options
+ * is sizeof(int) for IPV6_MULTICAST_HOPS and sizeof(char) for
+ * IP_MULTICAST_TTL, so hardcode the size of the option in the IPv6 case,
+ * and use the general uv__setsockopt_maybe_char call otherwise.
+ */
+#if defined(__sun) || defined(_AIX) || defined(__MVS__)
+  if (handle->flags & UV_HANDLE_IPV6)
+    return uv__setsockopt(handle,
+                          IP_MULTICAST_TTL,
+                          IPV6_MULTICAST_HOPS,
+                          &ttl,
+                          sizeof(ttl));
+#endif /* defined(__sun) || defined(_AIX) || defined(__MVS__) */
+
+  return uv__setsockopt_maybe_char(handle,
+                                   IP_MULTICAST_TTL,
+                                   IPV6_MULTICAST_HOPS,
+                                   ttl);
+}
+
+
+int uv_udp_set_multicast_loop(uv_udp_t* handle, int on) {
+/*
+ * On Solaris and derivatives such as SmartOS, the length of socket options
+ * is sizeof(int) for IPV6_MULTICAST_LOOP and sizeof(char) for
+ * IP_MULTICAST_LOOP, so hardcode the size of the option in the IPv6 case,
+ * and use the general uv__setsockopt_maybe_char call otherwise.
+ */
+#if defined(__sun) || defined(_AIX) || defined(__MVS__)
+  if (handle->flags & UV_HANDLE_IPV6)
+    return uv__setsockopt(handle,
+                          IP_MULTICAST_LOOP,
+                          IPV6_MULTICAST_LOOP,
+                          &on,
+                          sizeof(on));
+#endif /* defined(__sun) || defined(_AIX) || defined(__MVS__) */
+
+  return uv__setsockopt_maybe_char(handle,
+                                   IP_MULTICAST_LOOP,
+                                   IPV6_MULTICAST_LOOP,
+                                   on);
+}
+
+int uv_udp_set_multicast_interface(uv_udp_t* handle, const char* interface_addr) {
+  struct sockaddr_storage addr_st;
+  struct sockaddr_in* addr4;
+  struct sockaddr_in6* addr6;
+
+  addr4 = (struct sockaddr_in*) &addr_st;
+  addr6 = (struct sockaddr_in6*) &addr_st;
+
+  if (!interface_addr) {
+    memset(&addr_st, 0, sizeof addr_st);
+    if (handle->flags & UV_HANDLE_IPV6) {
+      addr_st.ss_family = AF_INET6;
+      addr6->sin6_scope_id = 0;
+    } else {
+      addr_st.ss_family = AF_INET;
+      addr4->sin_addr.s_addr = htonl(INADDR_ANY);
+    }
+  } else if (uv_ip4_addr(interface_addr, 0, addr4) == 0) {
+    /* nothing, address was parsed */
+  } else if (uv_ip6_addr(interface_addr, 0, addr6) == 0) {
+    /* nothing, address was parsed */
+  } else {
+    return -EINVAL;
+  }
+
+  if (addr_st.ss_family == AF_INET) {
+    if (setsockopt(handle->io_watcher.fd,
+                   IPPROTO_IP,
+                   IP_MULTICAST_IF,
+                   (void*) &addr4->sin_addr,
+                   sizeof(addr4->sin_addr)) == -1) {
+      return -errno;
+    }
+  } else if (addr_st.ss_family == AF_INET6) {
+    if (setsockopt(handle->io_watcher.fd,
+                   IPPROTO_IPV6,
+                   IPV6_MULTICAST_IF,
+                   &addr6->sin6_scope_id,
+                   sizeof(addr6->sin6_scope_id)) == -1) {
+      return -errno;
+    }
+  } else {
+    assert(0 && "unexpected address family");
+    abort();
+  }
+
+  return 0;
+}
+
+
+int uv_udp_getsockname(const uv_udp_t* handle,
+                       struct sockaddr* name,
+                       int* namelen) {
+  socklen_t socklen;
+
+  if (handle->io_watcher.fd == -1)
+    return -EINVAL;  /* FIXME(bnoordhuis) -EBADF */
+
+  /* sizeof(socklen_t) != sizeof(int) on some systems. */
+  socklen = (socklen_t) *namelen;
+
+  if (getsockname(handle->io_watcher.fd, name, &socklen))
+    return -errno;
+
+  *namelen = (int) socklen;
+  return 0;
+}
+
+
+int uv__udp_recv_start(uv_udp_t* handle,
+                       uv_alloc_cb alloc_cb,
+                       uv_udp_recv_cb recv_cb) {
+  int err;
+
+  if (alloc_cb == NULL || recv_cb == NULL)
+    return -EINVAL;
+
+  if (uv__io_active(&handle->io_watcher, POLLIN))
+    return -EALREADY;  /* FIXME(bnoordhuis) Should be -EBUSY. */
+
+  err = uv__udp_maybe_deferred_bind(handle, AF_INET, 0);
+  if (err)
+    return err;
+
+  handle->alloc_cb = alloc_cb;
+  handle->recv_cb = recv_cb;
+
+  uv__io_start(handle->loop, &handle->io_watcher, POLLIN);
+  uv__handle_start(handle);
+
+  return 0;
+}
+
+
+int uv__udp_recv_stop(uv_udp_t* handle) {
+  uv__io_stop(handle->loop, &handle->io_watcher, POLLIN);
+
+  if (!uv__io_active(&handle->io_watcher, POLLOUT))
+    uv__handle_stop(handle);
+
+  handle->alloc_cb = NULL;
+  handle->recv_cb = NULL;
+
+  return 0;
+}
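
A minimal UDP receiver sketch (illustrative only, not part of this patch) exercising the functions above via the public <uv.h> API; the slab buffer, port, and multicast group address are placeholders.

  #include <uv.h>
  #include <stdio.h>

  static char slab[64 * 1024];

  static void on_alloc(uv_handle_t* handle, size_t suggested, uv_buf_t* buf) {
    (void) handle;
    (void) suggested;
    *buf = uv_buf_init(slab, sizeof(slab));
  }

  static void on_recv(uv_udp_t* handle, ssize_t nread, const uv_buf_t* buf,
                      const struct sockaddr* addr, unsigned flags) {
    (void) handle;
    (void) buf;
    (void) flags;
    if (nread > 0 && addr != NULL)
      printf("received %zd bytes\n", nread);
    /* nread == 0 with addr == NULL means "nothing to read" (EAGAIN). */
  }

  int main(void) {
    uv_loop_t* loop = uv_default_loop();
    uv_udp_t handle;
    struct sockaddr_in addr;

    uv_udp_init(loop, &handle);
    uv_ip4_addr("0.0.0.0", 9999, &addr);
    uv_udp_bind(&handle, (const struct sockaddr*) &addr, UV_UDP_REUSEADDR);

    /* Optional: join a multicast group on the default interface. */
    uv_udp_set_membership(&handle, "239.255.0.1", NULL, UV_JOIN_GROUP);

    uv_udp_recv_start(&handle, on_alloc, on_recv);
    return uv_run(loop, UV_RUN_DEFAULT);
  }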

+ 654 - 0
Utilities/cmlibuv/src/uv-common.c

@@ -0,0 +1,654 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "uv-common.h"
+
+#include <assert.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <stddef.h> /* NULL */
+#include <stdio.h>
+#include <stdlib.h> /* malloc */
+#include <string.h> /* memset */
+
+#if defined(_WIN32)
+# include <malloc.h> /* malloc */
+#else
+# include <net/if.h> /* if_nametoindex */
+#endif
+
+
+typedef struct {
+  uv_malloc_func local_malloc;
+  uv_realloc_func local_realloc;
+  uv_calloc_func local_calloc;
+  uv_free_func local_free;
+} uv__allocator_t;
+
+static uv__allocator_t uv__allocator = {
+  malloc,
+  realloc,
+  calloc,
+  free,
+};
+
+char* uv__strdup(const char* s) {
+  size_t len = strlen(s) + 1;
+  char* m = uv__malloc(len);
+  if (m == NULL)
+    return NULL;
+  return memcpy(m, s, len);
+}
+
+char* uv__strndup(const char* s, size_t n) {
+  char* m;
+  size_t len = strlen(s);
+  if (n < len)
+    len = n;
+  m = uv__malloc(len + 1);
+  if (m == NULL)
+    return NULL;
+  m[len] = '\0';
+  return memcpy(m, s, len);
+}
+
+void* uv__malloc(size_t size) {
+  return uv__allocator.local_malloc(size);
+}
+
+void uv__free(void* ptr) {
+  int saved_errno;
+
+  /* Libuv expects that free() does not clobber errno.  The system allocator
+   * honors that assumption but custom allocators may not be so careful.
+   */
+  saved_errno = errno;
+  uv__allocator.local_free(ptr);
+  errno = saved_errno;
+}
+
+void* uv__calloc(size_t count, size_t size) {
+  return uv__allocator.local_calloc(count, size);
+}
+
+void* uv__realloc(void* ptr, size_t size) {
+  return uv__allocator.local_realloc(ptr, size);
+}
+
+int uv_replace_allocator(uv_malloc_func malloc_func,
+                         uv_realloc_func realloc_func,
+                         uv_calloc_func calloc_func,
+                         uv_free_func free_func) {
+  if (malloc_func == NULL || realloc_func == NULL ||
+      calloc_func == NULL || free_func == NULL) {
+    return UV_EINVAL;
+  }
+
+  uv__allocator.local_malloc = malloc_func;
+  uv__allocator.local_realloc = realloc_func;
+  uv__allocator.local_calloc = calloc_func;
+  uv__allocator.local_free = free_func;
+
+  return 0;
+}
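
A short sketch (illustrative only, not part of this patch) of uv_replace_allocator() as implemented above; the wrap_* functions are placeholders that simply forward to the C runtime.

  #include <uv.h>
  #include <stdlib.h>

  /* Forwarding wrappers; a real replacement might add tracking or pooling. */
  static void* wrap_malloc(size_t size) { return malloc(size); }
  static void* wrap_realloc(void* ptr, size_t size) { return realloc(ptr, size); }
  static void* wrap_calloc(size_t count, size_t size) { return calloc(count, size); }
  static void wrap_free(void* ptr) { free(ptr); }

  int main(void) {
    /* Replace the allocator before any other libuv call allocates memory. */
    uv_replace_allocator(wrap_malloc, wrap_realloc, wrap_calloc, wrap_free);
    return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
  }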
+
+#define XX(uc, lc) case UV_##uc: return sizeof(uv_##lc##_t);
+
+size_t uv_handle_size(uv_handle_type type) {
+  switch (type) {
+    UV_HANDLE_TYPE_MAP(XX)
+    default:
+      return -1;
+  }
+}
+
+size_t uv_req_size(uv_req_type type) {
+  switch(type) {
+    UV_REQ_TYPE_MAP(XX)
+    default:
+      return -1;
+  }
+}
+
+#undef XX
+
+
+size_t uv_loop_size(void) {
+  return sizeof(uv_loop_t);
+}
+
+
+uv_buf_t uv_buf_init(char* base, unsigned int len) {
+  uv_buf_t buf;
+  buf.base = base;
+  buf.len = len;
+  return buf;
+}
+
+
+static const char* uv__unknown_err_code(int err) {
+  char buf[32];
+  char* copy;
+
+  snprintf(buf, sizeof(buf), "Unknown system error %d", err);
+  copy = uv__strdup(buf);
+
+  return copy != NULL ? copy : "Unknown system error";
+}
+
+
+#define UV_ERR_NAME_GEN(name, _) case UV_ ## name: return #name;
+const char* uv_err_name(int err) {
+  switch (err) {
+    UV_ERRNO_MAP(UV_ERR_NAME_GEN)
+  }
+  return uv__unknown_err_code(err);
+}
+#undef UV_ERR_NAME_GEN
+
+
+#define UV_STRERROR_GEN(name, msg) case UV_ ## name: return msg;
+const char* uv_strerror(int err) {
+  switch (err) {
+    UV_ERRNO_MAP(UV_STRERROR_GEN)
+  }
+  return uv__unknown_err_code(err);
+}
+#undef UV_STRERROR_GEN
+
+
+int uv_ip4_addr(const char* ip, int port, struct sockaddr_in* addr) {
+  memset(addr, 0, sizeof(*addr));
+  addr->sin_family = AF_INET;
+  addr->sin_port = htons(port);
+  return uv_inet_pton(AF_INET, ip, &(addr->sin_addr.s_addr));
+}
+
+
+int uv_ip6_addr(const char* ip, int port, struct sockaddr_in6* addr) {
+  char address_part[40];
+  size_t address_part_size;
+  const char* zone_index;
+
+  memset(addr, 0, sizeof(*addr));
+  addr->sin6_family = AF_INET6;
+  addr->sin6_port = htons(port);
+
+  zone_index = strchr(ip, '%');
+  if (zone_index != NULL) {
+    address_part_size = zone_index - ip;
+    if (address_part_size >= sizeof(address_part))
+      address_part_size = sizeof(address_part) - 1;
+
+    memcpy(address_part, ip, address_part_size);
+    address_part[address_part_size] = '\0';
+    ip = address_part;
+
+    zone_index++; /* skip '%' */
+    /* NOTE: unknown interface (id=0) is silently ignored */
+#ifdef _WIN32
+    addr->sin6_scope_id = atoi(zone_index);
+#else
+    addr->sin6_scope_id = if_nametoindex(zone_index);
+#endif
+  }
+
+  return uv_inet_pton(AF_INET6, ip, &addr->sin6_addr);
+}
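
A usage sketch of the zone-index handling above (illustration only, not part of the imported sources); "eth0" is a placeholder interface name, and on Windows a numeric zone id such as "%2" would be used instead:

#include <stdio.h>
#include <uv.h>

int main(void) {
  struct sockaddr_in6 addr;
  int r = uv_ip6_addr("fe80::1%eth0", 9000, &addr);
  if (r < 0)
    fprintf(stderr, "uv_ip6_addr: %s\n", uv_strerror(r));
  else
    printf("scope id: %u\n", (unsigned) addr.sin6_scope_id);
  return 0;
}
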
+
+
+int uv_ip4_name(const struct sockaddr_in* src, char* dst, size_t size) {
+  return uv_inet_ntop(AF_INET, &src->sin_addr, dst, size);
+}
+
+
+int uv_ip6_name(const struct sockaddr_in6* src, char* dst, size_t size) {
+  return uv_inet_ntop(AF_INET6, &src->sin6_addr, dst, size);
+}
+
+
+int uv_tcp_bind(uv_tcp_t* handle,
+                const struct sockaddr* addr,
+                unsigned int flags) {
+  unsigned int addrlen;
+
+  if (handle->type != UV_TCP)
+    return UV_EINVAL;
+
+  if (addr->sa_family == AF_INET)
+    addrlen = sizeof(struct sockaddr_in);
+  else if (addr->sa_family == AF_INET6)
+    addrlen = sizeof(struct sockaddr_in6);
+  else
+    return UV_EINVAL;
+
+  return uv__tcp_bind(handle, addr, addrlen, flags);
+}
+
+
+int uv_udp_bind(uv_udp_t* handle,
+                const struct sockaddr* addr,
+                unsigned int flags) {
+  unsigned int addrlen;
+
+  if (handle->type != UV_UDP)
+    return UV_EINVAL;
+
+  if (addr->sa_family == AF_INET)
+    addrlen = sizeof(struct sockaddr_in);
+  else if (addr->sa_family == AF_INET6)
+    addrlen = sizeof(struct sockaddr_in6);
+  else
+    return UV_EINVAL;
+
+  return uv__udp_bind(handle, addr, addrlen, flags);
+}
+
+
+int uv_tcp_connect(uv_connect_t* req,
+                   uv_tcp_t* handle,
+                   const struct sockaddr* addr,
+                   uv_connect_cb cb) {
+  unsigned int addrlen;
+
+  if (handle->type != UV_TCP)
+    return UV_EINVAL;
+
+  if (addr->sa_family == AF_INET)
+    addrlen = sizeof(struct sockaddr_in);
+  else if (addr->sa_family == AF_INET6)
+    addrlen = sizeof(struct sockaddr_in6);
+  else
+    return UV_EINVAL;
+
+  return uv__tcp_connect(req, handle, addr, addrlen, cb);
+}
+
+
+int uv_udp_send(uv_udp_send_t* req,
+                uv_udp_t* handle,
+                const uv_buf_t bufs[],
+                unsigned int nbufs,
+                const struct sockaddr* addr,
+                uv_udp_send_cb send_cb) {
+  unsigned int addrlen;
+
+  if (handle->type != UV_UDP)
+    return UV_EINVAL;
+
+  if (addr->sa_family == AF_INET)
+    addrlen = sizeof(struct sockaddr_in);
+  else if (addr->sa_family == AF_INET6)
+    addrlen = sizeof(struct sockaddr_in6);
+  else
+    return UV_EINVAL;
+
+  return uv__udp_send(req, handle, bufs, nbufs, addr, addrlen, send_cb);
+}
+
+
+int uv_udp_try_send(uv_udp_t* handle,
+                    const uv_buf_t bufs[],
+                    unsigned int nbufs,
+                    const struct sockaddr* addr) {
+  unsigned int addrlen;
+
+  if (handle->type != UV_UDP)
+    return UV_EINVAL;
+
+  if (addr->sa_family == AF_INET)
+    addrlen = sizeof(struct sockaddr_in);
+  else if (addr->sa_family == AF_INET6)
+    addrlen = sizeof(struct sockaddr_in6);
+  else
+    return UV_EINVAL;
+
+  return uv__udp_try_send(handle, bufs, nbufs, addr, addrlen);
+}
+
+
+int uv_udp_recv_start(uv_udp_t* handle,
+                      uv_alloc_cb alloc_cb,
+                      uv_udp_recv_cb recv_cb) {
+  if (handle->type != UV_UDP || alloc_cb == NULL || recv_cb == NULL)
+    return UV_EINVAL;
+  else
+    return uv__udp_recv_start(handle, alloc_cb, recv_cb);
+}
+
+
+int uv_udp_recv_stop(uv_udp_t* handle) {
+  if (handle->type != UV_UDP)
+    return UV_EINVAL;
+  else
+    return uv__udp_recv_stop(handle);
+}
+
+
+void uv_walk(uv_loop_t* loop, uv_walk_cb walk_cb, void* arg) {
+  QUEUE queue;
+  QUEUE* q;
+  uv_handle_t* h;
+
+  QUEUE_MOVE(&loop->handle_queue, &queue);
+  while (!QUEUE_EMPTY(&queue)) {
+    q = QUEUE_HEAD(&queue);
+    h = QUEUE_DATA(q, uv_handle_t, handle_queue);
+
+    QUEUE_REMOVE(q);
+    QUEUE_INSERT_TAIL(&loop->handle_queue, q);
+
+    if (h->flags & UV__HANDLE_INTERNAL) continue;
+    walk_cb(h, arg);
+  }
+}
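
The walk function above is the usual building block for loop shutdown. A minimal sketch, assuming every handle may simply be closed:

#include <uv.h>

static void on_walk(uv_handle_t* handle, void* arg) {
  if (!uv_is_closing(handle))
    uv_close(handle, NULL);  /* NULL close callback: nothing else to free */
}

/* Close every handle, let the close callbacks run, then tear the loop down. */
static void shutdown_loop(uv_loop_t* loop) {
  uv_walk(loop, on_walk, NULL);
  uv_run(loop, UV_RUN_DEFAULT);
  uv_loop_close(loop);
}
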
+
+
+static void uv__print_handles(uv_loop_t* loop, int only_active, FILE* stream) {
+  const char* type;
+  QUEUE* q;
+  uv_handle_t* h;
+
+  if (loop == NULL)
+    loop = uv_default_loop();
+
+  QUEUE_FOREACH(q, &loop->handle_queue) {
+    h = QUEUE_DATA(q, uv_handle_t, handle_queue);
+
+    if (only_active && !uv__is_active(h))
+      continue;
+
+    switch (h->type) {
+#define X(uc, lc) case UV_##uc: type = #lc; break;
+      UV_HANDLE_TYPE_MAP(X)
+#undef X
+      default: type = "<unknown>";
+    }
+
+    fprintf(stream,
+            "[%c%c%c] %-8s %p\n",
+            "R-"[!(h->flags & UV__HANDLE_REF)],
+            "A-"[!(h->flags & UV__HANDLE_ACTIVE)],
+            "I-"[!(h->flags & UV__HANDLE_INTERNAL)],
+            type,
+            (void*)h);
+  }
+}
+
+
+void uv_print_all_handles(uv_loop_t* loop, FILE* stream) {
+  uv__print_handles(loop, 0, stream);
+}
+
+
+void uv_print_active_handles(uv_loop_t* loop, FILE* stream) {
+  uv__print_handles(loop, 1, stream);
+}
+
+
+void uv_ref(uv_handle_t* handle) {
+  uv__handle_ref(handle);
+}
+
+
+void uv_unref(uv_handle_t* handle) {
+  uv__handle_unref(handle);
+}
+
+
+int uv_has_ref(const uv_handle_t* handle) {
+  return uv__has_ref(handle);
+}
+
+
+void uv_stop(uv_loop_t* loop) {
+  loop->stop_flag = 1;
+}
+
+
+uint64_t uv_now(const uv_loop_t* loop) {
+  return loop->time;
+}
+
+
+
+size_t uv__count_bufs(const uv_buf_t bufs[], unsigned int nbufs) {
+  unsigned int i;
+  size_t bytes;
+
+  bytes = 0;
+  for (i = 0; i < nbufs; i++)
+    bytes += (size_t) bufs[i].len;
+
+  return bytes;
+}
+
+int uv_recv_buffer_size(uv_handle_t* handle, int* value) {
+  return uv__socket_sockopt(handle, SO_RCVBUF, value);
+}
+
+int uv_send_buffer_size(uv_handle_t* handle, int *value) {
+  return uv__socket_sockopt(handle, SO_SNDBUF, value);
+}
+
+int uv_fs_event_getpath(uv_fs_event_t* handle, char* buffer, size_t* size) {
+  size_t required_len;
+
+  if (!uv__is_active(handle)) {
+    *size = 0;
+    return UV_EINVAL;
+  }
+
+  required_len = strlen(handle->path);
+  if (required_len >= *size) {
+    *size = required_len + 1;
+    return UV_ENOBUFS;
+  }
+
+  memcpy(buffer, handle->path, required_len);
+  *size = required_len;
+  buffer[required_len] = '\0';
+
+  return 0;
+}
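
The UV_ENOBUFS path above enables the usual two-call pattern: query the required size first, then retry with a large-enough buffer. A sketch (not part of the imported sources), assuming handle is an active uv_fs_event_t:

#include <stdlib.h>
#include <uv.h>

static char* fs_event_path_dup(uv_fs_event_t* handle) {
  size_t size = 0;
  char* buf;

  /* First call reports the required size (including the terminator). */
  if (uv_fs_event_getpath(handle, NULL, &size) != UV_ENOBUFS)
    return NULL;

  buf = malloc(size);
  if (buf != NULL && uv_fs_event_getpath(handle, buf, &size) != 0) {
    free(buf);
    buf = NULL;
  }
  return buf;
}
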
+
+/* The Windows implementation does not have the same structure layout as
+ * the Unix implementation (nbufs is not directly inside req but is
+ * contained in a nested union/struct), so this function locates it.
+ */
+static unsigned int* uv__get_nbufs(uv_fs_t* req) {
+#ifdef _WIN32
+  return &req->fs.info.nbufs;
+#else
+  return &req->nbufs;
+#endif
+}
+
+/* uv_fs_scandir() uses the system allocator to allocate memory on non-Windows
+ * systems. So, the memory should be released using free(). On Windows,
+ * uv__malloc() is used, so use uv__free() to free memory.
+ */
+#ifdef _WIN32
+# define uv__fs_scandir_free uv__free
+#else
+# define uv__fs_scandir_free free
+#endif
+
+void uv__fs_scandir_cleanup(uv_fs_t* req) {
+  uv__dirent_t** dents;
+
+  unsigned int* nbufs = uv__get_nbufs(req);
+
+  dents = req->ptr;
+  if (*nbufs > 0 && *nbufs != (unsigned int) req->result)
+    (*nbufs)--;
+  for (; *nbufs < (unsigned int) req->result; (*nbufs)++)
+    uv__fs_scandir_free(dents[*nbufs]);
+
+  uv__fs_scandir_free(req->ptr);
+  req->ptr = NULL;
+}
+
+
+int uv_fs_scandir_next(uv_fs_t* req, uv_dirent_t* ent) {
+  uv__dirent_t** dents;
+  uv__dirent_t* dent;
+
+  unsigned int* nbufs = uv__get_nbufs(req);
+
+  dents = req->ptr;
+
+  /* Free the previous entry */
+  if (*nbufs > 0)
+    uv__fs_scandir_free(dents[*nbufs - 1]);
+
+  /* End was already reached */
+  if (*nbufs == (unsigned int) req->result) {
+    uv__fs_scandir_free(dents);
+    req->ptr = NULL;
+    return UV_EOF;
+  }
+
+  dent = dents[(*nbufs)++];
+
+  ent->name = dent->d_name;
+#ifdef HAVE_DIRENT_TYPES
+  switch (dent->d_type) {
+    case UV__DT_DIR:
+      ent->type = UV_DIRENT_DIR;
+      break;
+    case UV__DT_FILE:
+      ent->type = UV_DIRENT_FILE;
+      break;
+    case UV__DT_LINK:
+      ent->type = UV_DIRENT_LINK;
+      break;
+    case UV__DT_FIFO:
+      ent->type = UV_DIRENT_FIFO;
+      break;
+    case UV__DT_SOCKET:
+      ent->type = UV_DIRENT_SOCKET;
+      break;
+    case UV__DT_CHAR:
+      ent->type = UV_DIRENT_CHAR;
+      break;
+    case UV__DT_BLOCK:
+      ent->type = UV_DIRENT_BLOCK;
+      break;
+    default:
+      ent->type = UV_DIRENT_UNKNOWN;
+  }
+#else
+  ent->type = UV_DIRENT_UNKNOWN;
+#endif
+
+  return 0;
+}
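
A synchronous usage sketch of the scandir helpers above (not part of the imported sources): a NULL callback makes uv_fs_scandir run synchronously, and uv_fs_scandir_next yields entries until UV_EOF.

#include <stdio.h>
#include <uv.h>

static int list_dir(const char* path) {
  uv_fs_t req;
  uv_dirent_t ent;
  int r;

  r = uv_fs_scandir(uv_default_loop(), &req, path, 0, NULL);
  if (r >= 0) {
    while (uv_fs_scandir_next(&req, &ent) != UV_EOF)
      printf("%s%s\n", ent.name, ent.type == UV_DIRENT_DIR ? "/" : "");
    r = 0;
  }
  uv_fs_req_cleanup(&req);
  return r;
}
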
+
+
+int uv_loop_configure(uv_loop_t* loop, uv_loop_option option, ...) {
+  va_list ap;
+  int err;
+
+  va_start(ap, option);
+  /* Any platform-agnostic options should be handled here. */
+  err = uv__loop_configure(loop, option, ap);
+  va_end(ap);
+
+  return err;
+}
+
+
+static uv_loop_t default_loop_struct;
+static uv_loop_t* default_loop_ptr;
+
+
+uv_loop_t* uv_default_loop(void) {
+  if (default_loop_ptr != NULL)
+    return default_loop_ptr;
+
+  if (uv_loop_init(&default_loop_struct))
+    return NULL;
+
+  default_loop_ptr = &default_loop_struct;
+  return default_loop_ptr;
+}
+
+
+uv_loop_t* uv_loop_new(void) {
+  uv_loop_t* loop;
+
+  loop = uv__malloc(sizeof(*loop));
+  if (loop == NULL)
+    return NULL;
+
+  if (uv_loop_init(loop)) {
+    uv__free(loop);
+    return NULL;
+  }
+
+  return loop;
+}
+
+
+int uv_loop_close(uv_loop_t* loop) {
+  QUEUE* q;
+  uv_handle_t* h;
+#ifndef NDEBUG
+  void* saved_data;
+#endif
+
+  if (!QUEUE_EMPTY(&(loop)->active_reqs))
+    return UV_EBUSY;
+
+  QUEUE_FOREACH(q, &loop->handle_queue) {
+    h = QUEUE_DATA(q, uv_handle_t, handle_queue);
+    if (!(h->flags & UV__HANDLE_INTERNAL))
+      return UV_EBUSY;
+  }
+
+  uv__loop_close(loop);
+
+#ifndef NDEBUG
+  saved_data = loop->data;
+  memset(loop, -1, sizeof(*loop));
+  loop->data = saved_data;
+#endif
+  if (loop == default_loop_ptr)
+    default_loop_ptr = NULL;
+
+  return 0;
+}
+
+
+void uv_loop_delete(uv_loop_t* loop) {
+  uv_loop_t* default_loop;
+  int err;
+
+  default_loop = default_loop_ptr;
+
+  err = uv_loop_close(loop);
+  (void) err;    /* Squelch compiler warnings. */
+  assert(err == 0);
+  if (loop != default_loop)
+    uv__free(loop);
+}
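
For reference, a sketch of the loop lifecycle implemented above (illustration only): uv_loop_init/uv_loop_close on caller-owned storage is the current API, with uv_loop_new/uv_loop_delete kept as the older heap-allocating equivalents.

#include <stdlib.h>
#include <uv.h>

int main(void) {
  uv_loop_t* loop = malloc(sizeof(*loop));
  if (loop == NULL || uv_loop_init(loop) != 0) {
    free(loop);
    return 1;
  }

  uv_run(loop, UV_RUN_DEFAULT);  /* returns once nothing keeps the loop alive */

  if (uv_loop_close(loop) == UV_EBUSY) {
    /* Handles are still open; a real program would uv_walk/uv_close first. */
  }
  free(loop);
  return 0;
}
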

+ 227 - 0
Utilities/cmlibuv/src/uv-common.h

@@ -0,0 +1,227 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/*
+ * This file is private to libuv. It provides common functionality to both
+ * Windows and Unix backends.
+ */
+
+#ifndef UV_COMMON_H_
+#define UV_COMMON_H_
+
+#include <assert.h>
+#include <stdarg.h>
+#include <stddef.h>
+
+#if defined(_MSC_VER) && _MSC_VER < 1600
+# include "stdint-msvc2008.h"
+#else
+# include <stdint.h>
+#endif
+
+#include "uv.h"
+#include "tree.h"
+#include "queue.h"
+
+#if !defined(snprintf) && defined(_MSC_VER) && _MSC_VER < 1900
+extern int snprintf(char*, size_t, const char*, ...);
+#endif
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+#define container_of(ptr, type, member) \
+  ((type *) ((char *) (ptr) - offsetof(type, member)))
+
+#define STATIC_ASSERT(expr)                                                   \
+  void uv__static_assert(int static_assert_failed[1 - 2 * !(expr)])
+
+#ifndef _WIN32
+enum {
+  UV__HANDLE_INTERNAL = 0x8000,
+  UV__HANDLE_ACTIVE   = 0x4000,
+  UV__HANDLE_REF      = 0x2000,
+  UV__HANDLE_CLOSING  = 0 /* no-op on unix */
+};
+#else
+# define UV__HANDLE_INTERNAL  0x80
+# define UV__HANDLE_ACTIVE    0x40
+# define UV__HANDLE_REF       0x20
+# define UV__HANDLE_CLOSING   0x01
+#endif
+
+int uv__loop_configure(uv_loop_t* loop, uv_loop_option option, va_list ap);
+
+void uv__loop_close(uv_loop_t* loop);
+
+int uv__tcp_bind(uv_tcp_t* tcp,
+                 const struct sockaddr* addr,
+                 unsigned int addrlen,
+                 unsigned int flags);
+
+int uv__tcp_connect(uv_connect_t* req,
+                   uv_tcp_t* handle,
+                   const struct sockaddr* addr,
+                   unsigned int addrlen,
+                   uv_connect_cb cb);
+
+int uv__udp_bind(uv_udp_t* handle,
+                 const struct sockaddr* addr,
+                 unsigned int  addrlen,
+                 unsigned int flags);
+
+int uv__udp_send(uv_udp_send_t* req,
+                 uv_udp_t* handle,
+                 const uv_buf_t bufs[],
+                 unsigned int nbufs,
+                 const struct sockaddr* addr,
+                 unsigned int addrlen,
+                 uv_udp_send_cb send_cb);
+
+int uv__udp_try_send(uv_udp_t* handle,
+                     const uv_buf_t bufs[],
+                     unsigned int nbufs,
+                     const struct sockaddr* addr,
+                     unsigned int addrlen);
+
+int uv__udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloccb,
+                       uv_udp_recv_cb recv_cb);
+
+int uv__udp_recv_stop(uv_udp_t* handle);
+
+void uv__fs_poll_close(uv_fs_poll_t* handle);
+
+int uv__getaddrinfo_translate_error(int sys_err);    /* EAI_* error. */
+
+void uv__work_submit(uv_loop_t* loop,
+                     struct uv__work *w,
+                     void (*work)(struct uv__work *w),
+                     void (*done)(struct uv__work *w, int status));
+
+void uv__work_done(uv_async_t* handle);
+
+size_t uv__count_bufs(const uv_buf_t bufs[], unsigned int nbufs);
+
+int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value);
+
+void uv__fs_scandir_cleanup(uv_fs_t* req);
+
+#define uv__has_active_reqs(loop)                                             \
+  (QUEUE_EMPTY(&(loop)->active_reqs) == 0)
+
+#define uv__req_register(loop, req)                                           \
+  do {                                                                        \
+    QUEUE_INSERT_TAIL(&(loop)->active_reqs, &(req)->active_queue);            \
+  }                                                                           \
+  while (0)
+
+#define uv__req_unregister(loop, req)                                         \
+  do {                                                                        \
+    assert(uv__has_active_reqs(loop));                                        \
+    QUEUE_REMOVE(&(req)->active_queue);                                       \
+  }                                                                           \
+  while (0)
+
+#define uv__has_active_handles(loop)                                          \
+  ((loop)->active_handles > 0)
+
+#define uv__active_handle_add(h)                                              \
+  do {                                                                        \
+    (h)->loop->active_handles++;                                              \
+  }                                                                           \
+  while (0)
+
+#define uv__active_handle_rm(h)                                               \
+  do {                                                                        \
+    (h)->loop->active_handles--;                                              \
+  }                                                                           \
+  while (0)
+
+#define uv__is_active(h)                                                      \
+  (((h)->flags & UV__HANDLE_ACTIVE) != 0)
+
+#define uv__is_closing(h)                                                     \
+  (((h)->flags & (UV_CLOSING |  UV_CLOSED)) != 0)
+
+#define uv__handle_start(h)                                                   \
+  do {                                                                        \
+    assert(((h)->flags & UV__HANDLE_CLOSING) == 0);                           \
+    if (((h)->flags & UV__HANDLE_ACTIVE) != 0) break;                         \
+    (h)->flags |= UV__HANDLE_ACTIVE;                                          \
+    if (((h)->flags & UV__HANDLE_REF) != 0) uv__active_handle_add(h);         \
+  }                                                                           \
+  while (0)
+
+#define uv__handle_stop(h)                                                    \
+  do {                                                                        \
+    assert(((h)->flags & UV__HANDLE_CLOSING) == 0);                           \
+    if (((h)->flags & UV__HANDLE_ACTIVE) == 0) break;                         \
+    (h)->flags &= ~UV__HANDLE_ACTIVE;                                         \
+    if (((h)->flags & UV__HANDLE_REF) != 0) uv__active_handle_rm(h);          \
+  }                                                                           \
+  while (0)
+
+#define uv__handle_ref(h)                                                     \
+  do {                                                                        \
+    if (((h)->flags & UV__HANDLE_REF) != 0) break;                            \
+    (h)->flags |= UV__HANDLE_REF;                                             \
+    if (((h)->flags & UV__HANDLE_CLOSING) != 0) break;                        \
+    if (((h)->flags & UV__HANDLE_ACTIVE) != 0) uv__active_handle_add(h);      \
+  }                                                                           \
+  while (0)
+
+#define uv__handle_unref(h)                                                   \
+  do {                                                                        \
+    if (((h)->flags & UV__HANDLE_REF) == 0) break;                            \
+    (h)->flags &= ~UV__HANDLE_REF;                                            \
+    if (((h)->flags & UV__HANDLE_CLOSING) != 0) break;                        \
+    if (((h)->flags & UV__HANDLE_ACTIVE) != 0) uv__active_handle_rm(h);       \
+  }                                                                           \
+  while (0)
+
+#define uv__has_ref(h)                                                        \
+  (((h)->flags & UV__HANDLE_REF) != 0)
+
+#if defined(_WIN32)
+# define uv__handle_platform_init(h) ((h)->u.fd = -1)
+#else
+# define uv__handle_platform_init(h) ((h)->next_closing = NULL)
+#endif
+
+#define uv__handle_init(loop_, h, type_)                                      \
+  do {                                                                        \
+    (h)->loop = (loop_);                                                      \
+    (h)->type = (type_);                                                      \
+    (h)->flags = UV__HANDLE_REF;  /* Ref the loop when active. */             \
+    QUEUE_INSERT_TAIL(&(loop_)->handle_queue, &(h)->handle_queue);            \
+    uv__handle_platform_init(h);                                              \
+  }                                                                           \
+  while (0)
+
+
+/* Allocator prototypes */
+void *uv__calloc(size_t count, size_t size);
+char *uv__strdup(const char* s);
+char *uv__strndup(const char* s, size_t n);
+void* uv__malloc(size_t size);
+void uv__free(void* ptr);
+void* uv__realloc(void* ptr, size_t size);
+
+#endif /* UV_COMMON_H_ */

+ 45 - 0
Utilities/cmlibuv/src/version.c

@@ -0,0 +1,45 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+
+#define UV_STRINGIFY(v) UV_STRINGIFY_HELPER(v)
+#define UV_STRINGIFY_HELPER(v) #v
+
+#define UV_VERSION_STRING_BASE  UV_STRINGIFY(UV_VERSION_MAJOR) "." \
+                                UV_STRINGIFY(UV_VERSION_MINOR) "." \
+                                UV_STRINGIFY(UV_VERSION_PATCH)
+
+#if UV_VERSION_IS_RELEASE
+# define UV_VERSION_STRING  UV_VERSION_STRING_BASE
+#else
+# define UV_VERSION_STRING  UV_VERSION_STRING_BASE "-" UV_VERSION_SUFFIX
+#endif
+
+
+unsigned int uv_version(void) {
+  return UV_VERSION_HEX;
+}
+
+
+const char* uv_version_string(void) {
+  return UV_VERSION_STRING;
+}

+ 99 - 0
Utilities/cmlibuv/src/win/async.c

@@ -0,0 +1,99 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+
+#include "uv.h"
+#include "internal.h"
+#include "atomicops-inl.h"
+#include "handle-inl.h"
+#include "req-inl.h"
+
+
+void uv_async_endgame(uv_loop_t* loop, uv_async_t* handle) {
+  if (handle->flags & UV__HANDLE_CLOSING &&
+      !handle->async_sent) {
+    assert(!(handle->flags & UV_HANDLE_CLOSED));
+    uv__handle_close(handle);
+  }
+}
+
+
+int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
+  uv_req_t* req;
+
+  uv__handle_init(loop, (uv_handle_t*) handle, UV_ASYNC);
+  handle->async_sent = 0;
+  handle->async_cb = async_cb;
+
+  req = &handle->async_req;
+  uv_req_init(loop, req);
+  req->type = UV_WAKEUP;
+  req->data = handle;
+
+  uv__handle_start(handle);
+
+  return 0;
+}
+
+
+void uv_async_close(uv_loop_t* loop, uv_async_t* handle) {
+  if (!((uv_async_t*)handle)->async_sent) {
+    uv_want_endgame(loop, (uv_handle_t*) handle);
+  }
+
+  uv__handle_closing(handle);
+}
+
+
+int uv_async_send(uv_async_t* handle) {
+  uv_loop_t* loop = handle->loop;
+
+  if (handle->type != UV_ASYNC) {
+    /* Can't set errno because that's not thread-safe. */
+    return -1;
+  }
+
+  /* The user should make sure never to call uv_async_send on a closing
+   * or closed handle. */
+  assert(!(handle->flags & UV__HANDLE_CLOSING));
+
+  if (!uv__atomic_exchange_set(&handle->async_sent)) {
+    POST_COMPLETION_FOR_REQ(loop, &handle->async_req);
+  }
+
+  return 0;
+}
+
+
+void uv_process_async_wakeup_req(uv_loop_t* loop, uv_async_t* handle,
+    uv_req_t* req) {
+  assert(handle->type == UV_ASYNC);
+  assert(req->type == UV_WAKEUP);
+
+  handle->async_sent = 0;
+
+  if (handle->flags & UV__HANDLE_CLOSING) {
+    uv_want_endgame(loop, (uv_handle_t*)handle);
+  } else if (handle->async_cb != NULL) {
+    handle->async_cb(handle);
+  }
+}
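
A minimal cross-thread wakeup sketch for the async handle implemented above (not part of the imported sources): uv_async_send is the one libuv call that may safely be made from another thread.

#include <stdio.h>
#include <uv.h>

static uv_async_t async_handle;

static void on_async(uv_async_t* handle) {
  printf("woken up on the loop thread\n");
  uv_close((uv_handle_t*) handle, NULL);  /* let the loop finish */
}

static void worker(void* arg) {
  uv_async_send(&async_handle);  /* thread-safe wakeup */
}

int main(void) {
  uv_thread_t tid;
  uv_async_init(uv_default_loop(), &async_handle, on_async);
  uv_thread_create(&tid, worker, NULL);
  uv_run(uv_default_loop(), UV_RUN_DEFAULT);
  return uv_thread_join(&tid);
}
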

+ 56 - 0
Utilities/cmlibuv/src/win/atomicops-inl.h

@@ -0,0 +1,56 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_WIN_ATOMICOPS_INL_H_
+#define UV_WIN_ATOMICOPS_INL_H_
+
+#include "uv.h"
+
+
+/* Atomic set operation on char */
+#ifdef _MSC_VER /* MSVC */
+
+/* _InterlockedOr8 is supported by MSVC on x32 and x64. It is slightly less */
+/* efficient than InterlockedExchange, but InterlockedExchange8 does not */
+/* exist, and interlocked operations on larger targets might require the */
+/* target to be aligned. */
+#pragma intrinsic(_InterlockedOr8)
+
+static char __declspec(inline) uv__atomic_exchange_set(char volatile* target) {
+  return _InterlockedOr8(target, 1);
+}
+
+#else /* GCC */
+
+/* Mingw-32 version, hopefully this works for 64-bit gcc as well. */
+static inline char uv__atomic_exchange_set(char volatile* target) {
+  const char one = 1;
+  char old_value;
+  __asm__ __volatile__ ("lock xchgb %0, %1\n\t"
+                        : "=r"(old_value), "=m"(*target)
+                        : "0"(one), "m"(*target)
+                        : "memory");
+  return old_value;
+}
+
+#endif
+
+#endif /* UV_WIN_ATOMICOPS_INL_H_ */

+ 602 - 0
Utilities/cmlibuv/src/win/core.c

@@ -0,0 +1,602 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#if defined(_MSC_VER) || defined(__MINGW64_VERSION_MAJOR)
+#include <crtdbg.h>
+#endif
+
+#include "uv.h"
+#include "internal.h"
+#include "queue.h"
+#include "handle-inl.h"
+#include "req-inl.h"
+
+
+static uv_loop_t default_loop_struct;
+static uv_loop_t* default_loop_ptr;
+
+/* uv_once initialization guards */
+static uv_once_t uv_init_guard_ = UV_ONCE_INIT;
+
+
+#if defined(_DEBUG) && (defined(_MSC_VER) || defined(__MINGW64_VERSION_MAJOR))
+/* Our crt debug report handler allows us to temporarily disable asserts
+ * just for the current thread.
+ */
+
+UV_THREAD_LOCAL int uv__crt_assert_enabled = TRUE;
+
+static int uv__crt_dbg_report_handler(int report_type, char *message, int *ret_val) {
+  if (uv__crt_assert_enabled || report_type != _CRT_ASSERT)
+    return FALSE;
+
+  if (ret_val) {
+    /* Set ret_val to 0 to continue with normal execution.
+     * Set ret_val to 1 to trigger a breakpoint.
+    */
+
+    if(IsDebuggerPresent())
+      *ret_val = 1;
+    else
+      *ret_val = 0;
+  }
+
+  /* Don't call _CrtDbgReport. */
+  return TRUE;
+}
+#else
+UV_THREAD_LOCAL int uv__crt_assert_enabled = FALSE;
+#endif
+
+
+#if !defined(__MINGW32__) || __MSVCRT_VERSION__ >= 0x800
+static void uv__crt_invalid_parameter_handler(const wchar_t* expression,
+    const wchar_t* function, const wchar_t * file, unsigned int line,
+    uintptr_t reserved) {
+  /* No-op. */
+}
+#endif
+
+static uv_loop_t** uv__loops;
+static int uv__loops_size;
+static int uv__loops_capacity;
+#define UV__LOOPS_CHUNK_SIZE 8
+static uv_mutex_t uv__loops_lock;
+
+static void uv__loops_init() {
+  uv_mutex_init(&uv__loops_lock);
+  uv__loops = uv__calloc(UV__LOOPS_CHUNK_SIZE, sizeof(uv_loop_t*));
+  if (!uv__loops)
+    uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+  uv__loops_size = 0;
+  uv__loops_capacity = UV__LOOPS_CHUNK_SIZE;
+}
+
+static int uv__loops_add(uv_loop_t* loop) {
+  uv_loop_t** new_loops;
+  int new_capacity, i;
+
+  uv_mutex_lock(&uv__loops_lock);
+
+  if (uv__loops_size == uv__loops_capacity) {
+    new_capacity = uv__loops_capacity + UV__LOOPS_CHUNK_SIZE;
+    new_loops = uv__realloc(uv__loops, sizeof(uv_loop_t*) * new_capacity);
+    if (!new_loops)
+      goto failed_loops_realloc;
+    uv__loops = new_loops;
+    for (i = uv__loops_capacity; i < new_capacity; ++i)
+      uv__loops[i] = NULL;
+    uv__loops_capacity = new_capacity;
+  }
+  uv__loops[uv__loops_size] = loop;
+  ++uv__loops_size;
+
+  uv_mutex_unlock(&uv__loops_lock);
+  return 0;
+
+failed_loops_realloc:
+  uv_mutex_unlock(&uv__loops_lock);
+  return ERROR_OUTOFMEMORY;
+}
+
+static void uv__loops_remove(uv_loop_t* loop) {
+  int loop_index;
+  int smaller_capacity;
+  uv_loop_t** new_loops;
+
+  uv_mutex_lock(&uv__loops_lock);
+
+  for (loop_index = 0; loop_index < uv__loops_size; ++loop_index) {
+    if (uv__loops[loop_index] == loop)
+      break;
+  }
+  /* If loop was not found, ignore */
+  if (loop_index == uv__loops_size)
+    goto loop_removed;
+
+  uv__loops[loop_index] = uv__loops[uv__loops_size - 1];
+  uv__loops[uv__loops_size - 1] = NULL;
+  --uv__loops_size;
+
+  /* If we didn't grow too big, skip downsizing */
+  if (uv__loops_capacity < 4 * UV__LOOPS_CHUNK_SIZE)
+    goto loop_removed;
+
+  /* Downsize only if more than half of the buffer is free */
+  smaller_capacity = uv__loops_capacity / 2;
+  if (uv__loops_size >= smaller_capacity)
+    goto loop_removed;
+  new_loops = uv__realloc(uv__loops, sizeof(uv_loop_t*) * smaller_capacity);
+  if (!new_loops)
+    goto loop_removed;
+  uv__loops = new_loops;
+  uv__loops_capacity = smaller_capacity;
+
+loop_removed:
+  uv_mutex_unlock(&uv__loops_lock);
+}
+
+void uv__wake_all_loops() {
+  int i;
+  uv_loop_t* loop;
+
+  uv_mutex_lock(&uv__loops_lock);
+  for (i = 0; i < uv__loops_size; ++i) {
+    loop = uv__loops[i];
+    assert(loop);
+    if (loop->iocp != INVALID_HANDLE_VALUE)
+      PostQueuedCompletionStatus(loop->iocp, 0, 0, NULL);
+  }
+  uv_mutex_unlock(&uv__loops_lock);
+}
+
+static void uv_init(void) {
+  /* Tell Windows that we will handle critical errors. */
+  SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX |
+               SEM_NOOPENFILEERRORBOX);
+
+  /* Tell the CRT to not exit the application when an invalid parameter is
+   * passed. The main issue is that invalid FDs will trigger this behavior.
+   */
+#if !defined(__MINGW32__) || __MSVCRT_VERSION__ >= 0x800
+  _set_invalid_parameter_handler(uv__crt_invalid_parameter_handler);
+#endif
+
+  /* We also need to set up our debug report handler because some CRT
+   * functions (e.g. _get_osfhandle) raise an assert when called with invalid
+   * FDs even though they return the proper error code in the release build.
+   */
+#if defined(_DEBUG) && (defined(_MSC_VER) || defined(__MINGW64_VERSION_MAJOR))
+  _CrtSetReportHook(uv__crt_dbg_report_handler);
+#endif
+
+  /* Initialize tracking of all uv loops */
+  uv__loops_init();
+
+  /* Fetch winapi function pointers. This must be done first because other
+   * initialization code might need these function pointers to be loaded.
+   */
+  uv_winapi_init();
+
+  /* Initialize winsock */
+  uv_winsock_init();
+
+  /* Initialize FS */
+  uv_fs_init();
+
+  /* Initialize signal stuff */
+  uv_signals_init();
+
+  /* Initialize console */
+  uv_console_init();
+
+  /* Initialize utilities */
+  uv__util_init();
+
+  /* Initialize system wakeup detection */
+  uv__init_detect_system_wakeup();
+}
+
+
+int uv_loop_init(uv_loop_t* loop) {
+  int err;
+
+  /* Initialize libuv itself first */
+  uv__once_init();
+
+  /* Create an I/O completion port */
+  loop->iocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 1);
+  if (loop->iocp == NULL)
+    return uv_translate_sys_error(GetLastError());
+
+  /* To prevent uninitialized memory access, loop->time must be initialized
+   * to zero before calling uv_update_time for the first time.
+   */
+  loop->time = 0;
+  uv_update_time(loop);
+
+  QUEUE_INIT(&loop->wq);
+  QUEUE_INIT(&loop->handle_queue);
+  QUEUE_INIT(&loop->active_reqs);
+  loop->active_handles = 0;
+
+  loop->pending_reqs_tail = NULL;
+
+  loop->endgame_handles = NULL;
+
+  RB_INIT(&loop->timers);
+
+  loop->check_handles = NULL;
+  loop->prepare_handles = NULL;
+  loop->idle_handles = NULL;
+
+  loop->next_prepare_handle = NULL;
+  loop->next_check_handle = NULL;
+  loop->next_idle_handle = NULL;
+
+  memset(&loop->poll_peer_sockets, 0, sizeof loop->poll_peer_sockets);
+
+  loop->active_tcp_streams = 0;
+  loop->active_udp_streams = 0;
+
+  loop->timer_counter = 0;
+  loop->stop_flag = 0;
+
+  err = uv_mutex_init(&loop->wq_mutex);
+  if (err)
+    goto fail_mutex_init;
+
+  err = uv_async_init(loop, &loop->wq_async, uv__work_done);
+  if (err)
+    goto fail_async_init;
+
+  uv__handle_unref(&loop->wq_async);
+  loop->wq_async.flags |= UV__HANDLE_INTERNAL;
+
+  err = uv__loops_add(loop);
+  if (err)
+    goto fail_async_init;
+
+  return 0;
+
+fail_async_init:
+  uv_mutex_destroy(&loop->wq_mutex);
+
+fail_mutex_init:
+  CloseHandle(loop->iocp);
+  loop->iocp = INVALID_HANDLE_VALUE;
+
+  return err;
+}
+
+
+void uv__once_init(void) {
+  uv_once(&uv_init_guard_, uv_init);
+}
+
+
+void uv__loop_close(uv_loop_t* loop) {
+  size_t i;
+
+  uv__loops_remove(loop);
+
+  /* close the async handle without needing an extra loop iteration */
+  assert(!loop->wq_async.async_sent);
+  loop->wq_async.close_cb = NULL;
+  uv__handle_closing(&loop->wq_async);
+  uv__handle_close(&loop->wq_async);
+
+  for (i = 0; i < ARRAY_SIZE(loop->poll_peer_sockets); i++) {
+    SOCKET sock = loop->poll_peer_sockets[i];
+    if (sock != 0 && sock != INVALID_SOCKET)
+      closesocket(sock);
+  }
+
+  uv_mutex_lock(&loop->wq_mutex);
+  assert(QUEUE_EMPTY(&loop->wq) && "thread pool work queue not empty!");
+  assert(!uv__has_active_reqs(loop));
+  uv_mutex_unlock(&loop->wq_mutex);
+  uv_mutex_destroy(&loop->wq_mutex);
+
+  CloseHandle(loop->iocp);
+}
+
+
+int uv__loop_configure(uv_loop_t* loop, uv_loop_option option, va_list ap) {
+  return UV_ENOSYS;
+}
+
+
+int uv_backend_fd(const uv_loop_t* loop) {
+  return -1;
+}
+
+
+int uv_backend_timeout(const uv_loop_t* loop) {
+  if (loop->stop_flag != 0)
+    return 0;
+
+  if (!uv__has_active_handles(loop) && !uv__has_active_reqs(loop))
+    return 0;
+
+  if (loop->pending_reqs_tail)
+    return 0;
+
+  if (loop->endgame_handles)
+    return 0;
+
+  if (loop->idle_handles)
+    return 0;
+
+  return uv__next_timeout(loop);
+}
+
+
+static void uv_poll(uv_loop_t* loop, DWORD timeout) {
+  DWORD bytes;
+  ULONG_PTR key;
+  OVERLAPPED* overlapped;
+  uv_req_t* req;
+  int repeat;
+  uint64_t timeout_time;
+
+  timeout_time = loop->time + timeout;
+
+  for (repeat = 0; ; repeat++) {
+    GetQueuedCompletionStatus(loop->iocp,
+                              &bytes,
+                              &key,
+                              &overlapped,
+                              timeout);
+
+    if (overlapped) {
+      /* Package was dequeued */
+      req = uv_overlapped_to_req(overlapped);
+      uv_insert_pending_req(loop, req);
+
+      /* Some time might have passed waiting for I/O,
+       * so update the loop time here.
+       */
+      uv_update_time(loop);
+    } else if (GetLastError() != WAIT_TIMEOUT) {
+      /* Serious error */
+      uv_fatal_error(GetLastError(), "GetQueuedCompletionStatus");
+    } else if (timeout > 0) {
+      /* GetQueuedCompletionStatus can occasionally return a little early.
+       * Make sure that the desired timeout target time is reached.
+       */
+      uv_update_time(loop);
+      if (timeout_time > loop->time) {
+        timeout = (DWORD)(timeout_time - loop->time);
+        /* The first call to GetQueuedCompletionStatus should return very
+         * close to the target time and the second should reach it, but
+         * this is not stated in the documentation. To make sure a busy
+         * loop cannot happen, the timeout is increased exponentially
+         * starting on the third round.
+         */
+        timeout += repeat ? (1 << (repeat - 1)) : 0;
+        continue;
+      }
+    }
+    break;
+  }
+}
+
+
+static void uv_poll_ex(uv_loop_t* loop, DWORD timeout) {
+  BOOL success;
+  uv_req_t* req;
+  OVERLAPPED_ENTRY overlappeds[128];
+  ULONG count;
+  ULONG i;
+  int repeat;
+  uint64_t timeout_time;
+
+  timeout_time = loop->time + timeout;
+
+  for (repeat = 0; ; repeat++) {
+    success = pGetQueuedCompletionStatusEx(loop->iocp,
+                                           overlappeds,
+                                           ARRAY_SIZE(overlappeds),
+                                           &count,
+                                           timeout,
+                                           FALSE);
+
+    if (success) {
+      for (i = 0; i < count; i++) {
+        /* Package was dequeued, but see if it is not an empty package
+         * meant only to wake us up.
+         */
+        if (overlappeds[i].lpOverlapped) {
+          req = uv_overlapped_to_req(overlappeds[i].lpOverlapped);
+          uv_insert_pending_req(loop, req);
+        }
+      }
+
+      /* Some time might have passed waiting for I/O,
+       * so update the loop time here.
+       */
+      uv_update_time(loop);
+    } else if (GetLastError() != WAIT_TIMEOUT) {
+      /* Serious error */
+      uv_fatal_error(GetLastError(), "GetQueuedCompletionStatusEx");
+    } else if (timeout > 0) {
+      /* GetQueuedCompletionStatus can occasionally return a little early.
+       * Make sure that the desired timeout target time is reached.
+       */
+      uv_update_time(loop);
+      if (timeout_time > loop->time) {
+        timeout = (DWORD)(timeout_time - loop->time);
+        /* The first call to GetQueuedCompletionStatus should return very
+         * close to the target time and the second should reach it, but
+         * this is not stated in the documentation. To make sure a busy
+         * loop cannot happen, the timeout is increased exponentially
+         * starting on the third round.
+         */
+        timeout += repeat ? (1 << (repeat - 1)) : 0;
+        continue;
+      }
+    }
+    break;
+  }
+}
+
+
+static int uv__loop_alive(const uv_loop_t* loop) {
+  return loop->active_handles > 0 ||
+         !QUEUE_EMPTY(&loop->active_reqs) ||
+         loop->endgame_handles != NULL;
+}
+
+
+int uv_loop_alive(const uv_loop_t* loop) {
+    return uv__loop_alive(loop);
+}
+
+
+int uv_run(uv_loop_t *loop, uv_run_mode mode) {
+  DWORD timeout;
+  int r;
+  int ran_pending;
+  void (*poll)(uv_loop_t* loop, DWORD timeout);
+
+  if (pGetQueuedCompletionStatusEx)
+    poll = &uv_poll_ex;
+  else
+    poll = &uv_poll;
+
+  r = uv__loop_alive(loop);
+  if (!r)
+    uv_update_time(loop);
+
+  while (r != 0 && loop->stop_flag == 0) {
+    uv_update_time(loop);
+    uv_process_timers(loop);
+
+    ran_pending = uv_process_reqs(loop);
+    uv_idle_invoke(loop);
+    uv_prepare_invoke(loop);
+
+    timeout = 0;
+    if ((mode == UV_RUN_ONCE && !ran_pending) || mode == UV_RUN_DEFAULT)
+      timeout = uv_backend_timeout(loop);
+
+    (*poll)(loop, timeout);
+
+    uv_check_invoke(loop);
+    uv_process_endgames(loop);
+
+    if (mode == UV_RUN_ONCE) {
+      /* UV_RUN_ONCE implies forward progress: at least one callback must have
+       * been invoked when it returns. uv__io_poll() can return without doing
+       * I/O (meaning: no callbacks) when its timeout expires - which means we
+       * have pending timers that satisfy the forward progress constraint.
+       *
+       * UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from
+       * the check.
+       */
+      uv_process_timers(loop);
+    }
+
+    r = uv__loop_alive(loop);
+    if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT)
+      break;
+  }
+
+  /* The if statement lets the compiler compile it to a conditional store.
+   * Avoids dirtying a cache line.
+   */
+  if (loop->stop_flag != 0)
+    loop->stop_flag = 0;
+
+  return r;
+}
+
+
+int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) {
+  uv_os_fd_t fd_out;
+
+  switch (handle->type) {
+  case UV_TCP:
+    fd_out = (uv_os_fd_t)((uv_tcp_t*) handle)->socket;
+    break;
+
+  case UV_NAMED_PIPE:
+    fd_out = ((uv_pipe_t*) handle)->handle;
+    break;
+
+  case UV_TTY:
+    fd_out = ((uv_tty_t*) handle)->handle;
+    break;
+
+  case UV_UDP:
+    fd_out = (uv_os_fd_t)((uv_udp_t*) handle)->socket;
+    break;
+
+  case UV_POLL:
+    fd_out = (uv_os_fd_t)((uv_poll_t*) handle)->socket;
+    break;
+
+  default:
+    return UV_EINVAL;
+  }
+
+  if (uv_is_closing(handle) || fd_out == INVALID_HANDLE_VALUE)
+    return UV_EBADF;
+
+  *fd = fd_out;
+  return 0;
+}
+
+
+int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value) {
+  int r;
+  int len;
+  SOCKET socket;
+
+  if (handle == NULL || value == NULL)
+    return UV_EINVAL;
+
+  if (handle->type == UV_TCP)
+    socket = ((uv_tcp_t*) handle)->socket;
+  else if (handle->type == UV_UDP)
+    socket = ((uv_udp_t*) handle)->socket;
+  else
+    return UV_ENOTSUP;
+
+  len = sizeof(*value);
+
+  if (*value == 0)
+    r = getsockopt(socket, SOL_SOCKET, optname, (char*) value, &len);
+  else
+    r = setsockopt(socket, SOL_SOCKET, optname, (const char*) value, len);
+
+  if (r == SOCKET_ERROR)
+    return uv_translate_sys_error(WSAGetLastError());
+
+  return 0;
+}
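
The zero-means-query convention visible above is what uv_recv_buffer_size and uv_send_buffer_size rely on. A sketch (illustration only), assuming tcp is a uv_tcp_t whose socket already exists, for example after uv_tcp_bind:

#include <stdio.h>
#include <uv.h>

static void tune_recv_buffer(uv_tcp_t* tcp) {
  int value = 0;  /* zero: ask for the current size */
  if (uv_recv_buffer_size((uv_handle_t*) tcp, &value) == 0)
    printf("current SO_RCVBUF: %d\n", value);

  value = 64 * 1024;  /* non-zero: request a new size */
  uv_recv_buffer_size((uv_handle_t*) tcp, &value);
}
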

+ 35 - 0
Utilities/cmlibuv/src/win/detect-wakeup.c

@@ -0,0 +1,35 @@
+#include "uv.h"
+#include "internal.h"
+#include "winapi.h"
+
+static void uv__register_system_resume_callback();
+
+void uv__init_detect_system_wakeup() {
+  /* Try registering system power event callback. This is the cleanest
+   * method, but it will only work on Win8 and above.
+   */
+  uv__register_system_resume_callback();
+}
+
+static ULONG CALLBACK uv__system_resume_callback(PVOID Context,
+                                                 ULONG Type,
+                                                 PVOID Setting) {
+  if (Type == PBT_APMRESUMESUSPEND || Type == PBT_APMRESUMEAUTOMATIC)
+    uv__wake_all_loops();
+
+  return 0;
+}
+
+static void uv__register_system_resume_callback() {
+  _DEVICE_NOTIFY_SUBSCRIBE_PARAMETERS recipient;
+  _HPOWERNOTIFY registration_handle;
+
+  if (pPowerRegisterSuspendResumeNotification == NULL)
+    return;
+
+  recipient.Callback = uv__system_resume_callback;
+  recipient.Context = NULL;
+  (*pPowerRegisterSuspendResumeNotification)(DEVICE_NOTIFY_CALLBACK,
+                                             &recipient,
+                                             &registration_handle);
+}

+ 118 - 0
Utilities/cmlibuv/src/win/dl.c

@@ -0,0 +1,118 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+static int uv__dlerror(uv_lib_t* lib, int errorno);
+
+
+int uv_dlopen(const char* filename, uv_lib_t* lib) {
+  WCHAR filename_w[32768];
+
+  lib->handle = NULL;
+  lib->errmsg = NULL;
+
+  if (!MultiByteToWideChar(CP_UTF8,
+                           0,
+                           filename,
+                           -1,
+                           filename_w,
+                           ARRAY_SIZE(filename_w))) {
+    return uv__dlerror(lib, GetLastError());
+  }
+
+  lib->handle = LoadLibraryExW(filename_w, NULL, LOAD_WITH_ALTERED_SEARCH_PATH);
+  if (lib->handle == NULL) {
+    return uv__dlerror(lib, GetLastError());
+  }
+
+  return 0;
+}
+
+
+void uv_dlclose(uv_lib_t* lib) {
+  if (lib->errmsg) {
+    LocalFree((void*)lib->errmsg);
+    lib->errmsg = NULL;
+  }
+
+  if (lib->handle) {
+    /* Ignore errors. No good way to signal them without leaking memory. */
+    FreeLibrary(lib->handle);
+    lib->handle = NULL;
+  }
+}
+
+
+int uv_dlsym(uv_lib_t* lib, const char* name, void** ptr) {
+  *ptr = (void*) GetProcAddress(lib->handle, name);
+  return uv__dlerror(lib, *ptr ? 0 : GetLastError());
+}
+
+
+const char* uv_dlerror(const uv_lib_t* lib) {
+  return lib->errmsg ? lib->errmsg : "no error";
+}
+
+
+static void uv__format_fallback_error(uv_lib_t* lib, int errorno) {
+  DWORD_PTR args[1] = { (DWORD_PTR) errorno };
+  LPSTR fallback_error = "error: %1!d!";
+
+  FormatMessageA(FORMAT_MESSAGE_FROM_STRING |
+                 FORMAT_MESSAGE_ARGUMENT_ARRAY |
+                 FORMAT_MESSAGE_ALLOCATE_BUFFER,
+                 fallback_error, 0, 0,
+                 (LPSTR) &lib->errmsg,
+                 0, (va_list*) args);
+}
+
+
+
+static int uv__dlerror(uv_lib_t* lib, int errorno) {
+  DWORD res;
+
+  if (lib->errmsg) {
+    LocalFree((void*)lib->errmsg);
+    lib->errmsg = NULL;
+  }
+
+  if (errorno) {
+    res = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER |
+                         FORMAT_MESSAGE_FROM_SYSTEM |
+                         FORMAT_MESSAGE_IGNORE_INSERTS, NULL, errorno,
+                         MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US),
+                         (LPSTR) &lib->errmsg, 0, NULL);
+    if (!res && GetLastError() == ERROR_MUI_FILE_NOT_FOUND) {
+      res = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER |
+                           FORMAT_MESSAGE_FROM_SYSTEM |
+                           FORMAT_MESSAGE_IGNORE_INSERTS, NULL, errorno,
+                           0, (LPSTR) &lib->errmsg, 0, NULL);
+    }
+
+    if (!res) {
+      uv__format_fallback_error(lib, errorno);
+    }
+  }
+
+  return errorno ? -1 : 0;
+}
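
A usage sketch of the dynamic-loading wrappers above (not part of the imported sources); "plugin.dll" and "plugin_init" are placeholder names:

#include <stdio.h>
#include <uv.h>

static int load_plugin(void) {
  uv_lib_t lib;
  void (*init_fn)(void);

  if (uv_dlopen("plugin.dll", &lib) != 0) {
    fprintf(stderr, "uv_dlopen: %s\n", uv_dlerror(&lib));
    return -1;
  }
  if (uv_dlsym(&lib, "plugin_init", (void**) &init_fn) != 0) {
    fprintf(stderr, "uv_dlsym: %s\n", uv_dlerror(&lib));
    uv_dlclose(&lib);
    return -1;
  }
  init_fn();
  uv_dlclose(&lib);
  return 0;
}
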

+ 170 - 0
Utilities/cmlibuv/src/win/error.c

@@ -0,0 +1,170 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include "uv.h"
+#include "internal.h"
+
+
+/*
+ * Display an error message and abort the event loop.
+ */
+void uv_fatal_error(const int errorno, const char* syscall) {
+  char* buf = NULL;
+  const char* errmsg;
+
+  FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM |
+      FORMAT_MESSAGE_IGNORE_INSERTS, NULL, errorno,
+      MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL);
+
+  if (buf) {
+    errmsg = buf;
+  } else {
+    errmsg = "Unknown error";
+  }
+
+  /* FormatMessage messages include a newline character already, */
+  /* so don't add another. */
+  if (syscall) {
+    fprintf(stderr, "%s: (%d) %s", syscall, errorno, errmsg);
+  } else {
+    fprintf(stderr, "(%d) %s", errorno, errmsg);
+  }
+
+  if (buf) {
+    LocalFree(buf);
+  }
+
+  *((char*)NULL) = 0xff; /* Force debug break */
+  abort();
+}
+
+
+int uv_translate_sys_error(int sys_errno) {
+  if (sys_errno <= 0) {
+    return sys_errno;  /* If < 0 then it's already a libuv error. */
+  }
+
+  switch (sys_errno) {
+    case ERROR_NOACCESS:                    return UV_EACCES;
+    case WSAEACCES:                         return UV_EACCES;
+    case ERROR_ADDRESS_ALREADY_ASSOCIATED:  return UV_EADDRINUSE;
+    case WSAEADDRINUSE:                     return UV_EADDRINUSE;
+    case WSAEADDRNOTAVAIL:                  return UV_EADDRNOTAVAIL;
+    case WSAEAFNOSUPPORT:                   return UV_EAFNOSUPPORT;
+    case WSAEWOULDBLOCK:                    return UV_EAGAIN;
+    case WSAEALREADY:                       return UV_EALREADY;
+    case ERROR_INVALID_FLAGS:               return UV_EBADF;
+    case ERROR_INVALID_HANDLE:              return UV_EBADF;
+    case ERROR_LOCK_VIOLATION:              return UV_EBUSY;
+    case ERROR_PIPE_BUSY:                   return UV_EBUSY;
+    case ERROR_SHARING_VIOLATION:           return UV_EBUSY;
+    case ERROR_OPERATION_ABORTED:           return UV_ECANCELED;
+    case WSAEINTR:                          return UV_ECANCELED;
+    case ERROR_NO_UNICODE_TRANSLATION:      return UV_ECHARSET;
+    case ERROR_CONNECTION_ABORTED:          return UV_ECONNABORTED;
+    case WSAECONNABORTED:                   return UV_ECONNABORTED;
+    case ERROR_CONNECTION_REFUSED:          return UV_ECONNREFUSED;
+    case WSAECONNREFUSED:                   return UV_ECONNREFUSED;
+    case ERROR_NETNAME_DELETED:             return UV_ECONNRESET;
+    case WSAECONNRESET:                     return UV_ECONNRESET;
+    case ERROR_ALREADY_EXISTS:              return UV_EEXIST;
+    case ERROR_FILE_EXISTS:                 return UV_EEXIST;
+    case ERROR_BUFFER_OVERFLOW:             return UV_EFAULT;
+    case WSAEFAULT:                         return UV_EFAULT;
+    case ERROR_HOST_UNREACHABLE:            return UV_EHOSTUNREACH;
+    case WSAEHOSTUNREACH:                   return UV_EHOSTUNREACH;
+    case ERROR_INSUFFICIENT_BUFFER:         return UV_EINVAL;
+    case ERROR_INVALID_DATA:                return UV_EINVAL;
+    case ERROR_INVALID_PARAMETER:           return UV_EINVAL;
+    case ERROR_SYMLINK_NOT_SUPPORTED:       return UV_EINVAL;
+    case WSAEINVAL:                         return UV_EINVAL;
+    case WSAEPFNOSUPPORT:                   return UV_EINVAL;
+    case WSAESOCKTNOSUPPORT:                return UV_EINVAL;
+    case ERROR_BEGINNING_OF_MEDIA:          return UV_EIO;
+    case ERROR_BUS_RESET:                   return UV_EIO;
+    case ERROR_CRC:                         return UV_EIO;
+    case ERROR_DEVICE_DOOR_OPEN:            return UV_EIO;
+    case ERROR_DEVICE_REQUIRES_CLEANING:    return UV_EIO;
+    case ERROR_DISK_CORRUPT:                return UV_EIO;
+    case ERROR_EOM_OVERFLOW:                return UV_EIO;
+    case ERROR_FILEMARK_DETECTED:           return UV_EIO;
+    case ERROR_GEN_FAILURE:                 return UV_EIO;
+    case ERROR_INVALID_BLOCK_LENGTH:        return UV_EIO;
+    case ERROR_IO_DEVICE:                   return UV_EIO;
+    case ERROR_NO_DATA_DETECTED:            return UV_EIO;
+    case ERROR_NO_SIGNAL_SENT:              return UV_EIO;
+    case ERROR_OPEN_FAILED:                 return UV_EIO;
+    case ERROR_SETMARK_DETECTED:            return UV_EIO;
+    case ERROR_SIGNAL_REFUSED:              return UV_EIO;
+    case WSAEISCONN:                        return UV_EISCONN;
+    case ERROR_CANT_RESOLVE_FILENAME:       return UV_ELOOP;
+    case ERROR_TOO_MANY_OPEN_FILES:         return UV_EMFILE;
+    case WSAEMFILE:                         return UV_EMFILE;
+    case WSAEMSGSIZE:                       return UV_EMSGSIZE;
+    case ERROR_FILENAME_EXCED_RANGE:        return UV_ENAMETOOLONG;
+    case ERROR_NETWORK_UNREACHABLE:         return UV_ENETUNREACH;
+    case WSAENETUNREACH:                    return UV_ENETUNREACH;
+    case WSAENOBUFS:                        return UV_ENOBUFS;
+    case ERROR_BAD_PATHNAME:                return UV_ENOENT;
+    case ERROR_DIRECTORY:                   return UV_ENOENT;
+    case ERROR_FILE_NOT_FOUND:              return UV_ENOENT;
+    case ERROR_INVALID_NAME:                return UV_ENOENT;
+    case ERROR_INVALID_DRIVE:               return UV_ENOENT;
+    case ERROR_INVALID_REPARSE_DATA:        return UV_ENOENT;
+    case ERROR_MOD_NOT_FOUND:               return UV_ENOENT;
+    case ERROR_PATH_NOT_FOUND:              return UV_ENOENT;
+    case WSAHOST_NOT_FOUND:                 return UV_ENOENT;
+    case WSANO_DATA:                        return UV_ENOENT;
+    case ERROR_NOT_ENOUGH_MEMORY:           return UV_ENOMEM;
+    case ERROR_OUTOFMEMORY:                 return UV_ENOMEM;
+    case ERROR_CANNOT_MAKE:                 return UV_ENOSPC;
+    case ERROR_DISK_FULL:                   return UV_ENOSPC;
+    case ERROR_EA_TABLE_FULL:               return UV_ENOSPC;
+    case ERROR_END_OF_MEDIA:                return UV_ENOSPC;
+    case ERROR_HANDLE_DISK_FULL:            return UV_ENOSPC;
+    case ERROR_NOT_CONNECTED:               return UV_ENOTCONN;
+    case WSAENOTCONN:                       return UV_ENOTCONN;
+    case ERROR_DIR_NOT_EMPTY:               return UV_ENOTEMPTY;
+    case WSAENOTSOCK:                       return UV_ENOTSOCK;
+    case ERROR_NOT_SUPPORTED:               return UV_ENOTSUP;
+    case ERROR_BROKEN_PIPE:                 return UV_EOF;
+    case ERROR_ACCESS_DENIED:               return UV_EPERM;
+    case ERROR_PRIVILEGE_NOT_HELD:          return UV_EPERM;
+    case ERROR_BAD_PIPE:                    return UV_EPIPE;
+    case ERROR_NO_DATA:                     return UV_EPIPE;
+    case ERROR_PIPE_NOT_CONNECTED:          return UV_EPIPE;
+    case WSAESHUTDOWN:                      return UV_EPIPE;
+    case WSAEPROTONOSUPPORT:                return UV_EPROTONOSUPPORT;
+    case ERROR_WRITE_PROTECT:               return UV_EROFS;
+    case ERROR_SEM_TIMEOUT:                 return UV_ETIMEDOUT;
+    case WSAETIMEDOUT:                      return UV_ETIMEDOUT;
+    case ERROR_NOT_SAME_DEVICE:             return UV_EXDEV;
+    case ERROR_INVALID_FUNCTION:            return UV_EISDIR;
+    case ERROR_META_EXPANSION_TOO_LONG:     return UV_E2BIG;
+    default:                                return UV_UNKNOWN;
+  }
+}
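
The switch above is how the Windows code maps GetLastError()/WSAGetLastError() values onto portable UV_E* codes. As a minimal sketch of the calling pattern used throughout these sources (the helper name and path handling below are illustrative, not part of the import), a failing Win32 call is translated like this:

    /* Illustrative only: delete a file and report failures as libuv codes. */
    static int example_unlink(const WCHAR* path) {
      if (!DeleteFileW(path)) {
        /* e.g. ERROR_FILE_NOT_FOUND is mapped to UV_ENOENT by the table above */
        return uv_translate_sys_error(GetLastError());
      }
      return 0;
    }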

+ 545 - 0
Utilities/cmlibuv/src/win/fs-event.c

@@ -0,0 +1,545 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "uv.h"
+#include "internal.h"
+#include "handle-inl.h"
+#include "req-inl.h"
+
+
+const unsigned int uv_directory_watcher_buffer_size = 4096;
+
+
+static void uv_fs_event_queue_readdirchanges(uv_loop_t* loop,
+    uv_fs_event_t* handle) {
+  assert(handle->dir_handle != INVALID_HANDLE_VALUE);
+  assert(!handle->req_pending);
+
+  memset(&(handle->req.u.io.overlapped), 0,
+         sizeof(handle->req.u.io.overlapped));
+  if (!ReadDirectoryChangesW(handle->dir_handle,
+                             handle->buffer,
+                             uv_directory_watcher_buffer_size,
+                             (handle->flags & UV_FS_EVENT_RECURSIVE) ? TRUE : FALSE,
+                             FILE_NOTIFY_CHANGE_FILE_NAME      |
+                               FILE_NOTIFY_CHANGE_DIR_NAME     |
+                               FILE_NOTIFY_CHANGE_ATTRIBUTES   |
+                               FILE_NOTIFY_CHANGE_SIZE         |
+                               FILE_NOTIFY_CHANGE_LAST_WRITE   |
+                               FILE_NOTIFY_CHANGE_LAST_ACCESS  |
+                               FILE_NOTIFY_CHANGE_CREATION     |
+                               FILE_NOTIFY_CHANGE_SECURITY,
+                             NULL,
+                             &handle->req.u.io.overlapped,
+                             NULL)) {
+    /* Make this req pending, reporting the error through the completion path. */
+    SET_REQ_ERROR(&handle->req, GetLastError());
+    uv_insert_pending_req(loop, (uv_req_t*)&handle->req);
+  }
+
+  handle->req_pending = 1;
+}
+
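+/* Compute the path of 'filename' relative to 'dir'.  For example, with
+ * filename L"C:\\proj\\src\\a.c" and dir L"C:\\proj" the result is
+ * L"src\\a.c".  The caller owns the returned allocation. */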
+static void uv_relative_path(const WCHAR* filename,
+                             const WCHAR* dir,
+                             WCHAR** relpath) {
+  size_t relpathlen;
+  size_t filenamelen = wcslen(filename);
+  size_t dirlen = wcslen(dir);
+  if (dirlen > 0 && dir[dirlen - 1] == '\\')
+    dirlen--;
+  relpathlen = filenamelen - dirlen - 1;
+  *relpath = uv__malloc((relpathlen + 1) * sizeof(WCHAR));
+  if (!*relpath)
+    uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+  wcsncpy(*relpath, filename + dirlen + 1, relpathlen);
+  (*relpath)[relpathlen] = L'\0';
+}
+
+static int uv_split_path(const WCHAR* filename, WCHAR** dir,
+    WCHAR** file) {
+  int len = wcslen(filename);
+  int i = len;
+  while (i > 0 && filename[--i] != '\\' && filename[i] != '/');
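+  /* After this loop, 'i' indexes the last '\\' or '/' in filename, or is 0
+   * when no separator was found. */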
+
+  if (i == 0) {
+    if (dir) {
+      *dir = (WCHAR*)uv__malloc((MAX_PATH + 1) * sizeof(WCHAR));
+      if (!*dir) {
+        uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+      }
+
+      if (!GetCurrentDirectoryW(MAX_PATH, *dir)) {
+        uv__free(*dir);
+        *dir = NULL;
+        return -1;
+      }
+    }
+
+    *file = wcsdup(filename);
+  } else {
+    if (dir) {
+      *dir = (WCHAR*)uv__malloc((i + 2) * sizeof(WCHAR));
+      if (!*dir) {
+        uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+      }
+      wcsncpy(*dir, filename, i + 1);
+      (*dir)[i + 1] = L'\0';
+    }
+
+    *file = (WCHAR*)uv__malloc((len - i) * sizeof(WCHAR));
+    if (!*file) {
+      uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+    }
+    wcsncpy(*file, filename + i + 1, len - i - 1);
+    (*file)[len - i - 1] = L'\0';
+  }
+
+  return 0;
+}
+
+
+int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
+  uv__handle_init(loop, (uv_handle_t*) handle, UV_FS_EVENT);
+  handle->dir_handle = INVALID_HANDLE_VALUE;
+  handle->buffer = NULL;
+  handle->req_pending = 0;
+  handle->filew = NULL;
+  handle->short_filew = NULL;
+  handle->dirw = NULL;
+
+  uv_req_init(loop, (uv_req_t*)&handle->req);
+  handle->req.type = UV_FS_EVENT_REQ;
+  handle->req.data = handle;
+
+  return 0;
+}
+
+
+int uv_fs_event_start(uv_fs_event_t* handle,
+                      uv_fs_event_cb cb,
+                      const char* path,
+                      unsigned int flags) {
+  int name_size, is_path_dir;
+  DWORD attr, last_error;
+  WCHAR* dir = NULL, *dir_to_watch, *pathw = NULL;
+  WCHAR short_path[MAX_PATH];
+
+  if (uv__is_active(handle))
+    return UV_EINVAL;
+
+  handle->cb = cb;
+  handle->path = uv__strdup(path);
+  if (!handle->path) {
+    uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+  }
+
+  uv__handle_start(handle);
+
+  /* Convert name to UTF16. */
+
+  name_size = MultiByteToWideChar(CP_UTF8, 0, path, -1, NULL, 0) *
+              sizeof(WCHAR);
+  pathw = (WCHAR*)uv__malloc(name_size);
+  if (!pathw) {
+    uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+  }
+
+  if (!MultiByteToWideChar(CP_UTF8,
+                           0,
+                           path,
+                           -1,
+                           pathw,
+                           name_size / sizeof(WCHAR))) {
+    return uv_translate_sys_error(GetLastError());
+  }
+
+  /* Determine whether path is a file or a directory. */
+  attr = GetFileAttributesW(pathw);
+  if (attr == INVALID_FILE_ATTRIBUTES) {
+    last_error = GetLastError();
+    goto error;
+  }
+
+  is_path_dir = (attr & FILE_ATTRIBUTE_DIRECTORY) ? 1 : 0;
+
+  if (is_path_dir) {
+    /* path is a directory, so that's the directory that we will watch. */
+    handle->dirw = pathw;
+    dir_to_watch = pathw;
+  } else {
+    /*
+     * path is a file, so we split it into its directory and file parts
+     * and watch the directory part.
+     */
+
+    /* Convert to short path. */
+    if (!GetShortPathNameW(pathw, short_path, ARRAY_SIZE(short_path))) {
+      last_error = GetLastError();
+      goto error;
+    }
+
+    if (uv_split_path(pathw, &dir, &handle->filew) != 0) {
+      last_error = GetLastError();
+      goto error;
+    }
+
+    if (uv_split_path(short_path, NULL, &handle->short_filew) != 0) {
+      last_error = GetLastError();
+      goto error;
+    }
+
+    dir_to_watch = dir;
+    uv__free(pathw);
+    pathw = NULL;
+  }
+
+  handle->dir_handle = CreateFileW(dir_to_watch,
+                                   FILE_LIST_DIRECTORY,
+                                   FILE_SHARE_READ | FILE_SHARE_DELETE |
+                                     FILE_SHARE_WRITE,
+                                   NULL,
+                                   OPEN_EXISTING,
+                                   FILE_FLAG_BACKUP_SEMANTICS |
+                                     FILE_FLAG_OVERLAPPED,
+                                   NULL);
+
+  if (dir) {
+    uv__free(dir);
+    dir = NULL;
+  }
+
+  if (handle->dir_handle == INVALID_HANDLE_VALUE) {
+    last_error = GetLastError();
+    goto error;
+  }
+
+  if (CreateIoCompletionPort(handle->dir_handle,
+                             handle->loop->iocp,
+                             (ULONG_PTR)handle,
+                             0) == NULL) {
+    last_error = GetLastError();
+    goto error;
+  }
+
+  if (!handle->buffer) {
+    handle->buffer = (char*)uv__malloc(uv_directory_watcher_buffer_size);
+  }
+  if (!handle->buffer) {
+    uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+  }
+
+  memset(&(handle->req.u.io.overlapped), 0,
+         sizeof(handle->req.u.io.overlapped));
+
+  if (!ReadDirectoryChangesW(handle->dir_handle,
+                             handle->buffer,
+                             uv_directory_watcher_buffer_size,
+                             (flags & UV_FS_EVENT_RECURSIVE) ? TRUE : FALSE,
+                             FILE_NOTIFY_CHANGE_FILE_NAME      |
+                               FILE_NOTIFY_CHANGE_DIR_NAME     |
+                               FILE_NOTIFY_CHANGE_ATTRIBUTES   |
+                               FILE_NOTIFY_CHANGE_SIZE         |
+                               FILE_NOTIFY_CHANGE_LAST_WRITE   |
+                               FILE_NOTIFY_CHANGE_LAST_ACCESS  |
+                               FILE_NOTIFY_CHANGE_CREATION     |
+                               FILE_NOTIFY_CHANGE_SECURITY,
+                             NULL,
+                             &handle->req.u.io.overlapped,
+                             NULL)) {
+    last_error = GetLastError();
+    goto error;
+  }
+
+  handle->req_pending = 1;
+  return 0;
+
+error:
+  if (handle->path) {
+    uv__free(handle->path);
+    handle->path = NULL;
+  }
+
+  if (handle->filew) {
+    uv__free(handle->filew);
+    handle->filew = NULL;
+  }
+
+  if (handle->short_filew) {
+    uv__free(handle->short_filew);
+    handle->short_filew = NULL;
+  }
+
+  uv__free(pathw);
+
+  if (handle->dir_handle != INVALID_HANDLE_VALUE) {
+    CloseHandle(handle->dir_handle);
+    handle->dir_handle = INVALID_HANDLE_VALUE;
+  }
+
+  if (handle->buffer) {
+    uv__free(handle->buffer);
+    handle->buffer = NULL;
+  }
+
+  return uv_translate_sys_error(last_error);
+}
+
+
+int uv_fs_event_stop(uv_fs_event_t* handle) {
+  if (!uv__is_active(handle))
+    return 0;
+
+  if (handle->dir_handle != INVALID_HANDLE_VALUE) {
+    CloseHandle(handle->dir_handle);
+    handle->dir_handle = INVALID_HANDLE_VALUE;
+  }
+
+  uv__handle_stop(handle);
+
+  if (handle->filew) {
+    uv__free(handle->filew);
+    handle->filew = NULL;
+  }
+
+  if (handle->short_filew) {
+    uv__free(handle->short_filew);
+    handle->short_filew = NULL;
+  }
+
+  if (handle->path) {
+    uv__free(handle->path);
+    handle->path = NULL;
+  }
+
+  if (handle->dirw) {
+    uv__free(handle->dirw);
+    handle->dirw = NULL;
+  }
+
+  return 0;
+}
+
+
+static int file_info_cmp(WCHAR* str, WCHAR* file_name, int file_name_len) {
+  int str_len;
+
+  str_len = wcslen(str);
+
+  /*
+   * Since we only care about equality, return early if the strings
+   * aren't the same length.
+   */
+  if (str_len != (file_name_len / sizeof(WCHAR)))
+    return -1;
+
+  return _wcsnicmp(str, file_name, str_len);
+}
+
+
+void uv_process_fs_event_req(uv_loop_t* loop, uv_req_t* req,
+    uv_fs_event_t* handle) {
+  FILE_NOTIFY_INFORMATION* file_info;
+  int err, sizew, size;
+  char* filename = NULL;
+  WCHAR* filenamew = NULL;
+  WCHAR* long_filenamew = NULL;
+  DWORD offset = 0;
+
+  assert(req->type == UV_FS_EVENT_REQ);
+  assert(handle->req_pending);
+  handle->req_pending = 0;
+
+  /* Don't report any callbacks if:
+   * - we're closing: just push the handle onto the endgame queue;
+   * - we're not active: just ignore the completed request.
+   */
+  if (!uv__is_active(handle)) {
+    if (handle->flags & UV__HANDLE_CLOSING) {
+      uv_want_endgame(loop, (uv_handle_t*) handle);
+    }
+    return;
+  }
+
+  file_info = (FILE_NOTIFY_INFORMATION*)(handle->buffer + offset);
+
+  if (REQ_SUCCESS(req)) {
+    if (req->u.io.overlapped.InternalHigh > 0) {
+      do {
+        file_info = (FILE_NOTIFY_INFORMATION*)((char*)file_info + offset);
+        assert(!filename);
+        assert(!filenamew);
+        assert(!long_filenamew);
+
+        /*
+         * Fire the event only if we were asked to watch a directory,
+         * or if the filename filter matches.
+         */
+        if (handle->dirw ||
+            file_info_cmp(handle->filew,
+                          file_info->FileName,
+                          file_info->FileNameLength) == 0 ||
+            file_info_cmp(handle->short_filew,
+                          file_info->FileName,
+                          file_info->FileNameLength) == 0) {
+
+          if (handle->dirw) {
+            /*
+             * We attempt to resolve the long form of the file name explicitly.
+             * We only do this for file names that might still exist on disk.
+             * If this fails, we use the name given by ReadDirectoryChangesW.
+             * This may be the long form or the 8.3 short name in some cases.
+             */
+            if (file_info->Action != FILE_ACTION_REMOVED &&
+              file_info->Action != FILE_ACTION_RENAMED_OLD_NAME) {
+              /* Construct a full path to the file. */
+              size = wcslen(handle->dirw) +
+                file_info->FileNameLength / sizeof(WCHAR) + 2;
+
+              filenamew = (WCHAR*)uv__malloc(size * sizeof(WCHAR));
+              if (!filenamew) {
+                uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+              }
+
+              _snwprintf(filenamew, size, L"%s\\%.*s", handle->dirw,
+                file_info->FileNameLength / (DWORD)sizeof(WCHAR),
+                file_info->FileName);
+
+              filenamew[size - 1] = L'\0';
+
+              /* Convert to long name. */
+              size = GetLongPathNameW(filenamew, NULL, 0);
+
+              if (size) {
+                long_filenamew = (WCHAR*)uv__malloc(size * sizeof(WCHAR));
+                if (!long_filenamew) {
+                  uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+                }
+
+                size = GetLongPathNameW(filenamew, long_filenamew, size);
+                if (size) {
+                  long_filenamew[size] = '\0';
+                } else {
+                  uv__free(long_filenamew);
+                  long_filenamew = NULL;
+                }
+              }
+
+              uv__free(filenamew);
+
+              if (long_filenamew) {
+                /* Get the file name out of the long path. */
+                uv_relative_path(long_filenamew,
+                                 handle->dirw,
+                                 &filenamew);
+                uv__free(long_filenamew);
+                long_filenamew = filenamew;
+                sizew = -1;
+              } else {
+                /* We couldn't get the long filename, use the one reported. */
+                filenamew = file_info->FileName;
+                sizew = file_info->FileNameLength / sizeof(WCHAR);
+              }
+            } else {
+              /*
+               * Removed or renamed events cannot be resolved to the long form.
+               * We therefore use the name given by ReadDirectoryChangesW.
+               * This may be the long form or the 8.3 short name in some cases.
+               */
+              filenamew = file_info->FileName;
+              sizew = file_info->FileNameLength / sizeof(WCHAR);
+            }
+          } else {
+            /* We already have the long name of the file, so just use it. */
+            filenamew = handle->filew;
+            sizew = -1;
+          }
+
+          /* Convert the filename to utf8. */
+          uv__convert_utf16_to_utf8(filenamew, sizew, &filename);
+
+          switch (file_info->Action) {
+            case FILE_ACTION_ADDED:
+            case FILE_ACTION_REMOVED:
+            case FILE_ACTION_RENAMED_OLD_NAME:
+            case FILE_ACTION_RENAMED_NEW_NAME:
+              handle->cb(handle, filename, UV_RENAME, 0);
+              break;
+
+            case FILE_ACTION_MODIFIED:
+              handle->cb(handle, filename, UV_CHANGE, 0);
+              break;
+          }
+
+          uv__free(filename);
+          filename = NULL;
+          uv__free(long_filenamew);
+          long_filenamew = NULL;
+          filenamew = NULL;
+        }
+
+        offset = file_info->NextEntryOffset;
+      } while (offset && !(handle->flags & UV__HANDLE_CLOSING));
+    } else {
+      handle->cb(handle, NULL, UV_CHANGE, 0);
+    }
+  } else {
+    err = GET_REQ_ERROR(req);
+    handle->cb(handle, NULL, 0, uv_translate_sys_error(err));
+  }
+
+  if (!(handle->flags & UV__HANDLE_CLOSING)) {
+    uv_fs_event_queue_readdirchanges(loop, handle);
+  } else {
+    uv_want_endgame(loop, (uv_handle_t*)handle);
+  }
+}
+
+
+void uv_fs_event_close(uv_loop_t* loop, uv_fs_event_t* handle) {
+  uv_fs_event_stop(handle);
+
+  uv__handle_closing(handle);
+
+  if (!handle->req_pending) {
+    uv_want_endgame(loop, (uv_handle_t*)handle);
+  }
+
+}
+
+
+void uv_fs_event_endgame(uv_loop_t* loop, uv_fs_event_t* handle) {
+  if ((handle->flags & UV__HANDLE_CLOSING) && !handle->req_pending) {
+    assert(!(handle->flags & UV_HANDLE_CLOSED));
+
+    if (handle->buffer) {
+      uv__free(handle->buffer);
+      handle->buffer = NULL;
+    }
+
+    uv__handle_close(handle);
+  }
+}
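
The functions above are the Windows implementation of libuv's uv_fs_event_t watcher (ReadDirectoryChangesW driven through the IOCP loop). A minimal usage sketch against the public API declared in uv.h, assuming a default loop and using an illustrative watch path:

    #include <stdio.h>
    #include "uv.h"

    /* Called for every change reported by ReadDirectoryChangesW. */
    static void on_change(uv_fs_event_t* w, const char* filename,
                          int events, int status) {
      if (status < 0) {
        fprintf(stderr, "watch error: %s\n", uv_strerror(status));
        return;
      }
      printf("%s: %s\n",
             (events & UV_RENAME) ? "rename" : "change",
             filename ? filename : "(unknown)");
    }

    int main(void) {
      uv_fs_event_t watcher;
      uv_fs_event_init(uv_default_loop(), &watcher);
      uv_fs_event_start(&watcher, on_change, ".", 0);  /* or UV_FS_EVENT_RECURSIVE */
      return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
    }

Passing a file path instead of "." exercises the uv_split_path()/filename-filter branch above, where only matching entries are reported.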

+ 2491 - 0
Utilities/cmlibuv/src/win/fs.c

@@ -0,0 +1,2491 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <direct.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <io.h>
+#include <limits.h>
+#include <sys/stat.h>
+#include <sys/utime.h>
+#include <stdio.h>
+
+#include "uv.h"
+#include "internal.h"
+#include "req-inl.h"
+#include "handle-inl.h"
+
+#include <wincrypt.h>
+
+
+#define UV_FS_FREE_PATHS         0x0002
+#define UV_FS_FREE_PTR           0x0008
+#define UV_FS_CLEANEDUP          0x0010
+
+
+#define QUEUE_FS_TP_JOB(loop, req)                                          \
+  do {                                                                      \
+    uv__req_register(loop, req);                                            \
+    uv__work_submit((loop), &(req)->work_req, uv__fs_work, uv__fs_done);    \
+  } while (0)
+
+#define SET_REQ_RESULT(req, result_value)                                   \
+  do {                                                                      \
+    req->result = (result_value);                                           \
+    if (req->result == -1) {                                                \
+      req->sys_errno_ = _doserrno;                                          \
+      req->result = uv_translate_sys_error(req->sys_errno_);                \
+    }                                                                       \
+  } while (0)
+
+#define SET_REQ_WIN32_ERROR(req, sys_errno)                                 \
+  do {                                                                      \
+    req->sys_errno_ = (sys_errno);                                          \
+    req->result = uv_translate_sys_error(req->sys_errno_);                  \
+  } while (0)
+
+#define SET_REQ_UV_ERROR(req, uv_errno, sys_errno)                          \
+  do {                                                                      \
+    req->result = (uv_errno);                                               \
+    req->sys_errno_ = (sys_errno);                                          \
+  } while (0)
+
+#define VERIFY_FD(fd, req)                                                  \
+  if (fd == -1) {                                                           \
+    req->result = UV_EBADF;                                                 \
+    req->sys_errno_ = ERROR_INVALID_HANDLE;                                 \
+    return;                                                                 \
+  }
+
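+/* The macros below rebase Windows FILETIMEs (100-nanosecond intervals since
+ * 1601-01-01) onto the Unix epoch: 116444736000000000 is the number of
+ * 100-nanosecond intervals between 1601-01-01 and 1970-01-01. */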
+#define FILETIME_TO_UINT(filetime)                                          \
+   (*((uint64_t*) &(filetime)) - 116444736000000000ULL)
+
+#define FILETIME_TO_TIME_T(filetime)                                        \
+   (FILETIME_TO_UINT(filetime) / 10000000ULL)
+
+#define FILETIME_TO_TIME_NS(filetime, secs)                                 \
+   ((FILETIME_TO_UINT(filetime) - (secs * 10000000ULL)) * 100)
+
+#define FILETIME_TO_TIMESPEC(ts, filetime)                                  \
+   do {                                                                     \
+     (ts).tv_sec = (long) FILETIME_TO_TIME_T(filetime);                     \
+     (ts).tv_nsec = (long) FILETIME_TO_TIME_NS(filetime, (ts).tv_sec);      \
+   } while(0)
+
+#define TIME_T_TO_FILETIME(time, filetime_ptr)                              \
+  do {                                                                      \
+    uint64_t bigtime = ((uint64_t) ((time) * 10000000ULL)) +                \
+                                  116444736000000000ULL;                    \
+    (filetime_ptr)->dwLowDateTime = bigtime & 0xFFFFFFFF;                   \
+    (filetime_ptr)->dwHighDateTime = bigtime >> 32;                         \
+  } while(0)
+
+#define IS_SLASH(c) ((c) == L'\\' || (c) == L'/')
+#define IS_LETTER(c) (((c) >= L'a' && (c) <= L'z') || \
+  ((c) >= L'A' && (c) <= L'Z'))
+
+const WCHAR JUNCTION_PREFIX[] = L"\\??\\";
+const WCHAR JUNCTION_PREFIX_LEN = 4;
+
+const WCHAR LONG_PATH_PREFIX[] = L"\\\\?\\";
+const WCHAR LONG_PATH_PREFIX_LEN = 4;
+
+const WCHAR UNC_PATH_PREFIX[] = L"\\\\?\\UNC\\";
+const WCHAR UNC_PATH_PREFIX_LEN = 8;
+
+
+void uv_fs_init() {
+  _fmode = _O_BINARY;
+}
+
+
+INLINE static int fs__capture_path(uv_fs_t* req, const char* path,
+    const char* new_path, const int copy_path) {
+  char* buf;
+  char* pos;
+  ssize_t buf_sz = 0, path_len, pathw_len = 0, new_pathw_len = 0;
+
+  /* new_path can only be set if path is also set. */
+  assert(new_path == NULL || path != NULL);
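+  /* All converted strings share one allocation: first the wide copy of
+   * 'path', then the wide copy of 'new_path', then (when copy_path is set)
+   * a narrow copy of 'path'.  The request's pointers all point into 'buf'. */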
+
+  if (path != NULL) {
+    pathw_len = MultiByteToWideChar(CP_UTF8,
+                                    0,
+                                    path,
+                                    -1,
+                                    NULL,
+                                    0);
+    if (pathw_len == 0) {
+      return GetLastError();
+    }
+
+    buf_sz += pathw_len * sizeof(WCHAR);
+  }
+
+  if (path != NULL && copy_path) {
+    path_len = 1 + strlen(path);
+    buf_sz += path_len;
+  }
+
+  if (new_path != NULL) {
+    new_pathw_len = MultiByteToWideChar(CP_UTF8,
+                                        0,
+                                        new_path,
+                                        -1,
+                                        NULL,
+                                        0);
+    if (new_pathw_len == 0) {
+      return GetLastError();
+    }
+
+    buf_sz += new_pathw_len * sizeof(WCHAR);
+  }
+
+
+  if (buf_sz == 0) {
+    req->file.pathw = NULL;
+    req->fs.info.new_pathw = NULL;
+    req->path = NULL;
+    return 0;
+  }
+
+  buf = (char*) uv__malloc(buf_sz);
+  if (buf == NULL) {
+    return ERROR_OUTOFMEMORY;
+  }
+
+  pos = buf;
+
+  if (path != NULL) {
+    DWORD r = MultiByteToWideChar(CP_UTF8,
+                                  0,
+                                  path,
+                                  -1,
+                                  (WCHAR*) pos,
+                                  pathw_len);
+    assert(r == (DWORD) pathw_len);
+    req->file.pathw = (WCHAR*) pos;
+    pos += r * sizeof(WCHAR);
+  } else {
+    req->file.pathw = NULL;
+  }
+
+  if (new_path != NULL) {
+    DWORD r = MultiByteToWideChar(CP_UTF8,
+                                  0,
+                                  new_path,
+                                  -1,
+                                  (WCHAR*) pos,
+                                  new_pathw_len);
+    assert(r == (DWORD) new_pathw_len);
+    req->fs.info.new_pathw = (WCHAR*) pos;
+    pos += r * sizeof(WCHAR);
+  } else {
+    req->fs.info.new_pathw = NULL;
+  }
+
+  req->path = path;
+  if (path != NULL && copy_path) {
+    memcpy(pos, path, path_len);
+    assert(path_len == buf_sz - (pos - buf));
+    req->path = pos;
+  }
+
+  req->flags |= UV_FS_FREE_PATHS;
+
+  return 0;
+}
+
+
+
+INLINE static void uv_fs_req_init(uv_loop_t* loop, uv_fs_t* req,
+    uv_fs_type fs_type, const uv_fs_cb cb) {
+  uv_req_init(loop, (uv_req_t*) req);
+
+  req->type = UV_FS;
+  req->loop = loop;
+  req->flags = 0;
+  req->fs_type = fs_type;
+  req->result = 0;
+  req->ptr = NULL;
+  req->path = NULL;
+  req->cb = cb;
+}
+
+
+static int fs__wide_to_utf8(WCHAR* w_source_ptr,
+                               DWORD w_source_len,
+                               char** target_ptr,
+                               uint64_t* target_len_ptr) {
+  int r;
+  int target_len;
+  char* target;
+  target_len = WideCharToMultiByte(CP_UTF8,
+                                   0,
+                                   w_source_ptr,
+                                   w_source_len,
+                                   NULL,
+                                   0,
+                                   NULL,
+                                   NULL);
+
+  if (target_len == 0) {
+    return -1;
+  }
+
+  if (target_len_ptr != NULL) {
+    *target_len_ptr = target_len;
+  }
+
+  if (target_ptr == NULL) {
+    return 0;
+  }
+
+  target = uv__malloc(target_len + 1);
+  if (target == NULL) {
+    SetLastError(ERROR_OUTOFMEMORY);
+    return -1;
+  }
+
+  r = WideCharToMultiByte(CP_UTF8,
+                          0,
+                          w_source_ptr,
+                          w_source_len,
+                          target,
+                          target_len,
+                          NULL,
+                          NULL);
+  assert(r == target_len);
+  target[target_len] = '\0';
+  *target_ptr = target;
+  return 0;
+}
+
+
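+/* Read the reparse point behind 'handle'.  If it is a symlink, or a junction
+ * that looks like \??\<drive>:\, return its target as UTF-8 via 'target_ptr'
+ * and/or its length via 'target_len_ptr' (either may be NULL).  On failure,
+ * return -1 with the error available through GetLastError(). */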
+INLINE static int fs__readlink_handle(HANDLE handle, char** target_ptr,
+    uint64_t* target_len_ptr) {
+  char buffer[MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
+  REPARSE_DATA_BUFFER* reparse_data = (REPARSE_DATA_BUFFER*) buffer;
+  WCHAR* w_target;
+  DWORD w_target_len;
+  DWORD bytes;
+
+  if (!DeviceIoControl(handle,
+                       FSCTL_GET_REPARSE_POINT,
+                       NULL,
+                       0,
+                       buffer,
+                       sizeof buffer,
+                       &bytes,
+                       NULL)) {
+    return -1;
+  }
+
+  if (reparse_data->ReparseTag == IO_REPARSE_TAG_SYMLINK) {
+    /* Real symlink */
+    w_target = reparse_data->SymbolicLinkReparseBuffer.PathBuffer +
+        (reparse_data->SymbolicLinkReparseBuffer.SubstituteNameOffset /
+        sizeof(WCHAR));
+    w_target_len =
+        reparse_data->SymbolicLinkReparseBuffer.SubstituteNameLength /
+        sizeof(WCHAR);
+
+    /* Real symlinks can contain pretty much everything, but the only thing */
+    /* we really care about is undoing the implicit conversion to an NT */
+    /* namespaced path that CreateSymbolicLink will perform on absolute */
+    /* paths. If the path is win32-namespaced then the user must have */
+    /* explicitly made it so, and we better just return the unmodified */
+    /* reparse data. */
+    if (w_target_len >= 4 &&
+        w_target[0] == L'\\' &&
+        w_target[1] == L'?' &&
+        w_target[2] == L'?' &&
+        w_target[3] == L'\\') {
+      /* Starts with \??\ */
+      if (w_target_len >= 6 &&
+          ((w_target[4] >= L'A' && w_target[4] <= L'Z') ||
+           (w_target[4] >= L'a' && w_target[4] <= L'z')) &&
+          w_target[5] == L':' &&
+          (w_target_len == 6 || w_target[6] == L'\\')) {
+        /* \??\<drive>:\ */
+        w_target += 4;
+        w_target_len -= 4;
+
+      } else if (w_target_len >= 8 &&
+                 (w_target[4] == L'U' || w_target[4] == L'u') &&
+                 (w_target[5] == L'N' || w_target[5] == L'n') &&
+                 (w_target[6] == L'C' || w_target[6] == L'c') &&
+                 w_target[7] == L'\\') {
+        /* \??\UNC\<server>\<share>\ - make sure the final path looks like */
+        /* \\<server>\<share>\ */
+        w_target += 6;
+        w_target[0] = L'\\';
+        w_target_len -= 6;
+      }
+    }
+
+  } else if (reparse_data->ReparseTag == IO_REPARSE_TAG_MOUNT_POINT) {
+    /* Junction. */
+    w_target = reparse_data->MountPointReparseBuffer.PathBuffer +
+        (reparse_data->MountPointReparseBuffer.SubstituteNameOffset /
+        sizeof(WCHAR));
+    w_target_len = reparse_data->MountPointReparseBuffer.SubstituteNameLength /
+        sizeof(WCHAR);
+
+    /* Only treat junctions that look like \??\<drive>:\ as symlink. */
+    /* Junctions can also be used as mount points, like \??\Volume{<guid>}, */
+    /* but that's confusing for programs since they wouldn't be able to */
+    /* actually understand such a path when returned by uv_readlink(). */
+    /* UNC paths are never valid for junctions so we don't care about them. */
+    if (!(w_target_len >= 6 &&
+          w_target[0] == L'\\' &&
+          w_target[1] == L'?' &&
+          w_target[2] == L'?' &&
+          w_target[3] == L'\\' &&
+          ((w_target[4] >= L'A' && w_target[4] <= L'Z') ||
+           (w_target[4] >= L'a' && w_target[4] <= L'z')) &&
+          w_target[5] == L':' &&
+          (w_target_len == 6 || w_target[6] == L'\\'))) {
+      SetLastError(ERROR_SYMLINK_NOT_SUPPORTED);
+      return -1;
+    }
+
+    /* Remove leading \??\ */
+    w_target += 4;
+    w_target_len -= 4;
+
+  } else {
+    /* Reparse tag does not indicate a symlink. */
+    SetLastError(ERROR_SYMLINK_NOT_SUPPORTED);
+    return -1;
+  }
+
+  return fs__wide_to_utf8(w_target, w_target_len, target_ptr, target_len_ptr);
+}
+
+
+void fs__open(uv_fs_t* req) {
+  DWORD access;
+  DWORD share;
+  DWORD disposition;
+  DWORD attributes = 0;
+  HANDLE file;
+  int fd, current_umask;
+  int flags = req->fs.info.file_flags;
+
+  /* Obtain the active umask. umask() never fails and returns the previous */
+  /* umask. */
+  current_umask = umask(0);
+  umask(current_umask);
+
+  /* convert flags and mode to CreateFile parameters */
+  switch (flags & (_O_RDONLY | _O_WRONLY | _O_RDWR)) {
+  case _O_RDONLY:
+    access = FILE_GENERIC_READ;
+    attributes |= FILE_FLAG_BACKUP_SEMANTICS;
+    break;
+  case _O_WRONLY:
+    access = FILE_GENERIC_WRITE;
+    break;
+  case _O_RDWR:
+    access = FILE_GENERIC_READ | FILE_GENERIC_WRITE;
+    break;
+  default:
+    goto einval;
+  }
+
+  if (flags & _O_APPEND) {
+    access &= ~FILE_WRITE_DATA;
+    access |= FILE_APPEND_DATA;
+    attributes &= ~FILE_FLAG_BACKUP_SEMANTICS;
+  }
+
+  /*
+   * Here is where we deviate significantly from what CRT's _open()
+   * does. We indiscriminately use all the sharing modes, to match
+   * UNIX semantics. In particular, this ensures that the file can
+   * be deleted even whilst it's open, fixing issue #1449.
+   */
+  share = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
+
+  switch (flags & (_O_CREAT | _O_EXCL | _O_TRUNC)) {
+  case 0:
+  case _O_EXCL:
+    disposition = OPEN_EXISTING;
+    break;
+  case _O_CREAT:
+    disposition = OPEN_ALWAYS;
+    break;
+  case _O_CREAT | _O_EXCL:
+  case _O_CREAT | _O_TRUNC | _O_EXCL:
+    disposition = CREATE_NEW;
+    break;
+  case _O_TRUNC:
+  case _O_TRUNC | _O_EXCL:
+    disposition = TRUNCATE_EXISTING;
+    break;
+  case _O_CREAT | _O_TRUNC:
+    disposition = CREATE_ALWAYS;
+    break;
+  default:
+    goto einval;
+  }
+
+  attributes |= FILE_ATTRIBUTE_NORMAL;
+  if (flags & _O_CREAT) {
+    if (!((req->fs.info.mode & ~current_umask) & _S_IWRITE)) {
+      attributes |= FILE_ATTRIBUTE_READONLY;
+    }
+  }
+
+  if (flags & _O_TEMPORARY ) {
+    attributes |= FILE_FLAG_DELETE_ON_CLOSE | FILE_ATTRIBUTE_TEMPORARY;
+    access |= DELETE;
+  }
+
+  if (flags & _O_SHORT_LIVED) {
+    attributes |= FILE_ATTRIBUTE_TEMPORARY;
+  }
+
+  switch (flags & (_O_SEQUENTIAL | _O_RANDOM)) {
+  case 0:
+    break;
+  case _O_SEQUENTIAL:
+    attributes |= FILE_FLAG_SEQUENTIAL_SCAN;
+    break;
+  case _O_RANDOM:
+    attributes |= FILE_FLAG_RANDOM_ACCESS;
+    break;
+  default:
+    goto einval;
+  }
+
+  /* Setting this flag makes it possible to open a directory. */
+  attributes |= FILE_FLAG_BACKUP_SEMANTICS;
+
+  file = CreateFileW(req->file.pathw,
+                     access,
+                     share,
+                     NULL,
+                     disposition,
+                     attributes,
+                     NULL);
+  if (file == INVALID_HANDLE_VALUE) {
+    DWORD error = GetLastError();
+    if (error == ERROR_FILE_EXISTS && (flags & _O_CREAT) &&
+        !(flags & _O_EXCL)) {
+      /* Special case: when ERROR_FILE_EXISTS happens and O_CREAT was */
+      /* specified, it means the path referred to a directory. */
+      SET_REQ_UV_ERROR(req, UV_EISDIR, error);
+    } else {
+      SET_REQ_WIN32_ERROR(req, GetLastError());
+    }
+    return;
+  }
+
+  fd = _open_osfhandle((intptr_t) file, flags);
+  if (fd < 0) {
+    /* The only known failure mode for _open_osfhandle() is EMFILE, in which
+     * case GetLastError() will return zero. However we'll try to handle other
+     * errors as well, should they ever occur.
+     */
+    if (errno == EMFILE)
+      SET_REQ_UV_ERROR(req, UV_EMFILE, ERROR_TOO_MANY_OPEN_FILES);
+    else if (GetLastError() != ERROR_SUCCESS)
+      SET_REQ_WIN32_ERROR(req, GetLastError());
+    else
+      SET_REQ_WIN32_ERROR(req, UV_UNKNOWN);
+    CloseHandle(file);
+    return;
+  }
+
+  SET_REQ_RESULT(req, fd);
+  return;
+
+ einval:
+  SET_REQ_UV_ERROR(req, UV_EINVAL, ERROR_INVALID_PARAMETER);
+}
+
+void fs__close(uv_fs_t* req) {
+  int fd = req->file.fd;
+  int result;
+
+  VERIFY_FD(fd, req);
+
+  if (fd > 2)
+    result = _close(fd);
+  else
+    result = 0;
+
+  /* _close doesn't set _doserrno on failure, but it does always set errno
+   * to EBADF on failure.
+   */
+  if (result == -1) {
+    assert(errno == EBADF);
+    SET_REQ_UV_ERROR(req, UV_EBADF, ERROR_INVALID_HANDLE);
+  } else {
+    req->result = 0;
+  }
+}
+
+
+void fs__read(uv_fs_t* req) {
+  int fd = req->file.fd;
+  int64_t offset = req->fs.info.offset;
+  HANDLE handle;
+  OVERLAPPED overlapped, *overlapped_ptr;
+  LARGE_INTEGER offset_;
+  DWORD bytes;
+  DWORD error;
+  int result;
+  unsigned int index;
+
+  VERIFY_FD(fd, req);
+
+  handle = uv__get_osfhandle(fd);
+
+  if (handle == INVALID_HANDLE_VALUE) {
+    SET_REQ_WIN32_ERROR(req, ERROR_INVALID_HANDLE);
+    return;
+  }
+
+  if (offset != -1) {
+    memset(&overlapped, 0, sizeof overlapped);
+    overlapped_ptr = &overlapped;
+  } else {
+    overlapped_ptr = NULL;
+  }
+
+  index = 0;
+  bytes = 0;
+  do {
+    DWORD incremental_bytes;
+
+    if (offset != -1) {
+      offset_.QuadPart = offset + bytes;
+      overlapped.Offset = offset_.LowPart;
+      overlapped.OffsetHigh = offset_.HighPart;
+    }
+
+    result = ReadFile(handle,
+                      req->fs.info.bufs[index].base,
+                      req->fs.info.bufs[index].len,
+                      &incremental_bytes,
+                      overlapped_ptr);
+    bytes += incremental_bytes;
+    ++index;
+  } while (result && index < req->fs.info.nbufs);
+
+  if (result || bytes > 0) {
+    SET_REQ_RESULT(req, bytes);
+  } else {
+    error = GetLastError();
+    if (error == ERROR_HANDLE_EOF) {
+      SET_REQ_RESULT(req, bytes);
+    } else {
+      SET_REQ_WIN32_ERROR(req, error);
+    }
+  }
+}
+
+
+void fs__write(uv_fs_t* req) {
+  int fd = req->file.fd;
+  int64_t offset = req->fs.info.offset;
+  HANDLE handle;
+  OVERLAPPED overlapped, *overlapped_ptr;
+  LARGE_INTEGER offset_;
+  DWORD bytes;
+  int result;
+  unsigned int index;
+
+  VERIFY_FD(fd, req);
+
+  handle = uv__get_osfhandle(fd);
+  if (handle == INVALID_HANDLE_VALUE) {
+    SET_REQ_WIN32_ERROR(req, ERROR_INVALID_HANDLE);
+    return;
+  }
+
+  if (offset != -1) {
+    memset(&overlapped, 0, sizeof overlapped);
+    overlapped_ptr = &overlapped;
+  } else {
+    overlapped_ptr = NULL;
+  }
+
+  index = 0;
+  bytes = 0;
+  do {
+    DWORD incremental_bytes;
+
+    if (offset != -1) {
+      offset_.QuadPart = offset + bytes;
+      overlapped.Offset = offset_.LowPart;
+      overlapped.OffsetHigh = offset_.HighPart;
+    }
+
+    result = WriteFile(handle,
+                       req->fs.info.bufs[index].base,
+                       req->fs.info.bufs[index].len,
+                       &incremental_bytes,
+                       overlapped_ptr);
+    bytes += incremental_bytes;
+    ++index;
+  } while (result && index < req->fs.info.nbufs);
+
+  if (result || bytes > 0) {
+    SET_REQ_RESULT(req, bytes);
+  } else {
+    SET_REQ_WIN32_ERROR(req, GetLastError());
+  }
+}
+
+
+void fs__rmdir(uv_fs_t* req) {
+  int result = _wrmdir(req->file.pathw);
+  SET_REQ_RESULT(req, result);
+}
+
+
+void fs__unlink(uv_fs_t* req) {
+  const WCHAR* pathw = req->file.pathw;
+  HANDLE handle;
+  BY_HANDLE_FILE_INFORMATION info;
+  FILE_DISPOSITION_INFORMATION disposition;
+  IO_STATUS_BLOCK iosb;
+  NTSTATUS status;
+
+  handle = CreateFileW(pathw,
+                       FILE_READ_ATTRIBUTES | FILE_WRITE_ATTRIBUTES | DELETE,
+                       FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
+                       NULL,
+                       OPEN_EXISTING,
+                       FILE_FLAG_OPEN_REPARSE_POINT | FILE_FLAG_BACKUP_SEMANTICS,
+                       NULL);
+
+  if (handle == INVALID_HANDLE_VALUE) {
+    SET_REQ_WIN32_ERROR(req, GetLastError());
+    return;
+  }
+
+  if (!GetFileInformationByHandle(handle, &info)) {
+    SET_REQ_WIN32_ERROR(req, GetLastError());
+    CloseHandle(handle);
+    return;
+  }
+
+  if (info.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) {
+    /* Do not allow deletion of directories, unless it is a symlink. When */
+    /* the path refers to a non-symlink directory, report EPERM as mandated */
+    /* by POSIX.1. */
+
+    /* Check if it is a reparse point. If it's not, it's a normal directory. */
+    if (!(info.dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT)) {
+      SET_REQ_WIN32_ERROR(req, ERROR_ACCESS_DENIED);
+      CloseHandle(handle);
+      return;
+    }
+
+    /* Read the reparse point and check if it is a valid symlink. */
+    /* If not, don't unlink. */
+    if (fs__readlink_handle(handle, NULL, NULL) < 0) {
+      DWORD error = GetLastError();
+      if (error == ERROR_SYMLINK_NOT_SUPPORTED)
+        error = ERROR_ACCESS_DENIED;
+      SET_REQ_WIN32_ERROR(req, error);
+      CloseHandle(handle);
+      return;
+    }
+  }
+
+  if (info.dwFileAttributes & FILE_ATTRIBUTE_READONLY) {
+    /* Remove read-only attribute */
+    FILE_BASIC_INFORMATION basic = { 0 };
+
+    basic.FileAttributes = info.dwFileAttributes & ~(FILE_ATTRIBUTE_READONLY);
+
+    status = pNtSetInformationFile(handle,
+                                   &iosb,
+                                   &basic,
+                                   sizeof basic,
+                                   FileBasicInformation);
+    if (!NT_SUCCESS(status)) {
+      SET_REQ_WIN32_ERROR(req, pRtlNtStatusToDosError(status));
+      CloseHandle(handle);
+      return;
+    }
+  }
+
+  /* Try to set the delete flag. */
+  disposition.DeleteFile = TRUE;
+  status = pNtSetInformationFile(handle,
+                                 &iosb,
+                                 &disposition,
+                                 sizeof disposition,
+                                 FileDispositionInformation);
+  if (NT_SUCCESS(status)) {
+    SET_REQ_SUCCESS(req);
+  } else {
+    SET_REQ_WIN32_ERROR(req, pRtlNtStatusToDosError(status));
+  }
+
+  CloseHandle(handle);
+}
+
+
+void fs__mkdir(uv_fs_t* req) {
+  /* TODO: use req->fs.info.mode. */
+  int result = _wmkdir(req->file.pathw);
+  SET_REQ_RESULT(req, result);
+}
+
+
+/* OpenBSD original: lib/libc/stdio/mktemp.c */
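+/* Replace the trailing "XXXXXX" of the request path with characters drawn
+ * from CryptGenRandom(), retrying up to TMP_MAX times until _wmkdir()
+ * succeeds or fails with something other than EEXIST. */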
+void fs__mkdtemp(uv_fs_t* req) {
+  static const WCHAR *tempchars =
+    L"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
+  static const size_t num_chars = 62;
+  static const size_t num_x = 6;
+  WCHAR *cp, *ep;
+  unsigned int tries, i;
+  size_t len;
+  HCRYPTPROV h_crypt_prov;
+  uint64_t v;
+  BOOL released;
+
+  len = wcslen(req->file.pathw);
+  ep = req->file.pathw + len;
+  if (len < num_x || wcsncmp(ep - num_x, L"XXXXXX", num_x)) {
+    SET_REQ_UV_ERROR(req, UV_EINVAL, ERROR_INVALID_PARAMETER);
+    return;
+  }
+
+  if (!CryptAcquireContext(&h_crypt_prov, NULL, NULL, PROV_RSA_FULL,
+                           CRYPT_VERIFYCONTEXT)) {
+    SET_REQ_WIN32_ERROR(req, GetLastError());
+    return;
+  }
+
+  tries = TMP_MAX;
+  do {
+    if (!CryptGenRandom(h_crypt_prov, sizeof(v), (BYTE*) &v)) {
+      SET_REQ_WIN32_ERROR(req, GetLastError());
+      break;
+    }
+
+    cp = ep - num_x;
+    for (i = 0; i < num_x; i++) {
+      *cp++ = tempchars[v % num_chars];
+      v /= num_chars;
+    }
+
+    if (_wmkdir(req->file.pathw) == 0) {
+      len = strlen(req->path);
+      wcstombs((char*) req->path + len - num_x, ep - num_x, num_x);
+      SET_REQ_RESULT(req, 0);
+      break;
+    } else if (errno != EEXIST) {
+      SET_REQ_RESULT(req, -1);
+      break;
+    }
+  } while (--tries);
+
+  released = CryptReleaseContext(h_crypt_prov, 0);
+  assert(released);
+  if (tries == 0) {
+    SET_REQ_RESULT(req, -1);
+  }
+}
+
+
+void fs__scandir(uv_fs_t* req) {
+  static const size_t dirents_initial_size = 32;
+
+  HANDLE dir_handle = INVALID_HANDLE_VALUE;
+
+  uv__dirent_t** dirents = NULL;
+  size_t dirents_size = 0;
+  size_t dirents_used = 0;
+
+  IO_STATUS_BLOCK iosb;
+  NTSTATUS status;
+
+  /* Buffer to hold directory entries returned by NtQueryDirectoryFile.
+   * It's important that this buffer can hold at least one entry, regardless
+   * of the length of the file names present in the enumerated directory.
+   * A file name is at most 256 WCHARs long.
+   * According to MSDN, the buffer must be aligned at an 8-byte boundary.
+   */
+#if _MSC_VER
+  __declspec(align(8)) char buffer[8192];
+#else
+  __attribute__ ((aligned (8))) char buffer[8192];
+#endif
+
+  STATIC_ASSERT(sizeof buffer >=
+                sizeof(FILE_DIRECTORY_INFORMATION) + 256 * sizeof(WCHAR));
+
+  /* Open the directory. */
+  dir_handle =
+      CreateFileW(req->file.pathw,
+                  FILE_LIST_DIRECTORY | SYNCHRONIZE,
+                  FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
+                  NULL,
+                  OPEN_EXISTING,
+                  FILE_FLAG_BACKUP_SEMANTICS,
+                  NULL);
+  if (dir_handle == INVALID_HANDLE_VALUE)
+    goto win32_error;
+
+  /* Read the first chunk. */
+  status = pNtQueryDirectoryFile(dir_handle,
+                                 NULL,
+                                 NULL,
+                                 NULL,
+                                 &iosb,
+                                 &buffer,
+                                 sizeof buffer,
+                                 FileDirectoryInformation,
+                                 FALSE,
+                                 NULL,
+                                 TRUE);
+
+  /* If the handle is not a directory, we'll get STATUS_INVALID_PARAMETER.
+   * This should be reported back as UV_ENOTDIR.
+   */
+  if (status == STATUS_INVALID_PARAMETER)
+    goto not_a_directory_error;
+
+  while (NT_SUCCESS(status)) {
+    char* position = buffer;
+    size_t next_entry_offset = 0;
+
+    do {
+      FILE_DIRECTORY_INFORMATION* info;
+      uv__dirent_t* dirent;
+
+      size_t wchar_len;
+      size_t utf8_len;
+
+      /* Obtain a pointer to the current directory entry. */
+      position += next_entry_offset;
+      info = (FILE_DIRECTORY_INFORMATION*) position;
+
+      /* Fetch the offset to the next directory entry. */
+      next_entry_offset = info->NextEntryOffset;
+
+      /* Compute the length of the filename in WCHARs. */
+      wchar_len = info->FileNameLength / sizeof info->FileName[0];
+
+      /* Skip over '.' and '..' entries.  It has been reported that
+       * the SharePoint driver includes the terminating zero byte in
+       * the filename length.  Strip those first.
+       */
+      while (wchar_len > 0 && info->FileName[wchar_len - 1] == L'\0')
+        wchar_len -= 1;
+
+      if (wchar_len == 0)
+        continue;
+      if (wchar_len == 1 && info->FileName[0] == L'.')
+        continue;
+      if (wchar_len == 2 && info->FileName[0] == L'.' &&
+          info->FileName[1] == L'.')
+        continue;
+
+      /* Compute the space required to store the filename as UTF-8. */
+      utf8_len = WideCharToMultiByte(
+          CP_UTF8, 0, &info->FileName[0], wchar_len, NULL, 0, NULL, NULL);
+      if (utf8_len == 0)
+        goto win32_error;
+
+      /* Resize the dirent array if needed. */
+      if (dirents_used >= dirents_size) {
+        size_t new_dirents_size =
+            dirents_size == 0 ? dirents_initial_size : dirents_size << 1;
+        uv__dirent_t** new_dirents =
+            uv__realloc(dirents, new_dirents_size * sizeof *dirents);
+
+        if (new_dirents == NULL)
+          goto out_of_memory_error;
+
+        dirents_size = new_dirents_size;
+        dirents = new_dirents;
+      }
+
+      /* Allocate space for the uv dirent structure. The dirent structure
+       * includes room for the first character of the filename, but `utf8_len`
+       * doesn't count the NULL terminator at this point.
+       */
+      dirent = uv__malloc(sizeof *dirent + utf8_len);
+      if (dirent == NULL)
+        goto out_of_memory_error;
+
+      dirents[dirents_used++] = dirent;
+
+      /* Convert file name to UTF-8. */
+      if (WideCharToMultiByte(CP_UTF8,
+                              0,
+                              &info->FileName[0],
+                              wchar_len,
+                              &dirent->d_name[0],
+                              utf8_len,
+                              NULL,
+                              NULL) == 0)
+        goto win32_error;
+
+      /* Add a null terminator to the filename. */
+      dirent->d_name[utf8_len] = '\0';
+
+      /* Fill out the type field. */
+      if (info->FileAttributes & FILE_ATTRIBUTE_DEVICE)
+        dirent->d_type = UV__DT_CHAR;
+      else if (info->FileAttributes & FILE_ATTRIBUTE_REPARSE_POINT)
+        dirent->d_type = UV__DT_LINK;
+      else if (info->FileAttributes & FILE_ATTRIBUTE_DIRECTORY)
+        dirent->d_type = UV__DT_DIR;
+      else
+        dirent->d_type = UV__DT_FILE;
+    } while (next_entry_offset != 0);
+
+    /* Read the next chunk. */
+    status = pNtQueryDirectoryFile(dir_handle,
+                                   NULL,
+                                   NULL,
+                                   NULL,
+                                   &iosb,
+                                   &buffer,
+                                   sizeof buffer,
+                                   FileDirectoryInformation,
+                                   FALSE,
+                                   NULL,
+                                   FALSE);
+
+    /* After the first pNtQueryDirectoryFile call, the function may return
+     * STATUS_SUCCESS even if the buffer was too small to hold at least one
+     * directory entry.
+     */
+    if (status == STATUS_SUCCESS && iosb.Information == 0)
+      status = STATUS_BUFFER_OVERFLOW;
+  }
+
+  if (status != STATUS_NO_MORE_FILES)
+    goto nt_error;
+
+  CloseHandle(dir_handle);
+
+  /* Store the result in the request object. */
+  req->ptr = dirents;
+  if (dirents != NULL)
+    req->flags |= UV_FS_FREE_PTR;
+
+  SET_REQ_RESULT(req, dirents_used);
+
+  /* `nbufs` will be used as index by uv_fs_scandir_next. */
+  req->fs.info.nbufs = 0;
+
+  return;
+
+nt_error:
+  SET_REQ_WIN32_ERROR(req, pRtlNtStatusToDosError(status));
+  goto cleanup;
+
+win32_error:
+  SET_REQ_WIN32_ERROR(req, GetLastError());
+  goto cleanup;
+
+not_a_directory_error:
+  SET_REQ_UV_ERROR(req, UV_ENOTDIR, ERROR_DIRECTORY);
+  goto cleanup;
+
+out_of_memory_error:
+  SET_REQ_UV_ERROR(req, UV_ENOMEM, ERROR_OUTOFMEMORY);
+  goto cleanup;
+
+cleanup:
+  if (dir_handle != INVALID_HANDLE_VALUE)
+    CloseHandle(dir_handle);
+  while (dirents_used > 0)
+    uv__free(dirents[--dirents_used]);
+  if (dirents != NULL)
+    uv__free(dirents);
+}
+
+
+INLINE static int fs__stat_handle(HANDLE handle, uv_stat_t* statbuf) {
+  FILE_ALL_INFORMATION file_info;
+  FILE_FS_VOLUME_INFORMATION volume_info;
+  NTSTATUS nt_status;
+  IO_STATUS_BLOCK io_status;
+
+  nt_status = pNtQueryInformationFile(handle,
+                                      &io_status,
+                                      &file_info,
+                                      sizeof file_info,
+                                      FileAllInformation);
+
+  /* Buffer overflow (a warning status code) is expected here. */
+  if (NT_ERROR(nt_status)) {
+    SetLastError(pRtlNtStatusToDosError(nt_status));
+    return -1;
+  }
+
+  nt_status = pNtQueryVolumeInformationFile(handle,
+                                            &io_status,
+                                            &volume_info,
+                                            sizeof volume_info,
+                                            FileFsVolumeInformation);
+
+  /* Buffer overflow (a warning status code) is expected here. */
+  if (io_status.Status == STATUS_NOT_IMPLEMENTED) {
+    statbuf->st_dev = 0;
+  } else if (NT_ERROR(nt_status)) {
+    SetLastError(pRtlNtStatusToDosError(nt_status));
+    return -1;
+  } else {
+    statbuf->st_dev = volume_info.VolumeSerialNumber;
+  }
+
+  /* Todo: st_mode should probably always be 0666 for everyone. We might also
+   * want to report 0777 if the file is a .exe or a directory.
+   *
+   * Currently it's based on whether the 'readonly' attribute is set, which
+   * makes little sense because the semantics are so different: the 'read-only'
+   * flag is just a way for a user to protect against accidental deletion, and
+   * serves no security purpose. Windows uses ACLs for that.
+   *
+   * Also people now use uv_fs_chmod() to take away the writable bit for good
+   * reasons. Windows however just makes the file read-only, which makes it
+   * impossible to delete the file afterwards, since read-only files can't be
+   * deleted.
+   *
+   * In other words, the current behaviour is inconsistent and we should
+   * think of something that makes slightly more sense.
+   *
+   * And uv_fs_chmod should probably just fail on Windows or be a total no-op.
+   * There's nothing sensible it can do anyway.
+   */
+  statbuf->st_mode = 0;
+
+  if (file_info.BasicInformation.FileAttributes & FILE_ATTRIBUTE_REPARSE_POINT) {
+    /*
+     * It is possible for a file to have FILE_ATTRIBUTE_REPARSE_POINT but not have
+     * any link data. In that case DeviceIoControl() in fs__readlink_handle() sets
+     * the last error to ERROR_NOT_A_REPARSE_POINT. Then the stat result mode
+     * calculated below will indicate a normal directory or file, as if
+     * FILE_ATTRIBUTE_REPARSE_POINT was not present.
+     */
+    if (fs__readlink_handle(handle, NULL, &statbuf->st_size) == 0) {
+      statbuf->st_mode |= S_IFLNK;
+    } else if (GetLastError() != ERROR_NOT_A_REPARSE_POINT) {
+      return -1;
+    }
+  }
+
+  if (statbuf->st_mode == 0) {
+    if (file_info.BasicInformation.FileAttributes & FILE_ATTRIBUTE_DIRECTORY) {
+      statbuf->st_mode |= _S_IFDIR;
+      statbuf->st_size = 0;
+    } else {
+      statbuf->st_mode |= _S_IFREG;
+      statbuf->st_size = file_info.StandardInformation.EndOfFile.QuadPart;
+    }
+  }
+
+  if (file_info.BasicInformation.FileAttributes & FILE_ATTRIBUTE_READONLY)
+    statbuf->st_mode |= _S_IREAD | (_S_IREAD >> 3) | (_S_IREAD >> 6);
+  else
+    statbuf->st_mode |= (_S_IREAD | _S_IWRITE) | ((_S_IREAD | _S_IWRITE) >> 3) |
+                        ((_S_IREAD | _S_IWRITE) >> 6);
+
+  FILETIME_TO_TIMESPEC(statbuf->st_atim, file_info.BasicInformation.LastAccessTime);
+  FILETIME_TO_TIMESPEC(statbuf->st_ctim, file_info.BasicInformation.ChangeTime);
+  FILETIME_TO_TIMESPEC(statbuf->st_mtim, file_info.BasicInformation.LastWriteTime);
+  FILETIME_TO_TIMESPEC(statbuf->st_birthtim, file_info.BasicInformation.CreationTime);
+
+  statbuf->st_ino = file_info.InternalInformation.IndexNumber.QuadPart;
+
+  /* st_blocks contains the on-disk allocation size in 512-byte units. */
+  statbuf->st_blocks =
+      file_info.StandardInformation.AllocationSize.QuadPart >> 9ULL;
+
+  statbuf->st_nlink = file_info.StandardInformation.NumberOfLinks;
+
+  /* The st_blksize is supposed to be the 'optimal' number of bytes for reading
+   * and writing to the disk. That is, for any definition of 'optimal' - it's
+   * supposed to at least avoid read-update-write behavior when writing to the
+   * disk.
+   *
+   * However nobody knows this and even fewer people actually use this value,
+   * and in order to fill it out we'd have to make another syscall to query the
+   * volume for FILE_FS_SECTOR_SIZE_INFORMATION.
+   *
+   * Therefore we'll just report a sensible value that's quite commonly okay
+   * on modern hardware.
+   */
+  statbuf->st_blksize = 2048;
+
+  /* Todo: set st_flags to something meaningful. Also provide a wrapper for
+   * chattr(2).
+   */
+  statbuf->st_flags = 0;
+
+  /* Windows has nothing sensible to say about these values, so they'll just
+   * remain empty.
+   */
+  statbuf->st_gid = 0;
+  statbuf->st_uid = 0;
+  statbuf->st_rdev = 0;
+  statbuf->st_gen = 0;
+
+  return 0;
+}
+
+
+INLINE static void fs__stat_prepare_path(WCHAR* pathw) {
+  size_t len = wcslen(pathw);
+
+  /* TODO: ignore namespaced paths. */
+  if (len > 1 && pathw[len - 2] != L':' &&
+      (pathw[len - 1] == L'\\' || pathw[len - 1] == L'/')) {
+    pathw[len - 1] = '\0';
+  }
+}
+
+
+INLINE static void fs__stat_impl(uv_fs_t* req, int do_lstat) {
+  HANDLE handle;
+  DWORD flags;
+
+  flags = FILE_FLAG_BACKUP_SEMANTICS;
+  if (do_lstat) {
+    flags |= FILE_FLAG_OPEN_REPARSE_POINT;
+  }
+
+  handle = CreateFileW(req->file.pathw,
+                       FILE_READ_ATTRIBUTES,
+                       FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
+                       NULL,
+                       OPEN_EXISTING,
+                       flags,
+                       NULL);
+  if (handle == INVALID_HANDLE_VALUE) {
+    SET_REQ_WIN32_ERROR(req, GetLastError());
+    return;
+  }
+
+  if (fs__stat_handle(handle, &req->statbuf) != 0) {
+    DWORD error = GetLastError();
+    if (do_lstat && error == ERROR_SYMLINK_NOT_SUPPORTED) {
+      /* We opened a reparse point but it was not a symlink. Try again. */
+      fs__stat_impl(req, 0);
+
+    } else {
+      /* Stat failed. */
+      SET_REQ_WIN32_ERROR(req, GetLastError());
+    }
+
+    CloseHandle(handle);
+    return;
+  }
+
+  req->ptr = &req->statbuf;
+  req->result = 0;
+  CloseHandle(handle);
+}
+
+
+static void fs__stat(uv_fs_t* req) {
+  fs__stat_prepare_path(req->file.pathw);
+  fs__stat_impl(req, 0);
+}
+
+
+static void fs__lstat(uv_fs_t* req) {
+  fs__stat_prepare_path(req->file.pathw);
+  fs__stat_impl(req, 1);
+}
+
+
+static void fs__fstat(uv_fs_t* req) {
+  int fd = req->file.fd;
+  HANDLE handle;
+
+  VERIFY_FD(fd, req);
+
+  handle = uv__get_osfhandle(fd);
+
+  if (handle == INVALID_HANDLE_VALUE) {
+    SET_REQ_WIN32_ERROR(req, ERROR_INVALID_HANDLE);
+    return;
+  }
+
+  if (fs__stat_handle(handle, &req->statbuf) != 0) {
+    SET_REQ_WIN32_ERROR(req, GetLastError());
+    return;
+  }
+
+  req->ptr = &req->statbuf;
+  req->result = 0;
+}
+
+
+static void fs__rename(uv_fs_t* req) {
+  if (!MoveFileExW(req->file.pathw, req->fs.info.new_pathw, MOVEFILE_REPLACE_EXISTING)) {
+    SET_REQ_WIN32_ERROR(req, GetLastError());
+    return;
+  }
+
+  SET_REQ_RESULT(req, 0);
+}
+
+
+INLINE static void fs__sync_impl(uv_fs_t* req) {
+  int fd = req->file.fd;
+  int result;
+
+  VERIFY_FD(fd, req);
+
+  result = FlushFileBuffers(uv__get_osfhandle(fd)) ? 0 : -1;
+  if (result == -1) {
+    SET_REQ_WIN32_ERROR(req, GetLastError());
+  } else {
+    SET_REQ_RESULT(req, result);
+  }
+}
+
+
+static void fs__fsync(uv_fs_t* req) {
+  fs__sync_impl(req);
+}
+
+
+static void fs__fdatasync(uv_fs_t* req) {
+  fs__sync_impl(req);
+}
+
+
+static void fs__ftruncate(uv_fs_t* req) {
+  int fd = req->file.fd;
+  HANDLE handle;
+  NTSTATUS status;
+  IO_STATUS_BLOCK io_status;
+  FILE_END_OF_FILE_INFORMATION eof_info;
+
+  VERIFY_FD(fd, req);
+
+  handle = uv__get_osfhandle(fd);
+
+  eof_info.EndOfFile.QuadPart = req->fs.info.offset;
+
+  status = pNtSetInformationFile(handle,
+                                 &io_status,
+                                 &eof_info,
+                                 sizeof eof_info,
+                                 FileEndOfFileInformation);
+
+  if (NT_SUCCESS(status)) {
+    SET_REQ_RESULT(req, 0);
+  } else {
+    SET_REQ_WIN32_ERROR(req, pRtlNtStatusToDosError(status));
+  }
+}
+
+
+static void fs__sendfile(uv_fs_t* req) {
+  int fd_in = req->file.fd, fd_out = req->fs.info.fd_out;
+  size_t length = req->fs.info.bufsml[0].len;
+  int64_t offset = req->fs.info.offset;
+  const size_t max_buf_size = 65536;
+  size_t buf_size = length < max_buf_size ? length : max_buf_size;
+  int n, result = 0;
+  int64_t result_offset = 0;
+  char* buf = (char*) uv__malloc(buf_size);
+  if (!buf) {
+    uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+  }
+
+  if (offset != -1) {
+    result_offset = _lseeki64(fd_in, offset, SEEK_SET);
+  }
+
+  if (result_offset == -1) {
+    result = -1;
+  } else {
+    while (length > 0) {
+      n = _read(fd_in, buf, length < buf_size ? length : buf_size);
+      if (n == 0) {
+        break;
+      } else if (n == -1) {
+        result = -1;
+        break;
+      }
+
+      length -= n;
+
+      n = _write(fd_out, buf, n);
+      if (n == -1) {
+        result = -1;
+        break;
+      }
+
+      result += n;
+    }
+  }
+
+  uv__free(buf);
+
+  SET_REQ_RESULT(req, result);
+}
+
+
+static void fs__access(uv_fs_t* req) {
+  DWORD attr = GetFileAttributesW(req->file.pathw);
+
+  if (attr == INVALID_FILE_ATTRIBUTES) {
+    SET_REQ_WIN32_ERROR(req, GetLastError());
+    return;
+  }
+
+  /*
+   * Access is possible if
+   * - write access wasn't requested,
+   * - or the file isn't read-only,
+   * - or it's a directory.
+   * (Directories cannot be read-only on Windows.)
+   */
+  if (!(req->flags & W_OK) ||
+      !(attr & FILE_ATTRIBUTE_READONLY) ||
+      (attr & FILE_ATTRIBUTE_DIRECTORY)) {
+    SET_REQ_RESULT(req, 0);
+  } else {
+    SET_REQ_WIN32_ERROR(req, UV_EPERM);
+  }
+
+}
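
A short sketch of the write-access rule spelled out in the comment above, via the public uv_fs_access() wrapper in synchronous mode; the path is an illustrative assumption:

    uv_fs_t req;
    /* 0 when write access is allowed; a read-only regular file yields a
     * negative UV error (directories are never treated as read-only here). */
    int r = uv_fs_access(uv_default_loop(), &req, "C:\\some\\file.txt", W_OK, NULL);
    uv_fs_req_cleanup(&req);
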
+
+
+static void fs__chmod(uv_fs_t* req) {
+  int result = _wchmod(req->file.pathw, req->fs.info.mode);
+  SET_REQ_RESULT(req, result);
+}
+
+
+static void fs__fchmod(uv_fs_t* req) {
+  int fd = req->file.fd;
+  HANDLE handle;
+  NTSTATUS nt_status;
+  IO_STATUS_BLOCK io_status;
+  FILE_BASIC_INFORMATION file_info;
+
+  VERIFY_FD(fd, req);
+
+  handle = uv__get_osfhandle(fd);
+
+  nt_status = pNtQueryInformationFile(handle,
+                                      &io_status,
+                                      &file_info,
+                                      sizeof file_info,
+                                      FileBasicInformation);
+
+  if (!NT_SUCCESS(nt_status)) {
+    SET_REQ_WIN32_ERROR(req, pRtlNtStatusToDosError(nt_status));
+    return;
+  }
+
+  if (req->fs.info.mode & _S_IWRITE) {
+    file_info.FileAttributes &= ~FILE_ATTRIBUTE_READONLY;
+  } else {
+    file_info.FileAttributes |= FILE_ATTRIBUTE_READONLY;
+  }
+
+  nt_status = pNtSetInformationFile(handle,
+                                    &io_status,
+                                    &file_info,
+                                    sizeof file_info,
+                                    FileBasicInformation);
+
+  if (!NT_SUCCESS(nt_status)) {
+    SET_REQ_WIN32_ERROR(req, pRtlNtStatusToDosError(nt_status));
+    return;
+  }
+
+  SET_REQ_SUCCESS(req);
+}
+
+
+INLINE static int fs__utime_handle(HANDLE handle, double atime, double mtime) {
+  FILETIME filetime_a, filetime_m;
+
+  TIME_T_TO_FILETIME(atime, &filetime_a);
+  TIME_T_TO_FILETIME(mtime, &filetime_m);
+
+  if (!SetFileTime(handle, NULL, &filetime_a, &filetime_m)) {
+    return -1;
+  }
+
+  return 0;
+}
+
+
+static void fs__utime(uv_fs_t* req) {
+  HANDLE handle;
+
+  handle = CreateFileW(req->file.pathw,
+                       FILE_WRITE_ATTRIBUTES,
+                       FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
+                       NULL,
+                       OPEN_EXISTING,
+                       FILE_FLAG_BACKUP_SEMANTICS,
+                       NULL);
+
+  if (handle == INVALID_HANDLE_VALUE) {
+    SET_REQ_WIN32_ERROR(req, GetLastError());
+    return;
+  }
+
+  if (fs__utime_handle(handle, req->fs.time.atime, req->fs.time.mtime) != 0) {
+    SET_REQ_WIN32_ERROR(req, GetLastError());
+    CloseHandle(handle);
+    return;
+  }
+
+  CloseHandle(handle);
+
+  req->result = 0;
+}
+
+
+static void fs__futime(uv_fs_t* req) {
+  int fd = req->file.fd;
+  HANDLE handle;
+  VERIFY_FD(fd, req);
+
+  handle = uv__get_osfhandle(fd);
+
+  if (handle == INVALID_HANDLE_VALUE) {
+    SET_REQ_WIN32_ERROR(req, ERROR_INVALID_HANDLE);
+    return;
+  }
+
+  if (fs__utime_handle(handle, req->fs.time.atime, req->fs.time.mtime) != 0) {
+    SET_REQ_WIN32_ERROR(req, GetLastError());
+    return;
+  }
+
+  req->result = 0;
+}
+
+
+static void fs__link(uv_fs_t* req) {
+  DWORD r = CreateHardLinkW(req->fs.info.new_pathw, req->file.pathw, NULL);
+  if (r == 0) {
+    SET_REQ_WIN32_ERROR(req, GetLastError());
+  } else {
+    req->result = 0;
+  }
+}
+
+
+static void fs__create_junction(uv_fs_t* req, const WCHAR* path,
+    const WCHAR* new_path) {
+  HANDLE handle = INVALID_HANDLE_VALUE;
+  REPARSE_DATA_BUFFER *buffer = NULL;
+  int created = 0;
+  int target_len;
+  int is_absolute, is_long_path;
+  int needed_buf_size, used_buf_size, used_data_size, path_buf_len;
+  int start, len, i;
+  int add_slash;
+  DWORD bytes;
+  WCHAR* path_buf;
+
+  target_len = wcslen(path);
+  is_long_path = wcsncmp(path, LONG_PATH_PREFIX, LONG_PATH_PREFIX_LEN) == 0;
+
+  if (is_long_path) {
+    is_absolute = 1;
+  } else {
+    is_absolute = target_len >= 3 && IS_LETTER(path[0]) &&
+      path[1] == L':' && IS_SLASH(path[2]);
+  }
+
+  if (!is_absolute) {
+    /* Not supporting relative paths */
+    SET_REQ_UV_ERROR(req, UV_EINVAL, ERROR_NOT_SUPPORTED);
+    return;
+  }
+
+  /* Do a pessimistic calculation of the required buffer size */
+  needed_buf_size =
+      FIELD_OFFSET(REPARSE_DATA_BUFFER, MountPointReparseBuffer.PathBuffer) +
+      JUNCTION_PREFIX_LEN * sizeof(WCHAR) +
+      2 * (target_len + 2) * sizeof(WCHAR);
+
+  /* Allocate the buffer */
+  buffer = (REPARSE_DATA_BUFFER*)uv__malloc(needed_buf_size);
+  if (!buffer) {
+    uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+  }
+
+  /* Grab a pointer to the part of the buffer where filenames go */
+  path_buf = (WCHAR*)&(buffer->MountPointReparseBuffer.PathBuffer);
+  path_buf_len = 0;
+
+  /* Copy the substitute (internal) target path */
+  start = path_buf_len;
+
+  wcsncpy((WCHAR*)&path_buf[path_buf_len], JUNCTION_PREFIX,
+    JUNCTION_PREFIX_LEN);
+  path_buf_len += JUNCTION_PREFIX_LEN;
+
+  add_slash = 0;
+  for (i = is_long_path ? LONG_PATH_PREFIX_LEN : 0; path[i] != L'\0'; i++) {
+    if (IS_SLASH(path[i])) {
+      add_slash = 1;
+      continue;
+    }
+
+    if (add_slash) {
+      path_buf[path_buf_len++] = L'\\';
+      add_slash = 0;
+    }
+
+    path_buf[path_buf_len++] = path[i];
+  }
+  path_buf[path_buf_len++] = L'\\';
+  len = path_buf_len - start;
+
+  /* Set the info about the substitute name */
+  buffer->MountPointReparseBuffer.SubstituteNameOffset = start * sizeof(WCHAR);
+  buffer->MountPointReparseBuffer.SubstituteNameLength = len * sizeof(WCHAR);
+
+  /* Insert null terminator */
+  path_buf[path_buf_len++] = L'\0';
+
+  /* Copy the print name of the target path */
+  start = path_buf_len;
+  add_slash = 0;
+  for (i = is_long_path ? LONG_PATH_PREFIX_LEN : 0; path[i] != L'\0'; i++) {
+    if (IS_SLASH(path[i])) {
+      add_slash = 1;
+      continue;
+    }
+
+    if (add_slash) {
+      path_buf[path_buf_len++] = L'\\';
+      add_slash = 0;
+    }
+
+    path_buf[path_buf_len++] = path[i];
+  }
+  len = path_buf_len - start;
+  if (len == 2) {
+    path_buf[path_buf_len++] = L'\\';
+    len++;
+  }
+
+  /* Set the info about the print name */
+  buffer->MountPointReparseBuffer.PrintNameOffset = start * sizeof(WCHAR);
+  buffer->MountPointReparseBuffer.PrintNameLength = len * sizeof(WCHAR);
+
+  /* Insert another null terminator */
+  path_buf[path_buf_len++] = L'\0';
+
+  /* Calculate how much buffer space was actually used */
+  used_buf_size = FIELD_OFFSET(REPARSE_DATA_BUFFER, MountPointReparseBuffer.PathBuffer) +
+    path_buf_len * sizeof(WCHAR);
+  used_data_size = used_buf_size -
+    FIELD_OFFSET(REPARSE_DATA_BUFFER, MountPointReparseBuffer);
+
+  /* Put general info in the data buffer */
+  buffer->ReparseTag = IO_REPARSE_TAG_MOUNT_POINT;
+  buffer->ReparseDataLength = used_data_size;
+  buffer->Reserved = 0;
+
+  /* Create a new directory */
+  if (!CreateDirectoryW(new_path, NULL)) {
+    SET_REQ_WIN32_ERROR(req, GetLastError());
+    goto error;
+  }
+  created = 1;
+
+  /* Open the directory */
+  handle = CreateFileW(new_path,
+                       GENERIC_WRITE,
+                       0,
+                       NULL,
+                       OPEN_EXISTING,
+                       FILE_FLAG_BACKUP_SEMANTICS |
+                         FILE_FLAG_OPEN_REPARSE_POINT,
+                       NULL);
+  if (handle == INVALID_HANDLE_VALUE) {
+    SET_REQ_WIN32_ERROR(req, GetLastError());
+    goto error;
+  }
+
+  /* Create the actual reparse point */
+  if (!DeviceIoControl(handle,
+                       FSCTL_SET_REPARSE_POINT,
+                       buffer,
+                       used_buf_size,
+                       NULL,
+                       0,
+                       &bytes,
+                       NULL)) {
+    SET_REQ_WIN32_ERROR(req, GetLastError());
+    goto error;
+  }
+
+  /* Clean up */
+  CloseHandle(handle);
+  uv__free(buffer);
+
+  SET_REQ_RESULT(req, 0);
+  return;
+
+error:
+  uv__free(buffer);
+
+  if (handle != INVALID_HANDLE_VALUE) {
+    CloseHandle(handle);
+  }
+
+  if (created) {
+    RemoveDirectoryW(new_path);
+  }
+}
+
+
+static void fs__symlink(uv_fs_t* req) {
+  WCHAR* pathw = req->file.pathw;
+  WCHAR* new_pathw = req->fs.info.new_pathw;
+  int flags = req->fs.info.file_flags;
+  int result;
+
+
+  if (flags & UV_FS_SYMLINK_JUNCTION) {
+    fs__create_junction(req, pathw, new_pathw);
+  } else if (pCreateSymbolicLinkW) {
+    result = pCreateSymbolicLinkW(new_pathw,
+                                  pathw,
+                                  flags & UV_FS_SYMLINK_DIR ? SYMBOLIC_LINK_FLAG_DIRECTORY : 0) ? 0 : -1;
+    if (result == -1) {
+      SET_REQ_WIN32_ERROR(req, GetLastError());
+    } else {
+      SET_REQ_RESULT(req, result);
+    }
+  } else {
+    SET_REQ_UV_ERROR(req, UV_ENOSYS, ERROR_NOT_SUPPORTED);
+  }
+}
+
+
+static void fs__readlink(uv_fs_t* req) {
+  HANDLE handle;
+
+  handle = CreateFileW(req->file.pathw,
+                       0,
+                       0,
+                       NULL,
+                       OPEN_EXISTING,
+                       FILE_FLAG_OPEN_REPARSE_POINT | FILE_FLAG_BACKUP_SEMANTICS,
+                       NULL);
+
+  if (handle == INVALID_HANDLE_VALUE) {
+    SET_REQ_WIN32_ERROR(req, GetLastError());
+    return;
+  }
+
+  if (fs__readlink_handle(handle, (char**) &req->ptr, NULL) != 0) {
+    SET_REQ_WIN32_ERROR(req, GetLastError());
+    CloseHandle(handle);
+    return;
+  }
+
+  req->flags |= UV_FS_FREE_PTR;
+  SET_REQ_RESULT(req, 0);
+
+  CloseHandle(handle);
+}
+
+
+static int fs__realpath_handle(HANDLE handle, char** realpath_ptr) {
+  int r;
+  DWORD w_realpath_len;
+  WCHAR* w_realpath_ptr = NULL;
+  WCHAR* w_realpath_buf;
+
+  w_realpath_len = pGetFinalPathNameByHandleW(handle, NULL, 0, VOLUME_NAME_DOS);
+  if (w_realpath_len == 0) {
+    return -1;
+  }
+
+  w_realpath_buf = uv__malloc((w_realpath_len + 1) * sizeof(WCHAR));
+  if (w_realpath_buf == NULL) {
+    SetLastError(ERROR_OUTOFMEMORY);
+    return -1;
+  }
+  w_realpath_ptr = w_realpath_buf;
+
+  if (pGetFinalPathNameByHandleW(handle,
+                                w_realpath_ptr,
+                                w_realpath_len,
+                                VOLUME_NAME_DOS) == 0) {
+    uv__free(w_realpath_buf);
+    SetLastError(ERROR_INVALID_HANDLE);
+    return -1;
+  }
+
+  /* convert UNC path to long path */
+  if (wcsncmp(w_realpath_ptr,
+              UNC_PATH_PREFIX,
+              UNC_PATH_PREFIX_LEN) == 0) {
+    w_realpath_ptr += 6;
+    *w_realpath_ptr = L'\\';
+    w_realpath_len -= 6;
+  } else if (wcsncmp(w_realpath_ptr,
+                      LONG_PATH_PREFIX,
+                      LONG_PATH_PREFIX_LEN) == 0) {
+    w_realpath_ptr += 4;
+    w_realpath_len -= 4;
+  } else {
+    uv__free(w_realpath_buf);
+    SetLastError(ERROR_INVALID_HANDLE);
+    return -1;
+  }
+
+  r = fs__wide_to_utf8(w_realpath_ptr, w_realpath_len, realpath_ptr, NULL);
+  uv__free(w_realpath_buf);
+  return r;
+}
+
+static void fs__realpath(uv_fs_t* req) {
+  HANDLE handle;
+
+  if (!pGetFinalPathNameByHandleW) {
+    SET_REQ_UV_ERROR(req, UV_ENOSYS, ERROR_NOT_SUPPORTED);
+    return;
+  }
+
+  handle = CreateFileW(req->file.pathw,
+                       0,
+                       0,
+                       NULL,
+                       OPEN_EXISTING,
+                       FILE_ATTRIBUTE_NORMAL | FILE_FLAG_BACKUP_SEMANTICS,
+                       NULL);
+  if (handle == INVALID_HANDLE_VALUE) {
+    SET_REQ_WIN32_ERROR(req, GetLastError());
+    return;
+  }
+
+  if (fs__realpath_handle(handle, (char**) &req->ptr) == -1) {
+    CloseHandle(handle);
+    SET_REQ_WIN32_ERROR(req, GetLastError());
+    return;
+  }
+
+  CloseHandle(handle);
+  req->flags |= UV_FS_FREE_PTR;
+  SET_REQ_RESULT(req, 0);
+}
+
+
+static void fs__chown(uv_fs_t* req) {
+  req->result = 0;
+}
+
+
+static void fs__fchown(uv_fs_t* req) {
+  req->result = 0;
+}
+
+
+static void uv__fs_work(struct uv__work* w) {
+  uv_fs_t* req;
+
+  req = container_of(w, uv_fs_t, work_req);
+  assert(req->type == UV_FS);
+
+#define XX(uc, lc)  case UV_FS_##uc: fs__##lc(req); break;
+  switch (req->fs_type) {
+    XX(OPEN, open)
+    XX(CLOSE, close)
+    XX(READ, read)
+    XX(WRITE, write)
+    XX(SENDFILE, sendfile)
+    XX(STAT, stat)
+    XX(LSTAT, lstat)
+    XX(FSTAT, fstat)
+    XX(FTRUNCATE, ftruncate)
+    XX(UTIME, utime)
+    XX(FUTIME, futime)
+    XX(ACCESS, access)
+    XX(CHMOD, chmod)
+    XX(FCHMOD, fchmod)
+    XX(FSYNC, fsync)
+    XX(FDATASYNC, fdatasync)
+    XX(UNLINK, unlink)
+    XX(RMDIR, rmdir)
+    XX(MKDIR, mkdir)
+    XX(MKDTEMP, mkdtemp)
+    XX(RENAME, rename)
+    XX(SCANDIR, scandir)
+    XX(LINK, link)
+    XX(SYMLINK, symlink)
+    XX(READLINK, readlink)
+    XX(REALPATH, realpath)
+    XX(CHOWN, chown)
+    XX(FCHOWN, fchown)
+    default:
+      assert(!"bad uv_fs_type");
+  }
+}
+
+
+static void uv__fs_done(struct uv__work* w, int status) {
+  uv_fs_t* req;
+
+  req = container_of(w, uv_fs_t, work_req);
+  uv__req_unregister(req->loop, req);
+
+  if (status == UV_ECANCELED) {
+    assert(req->result == 0);
+    req->result = UV_ECANCELED;
+  }
+
+  req->cb(req);
+}
+
+
+void uv_fs_req_cleanup(uv_fs_t* req) {
+  if (req->flags & UV_FS_CLEANEDUP)
+    return;
+
+  if (req->flags & UV_FS_FREE_PATHS)
+    uv__free(req->file.pathw);
+
+  if (req->flags & UV_FS_FREE_PTR) {
+    if (req->fs_type == UV_FS_SCANDIR && req->ptr != NULL)
+      uv__fs_scandir_cleanup(req);
+    else
+      uv__free(req->ptr);
+  }
+
+  req->path = NULL;
+  req->file.pathw = NULL;
+  req->fs.info.new_pathw = NULL;
+  req->ptr = NULL;
+
+  req->flags |= UV_FS_CLEANEDUP;
+}
+
+
+int uv_fs_open(uv_loop_t* loop, uv_fs_t* req, const char* path, int flags,
+    int mode, uv_fs_cb cb) {
+  int err;
+
+  uv_fs_req_init(loop, req, UV_FS_OPEN, cb);
+
+  err = fs__capture_path(req, path, NULL, cb != NULL);
+  if (err) {
+    return uv_translate_sys_error(err);
+  }
+
+  req->fs.info.file_flags = flags;
+  req->fs.info.mode = mode;
+
+  if (cb) {
+    QUEUE_FS_TP_JOB(loop, req);
+    return 0;
+  } else {
+    fs__open(req);
+    return req->result;
+  }
+}
+
+
+int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file fd, uv_fs_cb cb) {
+  uv_fs_req_init(loop, req, UV_FS_CLOSE, cb);
+  req->file.fd = fd;
+
+  if (cb) {
+    QUEUE_FS_TP_JOB(loop, req);
+    return 0;
+  } else {
+    fs__close(req);
+    return req->result;
+  }
+}
+
+
+int uv_fs_read(uv_loop_t* loop,
+               uv_fs_t* req,
+               uv_file fd,
+               const uv_buf_t bufs[],
+               unsigned int nbufs,
+               int64_t offset,
+               uv_fs_cb cb) {
+  if (bufs == NULL || nbufs == 0)
+    return UV_EINVAL;
+
+  uv_fs_req_init(loop, req, UV_FS_READ, cb);
+
+  req->file.fd = fd;
+
+  req->fs.info.nbufs = nbufs;
+  req->fs.info.bufs = req->fs.info.bufsml;
+  if (nbufs > ARRAY_SIZE(req->fs.info.bufsml))
+    req->fs.info.bufs = uv__malloc(nbufs * sizeof(*bufs));
+
+  if (req->fs.info.bufs == NULL)
+    return UV_ENOMEM;
+
+  memcpy(req->fs.info.bufs, bufs, nbufs * sizeof(*bufs));
+
+  req->fs.info.offset = offset;
+
+  if (cb) {
+    QUEUE_FS_TP_JOB(loop, req);
+    return 0;
+  } else {
+    fs__read(req);
+    return req->result;
+  }
+}
+
+
+int uv_fs_write(uv_loop_t* loop,
+                uv_fs_t* req,
+                uv_file fd,
+                const uv_buf_t bufs[],
+                unsigned int nbufs,
+                int64_t offset,
+                uv_fs_cb cb) {
+  if (bufs == NULL || nbufs == 0)
+    return UV_EINVAL;
+
+  uv_fs_req_init(loop, req, UV_FS_WRITE, cb);
+
+  req->file.fd = fd;
+
+  req->fs.info.nbufs = nbufs;
+  req->fs.info.bufs = req->fs.info.bufsml;
+  if (nbufs > ARRAY_SIZE(req->fs.info.bufsml))
+    req->fs.info.bufs = uv__malloc(nbufs * sizeof(*bufs));
+
+  if (req->fs.info.bufs == NULL)
+    return UV_ENOMEM;
+
+  memcpy(req->fs.info.bufs, bufs, nbufs * sizeof(*bufs));
+
+  req->fs.info.offset = offset;
+
+  if (cb) {
+    QUEUE_FS_TP_JOB(loop, req);
+    return 0;
+  } else {
+    fs__write(req);
+    return req->result;
+  }
+}
+
+
+int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path,
+    uv_fs_cb cb) {
+  int err;
+
+  uv_fs_req_init(loop, req, UV_FS_UNLINK, cb);
+
+  err = fs__capture_path(req, path, NULL, cb != NULL);
+  if (err) {
+    return uv_translate_sys_error(err);
+  }
+
+  if (cb) {
+    QUEUE_FS_TP_JOB(loop, req);
+    return 0;
+  } else {
+    fs__unlink(req);
+    return req->result;
+  }
+}
+
+
+int uv_fs_mkdir(uv_loop_t* loop, uv_fs_t* req, const char* path, int mode,
+    uv_fs_cb cb) {
+  int err;
+
+  uv_fs_req_init(loop, req, UV_FS_MKDIR, cb);
+
+  err = fs__capture_path(req, path, NULL, cb != NULL);
+  if (err) {
+    return uv_translate_sys_error(err);
+  }
+
+  req->fs.info.mode = mode;
+
+  if (cb) {
+    QUEUE_FS_TP_JOB(loop, req);
+    return 0;
+  } else {
+    fs__mkdir(req);
+    return req->result;
+  }
+}
+
+
+int uv_fs_mkdtemp(uv_loop_t* loop, uv_fs_t* req, const char* tpl,
+    uv_fs_cb cb) {
+  int err;
+
+  uv_fs_req_init(loop, req, UV_FS_MKDTEMP, cb);
+
+  err = fs__capture_path(req, tpl, NULL, TRUE);
+  if (err)
+    return uv_translate_sys_error(err);
+
+  if (cb) {
+    QUEUE_FS_TP_JOB(loop, req);
+    return 0;
+  } else {
+    fs__mkdtemp(req);
+    return req->result;
+  }
+}
+
+
+int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
+  int err;
+
+  uv_fs_req_init(loop, req, UV_FS_RMDIR, cb);
+
+  err = fs__capture_path(req, path, NULL, cb != NULL);
+  if (err) {
+    return uv_translate_sys_error(err);
+  }
+
+  if (cb) {
+    QUEUE_FS_TP_JOB(loop, req);
+    return 0;
+  } else {
+    fs__rmdir(req);
+    return req->result;
+  }
+}
+
+
+int uv_fs_scandir(uv_loop_t* loop, uv_fs_t* req, const char* path, int flags,
+    uv_fs_cb cb) {
+  int err;
+
+  uv_fs_req_init(loop, req, UV_FS_SCANDIR, cb);
+
+  err = fs__capture_path(req, path, NULL, cb != NULL);
+  if (err) {
+    return uv_translate_sys_error(err);
+  }
+
+  req->fs.info.file_flags = flags;
+
+  if (cb) {
+    QUEUE_FS_TP_JOB(loop, req);
+    return 0;
+  } else {
+    fs__scandir(req);
+    return req->result;
+  }
+}
+
+
+int uv_fs_link(uv_loop_t* loop, uv_fs_t* req, const char* path,
+    const char* new_path, uv_fs_cb cb) {
+  int err;
+
+  uv_fs_req_init(loop, req, UV_FS_LINK, cb);
+
+  err = fs__capture_path(req, path, new_path, cb != NULL);
+  if (err) {
+    return uv_translate_sys_error(err);
+  }
+
+  if (cb) {
+    QUEUE_FS_TP_JOB(loop, req);
+    return 0;
+  } else {
+    fs__link(req);
+    return req->result;
+  }
+}
+
+
+int uv_fs_symlink(uv_loop_t* loop, uv_fs_t* req, const char* path,
+    const char* new_path, int flags, uv_fs_cb cb) {
+  int err;
+
+  uv_fs_req_init(loop, req, UV_FS_SYMLINK, cb);
+
+  err = fs__capture_path(req, path, new_path, cb != NULL);
+  if (err) {
+    return uv_translate_sys_error(err);
+  }
+
+  req->fs.info.file_flags = flags;
+
+  if (cb) {
+    QUEUE_FS_TP_JOB(loop, req);
+    return 0;
+  } else {
+    fs__symlink(req);
+    return req->result;
+  }
+}
+
+
+int uv_fs_readlink(uv_loop_t* loop, uv_fs_t* req, const char* path,
+    uv_fs_cb cb) {
+  int err;
+
+  uv_fs_req_init(loop, req, UV_FS_READLINK, cb);
+
+  err = fs__capture_path(req, path, NULL, cb != NULL);
+  if (err) {
+    return uv_translate_sys_error(err);
+  }
+
+  if (cb) {
+    QUEUE_FS_TP_JOB(loop, req);
+    return 0;
+  } else {
+    fs__readlink(req);
+    return req->result;
+  }
+}
+
+
+int uv_fs_realpath(uv_loop_t* loop, uv_fs_t* req, const char* path,
+    uv_fs_cb cb) {
+  int err;
+
+  if (!req || !path) {
+    return UV_EINVAL;
+  }
+
+  uv_fs_req_init(loop, req, UV_FS_REALPATH, cb);
+
+  err = fs__capture_path(req, path, NULL, cb != NULL);
+  if (err) {
+    return uv_translate_sys_error(err);
+  }
+
+  if (cb) {
+    QUEUE_FS_TP_JOB(loop, req);
+    return 0;
+  } else {
+    fs__realpath(req);
+    return req->result;
+  }
+}
+
+
+int uv_fs_chown(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_uid_t uid,
+    uv_gid_t gid, uv_fs_cb cb) {
+  int err;
+
+  uv_fs_req_init(loop, req, UV_FS_CHOWN, cb);
+
+  err = fs__capture_path(req, path, NULL, cb != NULL);
+  if (err) {
+    return uv_translate_sys_error(err);
+  }
+
+  if (cb) {
+    QUEUE_FS_TP_JOB(loop, req);
+    return 0;
+  } else {
+    fs__chown(req);
+    return req->result;
+  }
+}
+
+
+int uv_fs_fchown(uv_loop_t* loop, uv_fs_t* req, uv_file fd, uv_uid_t uid,
+    uv_gid_t gid, uv_fs_cb cb) {
+  uv_fs_req_init(loop, req, UV_FS_FCHOWN, cb);
+
+  if (cb) {
+    QUEUE_FS_TP_JOB(loop, req);
+    return 0;
+  } else {
+    fs__fchown(req);
+    return req->result;
+  }
+}
+
+
+int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
+  int err;
+
+  uv_fs_req_init(loop, req, UV_FS_STAT, cb);
+
+  err = fs__capture_path(req, path, NULL, cb != NULL);
+  if (err) {
+    return uv_translate_sys_error(err);
+  }
+
+  if (cb) {
+    QUEUE_FS_TP_JOB(loop, req);
+    return 0;
+  } else {
+    fs__stat(req);
+    return req->result;
+  }
+}
+
+
+int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
+  int err;
+
+  uv_fs_req_init(loop, req, UV_FS_LSTAT, cb);
+
+  err = fs__capture_path(req, path, NULL, cb != NULL);
+  if (err) {
+    return uv_translate_sys_error(err);
+  }
+
+  if (cb) {
+    QUEUE_FS_TP_JOB(loop, req);
+    return 0;
+  } else {
+    fs__lstat(req);
+    return req->result;
+  }
+}
+
+
+int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file fd, uv_fs_cb cb) {
+  uv_fs_req_init(loop, req, UV_FS_FSTAT, cb);
+  req->file.fd = fd;
+
+  if (cb) {
+    QUEUE_FS_TP_JOB(loop, req);
+    return 0;
+  } else {
+    fs__fstat(req);
+    return req->result;
+  }
+}
+
+
+int uv_fs_rename(uv_loop_t* loop, uv_fs_t* req, const char* path,
+    const char* new_path, uv_fs_cb cb) {
+  int err;
+
+  uv_fs_req_init(loop, req, UV_FS_RENAME, cb);
+
+  err = fs__capture_path(req, path, new_path, cb != NULL);
+  if (err) {
+    return uv_translate_sys_error(err);
+  }
+
+  if (cb) {
+    QUEUE_FS_TP_JOB(loop, req);
+    return 0;
+  } else {
+    fs__rename(req);
+    return req->result;
+  }
+}
+
+
+int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file fd, uv_fs_cb cb) {
+  uv_fs_req_init(loop, req, UV_FS_FSYNC, cb);
+  req->file.fd = fd;
+
+  if (cb) {
+    QUEUE_FS_TP_JOB(loop, req);
+    return 0;
+  } else {
+    fs__fsync(req);
+    return req->result;
+  }
+}
+
+
+int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file fd, uv_fs_cb cb) {
+  uv_fs_req_init(loop, req, UV_FS_FDATASYNC, cb);
+  req->file.fd = fd;
+
+  if (cb) {
+    QUEUE_FS_TP_JOB(loop, req);
+    return 0;
+  } else {
+    fs__fdatasync(req);
+    return req->result;
+  }
+}
+
+
+int uv_fs_ftruncate(uv_loop_t* loop, uv_fs_t* req, uv_file fd,
+    int64_t offset, uv_fs_cb cb) {
+  uv_fs_req_init(loop, req, UV_FS_FTRUNCATE, cb);
+
+  req->file.fd = fd;
+  req->fs.info.offset = offset;
+
+  if (cb) {
+    QUEUE_FS_TP_JOB(loop, req);
+    return 0;
+  } else {
+    fs__ftruncate(req);
+    return req->result;
+  }
+}
+
+
+
+int uv_fs_sendfile(uv_loop_t* loop, uv_fs_t* req, uv_file fd_out,
+    uv_file fd_in, int64_t in_offset, size_t length, uv_fs_cb cb) {
+  uv_fs_req_init(loop, req, UV_FS_SENDFILE, cb);
+
+  req->file.fd = fd_in;
+  req->fs.info.fd_out = fd_out;
+  req->fs.info.offset = in_offset;
+  req->fs.info.bufsml[0].len = length;
+
+  if (cb) {
+    QUEUE_FS_TP_JOB(loop, req);
+    return 0;
+  } else {
+    fs__sendfile(req);
+    return req->result;
+  }
+}
+
+
+int uv_fs_access(uv_loop_t* loop,
+                 uv_fs_t* req,
+                 const char* path,
+                 int flags,
+                 uv_fs_cb cb) {
+  int err;
+
+  uv_fs_req_init(loop, req, UV_FS_ACCESS, cb);
+
+  err = fs__capture_path(req, path, NULL, cb != NULL);
+  if (err)
+    return uv_translate_sys_error(err);
+
+  req->flags = flags;
+
+  if (cb) {
+    QUEUE_FS_TP_JOB(loop, req);
+    return 0;
+  }
+
+  fs__access(req);
+  return req->result;
+}
+
+
+int uv_fs_chmod(uv_loop_t* loop, uv_fs_t* req, const char* path, int mode,
+    uv_fs_cb cb) {
+  int err;
+
+  uv_fs_req_init(loop, req, UV_FS_CHMOD, cb);
+
+  err = fs__capture_path(req, path, NULL, cb != NULL);
+  if (err) {
+    return uv_translate_sys_error(err);
+  }
+
+  req->fs.info.mode = mode;
+
+  if (cb) {
+    QUEUE_FS_TP_JOB(loop, req);
+    return 0;
+  } else {
+    fs__chmod(req);
+    return req->result;
+  }
+}
+
+
+int uv_fs_fchmod(uv_loop_t* loop, uv_fs_t* req, uv_file fd, int mode,
+    uv_fs_cb cb) {
+  uv_fs_req_init(loop, req, UV_FS_FCHMOD, cb);
+
+  req->file.fd = fd;
+  req->fs.info.mode = mode;
+
+  if (cb) {
+    QUEUE_FS_TP_JOB(loop, req);
+    return 0;
+  } else {
+    fs__fchmod(req);
+    return req->result;
+  }
+}
+
+
+int uv_fs_utime(uv_loop_t* loop, uv_fs_t* req, const char* path, double atime,
+    double mtime, uv_fs_cb cb) {
+  int err;
+
+  uv_fs_req_init(loop, req, UV_FS_UTIME, cb);
+
+  err = fs__capture_path(req, path, NULL, cb != NULL);
+  if (err) {
+    return uv_translate_sys_error(err);
+  }
+
+  req->fs.time.atime = atime;
+  req->fs.time.mtime = mtime;
+
+  if (cb) {
+    QUEUE_FS_TP_JOB(loop, req);
+    return 0;
+  } else {
+    fs__utime(req);
+    return req->result;
+  }
+}
+
+
+int uv_fs_futime(uv_loop_t* loop, uv_fs_t* req, uv_file fd, double atime,
+    double mtime, uv_fs_cb cb) {
+  uv_fs_req_init(loop, req, UV_FS_FUTIME, cb);
+
+  req->file.fd = fd;
+  req->fs.time.atime = atime;
+  req->fs.time.mtime = mtime;
+
+  if (cb) {
+    QUEUE_FS_TP_JOB(loop, req);
+    return 0;
+  } else {
+    fs__futime(req);
+    return req->result;
+  }
+}

+ 385 - 0
Utilities/cmlibuv/src/win/getaddrinfo.c

@@ -0,0 +1,385 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+
+#include "uv.h"
+#include "internal.h"
+#include "req-inl.h"
+
+/* EAI_* constants. */
+#include <winsock2.h>
+
+
+int uv__getaddrinfo_translate_error(int sys_err) {
+  switch (sys_err) {
+    case 0:                       return 0;
+    case WSATRY_AGAIN:            return UV_EAI_AGAIN;
+    case WSAEINVAL:               return UV_EAI_BADFLAGS;
+    case WSANO_RECOVERY:          return UV_EAI_FAIL;
+    case WSAEAFNOSUPPORT:         return UV_EAI_FAMILY;
+    case WSA_NOT_ENOUGH_MEMORY:   return UV_EAI_MEMORY;
+    case WSAHOST_NOT_FOUND:       return UV_EAI_NONAME;
+    case WSATYPE_NOT_FOUND:       return UV_EAI_SERVICE;
+    case WSAESOCKTNOSUPPORT:      return UV_EAI_SOCKTYPE;
+    default:                      return uv_translate_sys_error(sys_err);
+  }
+}
+
+
+/*
+ * MinGW is missing this
+ */
+#if !defined(_MSC_VER) && !defined(__MINGW64_VERSION_MAJOR)
+  typedef struct addrinfoW {
+    int ai_flags;
+    int ai_family;
+    int ai_socktype;
+    int ai_protocol;
+    size_t ai_addrlen;
+    WCHAR* ai_canonname;
+    struct sockaddr* ai_addr;
+    struct addrinfoW* ai_next;
+  } ADDRINFOW, *PADDRINFOW;
+
+  DECLSPEC_IMPORT int WSAAPI GetAddrInfoW(const WCHAR* node,
+                                          const WCHAR* service,
+                                          const ADDRINFOW* hints,
+                                          PADDRINFOW* result);
+
+  DECLSPEC_IMPORT void WSAAPI FreeAddrInfoW(PADDRINFOW pAddrInfo);
+#endif
+
+
+/* Adjust size value to be a multiple of 4. Used to keep pointers aligned. */
+/* Do we need different versions of this for different architectures? */
+#define ALIGNED_SIZE(X)     ((((X) + 3) >> 2) << 2)
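
For example, with this 4-byte rounding: ALIGNED_SIZE(5) == 8, ALIGNED_SIZE(8) == 8, and ALIGNED_SIZE(9) == 12.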
+
+
+static void uv__getaddrinfo_work(struct uv__work* w) {
+  uv_getaddrinfo_t* req;
+  struct addrinfoW* hints;
+  int err;
+
+  req = container_of(w, uv_getaddrinfo_t, work_req);
+  hints = req->addrinfow;
+  req->addrinfow = NULL;
+  err = GetAddrInfoW(req->node, req->service, hints, &req->addrinfow);
+  req->retcode = uv__getaddrinfo_translate_error(err);
+}
+
+
+/*
+ * Called from uv_run when complete. Calls the user-specified callback,
+ * then frees the returned addrinfo.
+ * Returned addrinfo strings are converted from UTF-16 to UTF-8.
+ *
+ * To minimize allocation we calculate total size required,
+ * and copy all structs and referenced strings into the one block.
+ * Each size calculation is adjusted to avoid unaligned pointers.
+ */
+static void uv__getaddrinfo_done(struct uv__work* w, int status) {
+  uv_getaddrinfo_t* req;
+  int addrinfo_len = 0;
+  int name_len = 0;
+  size_t addrinfo_struct_len = ALIGNED_SIZE(sizeof(struct addrinfo));
+  struct addrinfoW* addrinfow_ptr;
+  struct addrinfo* addrinfo_ptr;
+  char* alloc_ptr = NULL;
+  char* cur_ptr = NULL;
+
+  req = container_of(w, uv_getaddrinfo_t, work_req);
+
+  /* release input parameter memory */
+  uv__free(req->alloc);
+  req->alloc = NULL;
+
+  if (status == UV_ECANCELED) {
+    assert(req->retcode == 0);
+    req->retcode = UV_EAI_CANCELED;
+    goto complete;
+  }
+
+  if (req->retcode == 0) {
+    /* convert addrinfoW to addrinfo */
+    /* first calculate required length */
+    addrinfow_ptr = req->addrinfow;
+    while (addrinfow_ptr != NULL) {
+      addrinfo_len += addrinfo_struct_len +
+          ALIGNED_SIZE(addrinfow_ptr->ai_addrlen);
+      if (addrinfow_ptr->ai_canonname != NULL) {
+        name_len = WideCharToMultiByte(CP_UTF8,
+                                       0,
+                                       addrinfow_ptr->ai_canonname,
+                                       -1,
+                                       NULL,
+                                       0,
+                                       NULL,
+                                       NULL);
+        if (name_len == 0) {
+          req->retcode = uv_translate_sys_error(GetLastError());
+          goto complete;
+        }
+        addrinfo_len += ALIGNED_SIZE(name_len);
+      }
+      addrinfow_ptr = addrinfow_ptr->ai_next;
+    }
+
+    /* allocate memory for addrinfo results */
+    alloc_ptr = (char*)uv__malloc(addrinfo_len);
+
+    /* do conversions */
+    if (alloc_ptr != NULL) {
+      cur_ptr = alloc_ptr;
+      addrinfow_ptr = req->addrinfow;
+
+      while (addrinfow_ptr != NULL) {
+        /* copy addrinfo struct data */
+        assert(cur_ptr + addrinfo_struct_len <= alloc_ptr + addrinfo_len);
+        addrinfo_ptr = (struct addrinfo*)cur_ptr;
+        addrinfo_ptr->ai_family = addrinfow_ptr->ai_family;
+        addrinfo_ptr->ai_socktype = addrinfow_ptr->ai_socktype;
+        addrinfo_ptr->ai_protocol = addrinfow_ptr->ai_protocol;
+        addrinfo_ptr->ai_flags = addrinfow_ptr->ai_flags;
+        addrinfo_ptr->ai_addrlen = addrinfow_ptr->ai_addrlen;
+        addrinfo_ptr->ai_canonname = NULL;
+        addrinfo_ptr->ai_addr = NULL;
+        addrinfo_ptr->ai_next = NULL;
+
+        cur_ptr += addrinfo_struct_len;
+
+        /* copy sockaddr */
+        if (addrinfo_ptr->ai_addrlen > 0) {
+          assert(cur_ptr + addrinfo_ptr->ai_addrlen <=
+                 alloc_ptr + addrinfo_len);
+          memcpy(cur_ptr, addrinfow_ptr->ai_addr, addrinfo_ptr->ai_addrlen);
+          addrinfo_ptr->ai_addr = (struct sockaddr*)cur_ptr;
+          cur_ptr += ALIGNED_SIZE(addrinfo_ptr->ai_addrlen);
+        }
+
+        /* convert canonical name to UTF-8 */
+        if (addrinfow_ptr->ai_canonname != NULL) {
+          name_len = WideCharToMultiByte(CP_UTF8,
+                                         0,
+                                         addrinfow_ptr->ai_canonname,
+                                         -1,
+                                         NULL,
+                                         0,
+                                         NULL,
+                                         NULL);
+          assert(name_len > 0);
+          assert(cur_ptr + name_len <= alloc_ptr + addrinfo_len);
+          name_len = WideCharToMultiByte(CP_UTF8,
+                                         0,
+                                         addrinfow_ptr->ai_canonname,
+                                         -1,
+                                         cur_ptr,
+                                         name_len,
+                                         NULL,
+                                         NULL);
+          assert(name_len > 0);
+          addrinfo_ptr->ai_canonname = cur_ptr;
+          cur_ptr += ALIGNED_SIZE(name_len);
+        }
+        assert(cur_ptr <= alloc_ptr + addrinfo_len);
+
+        /* set next ptr */
+        addrinfow_ptr = addrinfow_ptr->ai_next;
+        if (addrinfow_ptr != NULL) {
+          addrinfo_ptr->ai_next = (struct addrinfo*)cur_ptr;
+        }
+      }
+      req->addrinfo = (struct addrinfo*)alloc_ptr;
+    } else {
+      req->retcode = UV_EAI_MEMORY;
+    }
+  }
+
+  /* return memory to system */
+  if (req->addrinfow != NULL) {
+    FreeAddrInfoW(req->addrinfow);
+    req->addrinfow = NULL;
+  }
+
+complete:
+  uv__req_unregister(req->loop, req);
+
+  /* finally do callback with converted result */
+  if (req->getaddrinfo_cb)
+    req->getaddrinfo_cb(req, req->retcode, req->addrinfo);
+}
+
+
+void uv_freeaddrinfo(struct addrinfo* ai) {
+  char* alloc_ptr = (char*)ai;
+
+  /* release copied result memory */
+  uv__free(alloc_ptr);
+}
+
+
+/*
+ * Entry point for getaddrinfo.
+ * The UTF-8 node and service strings are converted to UTF-16 (Windows
+ * "Unicode") and the UTF-16 string pointers are saved in the req.
+ * We also copy the hints so that the caller does not need to keep the memory
+ * alive until the callback runs.
+ * Returns 0 if a callback will be made.
+ * Returns an error code if validation fails.
+ *
+ * To minimize allocation we calculate total size required,
+ * and copy all structs and referenced strings into the one block.
+ * Each size calculation is adjusted to avoid unaligned pointers.
+ */
+int uv_getaddrinfo(uv_loop_t* loop,
+                   uv_getaddrinfo_t* req,
+                   uv_getaddrinfo_cb getaddrinfo_cb,
+                   const char* node,
+                   const char* service,
+                   const struct addrinfo* hints) {
+  int nodesize = 0;
+  int servicesize = 0;
+  int hintssize = 0;
+  char* alloc_ptr = NULL;
+  int err;
+
+  if (req == NULL || (node == NULL && service == NULL)) {
+    err = WSAEINVAL;
+    goto error;
+  }
+
+  uv_req_init(loop, (uv_req_t*)req);
+
+  req->getaddrinfo_cb = getaddrinfo_cb;
+  req->addrinfo = NULL;
+  req->type = UV_GETADDRINFO;
+  req->loop = loop;
+  req->retcode = 0;
+
+  /* calculate required memory size for all input values */
+  if (node != NULL) {
+    nodesize = ALIGNED_SIZE(MultiByteToWideChar(CP_UTF8, 0, node, -1, NULL, 0) *
+                            sizeof(WCHAR));
+    if (nodesize == 0) {
+      err = GetLastError();
+      goto error;
+    }
+  }
+
+  if (service != NULL) {
+    servicesize = ALIGNED_SIZE(MultiByteToWideChar(CP_UTF8,
+                                                   0,
+                                                   service,
+                                                   -1,
+                                                   NULL,
+                                                   0) *
+                               sizeof(WCHAR));
+    if (servicesize == 0) {
+      err = GetLastError();
+      goto error;
+    }
+  }
+  if (hints != NULL) {
+    hintssize = ALIGNED_SIZE(sizeof(struct addrinfoW));
+  }
+
+  /* allocate memory for inputs, and partition it as needed */
+  alloc_ptr = (char*)uv__malloc(nodesize + servicesize + hintssize);
+  if (!alloc_ptr) {
+    err = WSAENOBUFS;
+    goto error;
+  }
+
+  /* save alloc_ptr now so we can free if error */
+  req->alloc = (void*)alloc_ptr;
+
+  /* convert node string to UTF16 into allocated memory and save pointer in */
+  /* the request. */
+  if (node != NULL) {
+    req->node = (WCHAR*)alloc_ptr;
+    if (MultiByteToWideChar(CP_UTF8,
+                            0,
+                            node,
+                            -1,
+                            (WCHAR*) alloc_ptr,
+                            nodesize / sizeof(WCHAR)) == 0) {
+      err = GetLastError();
+      goto error;
+    }
+    alloc_ptr += nodesize;
+  } else {
+    req->node = NULL;
+  }
+
+  /* convert service string to UTF16 into allocated memory and save pointer */
+  /* in the req. */
+  if (service != NULL) {
+    req->service = (WCHAR*)alloc_ptr;
+    if (MultiByteToWideChar(CP_UTF8,
+                            0,
+                            service,
+                            -1,
+                            (WCHAR*) alloc_ptr,
+                            servicesize / sizeof(WCHAR)) == 0) {
+      err = GetLastError();
+      goto error;
+    }
+    alloc_ptr += servicesize;
+  } else {
+    req->service = NULL;
+  }
+
+  /* copy hints to allocated memory and save pointer in req */
+  if (hints != NULL) {
+    req->addrinfow = (struct addrinfoW*)alloc_ptr;
+    req->addrinfow->ai_family = hints->ai_family;
+    req->addrinfow->ai_socktype = hints->ai_socktype;
+    req->addrinfow->ai_protocol = hints->ai_protocol;
+    req->addrinfow->ai_flags = hints->ai_flags;
+    req->addrinfow->ai_addrlen = 0;
+    req->addrinfow->ai_canonname = NULL;
+    req->addrinfow->ai_addr = NULL;
+    req->addrinfow->ai_next = NULL;
+  } else {
+    req->addrinfow = NULL;
+  }
+
+  uv__req_register(loop, req);
+
+  if (getaddrinfo_cb) {
+    uv__work_submit(loop,
+                    &req->work_req,
+                    uv__getaddrinfo_work,
+                    uv__getaddrinfo_done);
+    return 0;
+  } else {
+    uv__getaddrinfo_work(&req->work_req);
+    uv__getaddrinfo_done(&req->work_req, 0);
+    return req->retcode;
+  }
+
+error:
+  if (req != NULL) {
+    uv__free(req->alloc);
+    req->alloc = NULL;
+  }
+  return uv_translate_sys_error(err);
+}
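
A minimal synchronous usage sketch of the entry point above (a NULL callback runs the work and done steps inline and returns retcode); the host, port, and function name are illustrative assumptions:

    #include <string.h>
    #include <uv.h>

    int resolve_sync(void) {
      uv_getaddrinfo_t req;
      struct addrinfo hints, *ai;
      int r;

      memset(&hints, 0, sizeof(hints));
      hints.ai_family = AF_UNSPEC;
      hints.ai_socktype = SOCK_STREAM;

      /* NULL callback: work and done run inline; retcode is returned. */
      r = uv_getaddrinfo(uv_default_loop(), &req, NULL,
                         "example.org", "80", &hints);
      if (r != 0)
        return r;

      for (ai = req.addrinfo; ai != NULL; ai = ai->ai_next) {
        /* ai->ai_addr / ai->ai_addrlen describe one candidate address. */
      }
      uv_freeaddrinfo(req.addrinfo);        /* frees the single converted block */
      return 0;
    }
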

+ 150 - 0
Utilities/cmlibuv/src/win/getnameinfo.c

@@ -0,0 +1,150 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+*
+* Permission is hereby granted, free of charge, to any person obtaining a copy
+* of this software and associated documentation files (the "Software"), to
+* deal in the Software without restriction, including without limitation the
+* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+* sell copies of the Software, and to permit persons to whom the Software is
+* furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+* IN THE SOFTWARE.
+*/
+
+#include <assert.h>
+#include <stdio.h>
+
+#include "uv.h"
+#include "internal.h"
+#include "req-inl.h"
+
+#ifndef GetNameInfo
+int WSAAPI GetNameInfoW(
+  const SOCKADDR *pSockaddr,
+  socklen_t SockaddrLength,
+  PWCHAR pNodeBuffer,
+  DWORD NodeBufferSize,
+  PWCHAR pServiceBuffer,
+  DWORD ServiceBufferSize,
+  INT Flags
+);
+#endif
+
+static void uv__getnameinfo_work(struct uv__work* w) {
+  uv_getnameinfo_t* req;
+  WCHAR host[NI_MAXHOST];
+  WCHAR service[NI_MAXSERV];
+  int ret = 0;
+
+  req = container_of(w, uv_getnameinfo_t, work_req);
+  if (GetNameInfoW((struct sockaddr*)&req->storage,
+                   sizeof(req->storage),
+                   host,
+                   ARRAY_SIZE(host),
+                   service,
+                   ARRAY_SIZE(service),
+                   req->flags)) {
+    ret = WSAGetLastError();
+  }
+  req->retcode = uv__getaddrinfo_translate_error(ret);
+
+  /* convert results to UTF-8 */
+  WideCharToMultiByte(CP_UTF8,
+                      0,
+                      host,
+                      -1,
+                      req->host,
+                      sizeof(req->host),
+                      NULL,
+                      NULL);
+
+  WideCharToMultiByte(CP_UTF8,
+                      0,
+                      service,
+                      -1,
+                      req->service,
+                      sizeof(req->service),
+                      NULL,
+                      NULL);
+}
+
+
+/*
+* Called from uv_run when complete.
+*/
+static void uv__getnameinfo_done(struct uv__work* w, int status) {
+  uv_getnameinfo_t* req;
+  char* host;
+  char* service;
+
+  req = container_of(w, uv_getnameinfo_t, work_req);
+  uv__req_unregister(req->loop, req);
+  host = service = NULL;
+
+  if (status == UV_ECANCELED) {
+    assert(req->retcode == 0);
+    req->retcode = UV_EAI_CANCELED;
+  } else if (req->retcode == 0) {
+    host = req->host;
+    service = req->service;
+  }
+
+  if (req->getnameinfo_cb)
+    req->getnameinfo_cb(req, req->retcode, host, service);
+}
+
+
+/*
+* Entry point for getnameinfo.
+* Returns 0 if a callback will be made.
+* Returns an error code if validation fails.
+*/
+int uv_getnameinfo(uv_loop_t* loop,
+                   uv_getnameinfo_t* req,
+                   uv_getnameinfo_cb getnameinfo_cb,
+                   const struct sockaddr* addr,
+                   int flags) {
+  if (req == NULL || addr == NULL)
+    return UV_EINVAL;
+
+  if (addr->sa_family == AF_INET) {
+    memcpy(&req->storage,
+           addr,
+           sizeof(struct sockaddr_in));
+  } else if (addr->sa_family == AF_INET6) {
+    memcpy(&req->storage,
+           addr,
+           sizeof(struct sockaddr_in6));
+  } else {
+    return UV_EINVAL;
+  }
+
+  uv_req_init(loop, (uv_req_t*)req);
+  uv__req_register(loop, req);
+
+  req->getnameinfo_cb = getnameinfo_cb;
+  req->flags = flags;
+  req->type = UV_GETNAMEINFO;
+  req->loop = loop;
+  req->retcode = 0;
+
+  if (getnameinfo_cb) {
+    uv__work_submit(loop,
+                    &req->work_req,
+                    uv__getnameinfo_work,
+                    uv__getnameinfo_done);
+    return 0;
+  } else {
+    uv__getnameinfo_work(&req->work_req);
+    uv__getnameinfo_done(&req->work_req, 0);
+    return req->retcode;
+  }
+}
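
A matching synchronous sketch for the entry point above; the address and function name are illustrative assumptions:

    #include <uv.h>

    int reverse_lookup_sync(void) {
      uv_getnameinfo_t req;
      struct sockaddr_in sa;
      int r;

      uv_ip4_addr("127.0.0.1", 80, &sa);    /* illustrative address */

      /* NULL callback: work and done run inline; retcode is returned. */
      r = uv_getnameinfo(uv_default_loop(), &req, NULL,
                         (const struct sockaddr*) &sa, 0);
      if (r == 0) {
        /* req.host and req.service now hold the UTF-8 results. */
      }
      return r;
    }
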

+ 179 - 0
Utilities/cmlibuv/src/win/handle-inl.h

@@ -0,0 +1,179 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_WIN_HANDLE_INL_H_
+#define UV_WIN_HANDLE_INL_H_
+
+#include <assert.h>
+#include <io.h>
+
+#include "uv.h"
+#include "internal.h"
+
+
+#define DECREASE_ACTIVE_COUNT(loop, handle)                             \
+  do {                                                                  \
+    if (--(handle)->activecnt == 0 &&                                   \
+        !((handle)->flags & UV__HANDLE_CLOSING)) {                      \
+      uv__handle_stop((handle));                                        \
+    }                                                                   \
+    assert((handle)->activecnt >= 0);                                   \
+  } while (0)
+
+
+#define INCREASE_ACTIVE_COUNT(loop, handle)                             \
+  do {                                                                  \
+    if ((handle)->activecnt++ == 0) {                                   \
+      uv__handle_start((handle));                                       \
+    }                                                                   \
+    assert((handle)->activecnt > 0);                                    \
+  } while (0)
+
+
+#define DECREASE_PENDING_REQ_COUNT(handle)                              \
+  do {                                                                  \
+    assert(handle->reqs_pending > 0);                                   \
+    handle->reqs_pending--;                                             \
+                                                                        \
+    if (handle->flags & UV__HANDLE_CLOSING &&                           \
+        handle->reqs_pending == 0) {                                    \
+      uv_want_endgame(loop, (uv_handle_t*)handle);                      \
+    }                                                                   \
+  } while (0)
+
+
+#define uv__handle_closing(handle)                                      \
+  do {                                                                  \
+    assert(!((handle)->flags & UV__HANDLE_CLOSING));                    \
+                                                                        \
+    if (!(((handle)->flags & UV__HANDLE_ACTIVE) &&                      \
+          ((handle)->flags & UV__HANDLE_REF)))                          \
+      uv__active_handle_add((uv_handle_t*) (handle));                   \
+                                                                        \
+    (handle)->flags |= UV__HANDLE_CLOSING;                              \
+    (handle)->flags &= ~UV__HANDLE_ACTIVE;                              \
+  } while (0)
+
+
+#define uv__handle_close(handle)                                        \
+  do {                                                                  \
+    QUEUE_REMOVE(&(handle)->handle_queue);                              \
+    uv__active_handle_rm((uv_handle_t*) (handle));                      \
+                                                                        \
+    (handle)->flags |= UV_HANDLE_CLOSED;                                \
+                                                                        \
+    if ((handle)->close_cb)                                             \
+      (handle)->close_cb((uv_handle_t*) (handle));                      \
+  } while (0)
+
+
+INLINE static void uv_want_endgame(uv_loop_t* loop, uv_handle_t* handle) {
+  if (!(handle->flags & UV_HANDLE_ENDGAME_QUEUED)) {
+    handle->flags |= UV_HANDLE_ENDGAME_QUEUED;
+
+    handle->endgame_next = loop->endgame_handles;
+    loop->endgame_handles = handle;
+  }
+}
+
+
+INLINE static void uv_process_endgames(uv_loop_t* loop) {
+  uv_handle_t* handle;
+
+  while (loop->endgame_handles) {
+    handle = loop->endgame_handles;
+    loop->endgame_handles = handle->endgame_next;
+
+    handle->flags &= ~UV_HANDLE_ENDGAME_QUEUED;
+
+    switch (handle->type) {
+      case UV_TCP:
+        uv_tcp_endgame(loop, (uv_tcp_t*) handle);
+        break;
+
+      case UV_NAMED_PIPE:
+        uv_pipe_endgame(loop, (uv_pipe_t*) handle);
+        break;
+
+      case UV_TTY:
+        uv_tty_endgame(loop, (uv_tty_t*) handle);
+        break;
+
+      case UV_UDP:
+        uv_udp_endgame(loop, (uv_udp_t*) handle);
+        break;
+
+      case UV_POLL:
+        uv_poll_endgame(loop, (uv_poll_t*) handle);
+        break;
+
+      case UV_TIMER:
+        uv_timer_endgame(loop, (uv_timer_t*) handle);
+        break;
+
+      case UV_PREPARE:
+      case UV_CHECK:
+      case UV_IDLE:
+        uv_loop_watcher_endgame(loop, handle);
+        break;
+
+      case UV_ASYNC:
+        uv_async_endgame(loop, (uv_async_t*) handle);
+        break;
+
+      case UV_SIGNAL:
+        uv_signal_endgame(loop, (uv_signal_t*) handle);
+        break;
+
+      case UV_PROCESS:
+        uv_process_endgame(loop, (uv_process_t*) handle);
+        break;
+
+      case UV_FS_EVENT:
+        uv_fs_event_endgame(loop, (uv_fs_event_t*) handle);
+        break;
+
+      case UV_FS_POLL:
+        uv__fs_poll_endgame(loop, (uv_fs_poll_t*) handle);
+        break;
+
+      default:
+        assert(0);
+        break;
+    }
+  }
+}
+
+INLINE static HANDLE uv__get_osfhandle(int fd)
+{
+  /* _get_osfhandle() raises an assert in debug builds if the FD is invalid. */
+  /* But it also correctly checks the FD and returns INVALID_HANDLE_VALUE */
+  /* for invalid FDs in release builds (or if you let the assert continue).  */
+  /* So this wrapper function disables asserts when calling _get_osfhandle. */
+
+  HANDLE handle;
+  UV_BEGIN_DISABLE_CRT_ASSERT();
+  handle = (HANDLE) _get_osfhandle(fd);
+  UV_END_DISABLE_CRT_ASSERT();
+  return handle;
+}
+
+#endif /* UV_WIN_HANDLE_INL_H_ */

+ 154 - 0
Utilities/cmlibuv/src/win/handle.c

@@ -0,0 +1,154 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <io.h>
+#include <stdlib.h>
+
+#include "uv.h"
+#include "internal.h"
+#include "handle-inl.h"
+
+
+uv_handle_type uv_guess_handle(uv_file file) {
+  HANDLE handle;
+  DWORD mode;
+
+  if (file < 0) {
+    return UV_UNKNOWN_HANDLE;
+  }
+
+  handle = uv__get_osfhandle(file);
+
+  switch (GetFileType(handle)) {
+    case FILE_TYPE_CHAR:
+      if (GetConsoleMode(handle, &mode)) {
+        return UV_TTY;
+      } else {
+        return UV_FILE;
+      }
+
+    case FILE_TYPE_PIPE:
+      return UV_NAMED_PIPE;
+
+    case FILE_TYPE_DISK:
+      return UV_FILE;
+
+    default:
+      return UV_UNKNOWN_HANDLE;
+  }
+}
+
+
+int uv_is_active(const uv_handle_t* handle) {
+  return (handle->flags & UV__HANDLE_ACTIVE) &&
+        !(handle->flags & UV__HANDLE_CLOSING);
+}
+
+
+void uv_close(uv_handle_t* handle, uv_close_cb cb) {
+  uv_loop_t* loop = handle->loop;
+
+  if (handle->flags & UV__HANDLE_CLOSING) {
+    assert(0);
+    return;
+  }
+
+  handle->close_cb = cb;
+
+  /* Handle-specific close actions */
+  switch (handle->type) {
+    case UV_TCP:
+      uv_tcp_close(loop, (uv_tcp_t*)handle);
+      return;
+
+    case UV_NAMED_PIPE:
+      uv_pipe_close(loop, (uv_pipe_t*) handle);
+      return;
+
+    case UV_TTY:
+      uv_tty_close((uv_tty_t*) handle);
+      return;
+
+    case UV_UDP:
+      uv_udp_close(loop, (uv_udp_t*) handle);
+      return;
+
+    case UV_POLL:
+      uv_poll_close(loop, (uv_poll_t*) handle);
+      return;
+
+    case UV_TIMER:
+      uv_timer_stop((uv_timer_t*)handle);
+      uv__handle_closing(handle);
+      uv_want_endgame(loop, handle);
+      return;
+
+    case UV_PREPARE:
+      uv_prepare_stop((uv_prepare_t*)handle);
+      uv__handle_closing(handle);
+      uv_want_endgame(loop, handle);
+      return;
+
+    case UV_CHECK:
+      uv_check_stop((uv_check_t*)handle);
+      uv__handle_closing(handle);
+      uv_want_endgame(loop, handle);
+      return;
+
+    case UV_IDLE:
+      uv_idle_stop((uv_idle_t*)handle);
+      uv__handle_closing(handle);
+      uv_want_endgame(loop, handle);
+      return;
+
+    case UV_ASYNC:
+      uv_async_close(loop, (uv_async_t*) handle);
+      return;
+
+    case UV_SIGNAL:
+      uv_signal_close(loop, (uv_signal_t*) handle);
+      return;
+
+    case UV_PROCESS:
+      uv_process_close(loop, (uv_process_t*) handle);
+      return;
+
+    case UV_FS_EVENT:
+      uv_fs_event_close(loop, (uv_fs_event_t*) handle);
+      return;
+
+    case UV_FS_POLL:
+      uv__fs_poll_close((uv_fs_poll_t*) handle);
+      uv__handle_closing(handle);
+      uv_want_endgame(loop, handle);
+      return;
+
+    default:
+      /* Not supported */
+      abort();
+  }
+}
+
+
+int uv_is_closing(const uv_handle_t* handle) {
+  return !!(handle->flags & (UV__HANDLE_CLOSING | UV_HANDLE_CLOSED));
+}

+ 398 - 0
Utilities/cmlibuv/src/win/internal.h

@@ -0,0 +1,398 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_WIN_INTERNAL_H_
+#define UV_WIN_INTERNAL_H_
+
+#if defined(_MSC_VER)
+# pragma warning(push,1)
+#endif
+
+#include "uv.h"
+#include "../uv-common.h"
+
+#include "tree.h"
+#include "winapi.h"
+#include "winsock.h"
+
+#ifdef _MSC_VER
+# define INLINE __inline
+# define UV_THREAD_LOCAL __declspec( thread )
+#else
+# define INLINE inline
+# define UV_THREAD_LOCAL __thread
+#endif
+
+
+#ifdef _DEBUG
+
+extern UV_THREAD_LOCAL int uv__crt_assert_enabled;
+
+#define UV_BEGIN_DISABLE_CRT_ASSERT()                           \
+  {                                                             \
+    int uv__saved_crt_assert_enabled = uv__crt_assert_enabled;  \
+    uv__crt_assert_enabled = FALSE;
+
+
+#define UV_END_DISABLE_CRT_ASSERT()                             \
+    uv__crt_assert_enabled = uv__saved_crt_assert_enabled;      \
+  }
+
+#else
+#define UV_BEGIN_DISABLE_CRT_ASSERT()
+#define UV_END_DISABLE_CRT_ASSERT()
+#endif
+
+/*
+ * Handles
+ * (also see handle-inl.h)
+ */
+
+/* Used by all handles. */
+#define UV_HANDLE_CLOSED                        0x00000002
+#define UV_HANDLE_ENDGAME_QUEUED                0x00000008
+
+/* uv-common.h: #define UV__HANDLE_CLOSING      0x00000001 */
+/* uv-common.h: #define UV__HANDLE_ACTIVE       0x00000040 */
+/* uv-common.h: #define UV__HANDLE_REF          0x00000020 */
+/* uv-common.h: #define UV_HANDLE_INTERNAL      0x00000080 */
+
+/* Used by streams and UDP handles. */
+#define UV_HANDLE_READING                       0x00000100
+#define UV_HANDLE_BOUND                         0x00000200
+#define UV_HANDLE_LISTENING                     0x00000800
+#define UV_HANDLE_CONNECTION                    0x00001000
+#define UV_HANDLE_READABLE                      0x00008000
+#define UV_HANDLE_WRITABLE                      0x00010000
+#define UV_HANDLE_READ_PENDING                  0x00020000
+#define UV_HANDLE_SYNC_BYPASS_IOCP              0x00040000
+#define UV_HANDLE_ZERO_READ                     0x00080000
+#define UV_HANDLE_EMULATE_IOCP                  0x00100000
+#define UV_HANDLE_BLOCKING_WRITES               0x00200000
+#define UV_HANDLE_CANCELLATION_PENDING          0x00400000
+
+/* Used by uv_tcp_t and uv_udp_t handles */
+#define UV_HANDLE_IPV6                          0x01000000
+
+/* Only used by uv_tcp_t handles. */
+#define UV_HANDLE_TCP_NODELAY                   0x02000000
+#define UV_HANDLE_TCP_KEEPALIVE                 0x04000000
+#define UV_HANDLE_TCP_SINGLE_ACCEPT             0x08000000
+#define UV_HANDLE_TCP_ACCEPT_STATE_CHANGING     0x10000000
+#define UV_HANDLE_TCP_SOCKET_CLOSED             0x20000000
+#define UV_HANDLE_SHARED_TCP_SOCKET             0x40000000
+
+/* Only used by uv_pipe_t handles. */
+#define UV_HANDLE_NON_OVERLAPPED_PIPE           0x01000000
+#define UV_HANDLE_PIPESERVER                    0x02000000
+#define UV_HANDLE_PIPE_READ_CANCELABLE          0x04000000
+
+/* Only used by uv_tty_t handles. */
+#define UV_HANDLE_TTY_READABLE                  0x01000000
+#define UV_HANDLE_TTY_RAW                       0x02000000
+#define UV_HANDLE_TTY_SAVED_POSITION            0x04000000
+#define UV_HANDLE_TTY_SAVED_ATTRIBUTES          0x08000000
+
+/* Only used by uv_poll_t handles. */
+#define UV_HANDLE_POLL_SLOW                     0x02000000
+
+
+/*
+ * Requests: see req-inl.h
+ */
+
+
+/*
+ * Streams: see stream-inl.h
+ */
+
+
+/*
+ * TCP
+ */
+
+typedef struct {
+  WSAPROTOCOL_INFOW socket_info;
+  int delayed_error;
+} uv__ipc_socket_info_ex;
+
+int uv_tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb);
+int uv_tcp_accept(uv_tcp_t* server, uv_tcp_t* client);
+int uv_tcp_read_start(uv_tcp_t* handle, uv_alloc_cb alloc_cb,
+    uv_read_cb read_cb);
+int uv_tcp_write(uv_loop_t* loop, uv_write_t* req, uv_tcp_t* handle,
+    const uv_buf_t bufs[], unsigned int nbufs, uv_write_cb cb);
+int uv__tcp_try_write(uv_tcp_t* handle, const uv_buf_t bufs[],
+    unsigned int nbufs);
+
+void uv_process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle, uv_req_t* req);
+void uv_process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle,
+    uv_write_t* req);
+void uv_process_tcp_accept_req(uv_loop_t* loop, uv_tcp_t* handle,
+    uv_req_t* req);
+void uv_process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle,
+    uv_connect_t* req);
+
+void uv_tcp_close(uv_loop_t* loop, uv_tcp_t* tcp);
+void uv_tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle);
+
+int uv_tcp_import(uv_tcp_t* tcp, uv__ipc_socket_info_ex* socket_info_ex,
+    int tcp_connection);
+
+int uv_tcp_duplicate_socket(uv_tcp_t* handle, int pid,
+    LPWSAPROTOCOL_INFOW protocol_info);
+
+
+/*
+ * UDP
+ */
+void uv_process_udp_recv_req(uv_loop_t* loop, uv_udp_t* handle, uv_req_t* req);
+void uv_process_udp_send_req(uv_loop_t* loop, uv_udp_t* handle,
+    uv_udp_send_t* req);
+
+void uv_udp_close(uv_loop_t* loop, uv_udp_t* handle);
+void uv_udp_endgame(uv_loop_t* loop, uv_udp_t* handle);
+
+
+/*
+ * Pipes
+ */
+int uv_stdio_pipe_server(uv_loop_t* loop, uv_pipe_t* handle, DWORD access,
+    char* name, size_t nameSize);
+
+int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb);
+int uv_pipe_accept(uv_pipe_t* server, uv_stream_t* client);
+int uv_pipe_read_start(uv_pipe_t* handle, uv_alloc_cb alloc_cb,
+    uv_read_cb read_cb);
+int uv_pipe_write(uv_loop_t* loop, uv_write_t* req, uv_pipe_t* handle,
+    const uv_buf_t bufs[], unsigned int nbufs, uv_write_cb cb);
+int uv_pipe_write2(uv_loop_t* loop, uv_write_t* req, uv_pipe_t* handle,
+    const uv_buf_t bufs[], unsigned int nbufs, uv_stream_t* send_handle,
+    uv_write_cb cb);
+void uv__pipe_pause_read(uv_pipe_t* handle);
+void uv__pipe_unpause_read(uv_pipe_t* handle);
+void uv__pipe_stop_read(uv_pipe_t* handle);
+
+void uv_process_pipe_read_req(uv_loop_t* loop, uv_pipe_t* handle,
+    uv_req_t* req);
+void uv_process_pipe_write_req(uv_loop_t* loop, uv_pipe_t* handle,
+    uv_write_t* req);
+void uv_process_pipe_accept_req(uv_loop_t* loop, uv_pipe_t* handle,
+    uv_req_t* raw_req);
+void uv_process_pipe_connect_req(uv_loop_t* loop, uv_pipe_t* handle,
+    uv_connect_t* req);
+void uv_process_pipe_shutdown_req(uv_loop_t* loop, uv_pipe_t* handle,
+    uv_shutdown_t* req);
+
+void uv_pipe_close(uv_loop_t* loop, uv_pipe_t* handle);
+void uv_pipe_cleanup(uv_loop_t* loop, uv_pipe_t* handle);
+void uv_pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle);
+
+
+/*
+ * TTY
+ */
+void uv_console_init();
+
+int uv_tty_read_start(uv_tty_t* handle, uv_alloc_cb alloc_cb,
+    uv_read_cb read_cb);
+int uv_tty_read_stop(uv_tty_t* handle);
+int uv_tty_write(uv_loop_t* loop, uv_write_t* req, uv_tty_t* handle,
+    const uv_buf_t bufs[], unsigned int nbufs, uv_write_cb cb);
+int uv__tty_try_write(uv_tty_t* handle, const uv_buf_t bufs[],
+    unsigned int nbufs);
+void uv_tty_close(uv_tty_t* handle);
+
+void uv_process_tty_read_req(uv_loop_t* loop, uv_tty_t* handle,
+    uv_req_t* req);
+void uv_process_tty_write_req(uv_loop_t* loop, uv_tty_t* handle,
+    uv_write_t* req);
+/* TODO: remove me */
+void uv_process_tty_accept_req(uv_loop_t* loop, uv_tty_t* handle,
+    uv_req_t* raw_req);
+/* TODO: remove me */
+void uv_process_tty_connect_req(uv_loop_t* loop, uv_tty_t* handle,
+    uv_connect_t* req);
+
+void uv_tty_endgame(uv_loop_t* loop, uv_tty_t* handle);
+
+
+/*
+ * Poll watchers
+ */
+void uv_process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
+    uv_req_t* req);
+
+int uv_poll_close(uv_loop_t* loop, uv_poll_t* handle);
+void uv_poll_endgame(uv_loop_t* loop, uv_poll_t* handle);
+
+
+/*
+ * Timers
+ */
+void uv_timer_endgame(uv_loop_t* loop, uv_timer_t* handle);
+
+DWORD uv__next_timeout(const uv_loop_t* loop);
+void uv_process_timers(uv_loop_t* loop);
+
+
+/*
+ * Loop watchers
+ */
+void uv_loop_watcher_endgame(uv_loop_t* loop, uv_handle_t* handle);
+
+void uv_prepare_invoke(uv_loop_t* loop);
+void uv_check_invoke(uv_loop_t* loop);
+void uv_idle_invoke(uv_loop_t* loop);
+
+void uv__once_init();
+
+
+/*
+ * Async watcher
+ */
+void uv_async_close(uv_loop_t* loop, uv_async_t* handle);
+void uv_async_endgame(uv_loop_t* loop, uv_async_t* handle);
+
+void uv_process_async_wakeup_req(uv_loop_t* loop, uv_async_t* handle,
+    uv_req_t* req);
+
+
+/*
+ * Signal watcher
+ */
+void uv_signals_init();
+int uv__signal_dispatch(int signum);
+
+void uv_signal_close(uv_loop_t* loop, uv_signal_t* handle);
+void uv_signal_endgame(uv_loop_t* loop, uv_signal_t* handle);
+
+void uv_process_signal_req(uv_loop_t* loop, uv_signal_t* handle,
+    uv_req_t* req);
+
+
+/*
+ * Spawn
+ */
+void uv_process_proc_exit(uv_loop_t* loop, uv_process_t* handle);
+void uv_process_close(uv_loop_t* loop, uv_process_t* handle);
+void uv_process_endgame(uv_loop_t* loop, uv_process_t* handle);
+
+
+/*
+ * Error
+ */
+int uv_translate_sys_error(int sys_errno);
+
+
+/*
+ * FS
+ */
+void uv_fs_init();
+
+
+/*
+ * FS Event
+ */
+void uv_process_fs_event_req(uv_loop_t* loop, uv_req_t* req,
+    uv_fs_event_t* handle);
+void uv_fs_event_close(uv_loop_t* loop, uv_fs_event_t* handle);
+void uv_fs_event_endgame(uv_loop_t* loop, uv_fs_event_t* handle);
+
+
+/*
+ * Stat poller.
+ */
+void uv__fs_poll_endgame(uv_loop_t* loop, uv_fs_poll_t* handle);
+
+
+/*
+ * Utilities.
+ */
+void uv__util_init();
+
+uint64_t uv__hrtime(double scale);
+int uv_parent_pid();
+int uv_current_pid();
+__declspec(noreturn) void uv_fatal_error(const int errorno, const char* syscall);
+int uv__getpwuid_r(uv_passwd_t* pwd);
+int uv__convert_utf16_to_utf8(const WCHAR* utf16, int utf16len, char** utf8);
+
+
+/*
+ * Process stdio handles.
+ */
+int uv__stdio_create(uv_loop_t* loop,
+                     const uv_process_options_t* options,
+                     BYTE** buffer_ptr);
+void uv__stdio_destroy(BYTE* buffer);
+void uv__stdio_noinherit(BYTE* buffer);
+int uv__stdio_verify(BYTE* buffer, WORD size);
+WORD uv__stdio_size(BYTE* buffer);
+HANDLE uv__stdio_handle(BYTE* buffer, int fd);
+
+
+/*
+ * Winapi and ntapi utility functions
+ */
+void uv_winapi_init();
+
+
+/*
+ * Winsock utility functions
+ */
+void uv_winsock_init();
+
+int uv_ntstatus_to_winsock_error(NTSTATUS status);
+
+BOOL uv_get_acceptex_function(SOCKET socket, LPFN_ACCEPTEX* target);
+BOOL uv_get_connectex_function(SOCKET socket, LPFN_CONNECTEX* target);
+
+int WSAAPI uv_wsarecv_workaround(SOCKET socket, WSABUF* buffers,
+    DWORD buffer_count, DWORD* bytes, DWORD* flags, WSAOVERLAPPED *overlapped,
+    LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine);
+int WSAAPI uv_wsarecvfrom_workaround(SOCKET socket, WSABUF* buffers,
+    DWORD buffer_count, DWORD* bytes, DWORD* flags, struct sockaddr* addr,
+    int* addr_len, WSAOVERLAPPED *overlapped,
+    LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine);
+
+int WSAAPI uv_msafd_poll(SOCKET socket, AFD_POLL_INFO* info_in,
+    AFD_POLL_INFO* info_out, OVERLAPPED* overlapped);
+
+/* Whether there are any non-IFS LSPs stacked on TCP */
+extern int uv_tcp_non_ifs_lsp_ipv4;
+extern int uv_tcp_non_ifs_lsp_ipv6;
+
+/* IP address used to bind to any port on any interface */
+extern struct sockaddr_in uv_addr_ip4_any_;
+extern struct sockaddr_in6 uv_addr_ip6_any_;
+
+/*
+ * Wake all loops with a fake message
+ */
+void uv__wake_all_loops();
+
+/*
+ * Init system wake-up detection
+ */
+void uv__init_detect_system_wakeup();
+
+#endif /* UV_WIN_INTERNAL_H_ */
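
The type-specific flag groups above deliberately reuse the same high bit values
(0x01000000 and up) across handle types; this stays unambiguous because any
given handle has exactly one type. An illustrative helper, not part of libuv,
showing the kind of check that makes the overlap safe:

  /* Illustration only: UV_HANDLE_PIPESERVER and UV_HANDLE_TCP_NODELAY share
     the value 0x02000000, but the handle's type disambiguates them. */
  static int is_pipe_server(const uv_pipe_t* handle) {
    return handle->type == UV_NAMED_PIPE &&
           (handle->flags & UV_HANDLE_PIPESERVER) != 0;
  }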

+ 122 - 0
Utilities/cmlibuv/src/win/loop-watcher.c

@@ -0,0 +1,122 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+
+#include "uv.h"
+#include "internal.h"
+#include "handle-inl.h"
+
+
+void uv_loop_watcher_endgame(uv_loop_t* loop, uv_handle_t* handle) {
+  if (handle->flags & UV__HANDLE_CLOSING) {
+    assert(!(handle->flags & UV_HANDLE_CLOSED));
+    handle->flags |= UV_HANDLE_CLOSED;
+    uv__handle_close(handle);
+  }
+}
+
+
+#define UV_LOOP_WATCHER_DEFINE(name, NAME)                                    \
+  int uv_##name##_init(uv_loop_t* loop, uv_##name##_t* handle) {              \
+    uv__handle_init(loop, (uv_handle_t*) handle, UV_##NAME);                  \
+                                                                              \
+    return 0;                                                                 \
+  }                                                                           \
+                                                                              \
+                                                                              \
+  int uv_##name##_start(uv_##name##_t* handle, uv_##name##_cb cb) {           \
+    uv_loop_t* loop = handle->loop;                                           \
+    uv_##name##_t* old_head;                                                  \
+                                                                              \
+    assert(handle->type == UV_##NAME);                                        \
+                                                                              \
+    if (uv__is_active(handle))                                                \
+      return 0;                                                               \
+                                                                              \
+    if (cb == NULL)                                                           \
+      return UV_EINVAL;                                                       \
+                                                                              \
+    old_head = loop->name##_handles;                                          \
+                                                                              \
+    handle->name##_next = old_head;                                           \
+    handle->name##_prev = NULL;                                               \
+                                                                              \
+    if (old_head) {                                                           \
+      old_head->name##_prev = handle;                                         \
+    }                                                                         \
+                                                                              \
+    loop->name##_handles = handle;                                            \
+                                                                              \
+    handle->name##_cb = cb;                                                   \
+    uv__handle_start(handle);                                                 \
+                                                                              \
+    return 0;                                                                 \
+  }                                                                           \
+                                                                              \
+                                                                              \
+  int uv_##name##_stop(uv_##name##_t* handle) {                               \
+    uv_loop_t* loop = handle->loop;                                           \
+                                                                              \
+    assert(handle->type == UV_##NAME);                                        \
+                                                                              \
+    if (!uv__is_active(handle))                                               \
+      return 0;                                                               \
+                                                                              \
+    /* Update loop head if needed */                                          \
+    if (loop->name##_handles == handle) {                                     \
+      loop->name##_handles = handle->name##_next;                             \
+    }                                                                         \
+                                                                              \
+    /* Update the iterator-next pointer if needed */                          \
+    if (loop->next_##name##_handle == handle) {                               \
+      loop->next_##name##_handle = handle->name##_next;                       \
+    }                                                                         \
+                                                                              \
+    if (handle->name##_prev) {                                                \
+      handle->name##_prev->name##_next = handle->name##_next;                 \
+    }                                                                         \
+    if (handle->name##_next) {                                                \
+      handle->name##_next->name##_prev = handle->name##_prev;                 \
+    }                                                                         \
+                                                                              \
+    uv__handle_stop(handle);                                                  \
+                                                                              \
+    return 0;                                                                 \
+  }                                                                           \
+                                                                              \
+                                                                              \
+  void uv_##name##_invoke(uv_loop_t* loop) {                                  \
+    uv_##name##_t* handle;                                                    \
+                                                                              \
+    (loop)->next_##name##_handle = (loop)->name##_handles;                    \
+                                                                              \
+    while ((loop)->next_##name##_handle != NULL) {                            \
+      handle = (loop)->next_##name##_handle;                                  \
+      (loop)->next_##name##_handle = handle->name##_next;                     \
+                                                                              \
+      handle->name##_cb(handle);                                              \
+    }                                                                         \
+  }
+
+UV_LOOP_WATCHER_DEFINE(prepare, PREPARE)
+UV_LOOP_WATCHER_DEFINE(check, CHECK)
+UV_LOOP_WATCHER_DEFINE(idle, IDLE)
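
UV_LOOP_WATCHER_DEFINE above stamps out the init/start/stop triple for the
prepare, check and idle watchers, each backed by a per-loop linked list that
uv_prepare_invoke(), uv_check_invoke() and uv_idle_invoke() walk. A minimal
sketch of the generated functions as seen from the public API (standard libuv
usage, independent of this import):

  #include <uv.h>

  static int ticks;

  static void on_idle(uv_idle_t* handle) {
    /* Called once per loop iteration while the handle is started. */
    if (++ticks == 10)
      uv_idle_stop(handle);  /* generated by UV_LOOP_WATCHER_DEFINE(idle, IDLE) */
  }

  int main(void) {
    uv_loop_t loop;
    uv_idle_t idle;

    uv_loop_init(&loop);
    uv_idle_init(&loop, &idle);
    uv_idle_start(&idle, on_idle);

    uv_run(&loop, UV_RUN_DEFAULT);        /* exits once the idle handle stops */
    uv_close((uv_handle_t*) &idle, NULL);
    uv_run(&loop, UV_RUN_DEFAULT);        /* drain the pending close */
    uv_loop_close(&loop);
    return 0;
  }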

+ 2130 - 0
Utilities/cmlibuv/src/win/pipe.c

@@ -0,0 +1,2130 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <io.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "uv.h"
+#include "internal.h"
+#include "handle-inl.h"
+#include "stream-inl.h"
+#include "req-inl.h"
+
+typedef struct uv__ipc_queue_item_s uv__ipc_queue_item_t;
+
+struct uv__ipc_queue_item_s {
+  /*
+   * NOTE: It is important for socket_info_ex to be the first field,
+   * because we will be assigning it to the pending_ipc_info.socket_info
+   */
+  uv__ipc_socket_info_ex socket_info_ex;
+  QUEUE member;
+  int tcp_connection;
+};
+
+/* A zero-size buffer for use by uv_pipe_read */
+static char uv_zero_[] = "";
+
+/* Null uv_buf_t */
+static const uv_buf_t uv_null_buf_ = { 0, NULL };
+
+/* The timeout that the pipe will wait for the remote end to write data */
+/* when the local end wants to shut it down. */
+static const int64_t eof_timeout = 50; /* ms */
+
+static const int default_pending_pipe_instances = 4;
+
+/* Pipe prefix */
+static char pipe_prefix[] = "\\\\?\\pipe";
+static const int pipe_prefix_len = sizeof(pipe_prefix) - 1;
+
+/* IPC protocol flags. */
+#define UV_IPC_RAW_DATA       0x0001
+#define UV_IPC_TCP_SERVER     0x0002
+#define UV_IPC_TCP_CONNECTION 0x0004
+
+/* IPC frame header. */
+typedef struct {
+  int flags;
+  uint64_t raw_data_length;
+} uv_ipc_frame_header_t;
+
+/* IPC frame, which contains an imported TCP socket stream. */
+typedef struct {
+  uv_ipc_frame_header_t header;
+  uv__ipc_socket_info_ex socket_info_ex;
+} uv_ipc_frame_uv_stream;
+
+static void eof_timer_init(uv_pipe_t* pipe);
+static void eof_timer_start(uv_pipe_t* pipe);
+static void eof_timer_stop(uv_pipe_t* pipe);
+static void eof_timer_cb(uv_timer_t* timer);
+static void eof_timer_destroy(uv_pipe_t* pipe);
+static void eof_timer_close_cb(uv_handle_t* handle);
+
+
+static void uv_unique_pipe_name(char* ptr, char* name, size_t size) {
+  snprintf(name, size, "\\\\?\\pipe\\uv\\%p-%lu", ptr, GetCurrentProcessId());
+}
+
+
+int uv_pipe_init(uv_loop_t* loop, uv_pipe_t* handle, int ipc) {
+  uv_stream_init(loop, (uv_stream_t*)handle, UV_NAMED_PIPE);
+
+  handle->reqs_pending = 0;
+  handle->handle = INVALID_HANDLE_VALUE;
+  handle->name = NULL;
+  handle->pipe.conn.ipc_pid = 0;
+  handle->pipe.conn.remaining_ipc_rawdata_bytes = 0;
+  QUEUE_INIT(&handle->pipe.conn.pending_ipc_info.queue);
+  handle->pipe.conn.pending_ipc_info.queue_len = 0;
+  handle->ipc = ipc;
+  handle->pipe.conn.non_overlapped_writes_tail = NULL;
+  handle->pipe.conn.readfile_thread = NULL;
+
+  uv_req_init(loop, (uv_req_t*) &handle->pipe.conn.ipc_header_write_req);
+
+  return 0;
+}
+
+
+static void uv_pipe_connection_init(uv_pipe_t* handle) {
+  uv_connection_init((uv_stream_t*) handle);
+  handle->read_req.data = handle;
+  handle->pipe.conn.eof_timer = NULL;
+  assert(!(handle->flags & UV_HANDLE_PIPESERVER));
+  if (pCancelSynchronousIo &&
+      handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE) {
+      uv_mutex_init(&handle->pipe.conn.readfile_mutex);
+      handle->flags |= UV_HANDLE_PIPE_READ_CANCELABLE;
+  }
+}
+
+
+static HANDLE open_named_pipe(const WCHAR* name, DWORD* duplex_flags) {
+  HANDLE pipeHandle;
+
+  /*
+   * Assume that we have a duplex pipe first, so attempt to
+   * connect with GENERIC_READ | GENERIC_WRITE.
+   */
+  pipeHandle = CreateFileW(name,
+                           GENERIC_READ | GENERIC_WRITE,
+                           0,
+                           NULL,
+                           OPEN_EXISTING,
+                           FILE_FLAG_OVERLAPPED,
+                           NULL);
+  if (pipeHandle != INVALID_HANDLE_VALUE) {
+    *duplex_flags = UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
+    return pipeHandle;
+  }
+
+  /*
+   * If the pipe is not duplex, CreateFileW fails with
+   * ERROR_ACCESS_DENIED.  In that case try to connect
+   * in read-only or write-only mode.
+   */
+  if (GetLastError() == ERROR_ACCESS_DENIED) {
+    pipeHandle = CreateFileW(name,
+                             GENERIC_READ | FILE_WRITE_ATTRIBUTES,
+                             0,
+                             NULL,
+                             OPEN_EXISTING,
+                             FILE_FLAG_OVERLAPPED,
+                             NULL);
+
+    if (pipeHandle != INVALID_HANDLE_VALUE) {
+      *duplex_flags = UV_HANDLE_READABLE;
+      return pipeHandle;
+    }
+  }
+
+  if (GetLastError() == ERROR_ACCESS_DENIED) {
+    pipeHandle = CreateFileW(name,
+                             GENERIC_WRITE | FILE_READ_ATTRIBUTES,
+                             0,
+                             NULL,
+                             OPEN_EXISTING,
+                             FILE_FLAG_OVERLAPPED,
+                             NULL);
+
+    if (pipeHandle != INVALID_HANDLE_VALUE) {
+      *duplex_flags = UV_HANDLE_WRITABLE;
+      return pipeHandle;
+    }
+  }
+
+  return INVALID_HANDLE_VALUE;
+}
+
+
+static void close_pipe(uv_pipe_t* pipe) {
+  assert(pipe->u.fd == -1 || pipe->u.fd > 2);
+  if (pipe->u.fd == -1)
+    CloseHandle(pipe->handle);
+  else
+    close(pipe->u.fd);
+
+  pipe->u.fd = -1;
+  pipe->handle = INVALID_HANDLE_VALUE;
+}
+
+
+int uv_stdio_pipe_server(uv_loop_t* loop, uv_pipe_t* handle, DWORD access,
+    char* name, size_t nameSize) {
+  HANDLE pipeHandle;
+  int err;
+  char* ptr = (char*)handle;
+
+  for (;;) {
+    uv_unique_pipe_name(ptr, name, nameSize);
+
+    pipeHandle = CreateNamedPipeA(name,
+      access | FILE_FLAG_OVERLAPPED | FILE_FLAG_FIRST_PIPE_INSTANCE,
+      PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT, 1, 65536, 65536, 0,
+      NULL);
+
+    if (pipeHandle != INVALID_HANDLE_VALUE) {
+      /* No name collisions.  We're done. */
+      break;
+    }
+
+    err = GetLastError();
+    if (err != ERROR_PIPE_BUSY && err != ERROR_ACCESS_DENIED) {
+      goto error;
+    }
+
+    /* Pipe name collision.  Increment the pointer and try again. */
+    ptr++;
+  }
+
+  if (CreateIoCompletionPort(pipeHandle,
+                             loop->iocp,
+                             (ULONG_PTR)handle,
+                             0) == NULL) {
+    err = GetLastError();
+    goto error;
+  }
+
+  uv_pipe_connection_init(handle);
+  handle->handle = pipeHandle;
+
+  return 0;
+
+ error:
+  if (pipeHandle != INVALID_HANDLE_VALUE) {
+    CloseHandle(pipeHandle);
+  }
+
+  return err;
+}
+
+
+static int uv_set_pipe_handle(uv_loop_t* loop,
+                              uv_pipe_t* handle,
+                              HANDLE pipeHandle,
+                              int fd,
+                              DWORD duplex_flags) {
+  NTSTATUS nt_status;
+  IO_STATUS_BLOCK io_status;
+  FILE_MODE_INFORMATION mode_info;
+  DWORD mode = PIPE_READMODE_BYTE | PIPE_WAIT;
+  DWORD current_mode = 0;
+  DWORD err = 0;
+
+  if (!(handle->flags & UV_HANDLE_PIPESERVER) &&
+      handle->handle != INVALID_HANDLE_VALUE)
+    return UV_EBUSY;
+
+  if (!SetNamedPipeHandleState(pipeHandle, &mode, NULL, NULL)) {
+    err = GetLastError();
+    if (err == ERROR_ACCESS_DENIED) {
+      /*
+       * SetNamedPipeHandleState can fail if the handle doesn't have either
+       * GENERIC_WRITE  or FILE_WRITE_ATTRIBUTES.
+       * But if the handle already has the desired wait and blocking modes
+       * we can continue.
+       */
+      if (!GetNamedPipeHandleState(pipeHandle, &current_mode, NULL, NULL,
+                                   NULL, NULL, 0)) {
+        return -1;
+      } else if (current_mode & PIPE_NOWAIT) {
+        SetLastError(ERROR_ACCESS_DENIED);
+        return -1;
+      }
+    } else {
+      /* If this returns ERROR_INVALID_PARAMETER we probably opened
+       * something that is not a pipe. */
+      if (err == ERROR_INVALID_PARAMETER) {
+        SetLastError(WSAENOTSOCK);
+      }
+      return -1;
+    }
+  }
+
+  /* Check if the pipe was created with FILE_FLAG_OVERLAPPED. */
+  nt_status = pNtQueryInformationFile(pipeHandle,
+                                      &io_status,
+                                      &mode_info,
+                                      sizeof(mode_info),
+                                      FileModeInformation);
+  if (nt_status != STATUS_SUCCESS) {
+    return -1;
+  }
+
+  if (mode_info.Mode & FILE_SYNCHRONOUS_IO_ALERT ||
+      mode_info.Mode & FILE_SYNCHRONOUS_IO_NONALERT) {
+    /* Non-overlapped pipe. */
+    handle->flags |= UV_HANDLE_NON_OVERLAPPED_PIPE;
+  } else {
+    /* Overlapped pipe.  Try to associate with IOCP. */
+    if (CreateIoCompletionPort(pipeHandle,
+                               loop->iocp,
+                               (ULONG_PTR)handle,
+                               0) == NULL) {
+      handle->flags |= UV_HANDLE_EMULATE_IOCP;
+    }
+  }
+
+  handle->handle = pipeHandle;
+  handle->u.fd = fd;
+  handle->flags |= duplex_flags;
+
+  return 0;
+}
+
+
+static DWORD WINAPI pipe_shutdown_thread_proc(void* parameter) {
+  uv_loop_t* loop;
+  uv_pipe_t* handle;
+  uv_shutdown_t* req;
+
+  req = (uv_shutdown_t*) parameter;
+  assert(req);
+  handle = (uv_pipe_t*) req->handle;
+  assert(handle);
+  loop = handle->loop;
+  assert(loop);
+
+  FlushFileBuffers(handle->handle);
+
+  /* Post completed */
+  POST_COMPLETION_FOR_REQ(loop, req);
+
+  return 0;
+}
+
+
+void uv_pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle) {
+  int err;
+  DWORD result;
+  uv_shutdown_t* req;
+  NTSTATUS nt_status;
+  IO_STATUS_BLOCK io_status;
+  FILE_PIPE_LOCAL_INFORMATION pipe_info;
+  uv__ipc_queue_item_t* item;
+
+  if (handle->flags & UV_HANDLE_PIPE_READ_CANCELABLE) {
+    handle->flags &= ~UV_HANDLE_PIPE_READ_CANCELABLE;
+    uv_mutex_destroy(&handle->pipe.conn.readfile_mutex);
+  }
+
+  if ((handle->flags & UV_HANDLE_CONNECTION) &&
+      handle->stream.conn.shutdown_req != NULL &&
+      handle->stream.conn.write_reqs_pending == 0) {
+    req = handle->stream.conn.shutdown_req;
+
+    /* Clear the shutdown_req field so we don't go here again. */
+    handle->stream.conn.shutdown_req = NULL;
+
+    if (handle->flags & UV__HANDLE_CLOSING) {
+      UNREGISTER_HANDLE_REQ(loop, handle, req);
+
+      /* Already closing. Cancel the shutdown. */
+      if (req->cb) {
+        req->cb(req, UV_ECANCELED);
+      }
+
+      DECREASE_PENDING_REQ_COUNT(handle);
+      return;
+    }
+
+    /* Try to avoid flushing the pipe buffer in the thread pool. */
+    nt_status = pNtQueryInformationFile(handle->handle,
+                                        &io_status,
+                                        &pipe_info,
+                                        sizeof pipe_info,
+                                        FilePipeLocalInformation);
+
+    if (nt_status != STATUS_SUCCESS) {
+      /* Failure */
+      UNREGISTER_HANDLE_REQ(loop, handle, req);
+
+      handle->flags |= UV_HANDLE_WRITABLE; /* Questionable */
+      if (req->cb) {
+        err = pRtlNtStatusToDosError(nt_status);
+        req->cb(req, uv_translate_sys_error(err));
+      }
+
+      DECREASE_PENDING_REQ_COUNT(handle);
+      return;
+    }
+
+    if (pipe_info.OutboundQuota == pipe_info.WriteQuotaAvailable) {
+      /* Short-circuit, no need to call FlushFileBuffers. */
+      uv_insert_pending_req(loop, (uv_req_t*) req);
+      return;
+    }
+
+    /* Run FlushFileBuffers in the thread pool. */
+    result = QueueUserWorkItem(pipe_shutdown_thread_proc,
+                               req,
+                               WT_EXECUTELONGFUNCTION);
+    if (result) {
+      return;
+
+    } else {
+      /* Failure. */
+      UNREGISTER_HANDLE_REQ(loop, handle, req);
+
+      handle->flags |= UV_HANDLE_WRITABLE; /* Questionable */
+      if (req->cb) {
+        err = GetLastError();
+        req->cb(req, uv_translate_sys_error(err));
+      }
+
+      DECREASE_PENDING_REQ_COUNT(handle);
+      return;
+    }
+  }
+
+  if (handle->flags & UV__HANDLE_CLOSING &&
+      handle->reqs_pending == 0) {
+    assert(!(handle->flags & UV_HANDLE_CLOSED));
+
+    if (handle->flags & UV_HANDLE_CONNECTION) {
+      /* Free pending sockets */
+      while (!QUEUE_EMPTY(&handle->pipe.conn.pending_ipc_info.queue)) {
+        QUEUE* q;
+        SOCKET socket;
+
+        q = QUEUE_HEAD(&handle->pipe.conn.pending_ipc_info.queue);
+        QUEUE_REMOVE(q);
+        item = QUEUE_DATA(q, uv__ipc_queue_item_t, member);
+
+        /* Materialize socket and close it */
+        socket = WSASocketW(FROM_PROTOCOL_INFO,
+                            FROM_PROTOCOL_INFO,
+                            FROM_PROTOCOL_INFO,
+                            &item->socket_info_ex.socket_info,
+                            0,
+                            WSA_FLAG_OVERLAPPED);
+        uv__free(item);
+
+        if (socket != INVALID_SOCKET)
+          closesocket(socket);
+      }
+      handle->pipe.conn.pending_ipc_info.queue_len = 0;
+
+      if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+        if (handle->read_req.wait_handle != INVALID_HANDLE_VALUE) {
+          UnregisterWait(handle->read_req.wait_handle);
+          handle->read_req.wait_handle = INVALID_HANDLE_VALUE;
+        }
+        if (handle->read_req.event_handle) {
+          CloseHandle(handle->read_req.event_handle);
+          handle->read_req.event_handle = NULL;
+        }
+      }
+    }
+
+    if (handle->flags & UV_HANDLE_PIPESERVER) {
+      assert(handle->pipe.serv.accept_reqs);
+      uv__free(handle->pipe.serv.accept_reqs);
+      handle->pipe.serv.accept_reqs = NULL;
+    }
+
+    uv__handle_close(handle);
+  }
+}
+
+
+void uv_pipe_pending_instances(uv_pipe_t* handle, int count) {
+  if (handle->flags & UV_HANDLE_BOUND)
+    return;
+  handle->pipe.serv.pending_instances = count;
+  handle->flags |= UV_HANDLE_PIPESERVER;
+}
+
+
+/* Creates a pipe server. */
+int uv_pipe_bind(uv_pipe_t* handle, const char* name) {
+  uv_loop_t* loop = handle->loop;
+  int i, err, nameSize;
+  uv_pipe_accept_t* req;
+
+  if (handle->flags & UV_HANDLE_BOUND) {
+    return UV_EINVAL;
+  }
+
+  if (!name) {
+    return UV_EINVAL;
+  }
+
+  if (!(handle->flags & UV_HANDLE_PIPESERVER)) {
+    handle->pipe.serv.pending_instances = default_pending_pipe_instances;
+  }
+
+  handle->pipe.serv.accept_reqs = (uv_pipe_accept_t*)
+    uv__malloc(sizeof(uv_pipe_accept_t) * handle->pipe.serv.pending_instances);
+  if (!handle->pipe.serv.accept_reqs) {
+    uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+  }
+
+  for (i = 0; i < handle->pipe.serv.pending_instances; i++) {
+    req = &handle->pipe.serv.accept_reqs[i];
+    uv_req_init(loop, (uv_req_t*) req);
+    req->type = UV_ACCEPT;
+    req->data = handle;
+    req->pipeHandle = INVALID_HANDLE_VALUE;
+    req->next_pending = NULL;
+  }
+
+  /* Convert name to UTF16. */
+  nameSize = MultiByteToWideChar(CP_UTF8, 0, name, -1, NULL, 0) * sizeof(WCHAR);
+  handle->name = (WCHAR*)uv__malloc(nameSize);
+  if (!handle->name) {
+    uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+  }
+
+  if (!MultiByteToWideChar(CP_UTF8,
+                           0,
+                           name,
+                           -1,
+                           handle->name,
+                           nameSize / sizeof(WCHAR))) {
+    err = GetLastError();
+    goto error;
+  }
+
+  /*
+   * Attempt to create the first pipe with FILE_FLAG_FIRST_PIPE_INSTANCE.
+   * If this fails then there's already a pipe server for the given pipe name.
+   */
+  handle->pipe.serv.accept_reqs[0].pipeHandle = CreateNamedPipeW(handle->name,
+      PIPE_ACCESS_DUPLEX | FILE_FLAG_OVERLAPPED |
+      FILE_FLAG_FIRST_PIPE_INSTANCE,
+      PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT,
+      PIPE_UNLIMITED_INSTANCES, 65536, 65536, 0, NULL);
+
+  if (handle->pipe.serv.accept_reqs[0].pipeHandle == INVALID_HANDLE_VALUE) {
+    err = GetLastError();
+    if (err == ERROR_ACCESS_DENIED) {
+      err = WSAEADDRINUSE;  /* Translates to UV_EADDRINUSE. */
+    } else if (err == ERROR_PATH_NOT_FOUND || err == ERROR_INVALID_NAME) {
+      err = WSAEACCES;  /* Translates to UV_EACCES. */
+    }
+    goto error;
+  }
+
+  if (uv_set_pipe_handle(loop,
+                         handle,
+                         handle->pipe.serv.accept_reqs[0].pipeHandle,
+                         -1,
+                         0)) {
+    err = GetLastError();
+    goto error;
+  }
+
+  handle->pipe.serv.pending_accepts = NULL;
+  handle->flags |= UV_HANDLE_PIPESERVER;
+  handle->flags |= UV_HANDLE_BOUND;
+
+  return 0;
+
+error:
+  if (handle->name) {
+    uv__free(handle->name);
+    handle->name = NULL;
+  }
+
+  if (handle->pipe.serv.accept_reqs[0].pipeHandle != INVALID_HANDLE_VALUE) {
+    CloseHandle(handle->pipe.serv.accept_reqs[0].pipeHandle);
+    handle->pipe.serv.accept_reqs[0].pipeHandle = INVALID_HANDLE_VALUE;
+  }
+
+  return uv_translate_sys_error(err);
+}
+
+
+static DWORD WINAPI pipe_connect_thread_proc(void* parameter) {
+  uv_loop_t* loop;
+  uv_pipe_t* handle;
+  uv_connect_t* req;
+  HANDLE pipeHandle = INVALID_HANDLE_VALUE;
+  DWORD duplex_flags;
+
+  req = (uv_connect_t*) parameter;
+  assert(req);
+  handle = (uv_pipe_t*) req->handle;
+  assert(handle);
+  loop = handle->loop;
+  assert(loop);
+
+  /* We're here because CreateFile on a pipe returned ERROR_PIPE_BUSY. */
+  /* We wait for the pipe to become available with WaitNamedPipe. */
+  while (WaitNamedPipeW(handle->name, 30000)) {
+    /* The pipe is now available, try to connect. */
+    pipeHandle = open_named_pipe(handle->name, &duplex_flags);
+    if (pipeHandle != INVALID_HANDLE_VALUE) {
+      break;
+    }
+
+    SwitchToThread();
+  }
+
+  if (pipeHandle != INVALID_HANDLE_VALUE &&
+      !uv_set_pipe_handle(loop, handle, pipeHandle, -1, duplex_flags)) {
+    SET_REQ_SUCCESS(req);
+  } else {
+    SET_REQ_ERROR(req, GetLastError());
+  }
+
+  /* Post completed */
+  POST_COMPLETION_FOR_REQ(loop, req);
+
+  return 0;
+}
+
+
+void uv_pipe_connect(uv_connect_t* req, uv_pipe_t* handle,
+    const char* name, uv_connect_cb cb) {
+  uv_loop_t* loop = handle->loop;
+  int err, nameSize;
+  HANDLE pipeHandle = INVALID_HANDLE_VALUE;
+  DWORD duplex_flags;
+
+  uv_req_init(loop, (uv_req_t*) req);
+  req->type = UV_CONNECT;
+  req->handle = (uv_stream_t*) handle;
+  req->cb = cb;
+
+  /* Convert name to UTF16. */
+  nameSize = MultiByteToWideChar(CP_UTF8, 0, name, -1, NULL, 0) * sizeof(WCHAR);
+  handle->name = (WCHAR*)uv__malloc(nameSize);
+  if (!handle->name) {
+    uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+  }
+
+  if (!MultiByteToWideChar(CP_UTF8,
+                           0,
+                           name,
+                           -1,
+                           handle->name,
+                           nameSize / sizeof(WCHAR))) {
+    err = GetLastError();
+    goto error;
+  }
+
+  pipeHandle = open_named_pipe(handle->name, &duplex_flags);
+  if (pipeHandle == INVALID_HANDLE_VALUE) {
+    if (GetLastError() == ERROR_PIPE_BUSY) {
+      /* Wait for the server to make a pipe instance available. */
+      if (!QueueUserWorkItem(&pipe_connect_thread_proc,
+                             req,
+                             WT_EXECUTELONGFUNCTION)) {
+        err = GetLastError();
+        goto error;
+      }
+
+      REGISTER_HANDLE_REQ(loop, handle, req);
+      handle->reqs_pending++;
+
+      return;
+    }
+
+    err = GetLastError();
+    goto error;
+  }
+
+  assert(pipeHandle != INVALID_HANDLE_VALUE);
+
+  if (uv_set_pipe_handle(loop,
+                         (uv_pipe_t*) req->handle,
+                         pipeHandle,
+                         -1,
+                         duplex_flags)) {
+    err = GetLastError();
+    goto error;
+  }
+
+  SET_REQ_SUCCESS(req);
+  uv_insert_pending_req(loop, (uv_req_t*) req);
+  handle->reqs_pending++;
+  REGISTER_HANDLE_REQ(loop, handle, req);
+  return;
+
+error:
+  if (handle->name) {
+    uv__free(handle->name);
+    handle->name = NULL;
+  }
+
+  if (pipeHandle != INVALID_HANDLE_VALUE) {
+    CloseHandle(pipeHandle);
+  }
+
+  /* Make this req pending reporting an error. */
+  SET_REQ_ERROR(req, err);
+  uv_insert_pending_req(loop, (uv_req_t*) req);
+  handle->reqs_pending++;
+  REGISTER_HANDLE_REQ(loop, handle, req);
+  return;
+}
+
+
+void uv__pipe_pause_read(uv_pipe_t* handle) {
+  if (handle->flags & UV_HANDLE_PIPE_READ_CANCELABLE) {
+      /* Pause the ReadFile task briefly, to work
+         around the Windows kernel bug that causes
+         any access to a NamedPipe to deadlock if
+         any process has called ReadFile */
+      HANDLE h;
+      uv_mutex_lock(&handle->pipe.conn.readfile_mutex);
+      h = handle->pipe.conn.readfile_thread;
+      while (h) {
+        /* spinlock: we expect this to finish quickly,
+           or we are probably about to deadlock anyway
+           (in the kernel), so it doesn't matter */
+        pCancelSynchronousIo(h);
+        SwitchToThread(); /* yield thread control briefly */
+        h = handle->pipe.conn.readfile_thread;
+      }
+  }
+}
+
+
+void uv__pipe_unpause_read(uv_pipe_t* handle) {
+  if (handle->flags & UV_HANDLE_PIPE_READ_CANCELABLE) {
+    uv_mutex_unlock(&handle->pipe.conn.readfile_mutex);
+  }
+}
+
+
+void uv__pipe_stop_read(uv_pipe_t* handle) {
+  handle->flags &= ~UV_HANDLE_READING;
+  uv__pipe_pause_read((uv_pipe_t*)handle);
+  uv__pipe_unpause_read((uv_pipe_t*)handle);
+}
+
+
+/* Cleans up uv_pipe_t (server or connection) and all resources associated */
+/* with it. */
+void uv_pipe_cleanup(uv_loop_t* loop, uv_pipe_t* handle) {
+  int i;
+  HANDLE pipeHandle;
+
+  uv__pipe_stop_read(handle);
+
+  if (handle->name) {
+    uv__free(handle->name);
+    handle->name = NULL;
+  }
+
+  if (handle->flags & UV_HANDLE_PIPESERVER) {
+    for (i = 0; i < handle->pipe.serv.pending_instances; i++) {
+      pipeHandle = handle->pipe.serv.accept_reqs[i].pipeHandle;
+      if (pipeHandle != INVALID_HANDLE_VALUE) {
+        CloseHandle(pipeHandle);
+        handle->pipe.serv.accept_reqs[i].pipeHandle = INVALID_HANDLE_VALUE;
+      }
+    }
+    handle->handle = INVALID_HANDLE_VALUE;
+  }
+
+  if (handle->flags & UV_HANDLE_CONNECTION) {
+    handle->flags &= ~UV_HANDLE_WRITABLE;
+    eof_timer_destroy(handle);
+  }
+
+  if ((handle->flags & UV_HANDLE_CONNECTION)
+      && handle->handle != INVALID_HANDLE_VALUE)
+    close_pipe(handle);
+}
+
+
+void uv_pipe_close(uv_loop_t* loop, uv_pipe_t* handle) {
+  if (handle->flags & UV_HANDLE_READING) {
+    handle->flags &= ~UV_HANDLE_READING;
+    DECREASE_ACTIVE_COUNT(loop, handle);
+  }
+
+  if (handle->flags & UV_HANDLE_LISTENING) {
+    handle->flags &= ~UV_HANDLE_LISTENING;
+    DECREASE_ACTIVE_COUNT(loop, handle);
+  }
+
+  uv_pipe_cleanup(loop, handle);
+
+  if (handle->reqs_pending == 0) {
+    uv_want_endgame(loop, (uv_handle_t*) handle);
+  }
+
+  handle->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
+  uv__handle_closing(handle);
+}
+
+
+static void uv_pipe_queue_accept(uv_loop_t* loop, uv_pipe_t* handle,
+    uv_pipe_accept_t* req, BOOL firstInstance) {
+  assert(handle->flags & UV_HANDLE_LISTENING);
+
+  if (!firstInstance) {
+    assert(req->pipeHandle == INVALID_HANDLE_VALUE);
+
+    req->pipeHandle = CreateNamedPipeW(handle->name,
+        PIPE_ACCESS_DUPLEX | FILE_FLAG_OVERLAPPED,
+        PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT,
+        PIPE_UNLIMITED_INSTANCES, 65536, 65536, 0, NULL);
+
+    if (req->pipeHandle == INVALID_HANDLE_VALUE) {
+      SET_REQ_ERROR(req, GetLastError());
+      uv_insert_pending_req(loop, (uv_req_t*) req);
+      handle->reqs_pending++;
+      return;
+    }
+
+    if (uv_set_pipe_handle(loop, handle, req->pipeHandle, -1, 0)) {
+      CloseHandle(req->pipeHandle);
+      req->pipeHandle = INVALID_HANDLE_VALUE;
+      SET_REQ_ERROR(req, GetLastError());
+      uv_insert_pending_req(loop, (uv_req_t*) req);
+      handle->reqs_pending++;
+      return;
+    }
+  }
+
+  assert(req->pipeHandle != INVALID_HANDLE_VALUE);
+
+  /* Prepare the overlapped structure. */
+  memset(&(req->u.io.overlapped), 0, sizeof(req->u.io.overlapped));
+
+  if (!ConnectNamedPipe(req->pipeHandle, &req->u.io.overlapped) &&
+      GetLastError() != ERROR_IO_PENDING) {
+    if (GetLastError() == ERROR_PIPE_CONNECTED) {
+      SET_REQ_SUCCESS(req);
+    } else {
+      CloseHandle(req->pipeHandle);
+      req->pipeHandle = INVALID_HANDLE_VALUE;
+      /* Make this req pending reporting an error. */
+      SET_REQ_ERROR(req, GetLastError());
+    }
+    uv_insert_pending_req(loop, (uv_req_t*) req);
+    handle->reqs_pending++;
+    return;
+  }
+
+  handle->reqs_pending++;
+}
+
+
+int uv_pipe_accept(uv_pipe_t* server, uv_stream_t* client) {
+  uv_loop_t* loop = server->loop;
+  uv_pipe_t* pipe_client;
+  uv_pipe_accept_t* req;
+  QUEUE* q;
+  uv__ipc_queue_item_t* item;
+  int err;
+
+  if (server->ipc) {
+    if (QUEUE_EMPTY(&server->pipe.conn.pending_ipc_info.queue)) {
+      /* No valid pending sockets. */
+      return WSAEWOULDBLOCK;
+    }
+
+    q = QUEUE_HEAD(&server->pipe.conn.pending_ipc_info.queue);
+    QUEUE_REMOVE(q);
+    server->pipe.conn.pending_ipc_info.queue_len--;
+    item = QUEUE_DATA(q, uv__ipc_queue_item_t, member);
+
+    err = uv_tcp_import((uv_tcp_t*)client,
+                        &item->socket_info_ex,
+                        item->tcp_connection);
+    if (err != 0)
+      return err;
+
+    uv__free(item);
+
+  } else {
+    pipe_client = (uv_pipe_t*)client;
+
+    /* Find a connection instance that has been connected, but not yet */
+    /* accepted. */
+    req = server->pipe.serv.pending_accepts;
+
+    if (!req) {
+      /* No valid connections found, so we error out. */
+      return WSAEWOULDBLOCK;
+    }
+
+    /* Initialize the client handle and copy the pipeHandle to the client */
+    uv_pipe_connection_init(pipe_client);
+    pipe_client->handle = req->pipeHandle;
+    pipe_client->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
+
+    /* Prepare the req to pick up a new connection */
+    server->pipe.serv.pending_accepts = req->next_pending;
+    req->next_pending = NULL;
+    req->pipeHandle = INVALID_HANDLE_VALUE;
+
+    if (!(server->flags & UV__HANDLE_CLOSING)) {
+      uv_pipe_queue_accept(loop, server, req, FALSE);
+    }
+  }
+
+  return 0;
+}
+
+
+/* Starts listening for connections for the given pipe. */
+int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb) {
+  uv_loop_t* loop = handle->loop;
+  int i;
+
+  if (handle->flags & UV_HANDLE_LISTENING) {
+    handle->stream.serv.connection_cb = cb;
+  }
+
+  if (!(handle->flags & UV_HANDLE_BOUND)) {
+    return WSAEINVAL;
+  }
+
+  if (handle->flags & UV_HANDLE_READING) {
+    return WSAEISCONN;
+  }
+
+  if (!(handle->flags & UV_HANDLE_PIPESERVER)) {
+    return ERROR_NOT_SUPPORTED;
+  }
+
+  handle->flags |= UV_HANDLE_LISTENING;
+  INCREASE_ACTIVE_COUNT(loop, handle);
+  handle->stream.serv.connection_cb = cb;
+
+  /* First pipe handle should have already been created in uv_pipe_bind */
+  assert(handle->pipe.serv.accept_reqs[0].pipeHandle != INVALID_HANDLE_VALUE);
+
+  for (i = 0; i < handle->pipe.serv.pending_instances; i++) {
+    uv_pipe_queue_accept(loop, handle, &handle->pipe.serv.accept_reqs[i], i == 0);
+  }
+
+  return 0;
+}
+
+
+static DWORD WINAPI uv_pipe_zero_readfile_thread_proc(void* parameter) {
+  int result;
+  DWORD bytes;
+  uv_read_t* req = (uv_read_t*) parameter;
+  uv_pipe_t* handle = (uv_pipe_t*) req->data;
+  uv_loop_t* loop = handle->loop;
+  HANDLE hThread = NULL;
+  DWORD err;
+  uv_mutex_t *m = &handle->pipe.conn.readfile_mutex;
+
+  assert(req != NULL);
+  assert(req->type == UV_READ);
+  assert(handle->type == UV_NAMED_PIPE);
+
+  if (handle->flags & UV_HANDLE_PIPE_READ_CANCELABLE) {
+    uv_mutex_lock(m); /* mutex controls *setting* of readfile_thread */
+    if (DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
+                        GetCurrentProcess(), &hThread,
+                        0, TRUE, DUPLICATE_SAME_ACCESS)) {
+      handle->pipe.conn.readfile_thread = hThread;
+    } else {
+      hThread = NULL;
+    }
+    uv_mutex_unlock(m);
+  }
+restart_readfile:
+  result = ReadFile(handle->handle,
+                    &uv_zero_,
+                    0,
+                    &bytes,
+                    NULL);
+  if (!result) {
+    err = GetLastError();
+    if (err == ERROR_OPERATION_ABORTED &&
+        handle->flags & UV_HANDLE_PIPE_READ_CANCELABLE) {
+      if (handle->flags & UV_HANDLE_READING) {
+        /* just a brief break to do something else */
+        handle->pipe.conn.readfile_thread = NULL;
+        /* resume after it is finished */
+        uv_mutex_lock(m);
+        handle->pipe.conn.readfile_thread = hThread;
+        uv_mutex_unlock(m);
+        goto restart_readfile;
+      } else {
+        result = 1; /* successfully stopped reading */
+      }
+    }
+  }
+  if (hThread) {
+    assert(hThread == handle->pipe.conn.readfile_thread);
+    /* mutex does not control clearing readfile_thread */
+    handle->pipe.conn.readfile_thread = NULL;
+    uv_mutex_lock(m);
+    /* only when we hold the mutex lock is it safe to
+       open or close the handle */
+    CloseHandle(hThread);
+    uv_mutex_unlock(m);
+  }
+
+  if (!result) {
+    SET_REQ_ERROR(req, err);
+  }
+
+  POST_COMPLETION_FOR_REQ(loop, req);
+  return 0;
+}
+
+
+static DWORD WINAPI uv_pipe_writefile_thread_proc(void* parameter) {
+  int result;
+  DWORD bytes;
+  uv_write_t* req = (uv_write_t*) parameter;
+  uv_pipe_t* handle = (uv_pipe_t*) req->handle;
+  uv_loop_t* loop = handle->loop;
+
+  assert(req != NULL);
+  assert(req->type == UV_WRITE);
+  assert(handle->type == UV_NAMED_PIPE);
+  assert(req->write_buffer.base);
+
+  result = WriteFile(handle->handle,
+                     req->write_buffer.base,
+                     req->write_buffer.len,
+                     &bytes,
+                     NULL);
+
+  if (!result) {
+    SET_REQ_ERROR(req, GetLastError());
+  }
+
+  POST_COMPLETION_FOR_REQ(loop, req);
+  return 0;
+}
+
+
+static void CALLBACK post_completion_read_wait(void* context, BOOLEAN timed_out) {
+  uv_read_t* req;
+  uv_tcp_t* handle;
+
+  req = (uv_read_t*) context;
+  assert(req != NULL);
+  handle = (uv_tcp_t*)req->data;
+  assert(handle != NULL);
+  assert(!timed_out);
+
+  if (!PostQueuedCompletionStatus(handle->loop->iocp,
+                                  req->u.io.overlapped.InternalHigh,
+                                  0,
+                                  &req->u.io.overlapped)) {
+    uv_fatal_error(GetLastError(), "PostQueuedCompletionStatus");
+  }
+}
+
+
+static void CALLBACK post_completion_write_wait(void* context, BOOLEAN timed_out) {
+  uv_write_t* req;
+  uv_tcp_t* handle;
+
+  req = (uv_write_t*) context;
+  assert(req != NULL);
+  handle = (uv_tcp_t*)req->handle;
+  assert(handle != NULL);
+  assert(!timed_out);
+
+  if (!PostQueuedCompletionStatus(handle->loop->iocp,
+                                  req->u.io.overlapped.InternalHigh,
+                                  0,
+                                  &req->u.io.overlapped)) {
+    uv_fatal_error(GetLastError(), "PostQueuedCompletionStatus");
+  }
+}
+
+
+static void uv_pipe_queue_read(uv_loop_t* loop, uv_pipe_t* handle) {
+  uv_read_t* req;
+  int result;
+
+  assert(handle->flags & UV_HANDLE_READING);
+  assert(!(handle->flags & UV_HANDLE_READ_PENDING));
+
+  assert(handle->handle != INVALID_HANDLE_VALUE);
+
+  req = &handle->read_req;
+
+  if (handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE) {
+    if (!QueueUserWorkItem(&uv_pipe_zero_readfile_thread_proc,
+                           req,
+                           WT_EXECUTELONGFUNCTION)) {
+      /* Make this req pending reporting an error. */
+      SET_REQ_ERROR(req, GetLastError());
+      goto error;
+    }
+  } else {
+    memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));
+    if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+      req->u.io.overlapped.hEvent = (HANDLE) ((uintptr_t) req->event_handle | 1);
+    }
+
+    /* Do 0-read */
+    result = ReadFile(handle->handle,
+                      &uv_zero_,
+                      0,
+                      NULL,
+                      &req->u.io.overlapped);
+
+    if (!result && GetLastError() != ERROR_IO_PENDING) {
+      /* Make this req pending reporting an error. */
+      SET_REQ_ERROR(req, GetLastError());
+      goto error;
+    }
+
+    if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+      if (!req->event_handle) {
+        req->event_handle = CreateEvent(NULL, 0, 0, NULL);
+        if (!req->event_handle) {
+          uv_fatal_error(GetLastError(), "CreateEvent");
+        }
+      }
+      if (req->wait_handle == INVALID_HANDLE_VALUE) {
+        if (!RegisterWaitForSingleObject(&req->wait_handle,
+            req->u.io.overlapped.hEvent, post_completion_read_wait, (void*) req,
+            INFINITE, WT_EXECUTEINWAITTHREAD)) {
+          SET_REQ_ERROR(req, GetLastError());
+          goto error;
+        }
+      }
+    }
+  }
+
+  /* Start the eof timer if there is one */
+  eof_timer_start(handle);
+  handle->flags |= UV_HANDLE_READ_PENDING;
+  handle->reqs_pending++;
+  return;
+
+error:
+  uv_insert_pending_req(loop, (uv_req_t*)req);
+  handle->flags |= UV_HANDLE_READ_PENDING;
+  handle->reqs_pending++;
+}
+
+
+int uv_pipe_read_start(uv_pipe_t* handle,
+                       uv_alloc_cb alloc_cb,
+                       uv_read_cb read_cb) {
+  uv_loop_t* loop = handle->loop;
+
+  handle->flags |= UV_HANDLE_READING;
+  INCREASE_ACTIVE_COUNT(loop, handle);
+  handle->read_cb = read_cb;
+  handle->alloc_cb = alloc_cb;
+
+  /* If reading was stopped and then started again, there could still be a */
+  /* read request pending. */
+  if (!(handle->flags & UV_HANDLE_READ_PENDING))
+    uv_pipe_queue_read(loop, handle);
+
+  return 0;
+}
+
+
+static void uv_insert_non_overlapped_write_req(uv_pipe_t* handle,
+    uv_write_t* req) {
+  req->next_req = NULL;
+  if (handle->pipe.conn.non_overlapped_writes_tail) {
+    req->next_req =
+      handle->pipe.conn.non_overlapped_writes_tail->next_req;
+    handle->pipe.conn.non_overlapped_writes_tail->next_req = (uv_req_t*)req;
+    handle->pipe.conn.non_overlapped_writes_tail = req;
+  } else {
+    req->next_req = (uv_req_t*)req;
+    handle->pipe.conn.non_overlapped_writes_tail = req;
+  }
+}
+
+
+static uv_write_t* uv_remove_non_overlapped_write_req(uv_pipe_t* handle) {
+  uv_write_t* req;
+
+  if (handle->pipe.conn.non_overlapped_writes_tail) {
+    req = (uv_write_t*)handle->pipe.conn.non_overlapped_writes_tail->next_req;
+
+    if (req == handle->pipe.conn.non_overlapped_writes_tail) {
+      handle->pipe.conn.non_overlapped_writes_tail = NULL;
+    } else {
+      handle->pipe.conn.non_overlapped_writes_tail->next_req =
+        req->next_req;
+    }
+
+    return req;
+  } else {
+    /* queue empty */
+    return NULL;
+  }
+}
+
+
+static void uv_queue_non_overlapped_write(uv_pipe_t* handle) {
+  uv_write_t* req = uv_remove_non_overlapped_write_req(handle);
+  if (req) {
+    if (!QueueUserWorkItem(&uv_pipe_writefile_thread_proc,
+                           req,
+                           WT_EXECUTELONGFUNCTION)) {
+      uv_fatal_error(GetLastError(), "QueueUserWorkItem");
+    }
+  }
+}
+
+
+static int uv_pipe_write_impl(uv_loop_t* loop,
+                              uv_write_t* req,
+                              uv_pipe_t* handle,
+                              const uv_buf_t bufs[],
+                              unsigned int nbufs,
+                              uv_stream_t* send_handle,
+                              uv_write_cb cb) {
+  int err;
+  int result;
+  uv_tcp_t* tcp_send_handle;
+  uv_write_t* ipc_header_req = NULL;
+  uv_ipc_frame_uv_stream ipc_frame;
+
+  if (nbufs != 1 && (nbufs != 0 || !send_handle)) {
+    return ERROR_NOT_SUPPORTED;
+  }
+
+  /* Only TCP handles are supported for sharing. */
+  if (send_handle && ((send_handle->type != UV_TCP) ||
+      (!(send_handle->flags & UV_HANDLE_BOUND) &&
+       !(send_handle->flags & UV_HANDLE_CONNECTION)))) {
+    return ERROR_NOT_SUPPORTED;
+  }
+
+  assert(handle->handle != INVALID_HANDLE_VALUE);
+
+  uv_req_init(loop, (uv_req_t*) req);
+  req->type = UV_WRITE;
+  req->handle = (uv_stream_t*) handle;
+  req->cb = cb;
+  req->ipc_header = 0;
+  req->event_handle = NULL;
+  req->wait_handle = INVALID_HANDLE_VALUE;
+  memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));
+
+  if (handle->ipc) {
+    assert(!(handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE));
+    ipc_frame.header.flags = 0;
+
+    /* Use the IPC framing protocol. */
+    if (send_handle) {
+      tcp_send_handle = (uv_tcp_t*)send_handle;
+
+      if (handle->pipe.conn.ipc_pid == 0) {
+          handle->pipe.conn.ipc_pid = uv_current_pid();
+      }
+
+      err = uv_tcp_duplicate_socket(tcp_send_handle, handle->pipe.conn.ipc_pid,
+          &ipc_frame.socket_info_ex.socket_info);
+      if (err) {
+        return err;
+      }
+
+      ipc_frame.socket_info_ex.delayed_error = tcp_send_handle->delayed_error;
+
+      ipc_frame.header.flags |= UV_IPC_TCP_SERVER;
+
+      if (tcp_send_handle->flags & UV_HANDLE_CONNECTION) {
+        ipc_frame.header.flags |= UV_IPC_TCP_CONNECTION;
+      }
+    }
+
+    if (nbufs == 1) {
+      ipc_frame.header.flags |= UV_IPC_RAW_DATA;
+      ipc_frame.header.raw_data_length = bufs[0].len;
+    }
+
+    /*
+     * Use the provided req if we're only doing a single write.
+     * If we're doing multiple writes, use ipc_header_write_req to do
+     * the first write, and then use the provided req for the second write.
+     */
+    if (!(ipc_frame.header.flags & UV_IPC_RAW_DATA)) {
+      ipc_header_req = req;
+    } else {
+      /*
+       * Try to use the preallocated write req if it's available.
+       * Otherwise allocate a new one.
+       */
+      if (handle->pipe.conn.ipc_header_write_req.type != UV_WRITE) {
+        ipc_header_req = (uv_write_t*)&handle->pipe.conn.ipc_header_write_req;
+      } else {
+        ipc_header_req = (uv_write_t*)uv__malloc(sizeof(uv_write_t));
+        if (!ipc_header_req) {
+          uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+        }
+      }
+
+      uv_req_init(loop, (uv_req_t*) ipc_header_req);
+      ipc_header_req->type = UV_WRITE;
+      ipc_header_req->handle = (uv_stream_t*) handle;
+      ipc_header_req->cb = NULL;
+      ipc_header_req->ipc_header = 1;
+    }
+
+    /* Write the header or the whole frame. */
+    memset(&ipc_header_req->u.io.overlapped, 0,
+           sizeof(ipc_header_req->u.io.overlapped));
+
+    /* Using overlapped IO, but wait for completion before returning.
+       This write is blocking because ipc_frame is on stack. */
+    ipc_header_req->u.io.overlapped.hEvent = CreateEvent(NULL, 1, 0, NULL);
+    if (!ipc_header_req->u.io.overlapped.hEvent) {
+      uv_fatal_error(GetLastError(), "CreateEvent");
+    }
+
+    result = WriteFile(handle->handle,
+                        &ipc_frame,
+                        ipc_frame.header.flags & UV_IPC_TCP_SERVER ?
+                          sizeof(ipc_frame) : sizeof(ipc_frame.header),
+                        NULL,
+                        &ipc_header_req->u.io.overlapped);
+    if (!result && GetLastError() != ERROR_IO_PENDING) {
+      err = GetLastError();
+      CloseHandle(ipc_header_req->u.io.overlapped.hEvent);
+      return err;
+    }
+
+    if (!result) {
+      /* Request not completed immediately. Wait for it.*/
+      if (WaitForSingleObject(ipc_header_req->u.io.overlapped.hEvent, INFINITE) !=
+          WAIT_OBJECT_0) {
+        err = GetLastError();
+        CloseHandle(ipc_header_req->u.io.overlapped.hEvent);
+        return err;
+      }
+    }
+    ipc_header_req->u.io.queued_bytes = 0;
+    CloseHandle(ipc_header_req->u.io.overlapped.hEvent);
+    ipc_header_req->u.io.overlapped.hEvent = NULL;
+
+    REGISTER_HANDLE_REQ(loop, handle, ipc_header_req);
+    handle->reqs_pending++;
+    handle->stream.conn.write_reqs_pending++;
+
+    /* If we don't have any raw data to write - we're done. */
+    if (!(ipc_frame.header.flags & UV_IPC_RAW_DATA)) {
+      return 0;
+    }
+  }
+
+  if ((handle->flags &
+      (UV_HANDLE_BLOCKING_WRITES | UV_HANDLE_NON_OVERLAPPED_PIPE)) ==
+      (UV_HANDLE_BLOCKING_WRITES | UV_HANDLE_NON_OVERLAPPED_PIPE)) {
+    DWORD bytes;
+    result = WriteFile(handle->handle,
+                       bufs[0].base,
+                       bufs[0].len,
+                       &bytes,
+                       NULL);
+
+    if (!result) {
+      err = GetLastError();
+      return err;
+    } else {
+      /* Request completed immediately. */
+      req->u.io.queued_bytes = 0;
+    }
+
+    REGISTER_HANDLE_REQ(loop, handle, req);
+    handle->reqs_pending++;
+    handle->stream.conn.write_reqs_pending++;
+    POST_COMPLETION_FOR_REQ(loop, req);
+    return 0;
+  } else if (handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE) {
+    req->write_buffer = bufs[0];
+    uv_insert_non_overlapped_write_req(handle, req);
+    if (handle->stream.conn.write_reqs_pending == 0) {
+      uv_queue_non_overlapped_write(handle);
+    }
+
+    /* Request queued by the kernel. */
+    req->u.io.queued_bytes = bufs[0].len;
+    handle->write_queue_size += req->u.io.queued_bytes;
+  } else if (handle->flags & UV_HANDLE_BLOCKING_WRITES) {
+    /* Using overlapped IO, but wait for completion before returning */
+    req->u.io.overlapped.hEvent = CreateEvent(NULL, 1, 0, NULL);
+    if (!req->u.io.overlapped.hEvent) {
+      uv_fatal_error(GetLastError(), "CreateEvent");
+    }
+
+    result = WriteFile(handle->handle,
+                       bufs[0].base,
+                       bufs[0].len,
+                       NULL,
+                       &req->u.io.overlapped);
+
+    if (!result && GetLastError() != ERROR_IO_PENDING) {
+      err = GetLastError();
+      CloseHandle(req->u.io.overlapped.hEvent);
+      return err;
+    }
+
+    if (result) {
+      /* Request completed immediately. */
+      req->u.io.queued_bytes = 0;
+    } else {
+      /* Request queued by the kernel. */
+      req->u.io.queued_bytes = bufs[0].len;
+      handle->write_queue_size += req->u.io.queued_bytes;
+      if (WaitForSingleObject(req->u.io.overlapped.hEvent, INFINITE) !=
+          WAIT_OBJECT_0) {
+        err = GetLastError();
+        CloseHandle(req->u.io.overlapped.hEvent);
+        return uv_translate_sys_error(err);
+      }
+    }
+    CloseHandle(req->u.io.overlapped.hEvent);
+
+    REGISTER_HANDLE_REQ(loop, handle, req);
+    handle->reqs_pending++;
+    handle->stream.conn.write_reqs_pending++;
+    return 0;
+  } else {
+    result = WriteFile(handle->handle,
+                       bufs[0].base,
+                       bufs[0].len,
+                       NULL,
+                       &req->u.io.overlapped);
+
+    if (!result && GetLastError() != ERROR_IO_PENDING) {
+      return GetLastError();
+    }
+
+    if (result) {
+      /* Request completed immediately. */
+      req->u.io.queued_bytes = 0;
+    } else {
+      /* Request queued by the kernel. */
+      req->u.io.queued_bytes = bufs[0].len;
+      handle->write_queue_size += req->u.io.queued_bytes;
+    }
+
+    if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+      req->event_handle = CreateEvent(NULL, 0, 0, NULL);
+      if (!req->event_handle) {
+        uv_fatal_error(GetLastError(), "CreateEvent");
+      }
+      if (!RegisterWaitForSingleObject(&req->wait_handle,
+          req->u.io.overlapped.hEvent, post_completion_write_wait, (void*) req,
+          INFINITE, WT_EXECUTEINWAITTHREAD)) {
+        return GetLastError();
+      }
+    }
+  }
+
+  REGISTER_HANDLE_REQ(loop, handle, req);
+  handle->reqs_pending++;
+  handle->stream.conn.write_reqs_pending++;
+
+  return 0;
+}
+
+
+int uv_pipe_write(uv_loop_t* loop,
+                  uv_write_t* req,
+                  uv_pipe_t* handle,
+                  const uv_buf_t bufs[],
+                  unsigned int nbufs,
+                  uv_write_cb cb) {
+  return uv_pipe_write_impl(loop, req, handle, bufs, nbufs, NULL, cb);
+}
+
+
+int uv_pipe_write2(uv_loop_t* loop,
+                   uv_write_t* req,
+                   uv_pipe_t* handle,
+                   const uv_buf_t bufs[],
+                   unsigned int nbufs,
+                   uv_stream_t* send_handle,
+                   uv_write_cb cb) {
+  if (!handle->ipc) {
+    return WSAEINVAL;
+  }
+
+  return uv_pipe_write_impl(loop, req, handle, bufs, nbufs, send_handle, cb);
+}
+
+
+static void uv_pipe_read_eof(uv_loop_t* loop, uv_pipe_t* handle,
+    uv_buf_t buf) {
+  /* If there is an eof timer running, we don't need it any more, */
+  /* so discard it. */
+  eof_timer_destroy(handle);
+
+  handle->flags &= ~UV_HANDLE_READABLE;
+  uv_read_stop((uv_stream_t*) handle);
+
+  handle->read_cb((uv_stream_t*) handle, UV_EOF, &buf);
+}
+
+
+static void uv_pipe_read_error(uv_loop_t* loop, uv_pipe_t* handle, int error,
+    uv_buf_t buf) {
+  /* If there is an eof timer running, we don't need it any more, */
+  /* so discard it. */
+  eof_timer_destroy(handle);
+
+  uv_read_stop((uv_stream_t*) handle);
+
+  handle->read_cb((uv_stream_t*)handle, uv_translate_sys_error(error), &buf);
+}
+
+
+static void uv_pipe_read_error_or_eof(uv_loop_t* loop, uv_pipe_t* handle,
+    int error, uv_buf_t buf) {
+  if (error == ERROR_BROKEN_PIPE) {
+    uv_pipe_read_eof(loop, handle, buf);
+  } else {
+    uv_pipe_read_error(loop, handle, error, buf);
+  }
+}
+
+
+void uv__pipe_insert_pending_socket(uv_pipe_t* handle,
+                                    uv__ipc_socket_info_ex* info,
+                                    int tcp_connection) {
+  uv__ipc_queue_item_t* item;
+
+  item = (uv__ipc_queue_item_t*) uv__malloc(sizeof(*item));
+  if (item == NULL)
+    uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
+
+  memcpy(&item->socket_info_ex, info, sizeof(item->socket_info_ex));
+  item->tcp_connection = tcp_connection;
+  QUEUE_INSERT_TAIL(&handle->pipe.conn.pending_ipc_info.queue, &item->member);
+  handle->pipe.conn.pending_ipc_info.queue_len++;
+}
+
+
+void uv_process_pipe_read_req(uv_loop_t* loop, uv_pipe_t* handle,
+    uv_req_t* req) {
+  DWORD bytes, avail;
+  uv_buf_t buf;
+  uv_ipc_frame_uv_stream ipc_frame;
+
+  assert(handle->type == UV_NAMED_PIPE);
+
+  handle->flags &= ~UV_HANDLE_READ_PENDING;
+  eof_timer_stop(handle);
+
+  if (!REQ_SUCCESS(req)) {
+    /* An error occurred doing the 0-read. */
+    if (handle->flags & UV_HANDLE_READING) {
+      uv_pipe_read_error_or_eof(loop,
+                                handle,
+                                GET_REQ_ERROR(req),
+                                uv_null_buf_);
+    }
+  } else {
+    /* Do non-blocking reads until the buffer is empty */
+    while (handle->flags & UV_HANDLE_READING) {
+      if (!PeekNamedPipe(handle->handle,
+                          NULL,
+                          0,
+                          NULL,
+                          &avail,
+                          NULL)) {
+        uv_pipe_read_error_or_eof(loop, handle, GetLastError(), uv_null_buf_);
+        break;
+      }
+
+      if (avail == 0) {
+        /* There is nothing to read after all. */
+        break;
+      }
+
+      if (handle->ipc) {
+        /* Use the IPC framing protocol to read the incoming data. */
+        if (handle->pipe.conn.remaining_ipc_rawdata_bytes == 0) {
+          /* We're reading a new frame.  First, read the header. */
+          assert(avail >= sizeof(ipc_frame.header));
+
+          if (!ReadFile(handle->handle,
+                        &ipc_frame.header,
+                        sizeof(ipc_frame.header),
+                        &bytes,
+                        NULL)) {
+            uv_pipe_read_error_or_eof(loop, handle, GetLastError(),
+              uv_null_buf_);
+            break;
+          }
+
+          assert(bytes == sizeof(ipc_frame.header));
+          assert(ipc_frame.header.flags <= (UV_IPC_TCP_SERVER | UV_IPC_RAW_DATA |
+            UV_IPC_TCP_CONNECTION));
+
+          if (ipc_frame.header.flags & UV_IPC_TCP_SERVER) {
+            assert(avail - sizeof(ipc_frame.header) >=
+              sizeof(ipc_frame.socket_info_ex));
+
+            /* Read the TCP socket info. */
+            if (!ReadFile(handle->handle,
+                          &ipc_frame.socket_info_ex,
+                          sizeof(ipc_frame) - sizeof(ipc_frame.header),
+                          &bytes,
+                          NULL)) {
+              uv_pipe_read_error_or_eof(loop, handle, GetLastError(),
+                uv_null_buf_);
+              break;
+            }
+
+            assert(bytes == sizeof(ipc_frame) - sizeof(ipc_frame.header));
+
+            /* Store the pending socket info. */
+            uv__pipe_insert_pending_socket(
+                handle,
+                &ipc_frame.socket_info_ex,
+                ipc_frame.header.flags & UV_IPC_TCP_CONNECTION);
+          }
+
+          if (ipc_frame.header.flags & UV_IPC_RAW_DATA) {
+            handle->pipe.conn.remaining_ipc_rawdata_bytes =
+              ipc_frame.header.raw_data_length;
+            continue;
+          }
+        } else {
+          avail = min(avail, (DWORD)handle->pipe.conn.remaining_ipc_rawdata_bytes);
+        }
+      }
+
+      buf = uv_buf_init(NULL, 0);
+      handle->alloc_cb((uv_handle_t*) handle, avail, &buf);
+      if (buf.base == NULL || buf.len == 0) {
+        handle->read_cb((uv_stream_t*) handle, UV_ENOBUFS, &buf);
+        break;
+      }
+      assert(buf.base != NULL);
+
+      if (ReadFile(handle->handle,
+                   buf.base,
+                   min(buf.len, avail),
+                   &bytes,
+                   NULL)) {
+        /* Successful read */
+        if (handle->ipc) {
+          assert(handle->pipe.conn.remaining_ipc_rawdata_bytes >= bytes);
+          handle->pipe.conn.remaining_ipc_rawdata_bytes =
+            handle->pipe.conn.remaining_ipc_rawdata_bytes - bytes;
+        }
+        handle->read_cb((uv_stream_t*)handle, bytes, &buf);
+
+        /* Read again only if bytes == buf.len */
+        if (bytes <= buf.len) {
+          break;
+        }
+      } else {
+        uv_pipe_read_error_or_eof(loop, handle, GetLastError(), buf);
+        break;
+      }
+    }
+
+    /* Post another 0-read if still reading and not closing. */
+    if ((handle->flags & UV_HANDLE_READING) &&
+        !(handle->flags & UV_HANDLE_READ_PENDING)) {
+      uv_pipe_queue_read(loop, handle);
+    }
+  }
+
+  DECREASE_PENDING_REQ_COUNT(handle);
+}
+
+
+void uv_process_pipe_write_req(uv_loop_t* loop, uv_pipe_t* handle,
+    uv_write_t* req) {
+  int err;
+
+  assert(handle->type == UV_NAMED_PIPE);
+
+  assert(handle->write_queue_size >= req->u.io.queued_bytes);
+  handle->write_queue_size -= req->u.io.queued_bytes;
+
+  UNREGISTER_HANDLE_REQ(loop, handle, req);
+
+  if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+    if (req->wait_handle != INVALID_HANDLE_VALUE) {
+      UnregisterWait(req->wait_handle);
+      req->wait_handle = INVALID_HANDLE_VALUE;
+    }
+    if (req->event_handle) {
+      CloseHandle(req->event_handle);
+      req->event_handle = NULL;
+    }
+  }
+
+  if (req->ipc_header) {
+    if (req == &handle->pipe.conn.ipc_header_write_req) {
+      req->type = UV_UNKNOWN_REQ;
+    } else {
+      uv__free(req);
+    }
+  } else {
+    if (req->cb) {
+      err = GET_REQ_ERROR(req);
+      req->cb(req, uv_translate_sys_error(err));
+    }
+  }
+
+  handle->stream.conn.write_reqs_pending--;
+
+  if (handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE &&
+      handle->pipe.conn.non_overlapped_writes_tail) {
+    assert(handle->stream.conn.write_reqs_pending > 0);
+    uv_queue_non_overlapped_write(handle);
+  }
+
+  if (handle->stream.conn.shutdown_req != NULL &&
+      handle->stream.conn.write_reqs_pending == 0) {
+    uv_want_endgame(loop, (uv_handle_t*)handle);
+  }
+
+  DECREASE_PENDING_REQ_COUNT(handle);
+}
+
+
+void uv_process_pipe_accept_req(uv_loop_t* loop, uv_pipe_t* handle,
+    uv_req_t* raw_req) {
+  uv_pipe_accept_t* req = (uv_pipe_accept_t*) raw_req;
+
+  assert(handle->type == UV_NAMED_PIPE);
+
+  if (handle->flags & UV__HANDLE_CLOSING) {
+    /* The req->pipeHandle should be freed already in uv_pipe_cleanup(). */
+    assert(req->pipeHandle == INVALID_HANDLE_VALUE);
+    DECREASE_PENDING_REQ_COUNT(handle);
+    return;
+  }
+
+  if (REQ_SUCCESS(req)) {
+    assert(req->pipeHandle != INVALID_HANDLE_VALUE);
+    req->next_pending = handle->pipe.serv.pending_accepts;
+    handle->pipe.serv.pending_accepts = req;
+
+    if (handle->stream.serv.connection_cb) {
+      handle->stream.serv.connection_cb((uv_stream_t*)handle, 0);
+    }
+  } else {
+    if (req->pipeHandle != INVALID_HANDLE_VALUE) {
+      CloseHandle(req->pipeHandle);
+      req->pipeHandle = INVALID_HANDLE_VALUE;
+    }
+    if (!(handle->flags & UV__HANDLE_CLOSING)) {
+      uv_pipe_queue_accept(loop, handle, req, FALSE);
+    }
+  }
+
+  DECREASE_PENDING_REQ_COUNT(handle);
+}
+
+
+void uv_process_pipe_connect_req(uv_loop_t* loop, uv_pipe_t* handle,
+    uv_connect_t* req) {
+  int err;
+
+  assert(handle->type == UV_NAMED_PIPE);
+
+  UNREGISTER_HANDLE_REQ(loop, handle, req);
+
+  if (req->cb) {
+    err = 0;
+    if (REQ_SUCCESS(req)) {
+      uv_pipe_connection_init(handle);
+    } else {
+      err = GET_REQ_ERROR(req);
+    }
+    req->cb(req, uv_translate_sys_error(err));
+  }
+
+  DECREASE_PENDING_REQ_COUNT(handle);
+}
+
+
+void uv_process_pipe_shutdown_req(uv_loop_t* loop, uv_pipe_t* handle,
+    uv_shutdown_t* req) {
+  assert(handle->type == UV_NAMED_PIPE);
+
+  UNREGISTER_HANDLE_REQ(loop, handle, req);
+
+  if (handle->flags & UV_HANDLE_READABLE) {
+    /* Initialize and optionally start the eof timer. Only do this if the */
+    /* pipe is readable and we haven't seen EOF come in ourselves. */
+    eof_timer_init(handle);
+
+    /* If reading start the timer right now. */
+    /* Otherwise uv_pipe_queue_read will start it. */
+    if (handle->flags & UV_HANDLE_READ_PENDING) {
+      eof_timer_start(handle);
+    }
+
+  } else {
+    /* This pipe is not readable. We can just close it to let the other end */
+    /* know that we're done writing. */
+    close_pipe(handle);
+  }
+
+  if (req->cb) {
+    req->cb(req, 0);
+  }
+
+  DECREASE_PENDING_REQ_COUNT(handle);
+}
+
+
+static void eof_timer_init(uv_pipe_t* pipe) {
+  int r;
+
+  assert(pipe->pipe.conn.eof_timer == NULL);
+  assert(pipe->flags & UV_HANDLE_CONNECTION);
+
+  pipe->pipe.conn.eof_timer = (uv_timer_t*) uv__malloc(sizeof *pipe->pipe.conn.eof_timer);
+
+  r = uv_timer_init(pipe->loop, pipe->pipe.conn.eof_timer);
+  assert(r == 0); /* timers can't fail */
+  pipe->pipe.conn.eof_timer->data = pipe;
+  uv_unref((uv_handle_t*) pipe->pipe.conn.eof_timer);
+}
+
+
+static void eof_timer_start(uv_pipe_t* pipe) {
+  assert(pipe->flags & UV_HANDLE_CONNECTION);
+
+  if (pipe->pipe.conn.eof_timer != NULL) {
+    uv_timer_start(pipe->pipe.conn.eof_timer, eof_timer_cb, eof_timeout, 0);
+  }
+}
+
+
+static void eof_timer_stop(uv_pipe_t* pipe) {
+  assert(pipe->flags & UV_HANDLE_CONNECTION);
+
+  if (pipe->pipe.conn.eof_timer != NULL) {
+    uv_timer_stop(pipe->pipe.conn.eof_timer);
+  }
+}
+
+
+static void eof_timer_cb(uv_timer_t* timer) {
+  uv_pipe_t* pipe = (uv_pipe_t*) timer->data;
+  uv_loop_t* loop = timer->loop;
+
+  assert(pipe->type == UV_NAMED_PIPE);
+
+  /* This should always be true, since we start the timer only */
+  /* in uv_pipe_queue_read after successfully calling ReadFile, */
+  /* or in uv_process_pipe_shutdown_req if a read is pending, */
+  /* and we always immediately stop the timer in */
+  /* uv_process_pipe_read_req. */
+  assert(pipe->flags & UV_HANDLE_READ_PENDING);
+
+  /* If there are many packets coming off the iocp then the timer callback */
+  /* may be called before the read request is coming off the queue. */
+  /* Therefore we check here if the read request has completed but will */
+  /* be processed later. */
+  if ((pipe->flags & UV_HANDLE_READ_PENDING) &&
+      HasOverlappedIoCompleted(&pipe->read_req.u.io.overlapped)) {
+    return;
+  }
+
+  /* Force both ends off the pipe. */
+  close_pipe(pipe);
+
+  /* Stop reading, so the pending read that is going to fail will */
+  /* not be reported to the user. */
+  uv_read_stop((uv_stream_t*) pipe);
+
+  /* Report the eof and update flags. This will get reported even if the */
+  /* user stopped reading in the meantime. TODO: is that okay? */
+  uv_pipe_read_eof(loop, pipe, uv_null_buf_);
+}
+
+
+static void eof_timer_destroy(uv_pipe_t* pipe) {
+  assert(pipe->flags & UV_HANDLE_CONNECTION);
+
+  if (pipe->pipe.conn.eof_timer) {
+    uv_close((uv_handle_t*) pipe->pipe.conn.eof_timer, eof_timer_close_cb);
+    pipe->pipe.conn.eof_timer = NULL;
+  }
+}
+
+
+static void eof_timer_close_cb(uv_handle_t* handle) {
+  assert(handle->type == UV_TIMER);
+  uv__free(handle);
+}
+
+
+int uv_pipe_open(uv_pipe_t* pipe, uv_file file) {
+  HANDLE os_handle = uv__get_osfhandle(file);
+  NTSTATUS nt_status;
+  IO_STATUS_BLOCK io_status;
+  FILE_ACCESS_INFORMATION access;
+  DWORD duplex_flags = 0;
+
+  if (os_handle == INVALID_HANDLE_VALUE)
+    return UV_EBADF;
+
+  /* In order to avoid closing a stdio file descriptor 0-2, duplicate the
+   * underlying OS handle and forget about the original fd.
+   * We could also opt to use the original OS handle and just never close it,
+   * but then there would be no reliable way to cancel pending read operations
+   * upon close.
+   */
+  if (file <= 2) {
+    if (!DuplicateHandle(INVALID_HANDLE_VALUE,
+                         os_handle,
+                         INVALID_HANDLE_VALUE,
+                         &os_handle,
+                         0,
+                         FALSE,
+                         DUPLICATE_SAME_ACCESS))
+      return uv_translate_sys_error(GetLastError());
+    file = -1;
+  }
+
+  /* Determine what kind of permissions we have on this handle.
+   * Cygwin opens the pipe in message mode, but we can support it,
+   * just query the access flags and set the stream flags accordingly.
+   */
+  nt_status = pNtQueryInformationFile(os_handle,
+                                      &io_status,
+                                      &access,
+                                      sizeof(access),
+                                      FileAccessInformation);
+  if (nt_status != STATUS_SUCCESS)
+    return UV_EINVAL;
+
+  if (pipe->ipc) {
+    if (!(access.AccessFlags & FILE_WRITE_DATA) ||
+        !(access.AccessFlags & FILE_READ_DATA)) {
+      return UV_EINVAL;
+    }
+  }
+
+  if (access.AccessFlags & FILE_WRITE_DATA)
+    duplex_flags |= UV_HANDLE_WRITABLE;
+  if (access.AccessFlags & FILE_READ_DATA)
+    duplex_flags |= UV_HANDLE_READABLE;
+
+  if (os_handle == INVALID_HANDLE_VALUE ||
+      uv_set_pipe_handle(pipe->loop,
+                         pipe,
+                         os_handle,
+                         file,
+                         duplex_flags) == -1) {
+    return UV_EINVAL;
+  }
+
+  uv_pipe_connection_init(pipe);
+
+  if (pipe->ipc) {
+    assert(!(pipe->flags & UV_HANDLE_NON_OVERLAPPED_PIPE));
+    pipe->pipe.conn.ipc_pid = uv_parent_pid();
+    assert(pipe->pipe.conn.ipc_pid != -1);
+  }
+  return 0;
+}
+
+
+static int uv__pipe_getname(const uv_pipe_t* handle, char* buffer, size_t* size) {
+  NTSTATUS nt_status;
+  IO_STATUS_BLOCK io_status;
+  FILE_NAME_INFORMATION tmp_name_info;
+  FILE_NAME_INFORMATION* name_info;
+  WCHAR* name_buf;
+  unsigned int addrlen;
+  unsigned int name_size;
+  unsigned int name_len;
+  int err;
+
+  name_info = NULL;
+
+  if (handle->handle == INVALID_HANDLE_VALUE) {
+    *size = 0;
+    return UV_EINVAL;
+  }
+
+  uv__pipe_pause_read((uv_pipe_t*)handle); /* cast away const warning */
+
+  nt_status = pNtQueryInformationFile(handle->handle,
+                                      &io_status,
+                                      &tmp_name_info,
+                                      sizeof tmp_name_info,
+                                      FileNameInformation);
+  if (nt_status == STATUS_BUFFER_OVERFLOW) {
+    name_size = sizeof(*name_info) + tmp_name_info.FileNameLength;
+    name_info = uv__malloc(name_size);
+    if (!name_info) {
+      *size = 0;
+      err = UV_ENOMEM;
+      goto cleanup;
+    }
+
+    nt_status = pNtQueryInformationFile(handle->handle,
+                                        &io_status,
+                                        name_info,
+                                        name_size,
+                                        FileNameInformation);
+  }
+
+  if (nt_status != STATUS_SUCCESS) {
+    *size = 0;
+    err = uv_translate_sys_error(pRtlNtStatusToDosError(nt_status));
+    goto error;
+  }
+
+  if (!name_info) {
+    /* the struct on stack was used */
+    name_buf = tmp_name_info.FileName;
+    name_len = tmp_name_info.FileNameLength;
+  } else {
+    name_buf = name_info->FileName;
+    name_len = name_info->FileNameLength;
+  }
+
+  if (name_len == 0) {
+    *size = 0;
+    err = 0;
+    goto error;
+  }
+
+  name_len /= sizeof(WCHAR);
+
+  /* check how much space we need */
+  addrlen = WideCharToMultiByte(CP_UTF8,
+                                0,
+                                name_buf,
+                                name_len,
+                                NULL,
+                                0,
+                                NULL,
+                                NULL);
+  if (!addrlen) {
+    *size = 0;
+    err = uv_translate_sys_error(GetLastError());
+    goto error;
+  } else if (pipe_prefix_len + addrlen >= *size) {
+    /* "\\\\.\\pipe" + name */
+    *size = pipe_prefix_len + addrlen + 1;
+    err = UV_ENOBUFS;
+    goto error;
+  }
+
+  memcpy(buffer, pipe_prefix, pipe_prefix_len);
+  addrlen = WideCharToMultiByte(CP_UTF8,
+                                0,
+                                name_buf,
+                                name_len,
+                                buffer+pipe_prefix_len,
+                                *size-pipe_prefix_len,
+                                NULL,
+                                NULL);
+  if (!addrlen) {
+    *size = 0;
+    err = uv_translate_sys_error(GetLastError());
+    goto error;
+  }
+
+  addrlen += pipe_prefix_len;
+  *size = addrlen;
+  buffer[addrlen] = '\0';
+
+  err = 0;
+  goto cleanup;
+
+error:
+  uv__free(name_info);
+
+cleanup:
+  uv__pipe_unpause_read((uv_pipe_t*)handle); /* cast away const warning */
+  return err;
+}
+
+
+int uv_pipe_pending_count(uv_pipe_t* handle) {
+  if (!handle->ipc)
+    return 0;
+  return handle->pipe.conn.pending_ipc_info.queue_len;
+}
+
+
+int uv_pipe_getsockname(const uv_pipe_t* handle, char* buffer, size_t* size) {
+  if (handle->flags & UV_HANDLE_BOUND)
+    return uv__pipe_getname(handle, buffer, size);
+
+  if (handle->flags & UV_HANDLE_CONNECTION ||
+      handle->handle != INVALID_HANDLE_VALUE) {
+    *size = 0;
+    return 0;
+  }
+
+  return UV_EBADF;
+}
+
+
+int uv_pipe_getpeername(const uv_pipe_t* handle, char* buffer, size_t* size) {
+  /* emulate unix behaviour */
+  if (handle->flags & UV_HANDLE_BOUND)
+    return UV_ENOTCONN;
+
+  if (handle->handle != INVALID_HANDLE_VALUE)
+    return uv__pipe_getname(handle, buffer, size);
+
+  return UV_EBADF;
+}
+
+
+uv_handle_type uv_pipe_pending_type(uv_pipe_t* handle) {
+  if (!handle->ipc)
+    return UV_UNKNOWN_HANDLE;
+  if (handle->pipe.conn.pending_ipc_info.queue_len == 0)
+    return UV_UNKNOWN_HANDLE;
+  else
+    return UV_TCP;
+}
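
The pipe code above implements the Windows side of libuv's IPC framing: uv_pipe_write_impl() prefixes each write with a uv_ipc_frame_uv_stream header, uv_process_pipe_read_req() parses that header and queues any duplicated TCP socket, and uv_pipe_pending_count()/uv_pipe_pending_type() expose the queue to callers. As a rough sketch only (not part of the imported sources; on_alloc, on_ipc_read, and the surrounding setup are hypothetical), a consumer of the public API could receive a TCP handle over an IPC pipe like this:

  #include <stdlib.h>
  #include <uv.h>

  static void on_alloc(uv_handle_t* handle, size_t suggested, uv_buf_t* buf) {
    buf->base = (char*) malloc(suggested);
    buf->len = (unsigned int) suggested;
  }

  static void on_ipc_read(uv_stream_t* stream, ssize_t nread, const uv_buf_t* buf) {
    uv_pipe_t* ipc_pipe = (uv_pipe_t*) stream;

    if (nread >= 0 &&
        uv_pipe_pending_count(ipc_pipe) > 0 &&
        uv_pipe_pending_type(ipc_pipe) == UV_TCP) {
      uv_tcp_t* tcp = (uv_tcp_t*) malloc(sizeof(*tcp));
      uv_tcp_init(stream->loop, tcp);
      /* Dequeues the socket stored by uv__pipe_insert_pending_socket(). */
      uv_accept(stream, (uv_stream_t*) tcp);
    }

    free(buf->base);
  }

  /* Elsewhere, after uv_pipe_init() with the ipc flag set and the pipe has
     been connected or opened:
       uv_read_start((uv_stream_t*) &ipc_pipe, on_alloc, on_ipc_read);      */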

+ 646 - 0
Utilities/cmlibuv/src/win/poll.c

@@ -0,0 +1,646 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <io.h>
+
+#include "uv.h"
+#include "internal.h"
+#include "handle-inl.h"
+#include "req-inl.h"
+
+
+static const GUID uv_msafd_provider_ids[UV_MSAFD_PROVIDER_COUNT] = {
+  {0xe70f1aa0, 0xab8b, 0x11cf,
+      {0x8c, 0xa3, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92}},
+  {0xf9eab0c0, 0x26d4, 0x11d0,
+      {0xbb, 0xbf, 0x00, 0xaa, 0x00, 0x6c, 0x34, 0xe4}},
+  {0x9fc48064, 0x7298, 0x43e4,
+      {0xb7, 0xbd, 0x18, 0x1f, 0x20, 0x89, 0x79, 0x2a}}
+};
+
+typedef struct uv_single_fd_set_s {
+  unsigned int fd_count;
+  SOCKET fd_array[1];
+} uv_single_fd_set_t;
+
+
+static OVERLAPPED overlapped_dummy_;
+static uv_once_t overlapped_dummy_init_guard_ = UV_ONCE_INIT;
+
+static AFD_POLL_INFO afd_poll_info_dummy_;
+
+
+static void uv__init_overlapped_dummy(void) {
+  HANDLE event;
+
+  event = CreateEvent(NULL, TRUE, TRUE, NULL);
+  if (event == NULL)
+    uv_fatal_error(GetLastError(), "CreateEvent");
+
+  memset(&overlapped_dummy_, 0, sizeof overlapped_dummy_);
+  overlapped_dummy_.hEvent = (HANDLE) ((uintptr_t) event | 1);
+}
+
+
+static OVERLAPPED* uv__get_overlapped_dummy() {
+  uv_once(&overlapped_dummy_init_guard_, uv__init_overlapped_dummy);
+  return &overlapped_dummy_;
+}
+
+
+static AFD_POLL_INFO* uv__get_afd_poll_info_dummy() {
+  return &afd_poll_info_dummy_;
+}
+
+
+static void uv__fast_poll_submit_poll_req(uv_loop_t* loop, uv_poll_t* handle) {
+  uv_req_t* req;
+  AFD_POLL_INFO* afd_poll_info;
+  DWORD result;
+
+  /* Find a yet unsubmitted req to submit. */
+  if (handle->submitted_events_1 == 0) {
+    req = &handle->poll_req_1;
+    afd_poll_info = &handle->afd_poll_info_1;
+    handle->submitted_events_1 = handle->events;
+    handle->mask_events_1 = 0;
+    handle->mask_events_2 = handle->events;
+  } else if (handle->submitted_events_2 == 0) {
+    req = &handle->poll_req_2;
+    afd_poll_info = &handle->afd_poll_info_2;
+    handle->submitted_events_2 = handle->events;
+    handle->mask_events_1 = handle->events;
+    handle->mask_events_2 = 0;
+  } else {
+    /* Just wait until there's an unsubmitted req. */
+    /* This will happen almost immediately as one of the 2 outstanding */
+    /* requests is about to return. When this happens, */
+    /* uv__fast_poll_process_poll_req will be called, and the pending */
+    /* events, if needed, will be processed in a subsequent request. */
+    return;
+  }
+
+  /* Setting Exclusive to TRUE makes the other poll request return if there */
+  /* is any. */
+  afd_poll_info->Exclusive = TRUE;
+  afd_poll_info->NumberOfHandles = 1;
+  afd_poll_info->Timeout.QuadPart = INT64_MAX;
+  afd_poll_info->Handles[0].Handle = (HANDLE) handle->socket;
+  afd_poll_info->Handles[0].Status = 0;
+  afd_poll_info->Handles[0].Events = 0;
+
+  if (handle->events & UV_READABLE) {
+    afd_poll_info->Handles[0].Events |= AFD_POLL_RECEIVE |
+        AFD_POLL_DISCONNECT | AFD_POLL_ACCEPT | AFD_POLL_ABORT;
+  } else {
+    if (handle->events & UV_DISCONNECT) {
+      afd_poll_info->Handles[0].Events |= AFD_POLL_DISCONNECT;
+    }
+  }
+  if (handle->events & UV_WRITABLE) {
+    afd_poll_info->Handles[0].Events |= AFD_POLL_SEND | AFD_POLL_CONNECT_FAIL;
+  }
+
+  memset(&req->u.io.overlapped, 0, sizeof req->u.io.overlapped);
+
+  result = uv_msafd_poll((SOCKET) handle->peer_socket,
+                         afd_poll_info,
+                         afd_poll_info,
+                         &req->u.io.overlapped);
+  if (result != 0 && WSAGetLastError() != WSA_IO_PENDING) {
+    /* Queue this req, reporting an error. */
+    SET_REQ_ERROR(req, WSAGetLastError());
+    uv_insert_pending_req(loop, req);
+  }
+}
+
+
+static int uv__fast_poll_cancel_poll_req(uv_loop_t* loop, uv_poll_t* handle) {
+  AFD_POLL_INFO afd_poll_info;
+  DWORD result;
+
+  afd_poll_info.Exclusive = TRUE;
+  afd_poll_info.NumberOfHandles = 1;
+  afd_poll_info.Timeout.QuadPart = INT64_MAX;
+  afd_poll_info.Handles[0].Handle = (HANDLE) handle->socket;
+  afd_poll_info.Handles[0].Status = 0;
+  afd_poll_info.Handles[0].Events = AFD_POLL_ALL;
+
+  result = uv_msafd_poll(handle->socket,
+                         &afd_poll_info,
+                         uv__get_afd_poll_info_dummy(),
+                         uv__get_overlapped_dummy());
+
+  if (result == SOCKET_ERROR) {
+    DWORD error = WSAGetLastError();
+    if (error != WSA_IO_PENDING)
+      return error;
+  }
+
+  return 0;
+}
+
+
+static void uv__fast_poll_process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
+    uv_req_t* req) {
+  unsigned char mask_events;
+  AFD_POLL_INFO* afd_poll_info;
+
+  if (req == &handle->poll_req_1) {
+    afd_poll_info = &handle->afd_poll_info_1;
+    handle->submitted_events_1 = 0;
+    mask_events = handle->mask_events_1;
+  } else if (req == &handle->poll_req_2) {
+    afd_poll_info = &handle->afd_poll_info_2;
+    handle->submitted_events_2 = 0;
+    mask_events = handle->mask_events_2;
+  } else {
+    assert(0);
+    return;
+  }
+
+  /* Report an error unless the select was just interrupted. */
+  if (!REQ_SUCCESS(req)) {
+    DWORD error = GET_REQ_SOCK_ERROR(req);
+    if (error != WSAEINTR && handle->events != 0) {
+      handle->events = 0; /* Stop the watcher */
+      handle->poll_cb(handle, uv_translate_sys_error(error), 0);
+    }
+
+  } else if (afd_poll_info->NumberOfHandles >= 1) {
+    unsigned char events = 0;
+
+    if ((afd_poll_info->Handles[0].Events & (AFD_POLL_RECEIVE |
+        AFD_POLL_DISCONNECT | AFD_POLL_ACCEPT | AFD_POLL_ABORT)) != 0) {
+      events |= UV_READABLE;
+      if ((afd_poll_info->Handles[0].Events & AFD_POLL_DISCONNECT) != 0) {
+        events |= UV_DISCONNECT;
+      }
+    }
+    if ((afd_poll_info->Handles[0].Events & (AFD_POLL_SEND |
+        AFD_POLL_CONNECT_FAIL)) != 0) {
+      events |= UV_WRITABLE;
+    }
+
+    events &= handle->events & ~mask_events;
+
+    if (afd_poll_info->Handles[0].Events & AFD_POLL_LOCAL_CLOSE) {
+      /* Stop polling. */
+      handle->events = 0;
+      if (uv__is_active(handle))
+        uv__handle_stop(handle);
+    }
+
+    if (events != 0) {
+      handle->poll_cb(handle, 0, events);
+    }
+  }
+
+  if ((handle->events & ~(handle->submitted_events_1 |
+      handle->submitted_events_2)) != 0) {
+    uv__fast_poll_submit_poll_req(loop, handle);
+  } else if ((handle->flags & UV__HANDLE_CLOSING) &&
+             handle->submitted_events_1 == 0 &&
+             handle->submitted_events_2 == 0) {
+    uv_want_endgame(loop, (uv_handle_t*) handle);
+  }
+}
+
+
+static int uv__fast_poll_set(uv_loop_t* loop, uv_poll_t* handle, int events) {
+  assert(handle->type == UV_POLL);
+  assert(!(handle->flags & UV__HANDLE_CLOSING));
+  assert((events & ~(UV_READABLE | UV_WRITABLE | UV_DISCONNECT)) == 0);
+
+  handle->events = events;
+
+  if (handle->events != 0) {
+    uv__handle_start(handle);
+  } else {
+    uv__handle_stop(handle);
+  }
+
+  if ((handle->events & ~(handle->submitted_events_1 |
+      handle->submitted_events_2)) != 0) {
+    uv__fast_poll_submit_poll_req(handle->loop, handle);
+  }
+
+  return 0;
+}
+
+
+static int uv__fast_poll_close(uv_loop_t* loop, uv_poll_t* handle) {
+  handle->events = 0;
+  uv__handle_closing(handle);
+
+  if (handle->submitted_events_1 == 0 &&
+      handle->submitted_events_2 == 0) {
+    uv_want_endgame(loop, (uv_handle_t*) handle);
+    return 0;
+  } else {
+    /* Cancel outstanding poll requests by executing another, unique poll */
+    /* request that forces the outstanding ones to return. */
+    return uv__fast_poll_cancel_poll_req(loop, handle);
+  }
+}
+
+
+static SOCKET uv__fast_poll_create_peer_socket(HANDLE iocp,
+    WSAPROTOCOL_INFOW* protocol_info) {
+  SOCKET sock = 0;
+
+  sock = WSASocketW(protocol_info->iAddressFamily,
+                    protocol_info->iSocketType,
+                    protocol_info->iProtocol,
+                    protocol_info,
+                    0,
+                    WSA_FLAG_OVERLAPPED);
+  if (sock == INVALID_SOCKET) {
+    return INVALID_SOCKET;
+  }
+
+  if (!SetHandleInformation((HANDLE) sock, HANDLE_FLAG_INHERIT, 0)) {
+    goto error;
+  }
+
+  if (CreateIoCompletionPort((HANDLE) sock,
+                             iocp,
+                             (ULONG_PTR) sock,
+                             0) == NULL) {
+    goto error;
+  }
+
+  return sock;
+
+ error:
+  closesocket(sock);
+  return INVALID_SOCKET;
+}
+
+
+static SOCKET uv__fast_poll_get_peer_socket(uv_loop_t* loop,
+    WSAPROTOCOL_INFOW* protocol_info) {
+  int index, i;
+  SOCKET peer_socket;
+
+  index = -1;
+  for (i = 0; (size_t) i < ARRAY_SIZE(uv_msafd_provider_ids); i++) {
+    if (memcmp((void*) &protocol_info->ProviderId,
+               (void*) &uv_msafd_provider_ids[i],
+               sizeof protocol_info->ProviderId) == 0) {
+      index = i;
+    }
+  }
+
+  /* Check if the protocol uses an msafd socket. */
+  if (index < 0) {
+    return INVALID_SOCKET;
+  }
+
+  /* If we haven't tried to create a peer socket yet, make one now. Don't */
+  /* try again if peer socket creation failed earlier for the same */
+  /* protocol. */
+  peer_socket = loop->poll_peer_sockets[index];
+  if (peer_socket == 0) {
+    peer_socket = uv__fast_poll_create_peer_socket(loop->iocp, protocol_info);
+    loop->poll_peer_sockets[index] = peer_socket;
+  }
+
+  return peer_socket;
+}
+
+
+static DWORD WINAPI uv__slow_poll_thread_proc(void* arg) {
+  uv_req_t* req = (uv_req_t*) arg;
+  uv_poll_t* handle = (uv_poll_t*) req->data;
+  unsigned char reported_events;
+  int r;
+  uv_single_fd_set_t rfds, wfds, efds;
+  struct timeval timeout;
+
+  assert(handle->type == UV_POLL);
+  assert(req->type == UV_POLL_REQ);
+
+  if (handle->events & UV_READABLE) {
+    rfds.fd_count = 1;
+    rfds.fd_array[0] = handle->socket;
+  } else {
+    rfds.fd_count = 0;
+  }
+
+  if (handle->events & UV_WRITABLE) {
+    wfds.fd_count = 1;
+    wfds.fd_array[0] = handle->socket;
+    efds.fd_count = 1;
+    efds.fd_array[0] = handle->socket;
+  } else {
+    wfds.fd_count = 0;
+    efds.fd_count = 0;
+  }
+
+  /* Make the select() time out after 3 minutes. If select() hangs because */
+  /* the user closed the socket, we will at least not hang indefinitely. */
+  timeout.tv_sec = 3 * 60;
+  timeout.tv_usec = 0;
+
+  r = select(1, (fd_set*) &rfds, (fd_set*) &wfds, (fd_set*) &efds, &timeout);
+  if (r == SOCKET_ERROR) {
+    /* Queue this req, reporting an error. */
+    SET_REQ_ERROR(&handle->poll_req_1, WSAGetLastError());
+    POST_COMPLETION_FOR_REQ(handle->loop, req);
+    return 0;
+  }
+
+  reported_events = 0;
+
+  if (r > 0) {
+    if (rfds.fd_count > 0) {
+      assert(rfds.fd_count == 1);
+      assert(rfds.fd_array[0] == handle->socket);
+      reported_events |= UV_READABLE;
+    }
+
+    if (wfds.fd_count > 0) {
+      assert(wfds.fd_count == 1);
+      assert(wfds.fd_array[0] == handle->socket);
+      reported_events |= UV_WRITABLE;
+    } else if (efds.fd_count > 0) {
+      assert(efds.fd_count == 1);
+      assert(efds.fd_array[0] == handle->socket);
+      reported_events |= UV_WRITABLE;
+    }
+  }
+
+  SET_REQ_SUCCESS(req);
+  req->u.io.overlapped.InternalHigh = (DWORD) reported_events;
+  POST_COMPLETION_FOR_REQ(handle->loop, req);
+
+  return 0;
+}
+
+
+static void uv__slow_poll_submit_poll_req(uv_loop_t* loop, uv_poll_t* handle) {
+  uv_req_t* req;
+
+  /* Find a yet unsubmitted req to submit. */
+  if (handle->submitted_events_1 == 0) {
+    req = &handle->poll_req_1;
+    handle->submitted_events_1 = handle->events;
+    handle->mask_events_1 = 0;
+    handle->mask_events_2 = handle->events;
+  } else if (handle->submitted_events_2 == 0) {
+    req = &handle->poll_req_2;
+    handle->submitted_events_2 = handle->events;
+    handle->mask_events_1 = handle->events;
+    handle->mask_events_2 = 0;
+  } else {
+    assert(0);
+    return;
+  }
+
+  if (!QueueUserWorkItem(uv__slow_poll_thread_proc,
+                         (void*) req,
+                         WT_EXECUTELONGFUNCTION)) {
+    /* Make this req pending, reporting an error. */
+    SET_REQ_ERROR(req, GetLastError());
+    uv_insert_pending_req(loop, req);
+  }
+}
+
+
+static void uv__slow_poll_process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
+    uv_req_t* req) {
+  unsigned char mask_events;
+  int err;
+
+  if (req == &handle->poll_req_1) {
+    handle->submitted_events_1 = 0;
+    mask_events = handle->mask_events_1;
+  } else if (req == &handle->poll_req_2) {
+    handle->submitted_events_2 = 0;
+    mask_events = handle->mask_events_2;
+  } else {
+    assert(0);
+    return;
+  }
+
+  if (!REQ_SUCCESS(req)) {
+    /* Error. */
+    if (handle->events != 0) {
+      err = GET_REQ_ERROR(req);
+      handle->events = 0; /* Stop the watcher */
+      handle->poll_cb(handle, uv_translate_sys_error(err), 0);
+    }
+  } else {
+    /* Got some events. */
+    int events = req->u.io.overlapped.InternalHigh & handle->events & ~mask_events;
+    if (events != 0) {
+      handle->poll_cb(handle, 0, events);
+    }
+  }
+
+  if ((handle->events & ~(handle->submitted_events_1 |
+      handle->submitted_events_2)) != 0) {
+    uv__slow_poll_submit_poll_req(loop, handle);
+  } else if ((handle->flags & UV__HANDLE_CLOSING) &&
+             handle->submitted_events_1 == 0 &&
+             handle->submitted_events_2 == 0) {
+    uv_want_endgame(loop, (uv_handle_t*) handle);
+  }
+}
+
+
+static int uv__slow_poll_set(uv_loop_t* loop, uv_poll_t* handle, int events) {
+  assert(handle->type == UV_POLL);
+  assert(!(handle->flags & UV__HANDLE_CLOSING));
+  assert((events & ~(UV_READABLE | UV_WRITABLE)) == 0);
+
+  handle->events = events;
+
+  if (handle->events != 0) {
+    uv__handle_start(handle);
+  } else {
+    uv__handle_stop(handle);
+  }
+
+  if ((handle->events &
+      ~(handle->submitted_events_1 | handle->submitted_events_2)) != 0) {
+    uv__slow_poll_submit_poll_req(handle->loop, handle);
+  }
+
+  return 0;
+}
+
+
+static int uv__slow_poll_close(uv_loop_t* loop, uv_poll_t* handle) {
+  handle->events = 0;
+  uv__handle_closing(handle);
+
+  if (handle->submitted_events_1 == 0 &&
+      handle->submitted_events_2 == 0) {
+    uv_want_endgame(loop, (uv_handle_t*) handle);
+  }
+
+  return 0;
+}
+
+
+int uv_poll_init(uv_loop_t* loop, uv_poll_t* handle, int fd) {
+  return uv_poll_init_socket(loop, handle, (SOCKET) uv__get_osfhandle(fd));
+}
+
+
+int uv_poll_init_socket(uv_loop_t* loop, uv_poll_t* handle,
+    uv_os_sock_t socket) {
+  WSAPROTOCOL_INFOW protocol_info;
+  int len;
+  SOCKET peer_socket, base_socket;
+  DWORD bytes;
+  DWORD yes = 1;
+
+  /* Set the socket to nonblocking mode */
+  if (ioctlsocket(socket, FIONBIO, &yes) == SOCKET_ERROR)
+    return uv_translate_sys_error(WSAGetLastError());
+
+  /* Try to obtain a base handle for the socket. This increases the chances */
+  /* that we find an AFD handle and are able to use the fast poll mechanism. */
+  /* This will always fail on windows XP/2k3, since they don't support the */
+  /* SIO_BASE_HANDLE ioctl. */
+#ifndef NDEBUG
+  base_socket = INVALID_SOCKET;
+#endif
+
+  if (WSAIoctl(socket,
+               SIO_BASE_HANDLE,
+               NULL,
+               0,
+               &base_socket,
+               sizeof base_socket,
+               &bytes,
+               NULL,
+               NULL) == 0) {
+    assert(base_socket != 0 && base_socket != INVALID_SOCKET);
+    socket = base_socket;
+  }
+
+  uv__handle_init(loop, (uv_handle_t*) handle, UV_POLL);
+  handle->socket = socket;
+  handle->events = 0;
+
+  /* Obtain protocol information about the socket. */
+  len = sizeof protocol_info;
+  if (getsockopt(socket,
+                 SOL_SOCKET,
+                 SO_PROTOCOL_INFOW,
+                 (char*) &protocol_info,
+                 &len) != 0) {
+    return uv_translate_sys_error(WSAGetLastError());
+  }
+
+  /* Get the peer socket that is needed to enable fast poll. If the returned */
+  /* value is INVALID_SOCKET, the protocol is not implemented by MSAFD and */
+  /* we'll have to use slow mode. */
+  peer_socket = uv__fast_poll_get_peer_socket(loop, &protocol_info);
+
+  if (peer_socket != INVALID_SOCKET) {
+    /* Initialize fast poll specific fields. */
+    handle->peer_socket = peer_socket;
+  } else {
+    /* Initialize slow poll specific fields. */
+    handle->flags |= UV_HANDLE_POLL_SLOW;
+  }
+
+  /* Initialize 2 poll reqs. */
+  handle->submitted_events_1 = 0;
+  uv_req_init(loop, (uv_req_t*) &(handle->poll_req_1));
+  handle->poll_req_1.type = UV_POLL_REQ;
+  handle->poll_req_1.data = handle;
+
+  handle->submitted_events_2 = 0;
+  uv_req_init(loop, (uv_req_t*) &(handle->poll_req_2));
+  handle->poll_req_2.type = UV_POLL_REQ;
+  handle->poll_req_2.data = handle;
+
+  return 0;
+}
+
+
+int uv_poll_start(uv_poll_t* handle, int events, uv_poll_cb cb) {
+  int err;
+
+  if (!(handle->flags & UV_HANDLE_POLL_SLOW)) {
+    err = uv__fast_poll_set(handle->loop, handle, events);
+  } else {
+    err = uv__slow_poll_set(handle->loop, handle, events);
+  }
+
+  if (err) {
+    return uv_translate_sys_error(err);
+  }
+
+  handle->poll_cb = cb;
+
+  return 0;
+}
+
+
+int uv_poll_stop(uv_poll_t* handle) {
+  int err;
+
+  if (!(handle->flags & UV_HANDLE_POLL_SLOW)) {
+    err = uv__fast_poll_set(handle->loop, handle, 0);
+  } else {
+    err = uv__slow_poll_set(handle->loop, handle, 0);
+  }
+
+  return uv_translate_sys_error(err);
+}
+
+
+void uv_process_poll_req(uv_loop_t* loop, uv_poll_t* handle, uv_req_t* req) {
+  if (!(handle->flags & UV_HANDLE_POLL_SLOW)) {
+    uv__fast_poll_process_poll_req(loop, handle, req);
+  } else {
+    uv__slow_poll_process_poll_req(loop, handle, req);
+  }
+}
+
+
+int uv_poll_close(uv_loop_t* loop, uv_poll_t* handle) {
+  if (!(handle->flags & UV_HANDLE_POLL_SLOW)) {
+    return uv__fast_poll_close(loop, handle);
+  } else {
+    return uv__slow_poll_close(loop, handle);
+  }
+}
+
+
+void uv_poll_endgame(uv_loop_t* loop, uv_poll_t* handle) {
+  assert(handle->flags & UV__HANDLE_CLOSING);
+  assert(!(handle->flags & UV_HANDLE_CLOSED));
+
+  assert(handle->submitted_events_1 == 0);
+  assert(handle->submitted_events_2 == 0);
+
+  uv__handle_close(handle);
+}
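
uv_poll_init_socket() above chooses between two strategies: the fast path submits AFD_POLL requests against an MSAFD peer socket, while the slow path runs select() with a 3-minute timeout on a worker thread. The public interface is the same either way. A minimal, illustrative sketch of that interface (not part of the imported sources; watch_socket and on_poll are hypothetical names) might look like:

  #include <uv.h>

  static void on_poll(uv_poll_t* watcher, int status, int events) {
    if (status < 0)
      return;                  /* status is already a libuv error code */
    if (events & UV_READABLE) {
      /* The socket can be read without blocking. */
    }
    if (events & UV_WRITABLE) {
      /* The socket can be written without blocking. */
    }
  }

  static int watch_socket(uv_loop_t* loop, uv_poll_t* watcher, uv_os_sock_t sock) {
    int err = uv_poll_init_socket(loop, watcher, sock);
    if (err == 0)
      err = uv_poll_start(watcher, UV_READABLE | UV_WRITABLE, on_poll);
    return err;                /* 0 on success, or a libuv error code */
  }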

+ 510 - 0
Utilities/cmlibuv/src/win/process-stdio.c

@@ -0,0 +1,510 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <io.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "uv.h"
+#include "internal.h"
+#include "handle-inl.h"
+
+
+/*
+ * The `child_stdio_buffer` buffer has the following layout:
+ *   int number_of_fds
+ *   unsigned char crt_flags[number_of_fds]
+ *   HANDLE os_handle[number_of_fds]
+ */
+#define CHILD_STDIO_SIZE(count)                     \
+    (sizeof(int) +                                  \
+     sizeof(unsigned char) * (count) +              \
+     sizeof(uintptr_t) * (count))
+
+#define CHILD_STDIO_COUNT(buffer)                   \
+    *((unsigned int*) (buffer))
+
+#define CHILD_STDIO_CRT_FLAGS(buffer, fd)           \
+    *((unsigned char*) (buffer) + sizeof(int) + fd)
+
+#define CHILD_STDIO_HANDLE(buffer, fd)              \
+    *((HANDLE*) ((unsigned char*) (buffer) +        \
+                 sizeof(int) +                      \
+                 sizeof(unsigned char) *            \
+                 CHILD_STDIO_COUNT((buffer)) +      \
+                 sizeof(HANDLE) * (fd)))
+
+
+/* CRT file descriptor mode flags */
+#define FOPEN       0x01
+#define FEOFLAG     0x02
+#define FCRLF       0x04
+#define FPIPE       0x08
+#define FNOINHERIT  0x10
+#define FAPPEND     0x20
+#define FDEV        0x40
+#define FTEXT       0x80
+
+
+/*
+ * Clear the HANDLE_FLAG_INHERIT flag from all HANDLEs that were inherited
+ * from the parent process. Don't check for errors - the stdio handles may not
+ * be valid, or may be closed already. There is no guarantee that this function
+ * does a perfect job.
+ */
+void uv_disable_stdio_inheritance(void) {
+  HANDLE handle;
+  STARTUPINFOW si;
+
+  /* Make the windows stdio handles non-inheritable. */
+  handle = GetStdHandle(STD_INPUT_HANDLE);
+  if (handle != NULL && handle != INVALID_HANDLE_VALUE)
+    SetHandleInformation(handle, HANDLE_FLAG_INHERIT, 0);
+
+  handle = GetStdHandle(STD_OUTPUT_HANDLE);
+  if (handle != NULL && handle != INVALID_HANDLE_VALUE)
+    SetHandleInformation(handle, HANDLE_FLAG_INHERIT, 0);
+
+  handle = GetStdHandle(STD_ERROR_HANDLE);
+  if (handle != NULL && handle != INVALID_HANDLE_VALUE)
+    SetHandleInformation(handle, HANDLE_FLAG_INHERIT, 0);
+
+  /* Make inherited CRT FDs non-inheritable. */
+  GetStartupInfoW(&si);
+  if (uv__stdio_verify(si.lpReserved2, si.cbReserved2))
+    uv__stdio_noinherit(si.lpReserved2);
+}
+
+
+static int uv__create_stdio_pipe_pair(uv_loop_t* loop,
+    uv_pipe_t* server_pipe, HANDLE* child_pipe_ptr, unsigned int flags) {
+  char pipe_name[64];
+  SECURITY_ATTRIBUTES sa;
+  DWORD server_access = 0;
+  DWORD client_access = 0;
+  HANDLE child_pipe = INVALID_HANDLE_VALUE;
+  int err;
+
+  if (flags & UV_READABLE_PIPE) {
+    /* The server needs inbound access too, otherwise CreateNamedPipe() */
+    /* won't give us the FILE_READ_ATTRIBUTES permission. We need that to */
+    /* probe the state of the write buffer when we're trying to shutdown */
+    /* the pipe. */
+    server_access |= PIPE_ACCESS_OUTBOUND | PIPE_ACCESS_INBOUND;
+    client_access |= GENERIC_READ | FILE_WRITE_ATTRIBUTES;
+  }
+  if (flags & UV_WRITABLE_PIPE) {
+    server_access |= PIPE_ACCESS_INBOUND;
+    client_access |= GENERIC_WRITE | FILE_READ_ATTRIBUTES;
+  }
+
+  /* Create server pipe handle. */
+  err = uv_stdio_pipe_server(loop,
+                             server_pipe,
+                             server_access,
+                             pipe_name,
+                             sizeof(pipe_name));
+  if (err)
+    goto error;
+
+  /* Create child pipe handle. */
+  sa.nLength = sizeof sa;
+  sa.lpSecurityDescriptor = NULL;
+  sa.bInheritHandle = TRUE;
+
+  child_pipe = CreateFileA(pipe_name,
+                           client_access,
+                           0,
+                           &sa,
+                           OPEN_EXISTING,
+                           server_pipe->ipc ? FILE_FLAG_OVERLAPPED : 0,
+                           NULL);
+  if (child_pipe == INVALID_HANDLE_VALUE) {
+    err = GetLastError();
+    goto error;
+  }
+
+#ifndef NDEBUG
+  /* Validate that the pipe was opened in the right mode. */
+  {
+    DWORD mode;
+    BOOL r = GetNamedPipeHandleState(child_pipe,
+                                     &mode,
+                                     NULL,
+                                     NULL,
+                                     NULL,
+                                     NULL,
+                                     0);
+    assert(r == TRUE);
+    assert(mode == (PIPE_READMODE_BYTE | PIPE_WAIT));
+  }
+#endif
+
+  /* Do a blocking ConnectNamedPipe.  This should not block because we have */
+  /* both ends of the pipe created. */
+  if (!ConnectNamedPipe(server_pipe->handle, NULL)) {
+    if (GetLastError() != ERROR_PIPE_CONNECTED) {
+      err = GetLastError();
+      goto error;
+    }
+  }
+
+  /* The server end is now readable and/or writable. */
+  if (flags & UV_READABLE_PIPE)
+    server_pipe->flags |= UV_HANDLE_WRITABLE;
+  if (flags & UV_WRITABLE_PIPE)
+    server_pipe->flags |= UV_HANDLE_READABLE;
+
+  *child_pipe_ptr = child_pipe;
+  return 0;
+
+ error:
+  if (server_pipe->handle != INVALID_HANDLE_VALUE) {
+    uv_pipe_cleanup(loop, server_pipe);
+  }
+
+  if (child_pipe != INVALID_HANDLE_VALUE) {
+    CloseHandle(child_pipe);
+  }
+
+  return err;
+}
+
+
+static int uv__duplicate_handle(uv_loop_t* loop, HANDLE handle, HANDLE* dup) {
+  HANDLE current_process;
+
+
+  /* _get_osfhandle will sometimes return -2 in case of an error. This seems */
+  /* to happen when fd <= 2 and the process' corresponding stdio handle is */
+  /* set to NULL. Unfortunately DuplicateHandle will happily duplicate */
+  /* (HANDLE) -2, so this situation goes unnoticed until someone tries to */
+  /* use the duplicate. Therefore we filter out known-invalid handles here. */
+  if (handle == INVALID_HANDLE_VALUE ||
+      handle == NULL ||
+      handle == (HANDLE) -2) {
+    *dup = INVALID_HANDLE_VALUE;
+    return ERROR_INVALID_HANDLE;
+  }
+
+  current_process = GetCurrentProcess();
+
+  if (!DuplicateHandle(current_process,
+                       handle,
+                       current_process,
+                       dup,
+                       0,
+                       TRUE,
+                       DUPLICATE_SAME_ACCESS)) {
+    *dup = INVALID_HANDLE_VALUE;
+    return GetLastError();
+  }
+
+  return 0;
+}
+
+
+static int uv__duplicate_fd(uv_loop_t* loop, int fd, HANDLE* dup) {
+  HANDLE handle;
+
+  if (fd == -1) {
+    *dup = INVALID_HANDLE_VALUE;
+    return ERROR_INVALID_HANDLE;
+  }
+
+  handle = uv__get_osfhandle(fd);
+  return uv__duplicate_handle(loop, handle, dup);
+}
+
+
+int uv__create_nul_handle(HANDLE* handle_ptr,
+    DWORD access) {
+  HANDLE handle;
+  SECURITY_ATTRIBUTES sa;
+
+  sa.nLength = sizeof sa;
+  sa.lpSecurityDescriptor = NULL;
+  sa.bInheritHandle = TRUE;
+
+  handle = CreateFileW(L"NUL",
+                       access,
+                       FILE_SHARE_READ | FILE_SHARE_WRITE,
+                       &sa,
+                       OPEN_EXISTING,
+                       0,
+                       NULL);
+  if (handle == INVALID_HANDLE_VALUE) {
+    return GetLastError();
+  }
+
+  *handle_ptr = handle;
+  return 0;
+}
+
+
+int uv__stdio_create(uv_loop_t* loop,
+                     const uv_process_options_t* options,
+                     BYTE** buffer_ptr) {
+  BYTE* buffer;
+  int count, i;
+  int err;
+
+  count = options->stdio_count;
+
+  if (count < 0 || count > 255) {
+    /* Only support FDs 0-255 */
+    return ERROR_NOT_SUPPORTED;
+  } else if (count < 3) {
+    /* There should always be at least 3 stdio handles. */
+    count = 3;
+  }
+
+  /* Allocate the child stdio buffer */
+  buffer = (BYTE*) uv__malloc(CHILD_STDIO_SIZE(count));
+  if (buffer == NULL) {
+    return ERROR_OUTOFMEMORY;
+  }
+
+  /* Prepopulate the buffer with INVALID_HANDLE_VALUE handles so we can */
+  /* clean up on failure. */
+  CHILD_STDIO_COUNT(buffer) = count;
+  for (i = 0; i < count; i++) {
+    CHILD_STDIO_CRT_FLAGS(buffer, i) = 0;
+    CHILD_STDIO_HANDLE(buffer, i) = INVALID_HANDLE_VALUE;
+  }
+
+  for (i = 0; i < count; i++) {
+    uv_stdio_container_t fdopt;
+    if (i < options->stdio_count) {
+      fdopt = options->stdio[i];
+    } else {
+      fdopt.flags = UV_IGNORE;
+    }
+
+    switch (fdopt.flags & (UV_IGNORE | UV_CREATE_PIPE | UV_INHERIT_FD |
+            UV_INHERIT_STREAM)) {
+      case UV_IGNORE:
+        /* Starting a process with no stdin/stdout/stderr can confuse it. */
+        /* So no matter what the user specified, we make sure the first */
+        /* three FDs are always open in their typical modes, e.g. stdin */
+        /* should be readable and stdout/err should be writable. For FDs > */
+        /* 2, don't do anything - all handles in the stdio buffer are */
+        /* initialized with INVALID_HANDLE_VALUE, which should be okay. */
+        if (i <= 2) {
+          DWORD access = (i == 0) ? FILE_GENERIC_READ :
+                                    FILE_GENERIC_WRITE | FILE_READ_ATTRIBUTES;
+
+          err = uv__create_nul_handle(&CHILD_STDIO_HANDLE(buffer, i),
+                                      access);
+          if (err)
+            goto error;
+
+          CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN | FDEV;
+        }
+        break;
+
+      case UV_CREATE_PIPE: {
+        /* Create a pair of connected pipe ends; one end is turned into an */
+        /* uv_pipe_t for use by the parent. The other one is given to the */
+        /* child. */
+        uv_pipe_t* parent_pipe = (uv_pipe_t*) fdopt.data.stream;
+        HANDLE child_pipe = INVALID_HANDLE_VALUE;
+
+        /* Create a new, connected pipe pair. stdio[i].stream should point */
+        /* to an uninitialized, but not connected pipe handle. */
+        assert(fdopt.data.stream->type == UV_NAMED_PIPE);
+        assert(!(fdopt.data.stream->flags & UV_HANDLE_CONNECTION));
+        assert(!(fdopt.data.stream->flags & UV_HANDLE_PIPESERVER));
+
+        err = uv__create_stdio_pipe_pair(loop,
+                                         parent_pipe,
+                                         &child_pipe,
+                                         fdopt.flags);
+        if (err)
+          goto error;
+
+        CHILD_STDIO_HANDLE(buffer, i) = child_pipe;
+        CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN | FPIPE;
+        break;
+      }
+
+      case UV_INHERIT_FD: {
+        /* Inherit a raw FD. */
+        HANDLE child_handle;
+
+        /* Make an inheritable duplicate of the handle. */
+        err = uv__duplicate_fd(loop, fdopt.data.fd, &child_handle);
+        if (err) {
+          /* If fdopt.data.fd is not valid and fd <= 2, then ignore the */
+          /* error. */
+          if (fdopt.data.fd <= 2 && err == ERROR_INVALID_HANDLE) {
+            CHILD_STDIO_CRT_FLAGS(buffer, i) = 0;
+            CHILD_STDIO_HANDLE(buffer, i) = INVALID_HANDLE_VALUE;
+            break;
+          }
+          goto error;
+        }
+
+        /* Figure out what the type is. */
+        switch (GetFileType(child_handle)) {
+          case FILE_TYPE_DISK:
+            CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN;
+            break;
+
+          case FILE_TYPE_PIPE:
+            CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN | FPIPE;
+            break;
+
+          case FILE_TYPE_CHAR:
+          case FILE_TYPE_REMOTE:
+            CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN | FDEV;
+            break;
+
+          case FILE_TYPE_UNKNOWN:
+            if (GetLastError() != 0) {
+              err = GetLastError();
+              CloseHandle(child_handle);
+              goto error;
+            }
+            CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN | FDEV;
+            break;
+
+          default:
+            assert(0);
+            return -1;
+        }
+
+        CHILD_STDIO_HANDLE(buffer, i) = child_handle;
+        break;
+      }
+
+      case UV_INHERIT_STREAM: {
+        /* Use an existing stream as the stdio handle for the child. */
+        HANDLE stream_handle, child_handle;
+        unsigned char crt_flags;
+        uv_stream_t* stream = fdopt.data.stream;
+
+        /* Leech the handle out of the stream. */
+        if (stream->type == UV_TTY) {
+          stream_handle = ((uv_tty_t*) stream)->handle;
+          crt_flags = FOPEN | FDEV;
+        } else if (stream->type == UV_NAMED_PIPE &&
+                   stream->flags & UV_HANDLE_CONNECTION) {
+          stream_handle = ((uv_pipe_t*) stream)->handle;
+          crt_flags = FOPEN | FPIPE;
+        } else {
+          stream_handle = INVALID_HANDLE_VALUE;
+          crt_flags = 0;
+        }
+
+        if (stream_handle == NULL ||
+            stream_handle == INVALID_HANDLE_VALUE) {
+          /* The handle is already closed, or not yet created, or the */
+          /* stream type is not supported. */
+          err = ERROR_NOT_SUPPORTED;
+          goto error;
+        }
+
+        /* Make an inheritable copy of the handle. */
+        err = uv__duplicate_handle(loop, stream_handle, &child_handle);
+        if (err)
+          goto error;
+
+        CHILD_STDIO_HANDLE(buffer, i) = child_handle;
+        CHILD_STDIO_CRT_FLAGS(buffer, i) = crt_flags;
+        break;
+      }
+
+      default:
+        assert(0);
+        return -1;
+    }
+  }
+
+  *buffer_ptr = buffer;
+  return 0;
+
+ error:
+  uv__stdio_destroy(buffer);
+  return err;
+}
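
For orientation, a minimal sketch of how an embedder would populate the options->stdio array that uv__stdio_create consumes; the helper name and variables below are illustrative, not part of libuv or this commit:

    /* Illustrative only: fill three stdio slots for uv_spawn(). */
    static void stdio_setup_example(uv_loop_t* loop,
                                    uv_process_options_t* options,
                                    uv_pipe_t* out_pipe,
                                    uv_stdio_container_t stdio[3]) {
      uv_pipe_init(loop, out_pipe, 0);
      stdio[0].flags = UV_IGNORE;                          /* child gets a NUL handle  */
      stdio[1].flags = UV_CREATE_PIPE | UV_WRITABLE_PIPE;  /* child writes, we read    */
      stdio[1].data.stream = (uv_stream_t*) out_pipe;
      stdio[2].flags = UV_INHERIT_FD;                      /* duplicate our own stderr */
      stdio[2].data.fd = 2;
      options->stdio = stdio;
      options->stdio_count = 3;
    }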
+
+
+void uv__stdio_destroy(BYTE* buffer) {
+  int i, count;
+
+  count = CHILD_STDIO_COUNT(buffer);
+  for (i = 0; i < count; i++) {
+    HANDLE handle = CHILD_STDIO_HANDLE(buffer, i);
+    if (handle != INVALID_HANDLE_VALUE) {
+      CloseHandle(handle);
+    }
+  }
+
+  uv__free(buffer);
+}
+
+
+void uv__stdio_noinherit(BYTE* buffer) {
+  int i, count;
+
+  count = CHILD_STDIO_COUNT(buffer);
+  for (i = 0; i < count; i++) {
+    HANDLE handle = CHILD_STDIO_HANDLE(buffer, i);
+    if (handle != INVALID_HANDLE_VALUE) {
+      SetHandleInformation(handle, HANDLE_FLAG_INHERIT, 0);
+    }
+  }
+}
+
+
+int uv__stdio_verify(BYTE* buffer, WORD size) {
+  unsigned int count;
+
+  /* Check the buffer pointer. */
+  if (buffer == NULL)
+    return 0;
+
+  /* Verify that the buffer is at least big enough to hold the count. */
+  if (size < CHILD_STDIO_SIZE(0))
+    return 0;
+
+  /* Verify if the count is within range. */
+  count = CHILD_STDIO_COUNT(buffer);
+  if (count > 256)
+    return 0;
+
+  /* Verify that the buffer size is big enough to hold info for N FDs. */
+  if (size < CHILD_STDIO_SIZE(count))
+    return 0;
+
+  return 1;
+}
+
+
+WORD uv__stdio_size(BYTE* buffer) {
+  return (WORD) CHILD_STDIO_SIZE(CHILD_STDIO_COUNT((buffer)));
+}
+
+
+HANDLE uv__stdio_handle(BYTE* buffer, int fd) {
+  return CHILD_STDIO_HANDLE(buffer, fd);
+}

+ 1247 - 0
Utilities/cmlibuv/src/win/process.c

@@ -0,0 +1,1247 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <io.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <limits.h>
+#include <wchar.h>
+#include <malloc.h>    /* alloca */
+
+#include "uv.h"
+#include "internal.h"
+#include "handle-inl.h"
+#include "req-inl.h"
+
+
+#define SIGKILL         9
+
+
+typedef struct env_var {
+  const WCHAR* const wide;
+  const WCHAR* const wide_eq;
+  const size_t len; /* including null or '=' */
+} env_var_t;
+
+#define E_V(str) { L##str, L##str L"=", sizeof(str) }
+
+static const env_var_t required_vars[] = { /* keep me sorted */
+  E_V("HOMEDRIVE"),
+  E_V("HOMEPATH"),
+  E_V("LOGONSERVER"),
+  E_V("PATH"),
+  E_V("SYSTEMDRIVE"),
+  E_V("SYSTEMROOT"),
+  E_V("TEMP"),
+  E_V("USERDOMAIN"),
+  E_V("USERNAME"),
+  E_V("USERPROFILE"),
+  E_V("WINDIR"),
+};
+static size_t n_required_vars = ARRAY_SIZE(required_vars);
+
+
+static HANDLE uv_global_job_handle_;
+static uv_once_t uv_global_job_handle_init_guard_ = UV_ONCE_INIT;
+
+
+static void uv__init_global_job_handle(void) {
+  /* Create a job object and set it up to kill all contained processes when
+   * it's closed. Since this handle is made non-inheritable and we're not
+   * giving it to anyone, we're the only process holding a reference to it.
+   * That means that if this process exits it is closed and all the processes
+   * it contains are killed. All processes created with uv_spawn that are not
+   * spawned with the UV_PROCESS_DETACHED flag are assigned to this job.
+   *
+   * We're setting the JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK flag so only the
+   * processes that we explicitly add are affected, and *their* subprocesses
+   * are not. This ensures that our child processes are not limited in their
+   * ability to use job control on Windows versions that don't deal with
+   * nested jobs (prior to Windows 8 / Server 2012). It also lets our child
+   * processes create detached processes without explicitly breaking away
+   * from job control (which uv_spawn doesn't, either).
+   */
+  SECURITY_ATTRIBUTES attr;
+  JOBOBJECT_EXTENDED_LIMIT_INFORMATION info;
+
+  memset(&attr, 0, sizeof attr);
+  attr.bInheritHandle = FALSE;
+
+  memset(&info, 0, sizeof info);
+  info.BasicLimitInformation.LimitFlags =
+      JOB_OBJECT_LIMIT_BREAKAWAY_OK |
+      JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK |
+      JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION |
+      JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE;
+
+  uv_global_job_handle_ = CreateJobObjectW(&attr, NULL);
+  if (uv_global_job_handle_ == NULL)
+    uv_fatal_error(GetLastError(), "CreateJobObjectW");
+
+  if (!SetInformationJobObject(uv_global_job_handle_,
+                               JobObjectExtendedLimitInformation,
+                               &info,
+                               sizeof info))
+    uv_fatal_error(GetLastError(), "SetInformationJobObject");
+}
+
+
+static int uv_utf8_to_utf16_alloc(const char* s, WCHAR** ws_ptr) {
+  int ws_len, r;
+  WCHAR* ws;
+
+  ws_len = MultiByteToWideChar(CP_UTF8,
+                               0,
+                               s,
+                               -1,
+                               NULL,
+                               0);
+  if (ws_len <= 0) {
+    return GetLastError();
+  }
+
+  ws = (WCHAR*) uv__malloc(ws_len * sizeof(WCHAR));
+  if (ws == NULL) {
+    return ERROR_OUTOFMEMORY;
+  }
+
+  r = MultiByteToWideChar(CP_UTF8,
+                          0,
+                          s,
+                          -1,
+                          ws,
+                          ws_len);
+  assert(r == ws_len);
+
+  *ws_ptr = ws;
+  return 0;
+}
+
+
+static void uv_process_init(uv_loop_t* loop, uv_process_t* handle) {
+  uv__handle_init(loop, (uv_handle_t*) handle, UV_PROCESS);
+  handle->exit_cb = NULL;
+  handle->pid = 0;
+  handle->exit_signal = 0;
+  handle->wait_handle = INVALID_HANDLE_VALUE;
+  handle->process_handle = INVALID_HANDLE_VALUE;
+  handle->child_stdio_buffer = NULL;
+  handle->exit_cb_pending = 0;
+
+  uv_req_init(loop, (uv_req_t*)&handle->exit_req);
+  handle->exit_req.type = UV_PROCESS_EXIT;
+  handle->exit_req.data = handle;
+}
+
+
+/*
+ * Path search functions
+ */
+
+/*
+ * Helper function for search_path
+ */
+static WCHAR* search_path_join_test(const WCHAR* dir,
+                                    size_t dir_len,
+                                    const WCHAR* name,
+                                    size_t name_len,
+                                    const WCHAR* ext,
+                                    size_t ext_len,
+                                    const WCHAR* cwd,
+                                    size_t cwd_len) {
+  WCHAR *result, *result_pos;
+  DWORD attrs;
+  if (dir_len > 2 && dir[0] == L'\\' && dir[1] == L'\\') {
+    /* It's a UNC path so ignore cwd */
+    cwd_len = 0;
+  } else if (dir_len >= 1 && (dir[0] == L'/' || dir[0] == L'\\')) {
+    /* It's a full path without drive letter, use cwd's drive letter only */
+    cwd_len = 2;
+  } else if (dir_len >= 2 && dir[1] == L':' &&
+      (dir_len < 3 || (dir[2] != L'/' && dir[2] != L'\\'))) {
+    /* It's a relative path with drive letter (e.g. D:../some/file)
+     * Replace drive letter in dir by full cwd if it points to the same drive,
+     * otherwise use the dir only.
+     */
+    if (cwd_len < 2 || _wcsnicmp(cwd, dir, 2) != 0) {
+      cwd_len = 0;
+    } else {
+      dir += 2;
+      dir_len -= 2;
+    }
+  } else if (dir_len > 2 && dir[1] == L':') {
+    /* It's an absolute path with drive letter
+     * Don't use the cwd at all
+     */
+    cwd_len = 0;
+  }
+
+  /* Allocate buffer for output */
+  result = result_pos = (WCHAR*)uv__malloc(sizeof(WCHAR) *
+      (cwd_len + 1 + dir_len + 1 + name_len + 1 + ext_len + 1));
+
+  /* Copy cwd */
+  wcsncpy(result_pos, cwd, cwd_len);
+  result_pos += cwd_len;
+
+  /* Add a path separator if cwd didn't end with one */
+  if (cwd_len && wcsrchr(L"\\/:", result_pos[-1]) == NULL) {
+    result_pos[0] = L'\\';
+    result_pos++;
+  }
+
+  /* Copy dir */
+  wcsncpy(result_pos, dir, dir_len);
+  result_pos += dir_len;
+
+  /* Add a separator if the dir didn't end with one */
+  if (dir_len && wcsrchr(L"\\/:", result_pos[-1]) == NULL) {
+    result_pos[0] = L'\\';
+    result_pos++;
+  }
+
+  /* Copy filename */
+  wcsncpy(result_pos, name, name_len);
+  result_pos += name_len;
+
+  if (ext_len) {
+    /* Add a dot if the filename didn't end with one */
+    if (name_len && result_pos[-1] != '.') {
+      result_pos[0] = L'.';
+      result_pos++;
+    }
+
+    /* Copy extension */
+    wcsncpy(result_pos, ext, ext_len);
+    result_pos += ext_len;
+  }
+
+  /* Null terminator */
+  result_pos[0] = L'\0';
+
+  attrs = GetFileAttributesW(result);
+
+  if (attrs != INVALID_FILE_ATTRIBUTES &&
+      !(attrs & FILE_ATTRIBUTE_DIRECTORY)) {
+    return result;
+  }
+
+  uv__free(result);
+  return NULL;
+}
+
+
+/*
+ * Helper function for search_path
+ */
+static WCHAR* path_search_walk_ext(const WCHAR *dir,
+                                   size_t dir_len,
+                                   const WCHAR *name,
+                                   size_t name_len,
+                                   WCHAR *cwd,
+                                   size_t cwd_len,
+                                   int name_has_ext) {
+  WCHAR* result;
+
+  /* If the name itself has a nonempty extension, try this extension first */
+  if (name_has_ext) {
+    result = search_path_join_test(dir, dir_len,
+                                   name, name_len,
+                                   L"", 0,
+                                   cwd, cwd_len);
+    if (result != NULL) {
+      return result;
+    }
+  }
+
+  /* Try .com extension */
+  result = search_path_join_test(dir, dir_len,
+                                 name, name_len,
+                                 L"com", 3,
+                                 cwd, cwd_len);
+  if (result != NULL) {
+    return result;
+  }
+
+  /* Try .exe extension */
+  result = search_path_join_test(dir, dir_len,
+                                 name, name_len,
+                                 L"exe", 3,
+                                 cwd, cwd_len);
+  if (result != NULL) {
+    return result;
+  }
+
+  return NULL;
+}
+
+
+/*
+ * search_path searches the system path for an executable filename -
+ * the windows API doesn't provide this as a standalone function nor as an
+ * option to CreateProcess.
+ *
+ * It tries to return an absolute filename.
+ *
+ * Furthermore, it tries to follow the semantics that cmd.exe uses, with the
+ * exception that the PATHEXT environment variable isn't used. Since
+ * CreateProcess can start only .com and .exe files, only those extensions are
+ * tried. This behavior matches that of msvcrt's spawn functions.
+ *
+ * - Do not search the path if the filename already contains a path (either
+ *   relative or absolute).
+ *
+ * - If there's really only a filename, check the current directory for file,
+ *   then search all path directories.
+ *
+ * - If filename specified has *any* extension, search for the file with the
+ *   specified extension first.
+ *
+ * - If the literal filename is not found in a directory, try *appending*
+ *   (not replacing) .com first and then .exe.
+ *
+ * - The path variable may contain relative paths; relative paths are relative
+ *   to the cwd.
+ *
+ * - Directories in path may or may not end with a trailing backslash.
+ *
+ * - CMD does not trim leading/trailing whitespace from path/pathext entries
+ *   nor from the environment variables as a whole.
+ *
+ * - When cmd.exe cannot read a directory, it will just skip it and go on
+ *   searching. However, unlike posix-y systems, it will happily try to run a
+ *   file that is not readable/executable; if the spawn fails it will not
+ *   continue searching.
+ *
+ * UNC path support: we are dealing with UNC paths in both the path and the
+ * filename. This is a deviation from what cmd.exe does (it does not let you
+ * start a program by specifying a UNC path on the command line) but this is
+ * really a pointless restriction.
+ *
+ */
+static WCHAR* search_path(const WCHAR *file,
+                            WCHAR *cwd,
+                            const WCHAR *path) {
+  int file_has_dir;
+  WCHAR* result = NULL;
+  WCHAR *file_name_start;
+  WCHAR *dot;
+  const WCHAR *dir_start, *dir_end, *dir_path;
+  size_t dir_len;
+  int name_has_ext;
+
+  size_t file_len = wcslen(file);
+  size_t cwd_len = wcslen(cwd);
+
+  /* If the caller supplies an empty filename,
+   * we're not gonna return c:\windows\.exe -- GFY!
+   */
+  if (file_len == 0
+      || (file_len == 1 && file[0] == L'.')) {
+    return NULL;
+  }
+
+  /* Find the start of the filename so we can split the directory from the */
+  /* name. */
+  for (file_name_start = (WCHAR*)file + file_len;
+       file_name_start > file
+           && file_name_start[-1] != L'\\'
+           && file_name_start[-1] != L'/'
+           && file_name_start[-1] != L':';
+       file_name_start--);
+
+  file_has_dir = file_name_start != file;
+
+  /* Check if the filename includes an extension */
+  dot = wcschr(file_name_start, L'.');
+  name_has_ext = (dot != NULL && dot[1] != L'\0');
+
+  if (file_has_dir) {
+    /* The file has a path inside, don't use path */
+    result = path_search_walk_ext(
+        file, file_name_start - file,
+        file_name_start, file_len - (file_name_start - file),
+        cwd, cwd_len,
+        name_has_ext);
+
+  } else {
+    dir_end = path;
+
+    /* The file is really only a name; look in cwd first, then scan path */
+    result = path_search_walk_ext(L"", 0,
+                                  file, file_len,
+                                  cwd, cwd_len,
+                                  name_has_ext);
+
+    while (result == NULL) {
+      if (*dir_end == L'\0') {
+        break;
+      }
+
+      /* Skip the separator that dir_end now points to */
+      if (dir_end != path || *path == L';') {
+        dir_end++;
+      }
+
+      /* Next slice starts just after where the previous one ended */
+      dir_start = dir_end;
+
+      /* Slice until the next ; or \0 is found */
+      dir_end = wcschr(dir_start, L';');
+      if (dir_end == NULL) {
+        dir_end = wcschr(dir_start, L'\0');
+      }
+
+      /* If the slice is zero-length, don't bother */
+      if (dir_end - dir_start == 0) {
+        continue;
+      }
+
+      dir_path = dir_start;
+      dir_len = dir_end - dir_start;
+
+      /* Adjust if the path is quoted. */
+      if (dir_path[0] == '"' || dir_path[0] == '\'') {
+        ++dir_path;
+        --dir_len;
+      }
+
+      if (dir_path[dir_len - 1] == '"' || dir_path[dir_len - 1] == '\'') {
+        --dir_len;
+      }
+
+      result = path_search_walk_ext(dir_path, dir_len,
+                                    file, file_len,
+                                    cwd, cwd_len,
+                                    name_has_ext);
+    }
+  }
+
+  return result;
+}
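
As a concrete reading of the rules above, a hand-worked example (the paths are illustrative, not from this commit):

    /* Illustrative only: with cwd = L"C:\\Work" and
     * path = L"C:\\Windows\\system32;C:\\Program Files\\CMake\\bin",
     * search_path(L"cmake", cwd, path) sees a bare name with no extension and
     * therefore probes, in order:
     *   C:\Work\cmake.com,                    C:\Work\cmake.exe,
     *   C:\Windows\system32\cmake.com,        C:\Windows\system32\cmake.exe,
     *   C:\Program Files\CMake\bin\cmake.com, C:\Program Files\CMake\bin\cmake.exe
     * returning the first existing non-directory hit (the caller frees it
     * with uv__free), or NULL when nothing matches. */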
+
+
+/*
+ * Quotes command line arguments
+ * Returns a pointer to the end (next char to be written) of the buffer
+ */
+WCHAR* quote_cmd_arg(const WCHAR *source, WCHAR *target) {
+  size_t len = wcslen(source);
+  size_t i;
+  int quote_hit;
+  WCHAR* start;
+
+  if (len == 0) {
+    /* Need double quotation for empty argument */
+    *(target++) = L'"';
+    *(target++) = L'"';
+    return target;
+  }
+
+  if (NULL == wcspbrk(source, L" \t\"")) {
+    /* No quotation needed */
+    wcsncpy(target, source, len);
+    target += len;
+    return target;
+  }
+
+  if (NULL == wcspbrk(source, L"\"\\")) {
+    /*
+     * No embedded double quotes or backslashes, so I can just wrap
+     * quote marks around the whole thing.
+     */
+    *(target++) = L'"';
+    wcsncpy(target, source, len);
+    target += len;
+    *(target++) = L'"';
+    return target;
+  }
+
+  /*
+   * Expected input/output:
+   *   input : hello"world
+   *   output: "hello\"world"
+   *   input : hello""world
+   *   output: "hello\"\"world"
+   *   input : hello\world
+   *   output: hello\world
+   *   input : hello\\world
+   *   output: hello\\world
+   *   input : hello\"world
+   *   output: "hello\\\"world"
+   *   input : hello\\"world
+   *   output: "hello\\\\\"world"
+   *   input : hello world\
+   *   output: "hello world\"
+   */
+
+  *(target++) = L'"';
+  start = target;
+  quote_hit = 1;
+
+  for (i = len; i > 0; --i) {
+    *(target++) = source[i - 1];
+
+    if (quote_hit && source[i - 1] == L'\\') {
+      *(target++) = L'\\';
+    } else if(source[i - 1] == L'"') {
+      quote_hit = 1;
+      *(target++) = L'\\';
+    } else {
+      quote_hit = 0;
+    }
+  }
+  target[0] = L'\0';
+  wcsrev(start);
+  *(target++) = L'"';
+  return target;
+}
+
+
+int make_program_args(char** args, int verbatim_arguments, WCHAR** dst_ptr) {
+  char** arg;
+  WCHAR* dst = NULL;
+  WCHAR* temp_buffer = NULL;
+  size_t dst_len = 0;
+  size_t temp_buffer_len = 0;
+  WCHAR* pos;
+  int arg_count = 0;
+  int err = 0;
+
+  /* Count the required size. */
+  for (arg = args; *arg; arg++) {
+    DWORD arg_len;
+
+    arg_len = MultiByteToWideChar(CP_UTF8,
+                                  0,
+                                  *arg,
+                                  -1,
+                                  NULL,
+                                  0);
+    if (arg_len == 0) {
+      return GetLastError();
+    }
+
+    dst_len += arg_len;
+
+    if (arg_len > temp_buffer_len)
+      temp_buffer_len = arg_len;
+
+    arg_count++;
+  }
+
+  /* Adjust for potential quotes. Also assume the worst-case scenario */
+  /* that every character needs escaping, so we need twice as much space. */
+  dst_len = dst_len * 2 + arg_count * 2;
+
+  /* Allocate buffer for the final command line. */
+  dst = (WCHAR*) uv__malloc(dst_len * sizeof(WCHAR));
+  if (dst == NULL) {
+    err = ERROR_OUTOFMEMORY;
+    goto error;
+  }
+
+  /* Allocate temporary working buffer. */
+  temp_buffer = (WCHAR*) uv__malloc(temp_buffer_len * sizeof(WCHAR));
+  if (temp_buffer == NULL) {
+    err = ERROR_OUTOFMEMORY;
+    goto error;
+  }
+
+  pos = dst;
+  for (arg = args; *arg; arg++) {
+    DWORD arg_len;
+
+    /* Convert argument to wide char. */
+    arg_len = MultiByteToWideChar(CP_UTF8,
+                                  0,
+                                  *arg,
+                                  -1,
+                                  temp_buffer,
+                                  (int) (dst + dst_len - pos));
+    if (arg_len == 0) {
+      err = GetLastError();
+      goto error;
+    }
+
+    if (verbatim_arguments) {
+      /* Copy verbatim. */
+      wcscpy(pos, temp_buffer);
+      pos += arg_len - 1;
+    } else {
+      /* Quote/escape, if needed. */
+      pos = quote_cmd_arg(temp_buffer, pos);
+    }
+
+    *pos++ = *(arg + 1) ? L' ' : L'\0';
+  }
+
+  uv__free(temp_buffer);
+
+  *dst_ptr = dst;
+  return 0;
+
+error:
+  uv__free(dst);
+  uv__free(temp_buffer);
+  return err;
+}
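
A minimal usage sketch of the argument conversion (values are illustrative; error handling elided):

    /* Illustrative only; not part of this commit. */
    static void make_program_args_example(void) {
      char* args[] = { "cmd", "/c", "echo hello world", NULL };
      WCHAR* cmdline = NULL;

      if (make_program_args(args, 0, &cmdline) == 0) {
        /* cmdline is now L"cmd /c \"echo hello world\"": only the argument
         * containing spaces is quoted by quote_cmd_arg(). */
        uv__free(cmdline);
      }
    }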
+
+
+int env_strncmp(const wchar_t* a, int na, const wchar_t* b) {
+  wchar_t* a_eq;
+  wchar_t* b_eq;
+  wchar_t* A;
+  wchar_t* B;
+  int nb;
+  int r;
+
+  if (na < 0) {
+    a_eq = wcschr(a, L'=');
+    assert(a_eq);
+    na = (int)(long)(a_eq - a);
+  } else {
+    na--;
+  }
+  b_eq = wcschr(b, L'=');
+  assert(b_eq);
+  nb = b_eq - b;
+
+  A = alloca((na+1) * sizeof(wchar_t));
+  B = alloca((nb+1) * sizeof(wchar_t));
+
+  r = LCMapStringW(LOCALE_INVARIANT, LCMAP_UPPERCASE, a, na, A, na);
+  assert(r==na);
+  A[na] = L'\0';
+  r = LCMapStringW(LOCALE_INVARIANT, LCMAP_UPPERCASE, b, nb, B, nb);
+  assert(r==nb);
+  B[nb] = L'\0';
+
+  while (1) {
+    wchar_t AA = *A++;
+    wchar_t BB = *B++;
+    if (AA < BB) {
+      return -1;
+    } else if (AA > BB) {
+      return 1;
+    } else if (!AA && !BB) {
+      return 0;
+    }
+  }
+}
+
+
+static int qsort_wcscmp(const void *a, const void *b) {
+  wchar_t* astr = *(wchar_t* const*)a;
+  wchar_t* bstr = *(wchar_t* const*)b;
+  return env_strncmp(astr, -1, bstr);
+}
+
+
+/*
+ * The way Windows takes environment variables is different from what C does;
+ * Windows wants a contiguous block of null-terminated strings, terminated
+ * with an additional null.
+ *
+ * Windows has a few "essential" environment variables. winsock will fail
+ * to initialize if SYSTEMROOT is not defined; some APIs make reference to
+ * TEMP. SYSTEMDRIVE is probably also important. We therefore ensure that
+ * these get defined if the input environment block does not contain any
+ * values for them.
+ *
+ * Also add variables known to Cygwin to be required for correct
+ * subprocess operation in many cases:
+ * https://github.com/Alexpux/Cygwin/blob/b266b04fbbd3a595f02ea149e4306d3ab9b1fe3d/winsup/cygwin/environ.cc#L955
+ *
+ */
+int make_program_env(char* env_block[], WCHAR** dst_ptr) {
+  WCHAR* dst;
+  WCHAR* ptr;
+  char** env;
+  size_t env_len = 0;
+  int len;
+  size_t i;
+  DWORD var_size;
+  size_t env_block_count = 1; /* 1 for null-terminator */
+  WCHAR* dst_copy;
+  WCHAR** ptr_copy;
+  WCHAR** env_copy;
+  DWORD* required_vars_value_len = alloca(n_required_vars * sizeof(DWORD*));
+
+  /* first pass: determine size in UTF-16 */
+  for (env = env_block; *env; env++) {
+    int len;
+    if (strchr(*env, '=')) {
+      len = MultiByteToWideChar(CP_UTF8,
+                                0,
+                                *env,
+                                -1,
+                                NULL,
+                                0);
+      if (len <= 0) {
+        return GetLastError();
+      }
+      env_len += len;
+      env_block_count++;
+    }
+  }
+
+  /* second pass: copy to UTF-16 environment block */
+  dst_copy = (WCHAR*)uv__malloc(env_len * sizeof(WCHAR));
+  if (!dst_copy) {
+    return ERROR_OUTOFMEMORY;
+  }
+  env_copy = alloca(env_block_count * sizeof(WCHAR*));
+
+  ptr = dst_copy;
+  ptr_copy = env_copy;
+  for (env = env_block; *env; env++) {
+    if (strchr(*env, '=')) {
+      len = MultiByteToWideChar(CP_UTF8,
+                                0,
+                                *env,
+                                -1,
+                                ptr,
+                                (int) (env_len - (ptr - dst_copy)));
+      if (len <= 0) {
+        DWORD err = GetLastError();
+        uv__free(dst_copy);
+        return err;
+      }
+      *ptr_copy++ = ptr;
+      ptr += len;
+    }
+  }
+  *ptr_copy = NULL;
+  assert(env_len == ptr - dst_copy);
+
+  /* sort our (UTF-16) copy */
+  qsort(env_copy, env_block_count-1, sizeof(wchar_t*), qsort_wcscmp);
+
+  /* third pass: check for required variables */
+  for (ptr_copy = env_copy, i = 0; i < n_required_vars; ) {
+    int cmp;
+    if (!*ptr_copy) {
+      cmp = -1;
+    } else {
+      cmp = env_strncmp(required_vars[i].wide_eq,
+                        required_vars[i].len,
+                        *ptr_copy);
+    }
+    if (cmp < 0) {
+      /* missing required var */
+      var_size = GetEnvironmentVariableW(required_vars[i].wide, NULL, 0);
+      required_vars_value_len[i] = var_size;
+      if (var_size != 0) {
+        env_len += required_vars[i].len;
+        env_len += var_size;
+      }
+      i++;
+    } else {
+      ptr_copy++;
+      if (cmp == 0)
+        i++;
+    }
+  }
+
+  /* final pass: copy, in sort order, and inserting required variables */
+  dst = uv__malloc((1+env_len) * sizeof(WCHAR));
+  if (!dst) {
+    uv__free(dst_copy);
+    return ERROR_OUTOFMEMORY;
+  }
+
+  for (ptr = dst, ptr_copy = env_copy, i = 0;
+       *ptr_copy || i < n_required_vars;
+       ptr += len) {
+    int cmp;
+    if (i >= n_required_vars) {
+      cmp = 1;
+    } else if (!*ptr_copy) {
+      cmp = -1;
+    } else {
+      cmp = env_strncmp(required_vars[i].wide_eq,
+                        required_vars[i].len,
+                        *ptr_copy);
+    }
+    if (cmp < 0) {
+      /* missing required var */
+      len = required_vars_value_len[i];
+      if (len) {
+        wcscpy(ptr, required_vars[i].wide_eq);
+        ptr += required_vars[i].len;
+        var_size = GetEnvironmentVariableW(required_vars[i].wide,
+                                           ptr,
+                                           (int) (env_len - (ptr - dst)));
+        if (var_size != len-1) { /* race condition? */
+          uv_fatal_error(GetLastError(), "GetEnvironmentVariableW");
+        }
+      }
+      i++;
+    } else {
+      /* copy var from env_block */
+      len = wcslen(*ptr_copy) + 1;
+      wmemcpy(ptr, *ptr_copy, len);
+      ptr_copy++;
+      if (cmp == 0)
+        i++;
+    }
+  }
+
+  /* Terminate with an extra NULL. */
+  assert(env_len == (ptr - dst));
+  *ptr = L'\0';
+
+  uv__free(dst_copy);
+  *dst_ptr = dst;
+  return 0;
+}
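
And a matching sketch for the environment conversion (variable names are illustrative):

    /* Illustrative only; not part of this commit. */
    static void make_program_env_example(void) {
      char* env[] = { "FOO=bar", "PATH=C:\\Tools", NULL };
      WCHAR* env_block = NULL;

      if (make_program_env(env, &env_block) == 0) {
        /* env_block is a sorted, double-NUL-terminated UTF-16 block holding
         * FOO and PATH plus any of the required_vars (SYSTEMROOT, TEMP, ...)
         * that are missing here but present in the parent environment. */
        uv__free(env_block);
      }
    }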
+
+/*
+ * Attempt to find the value of the PATH environment variable in the child's
+ * preprocessed environment.
+ *
+ * If found, a pointer into `env` is returned. If not found, NULL is returned.
+ */
+static WCHAR* find_path(WCHAR *env) {
+  for (; env != NULL && *env != 0; env += wcslen(env) + 1) {
+    if (wcsncmp(env, L"PATH=", 5) == 0)
+      return &env[5];
+  }
+
+  return NULL;
+}
+
+/*
+ * Called on Windows thread-pool thread to indicate that
+ * a child process has exited.
+ */
+static void CALLBACK exit_wait_callback(void* data, BOOLEAN didTimeout) {
+  uv_process_t* process = (uv_process_t*) data;
+  uv_loop_t* loop = process->loop;
+
+  assert(didTimeout == FALSE);
+  assert(process);
+  assert(!process->exit_cb_pending);
+
+  process->exit_cb_pending = 1;
+
+  /* Post completed */
+  POST_COMPLETION_FOR_REQ(loop, &process->exit_req);
+}
+
+
+/* Called on main thread after a child process has exited. */
+void uv_process_proc_exit(uv_loop_t* loop, uv_process_t* handle) {
+  int64_t exit_code;
+  DWORD status;
+
+  assert(handle->exit_cb_pending);
+  handle->exit_cb_pending = 0;
+
+  /* If we're closing, don't call the exit callback. Just schedule a close */
+  /* callback now. */
+  if (handle->flags & UV__HANDLE_CLOSING) {
+    uv_want_endgame(loop, (uv_handle_t*) handle);
+    return;
+  }
+
+  /* Unregister from process notification. */
+  if (handle->wait_handle != INVALID_HANDLE_VALUE) {
+    UnregisterWait(handle->wait_handle);
+    handle->wait_handle = INVALID_HANDLE_VALUE;
+  }
+
+  /* Set the handle to inactive: no callbacks will be made after the exit */
+  /* callback.*/
+  uv__handle_stop(handle);
+
+  if (GetExitCodeProcess(handle->process_handle, &status)) {
+    exit_code = status;
+  } else {
+    /* Unable to obtain the exit code. This should never happen. */
+    exit_code = uv_translate_sys_error(GetLastError());
+  }
+
+  /* Fire the exit callback. */
+  if (handle->exit_cb) {
+    handle->exit_cb(handle, exit_code, handle->exit_signal);
+  }
+}
+
+
+void uv_process_close(uv_loop_t* loop, uv_process_t* handle) {
+  uv__handle_closing(handle);
+
+  if (handle->wait_handle != INVALID_HANDLE_VALUE) {
+    /* This blocks until either the wait was cancelled, or the callback has */
+    /* completed. */
+    BOOL r = UnregisterWaitEx(handle->wait_handle, INVALID_HANDLE_VALUE);
+    if (!r) {
+      /* This should never happen, and if it happens, we can't recover... */
+      uv_fatal_error(GetLastError(), "UnregisterWaitEx");
+    }
+
+    handle->wait_handle = INVALID_HANDLE_VALUE;
+  }
+
+  if (!handle->exit_cb_pending) {
+    uv_want_endgame(loop, (uv_handle_t*)handle);
+  }
+}
+
+
+void uv_process_endgame(uv_loop_t* loop, uv_process_t* handle) {
+  assert(!handle->exit_cb_pending);
+  assert(handle->flags & UV__HANDLE_CLOSING);
+  assert(!(handle->flags & UV_HANDLE_CLOSED));
+
+  /* Clean-up the process handle. */
+  CloseHandle(handle->process_handle);
+
+  uv__handle_close(handle);
+}
+
+
+int uv_spawn(uv_loop_t* loop,
+             uv_process_t* process,
+             const uv_process_options_t* options) {
+  int i;
+  int err = 0;
+  WCHAR* path = NULL, *alloc_path = NULL;
+  BOOL result;
+  WCHAR* application_path = NULL, *application = NULL, *arguments = NULL,
+         *env = NULL, *cwd = NULL;
+  STARTUPINFOW startup;
+  PROCESS_INFORMATION info;
+  DWORD process_flags;
+
+  uv_process_init(loop, process);
+  process->exit_cb = options->exit_cb;
+
+  if (options->flags & (UV_PROCESS_SETGID | UV_PROCESS_SETUID)) {
+    return UV_ENOTSUP;
+  }
+
+  if (options->file == NULL ||
+      options->args == NULL) {
+    return UV_EINVAL;
+  }
+
+  assert(options->file != NULL);
+  assert(!(options->flags & ~(UV_PROCESS_DETACHED |
+                              UV_PROCESS_SETGID |
+                              UV_PROCESS_SETUID |
+                              UV_PROCESS_WINDOWS_HIDE |
+                              UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS)));
+
+  err = uv_utf8_to_utf16_alloc(options->file, &application);
+  if (err)
+    goto done;
+
+  err = make_program_args(
+      options->args,
+      options->flags & UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS,
+      &arguments);
+  if (err)
+    goto done;
+
+  if (options->env) {
+     err = make_program_env(options->env, &env);
+     if (err)
+       goto done;
+  }
+
+  if (options->cwd) {
+    /* Explicit cwd */
+    err = uv_utf8_to_utf16_alloc(options->cwd, &cwd);
+    if (err)
+      goto done;
+
+  } else {
+    /* Inherit cwd */
+    DWORD cwd_len, r;
+
+    cwd_len = GetCurrentDirectoryW(0, NULL);
+    if (!cwd_len) {
+      err = GetLastError();
+      goto done;
+    }
+
+    cwd = (WCHAR*) uv__malloc(cwd_len * sizeof(WCHAR));
+    if (cwd == NULL) {
+      err = ERROR_OUTOFMEMORY;
+      goto done;
+    }
+
+    r = GetCurrentDirectoryW(cwd_len, cwd);
+    if (r == 0 || r >= cwd_len) {
+      err = GetLastError();
+      goto done;
+    }
+  }
+
+  /* Get PATH environment variable. */
+  path = find_path(env);
+  if (path == NULL) {
+    DWORD path_len, r;
+
+    path_len = GetEnvironmentVariableW(L"PATH", NULL, 0);
+    if (path_len == 0) {
+      err = GetLastError();
+      goto done;
+    }
+
+    alloc_path = (WCHAR*) uv__malloc(path_len * sizeof(WCHAR));
+    if (alloc_path == NULL) {
+      err = ERROR_OUTOFMEMORY;
+      goto done;
+    }
+    path = alloc_path;
+
+    r = GetEnvironmentVariableW(L"PATH", path, path_len);
+    if (r == 0 || r >= path_len) {
+      err = GetLastError();
+      goto done;
+    }
+  }
+
+  err = uv__stdio_create(loop, options, &process->child_stdio_buffer);
+  if (err)
+    goto done;
+
+  application_path = search_path(application,
+                                 cwd,
+                                 path);
+  if (application_path == NULL) {
+    /* Not found. */
+    err = ERROR_FILE_NOT_FOUND;
+    goto done;
+  }
+
+  startup.cb = sizeof(startup);
+  startup.lpReserved = NULL;
+  startup.lpDesktop = NULL;
+  startup.lpTitle = NULL;
+  startup.dwFlags = STARTF_USESTDHANDLES | STARTF_USESHOWWINDOW;
+
+  startup.cbReserved2 = uv__stdio_size(process->child_stdio_buffer);
+  startup.lpReserved2 = (BYTE*) process->child_stdio_buffer;
+
+  startup.hStdInput = uv__stdio_handle(process->child_stdio_buffer, 0);
+  startup.hStdOutput = uv__stdio_handle(process->child_stdio_buffer, 1);
+  startup.hStdError = uv__stdio_handle(process->child_stdio_buffer, 2);
+
+  if (options->flags & UV_PROCESS_WINDOWS_HIDE) {
+    /* Use SW_HIDE to avoid any potential process window. */
+    startup.wShowWindow = SW_HIDE;
+  } else {
+    startup.wShowWindow = SW_SHOWDEFAULT;
+  }
+
+  process_flags = CREATE_UNICODE_ENVIRONMENT;
+
+  if (options->flags & UV_PROCESS_DETACHED) {
+    /* Note that we're not setting the CREATE_BREAKAWAY_FROM_JOB flag. That
+     * means that libuv might not let you create a fully daemonized process
+     * when run under job control. However the type of job control that libuv
+     * itself creates doesn't trickle down to subprocesses so they can still
+     * daemonize.
+     *
+     * A reason to not do this is that CREATE_BREAKAWAY_FROM_JOB makes the
+     * CreateProcess call fail if we're under job control that doesn't allow
+     * breakaway.
+     */
+    process_flags |= DETACHED_PROCESS | CREATE_NEW_PROCESS_GROUP;
+  }
+
+  if (!CreateProcessW(application_path,
+                     arguments,
+                     NULL,
+                     NULL,
+                     1,
+                     process_flags,
+                     env,
+                     cwd,
+                     &startup,
+                     &info)) {
+    /* CreateProcessW failed. */
+    err = GetLastError();
+    goto done;
+  }
+
+  /* Spawn succeeded */
+  /* Beyond this point, failure is reported asynchronously. */
+
+  process->process_handle = info.hProcess;
+  process->pid = info.dwProcessId;
+
+  /* If the process isn't spawned as detached, assign to the global job */
+  /* object so windows will kill it when the parent process dies. */
+  if (!(options->flags & UV_PROCESS_DETACHED)) {
+    uv_once(&uv_global_job_handle_init_guard_, uv__init_global_job_handle);
+
+    if (!AssignProcessToJobObject(uv_global_job_handle_, info.hProcess)) {
+      /* AssignProcessToJobObject might fail if this process is under job
+       * control and the job doesn't have the
+       * JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK flag set, on a Windows version
+       * that doesn't support nested jobs.
+       *
+       * When that happens we just swallow the error and continue without
+       * establishing a kill-child-on-parent-exit relationship, otherwise
+       * there would be no way for libuv applications run under job control
+       * to spawn processes at all.
+       */
+      DWORD err = GetLastError();
+      if (err != ERROR_ACCESS_DENIED)
+        uv_fatal_error(err, "AssignProcessToJobObject");
+    }
+  }
+
+  /* Set IPC pid to all IPC pipes. */
+  for (i = 0; i < options->stdio_count; i++) {
+    const uv_stdio_container_t* fdopt = &options->stdio[i];
+    if (fdopt->flags & UV_CREATE_PIPE &&
+        fdopt->data.stream->type == UV_NAMED_PIPE &&
+        ((uv_pipe_t*) fdopt->data.stream)->ipc) {
+      ((uv_pipe_t*) fdopt->data.stream)->pipe.conn.ipc_pid = info.dwProcessId;
+    }
+  }
+
+  /* Setup notifications for when the child process exits. */
+  result = RegisterWaitForSingleObject(&process->wait_handle,
+      process->process_handle, exit_wait_callback, (void*)process, INFINITE,
+      WT_EXECUTEINWAITTHREAD | WT_EXECUTEONLYONCE);
+  if (!result) {
+    uv_fatal_error(GetLastError(), "RegisterWaitForSingleObject");
+  }
+
+  CloseHandle(info.hThread);
+
+  assert(!err);
+
+  /* Make the handle active. It will remain active until the exit callback */
+  /* is made or the handle is closed, whichever happens first. */
+  uv__handle_start(process);
+
+  /* Cleanup, whether we succeeded or failed. */
+ done:
+  uv__free(application);
+  uv__free(application_path);
+  uv__free(arguments);
+  uv__free(cwd);
+  uv__free(env);
+  uv__free(alloc_path);
+
+  if (process->child_stdio_buffer != NULL) {
+    /* Clean up child stdio handles. */
+    uv__stdio_destroy(process->child_stdio_buffer);
+    process->child_stdio_buffer = NULL;
+  }
+
+  return uv_translate_sys_error(err);
+}
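
For context, a minimal embedder-side sketch of calling uv_spawn (program name and callback are illustrative; error handling elided):

    /* Illustrative only; not part of this commit. */
    static void on_child_exit(uv_process_t* req, int64_t exit_status,
                              int term_signal) {
      uv_close((uv_handle_t*) req, NULL);
    }

    static int spawn_example(uv_loop_t* loop) {
      uv_process_t child;
      uv_process_options_t opts;
      char* args[] = { "cmake", "--version", NULL };

      memset(&opts, 0, sizeof(opts));
      opts.file = "cmake";          /* resolved against PATH by search_path() */
      opts.args = args;
      opts.exit_cb = on_child_exit;

      if (uv_spawn(loop, &child, &opts) != 0)
        return -1;
      return uv_run(loop, UV_RUN_DEFAULT);
    }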
+
+
+static int uv__kill(HANDLE process_handle, int signum) {
+  switch (signum) {
+    case SIGTERM:
+    case SIGKILL:
+    case SIGINT: {
+      /* Unconditionally terminate the process. On Windows, killed processes */
+      /* normally return 1. */
+      DWORD status;
+      int err;
+
+      if (TerminateProcess(process_handle, 1))
+        return 0;
+
+      /* If the process already exited before TerminateProcess was called, */
+      /* TerminateProcess will fail with ERROR_ACCESS_DENIED. */
+      err = GetLastError();
+      if (err == ERROR_ACCESS_DENIED &&
+          GetExitCodeProcess(process_handle, &status) &&
+          status != STILL_ACTIVE) {
+        return UV_ESRCH;
+      }
+
+      return uv_translate_sys_error(err);
+    }
+
+    case 0: {
+      /* Health check: is the process still alive? */
+      DWORD status;
+
+      if (!GetExitCodeProcess(process_handle, &status))
+        return uv_translate_sys_error(GetLastError());
+
+      if (status != STILL_ACTIVE)
+        return UV_ESRCH;
+
+      return 0;
+    }
+
+    default:
+      /* Unsupported signal. */
+      return UV_ENOSYS;
+  }
+}
+
+
+int uv_process_kill(uv_process_t* process, int signum) {
+  int err;
+
+  if (process->process_handle == INVALID_HANDLE_VALUE) {
+    return UV_EINVAL;
+  }
+
+  err = uv__kill(process->process_handle, signum);
+  if (err) {
+    return err;  /* err is already translated. */
+  }
+
+  process->exit_signal = signum;
+
+  return 0;
+}
+
+
+int uv_kill(int pid, int signum) {
+  int err;
+  HANDLE process_handle = OpenProcess(PROCESS_TERMINATE |
+    PROCESS_QUERY_INFORMATION, FALSE, pid);
+
+  if (process_handle == NULL) {
+    err = GetLastError();
+    if (err == ERROR_INVALID_PARAMETER) {
+      return UV_ESRCH;
+    } else {
+      return uv_translate_sys_error(err);
+    }
+  }
+
+  err = uv__kill(process_handle, signum);
+  CloseHandle(process_handle);
+
+  return err;  /* err is already translated. */
+}
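
A short sketch of the two kill entry points above (illustrative; `child` and `pid` are assumed to exist in the embedder's code):

    /* Illustrative only; not part of this commit. */
    static void kill_example(uv_process_t* child, int pid) {
      if (uv_process_kill(child, 0) == 0) {
        /* signum 0 is only a liveness probe; the child is still running,  */
        /* so ask for unconditional termination (TerminateProcess above).  */
        uv_process_kill(child, SIGTERM);
      }
      /* By pid instead of handle; returns UV_ESRCH if it already exited. */
      uv_kill(pid, SIGKILL);
    }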

+ 224 - 0
Utilities/cmlibuv/src/win/req-inl.h

@@ -0,0 +1,224 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_WIN_REQ_INL_H_
+#define UV_WIN_REQ_INL_H_
+
+#include <assert.h>
+
+#include "uv.h"
+#include "internal.h"
+
+
+#define SET_REQ_STATUS(req, status)                                     \
+   (req)->u.io.overlapped.Internal = (ULONG_PTR) (status)
+
+#define SET_REQ_ERROR(req, error)                                       \
+  SET_REQ_STATUS((req), NTSTATUS_FROM_WIN32((error)))
+
+#define SET_REQ_SUCCESS(req)                                            \
+  SET_REQ_STATUS((req), STATUS_SUCCESS)
+
+#define GET_REQ_STATUS(req)                                             \
+  ((NTSTATUS) (req)->u.io.overlapped.Internal)
+
+#define REQ_SUCCESS(req)                                                \
+  (NT_SUCCESS(GET_REQ_STATUS((req))))
+
+#define GET_REQ_ERROR(req)                                              \
+  (pRtlNtStatusToDosError(GET_REQ_STATUS((req))))
+
+#define GET_REQ_SOCK_ERROR(req)                                         \
+  (uv_ntstatus_to_winsock_error(GET_REQ_STATUS((req))))
+
+
+#define REGISTER_HANDLE_REQ(loop, handle, req)                          \
+  do {                                                                  \
+    INCREASE_ACTIVE_COUNT((loop), (handle));                            \
+    uv__req_register((loop), (req));                                    \
+  } while (0)
+
+#define UNREGISTER_HANDLE_REQ(loop, handle, req)                        \
+  do {                                                                  \
+    DECREASE_ACTIVE_COUNT((loop), (handle));                            \
+    uv__req_unregister((loop), (req));                                  \
+  } while (0)
+
+
+#define UV_SUCCEEDED_WITHOUT_IOCP(result)                               \
+  ((result) && (handle->flags & UV_HANDLE_SYNC_BYPASS_IOCP))
+
+#define UV_SUCCEEDED_WITH_IOCP(result)                                  \
+  ((result) || (GetLastError() == ERROR_IO_PENDING))
+
+
+#define POST_COMPLETION_FOR_REQ(loop, req)                              \
+  if (!PostQueuedCompletionStatus((loop)->iocp,                         \
+                                  0,                                    \
+                                  0,                                    \
+                                  &((req)->u.io.overlapped))) {         \
+    uv_fatal_error(GetLastError(), "PostQueuedCompletionStatus");       \
+  }
+
+
+INLINE static void uv_req_init(uv_loop_t* loop, uv_req_t* req) {
+  req->type = UV_UNKNOWN_REQ;
+  SET_REQ_SUCCESS(req);
+}
+
+
+INLINE static uv_req_t* uv_overlapped_to_req(OVERLAPPED* overlapped) {
+  return CONTAINING_RECORD(overlapped, uv_req_t, u.io.overlapped);
+}
+
+
+INLINE static void uv_insert_pending_req(uv_loop_t* loop, uv_req_t* req) {
+  req->next_req = NULL;
+  if (loop->pending_reqs_tail) {
+#ifdef _DEBUG
+    /* Ensure the request is not already in the queue, or the queue
+     * will get corrupted.
+     */
+    uv_req_t* current = loop->pending_reqs_tail;
+    do {
+      assert(req != current);
+      current = current->next_req;
+    } while(current != loop->pending_reqs_tail);
+#endif
+
+    req->next_req = loop->pending_reqs_tail->next_req;
+    loop->pending_reqs_tail->next_req = req;
+    loop->pending_reqs_tail = req;
+  } else {
+    req->next_req = req;
+    loop->pending_reqs_tail = req;
+  }
+}
+
+
+#define DELEGATE_STREAM_REQ(loop, req, method, handle_at)                     \
+  do {                                                                        \
+    switch (((uv_handle_t*) (req)->handle_at)->type) {                        \
+      case UV_TCP:                                                            \
+        uv_process_tcp_##method##_req(loop,                                   \
+                                      (uv_tcp_t*) ((req)->handle_at),         \
+                                      req);                                   \
+        break;                                                                \
+                                                                              \
+      case UV_NAMED_PIPE:                                                     \
+        uv_process_pipe_##method##_req(loop,                                  \
+                                       (uv_pipe_t*) ((req)->handle_at),       \
+                                       req);                                  \
+        break;                                                                \
+                                                                              \
+      case UV_TTY:                                                            \
+        uv_process_tty_##method##_req(loop,                                   \
+                                      (uv_tty_t*) ((req)->handle_at),         \
+                                      req);                                   \
+        break;                                                                \
+                                                                              \
+      default:                                                                \
+        assert(0);                                                            \
+    }                                                                         \
+  } while (0)
+
+
+INLINE static int uv_process_reqs(uv_loop_t* loop) {
+  uv_req_t* req;
+  uv_req_t* first;
+  uv_req_t* next;
+
+  if (loop->pending_reqs_tail == NULL)
+    return 0;
+
+  first = loop->pending_reqs_tail->next_req;
+  next = first;
+  loop->pending_reqs_tail = NULL;
+
+  while (next != NULL) {
+    req = next;
+    next = req->next_req != first ? req->next_req : NULL;
+
+    switch (req->type) {
+      case UV_READ:
+        DELEGATE_STREAM_REQ(loop, req, read, data);
+        break;
+
+      case UV_WRITE:
+        DELEGATE_STREAM_REQ(loop, (uv_write_t*) req, write, handle);
+        break;
+
+      case UV_ACCEPT:
+        DELEGATE_STREAM_REQ(loop, req, accept, data);
+        break;
+
+      case UV_CONNECT:
+        DELEGATE_STREAM_REQ(loop, (uv_connect_t*) req, connect, handle);
+        break;
+
+      case UV_SHUTDOWN:
+        /* Tcp shutdown requests don't come here. */
+        assert(((uv_shutdown_t*) req)->handle->type == UV_NAMED_PIPE);
+        uv_process_pipe_shutdown_req(
+            loop,
+            (uv_pipe_t*) ((uv_shutdown_t*) req)->handle,
+            (uv_shutdown_t*) req);
+        break;
+
+      case UV_UDP_RECV:
+        uv_process_udp_recv_req(loop, (uv_udp_t*) req->data, req);
+        break;
+
+      case UV_UDP_SEND:
+        uv_process_udp_send_req(loop,
+                                ((uv_udp_send_t*) req)->handle,
+                                (uv_udp_send_t*) req);
+        break;
+
+      case UV_WAKEUP:
+        uv_process_async_wakeup_req(loop, (uv_async_t*) req->data, req);
+        break;
+
+      case UV_SIGNAL_REQ:
+        uv_process_signal_req(loop, (uv_signal_t*) req->data, req);
+        break;
+
+      case UV_POLL_REQ:
+        uv_process_poll_req(loop, (uv_poll_t*) req->data, req);
+        break;
+
+      case UV_PROCESS_EXIT:
+        uv_process_proc_exit(loop, (uv_process_t*) req->data);
+        break;
+
+      case UV_FS_EVENT_REQ:
+        uv_process_fs_event_req(loop, req, (uv_fs_event_t*) req->data);
+        break;
+
+      default:
+        assert(0);
+    }
+  }
+
+  return 1;
+}
+
+#endif /* UV_WIN_REQ_INL_H_ */
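
To make the dispatch above concrete, this is roughly what DELEGATE_STREAM_REQ(loop, (uv_write_t*) req, write, handle) expands to (hand-expanded for illustration, not generated code):

    switch (((uv_handle_t*) ((uv_write_t*) req)->handle)->type) {
      case UV_TCP:
        uv_process_tcp_write_req(loop, (uv_tcp_t*) ((uv_write_t*) req)->handle,
                                 (uv_write_t*) req);
        break;
      case UV_NAMED_PIPE:
        uv_process_pipe_write_req(loop, (uv_pipe_t*) ((uv_write_t*) req)->handle,
                                  (uv_write_t*) req);
        break;
      case UV_TTY:
        uv_process_tty_write_req(loop, (uv_tty_t*) ((uv_write_t*) req)->handle,
                                 (uv_write_t*) req);
        break;
      default:
        assert(0);
    }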

+ 25 - 0
Utilities/cmlibuv/src/win/req.c

@@ -0,0 +1,25 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+
+#include "uv.h"
+#include "internal.h"

+ 356 - 0
Utilities/cmlibuv/src/win/signal.c

@@ -0,0 +1,356 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <signal.h>
+
+#include "uv.h"
+#include "internal.h"
+#include "handle-inl.h"
+#include "req-inl.h"
+
+
+RB_HEAD(uv_signal_tree_s, uv_signal_s);
+
+static struct uv_signal_tree_s uv__signal_tree = RB_INITIALIZER(uv__signal_tree);
+static ssize_t volatile uv__signal_control_handler_refs = 0;
+static CRITICAL_SECTION uv__signal_lock;
+
+
+void uv_signals_init() {
+  InitializeCriticalSection(&uv__signal_lock);
+}
+
+
+static int uv__signal_compare(uv_signal_t* w1, uv_signal_t* w2) {
+  /* Compare signums first so all watchers with the same signum end up */
+  /* adjacent. */
+  if (w1->signum < w2->signum) return -1;
+  if (w1->signum > w2->signum) return 1;
+
+  /* Sort by loop pointer, so we can easily look up the first item after */
+  /* { .signum = x, .loop = NULL } */
+  if ((uintptr_t) w1->loop < (uintptr_t) w2->loop) return -1;
+  if ((uintptr_t) w1->loop > (uintptr_t) w2->loop) return 1;
+
+  if ((uintptr_t) w1 < (uintptr_t) w2) return -1;
+  if ((uintptr_t) w1 > (uintptr_t) w2) return 1;
+
+  return 0;
+}
+
+
+RB_GENERATE_STATIC(uv_signal_tree_s, uv_signal_s, tree_entry, uv__signal_compare);
+
+
+/*
+ * Dispatches signal {signum} to all active uv_signal_t watchers in all loops.
+ * Returns 1 if the signal was dispatched to any watcher, or 0 if there were
+ * no active signal watchers observing this signal.
+ */
+int uv__signal_dispatch(int signum) {
+  uv_signal_t lookup;
+  uv_signal_t* handle;
+  int dispatched = 0;
+
+  EnterCriticalSection(&uv__signal_lock);
+
+  lookup.signum = signum;
+  lookup.loop = NULL;
+
+  for (handle = RB_NFIND(uv_signal_tree_s, &uv__signal_tree, &lookup);
+       handle != NULL && handle->signum == signum;
+       handle = RB_NEXT(uv_signal_tree_s, &uv__signal_tree, handle)) {
+    unsigned long previous = InterlockedExchange(
+            (volatile LONG*) &handle->pending_signum, signum);
+
+    if (!previous) {
+      POST_COMPLETION_FOR_REQ(handle->loop, &handle->signal_req);
+    }
+
+    dispatched = 1;
+  }
+
+  LeaveCriticalSection(&uv__signal_lock);
+
+  return dispatched;
+}
+
+
+static BOOL WINAPI uv__signal_control_handler(DWORD type) {
+  switch (type) {
+    case CTRL_C_EVENT:
+      return uv__signal_dispatch(SIGINT);
+
+    case CTRL_BREAK_EVENT:
+      return uv__signal_dispatch(SIGBREAK);
+
+    case CTRL_CLOSE_EVENT:
+      if (uv__signal_dispatch(SIGHUP)) {
+        /* Windows will terminate the process after the control handler */
+        /* returns, whether or not we handled the signal. Therefore block */
+        /* the signal handler so the main loop has some time to pick up the */
+        /* signal and do something for a few seconds. */
+        Sleep(INFINITE);
+        return TRUE;
+      }
+      return FALSE;
+
+    case CTRL_LOGOFF_EVENT:
+    case CTRL_SHUTDOWN_EVENT:
+      /* These signals are only sent to services. Services have their own */
+      /* notification mechanism, so there's no point in handling these. */
+
+    default:
+      /* We don't handle these. */
+      return FALSE;
+  }
+}
+
+
+static int uv__signal_register_control_handler() {
+  /* When this function is called, the uv__signal_lock must be held. */
+
+  /* If the console control handler has already been hooked, just add a */
+  /* reference. */
+  if (uv__signal_control_handler_refs > 0) {
+    uv__signal_control_handler_refs++;
+    return 0;
+  }
+
+  if (!SetConsoleCtrlHandler(uv__signal_control_handler, TRUE))
+    return GetLastError();
+
+  uv__signal_control_handler_refs++;
+
+  return 0;
+}
+
+
+static void uv__signal_unregister_control_handler() {
+  /* When this function is called, the uv__signal_lock must be held. */
+  BOOL r;
+
+  /* Don't unregister if the number of console control handlers exceeds one. */
+  /* Just remove a reference in that case. */
+  if (uv__signal_control_handler_refs > 1) {
+    uv__signal_control_handler_refs--;
+    return;
+  }
+
+  assert(uv__signal_control_handler_refs == 1);
+
+  r = SetConsoleCtrlHandler(uv__signal_control_handler, FALSE);
+  /* This should never fail; if it does it is probably a bug in libuv. */
+  assert(r);
+
+  uv__signal_control_handler_refs--;
+}
+
+
+static int uv__signal_register(int signum) {
+  switch (signum) {
+    case SIGINT:
+    case SIGBREAK:
+    case SIGHUP:
+      return uv__signal_register_control_handler();
+
+    case SIGWINCH:
+      /* SIGWINCH is generated in tty.c. No need to register anything. */
+      return 0;
+
+    case SIGILL:
+    case SIGABRT_COMPAT:
+    case SIGFPE:
+    case SIGSEGV:
+    case SIGTERM:
+    case SIGABRT:
+      /* Signal is never raised. */
+      return 0;
+
+    default:
+      /* Invalid signal. */
+      return ERROR_INVALID_PARAMETER;
+  }
+}
+
+
+static void uv__signal_unregister(int signum) {
+  switch (signum) {
+    case SIGINT:
+    case SIGBREAK:
+    case SIGHUP:
+      uv__signal_unregister_control_handler();
+      return;
+
+    case SIGWINCH:
+      /* SIGWINCH is generated in tty.c. No need to unregister anything. */
+      return;
+
+    case SIGILL:
+    case SIGABRT_COMPAT:
+    case SIGFPE:
+    case SIGSEGV:
+    case SIGTERM:
+    case SIGABRT:
+      /* Nothing is registered for this signal. */
+      return;
+
+    default:
+      /* Libuv bug. */
+      assert(0 && "Invalid signum");
+      return;
+  }
+}
+
+
+int uv_signal_init(uv_loop_t* loop, uv_signal_t* handle) {
+  uv_req_t* req;
+
+  uv__handle_init(loop, (uv_handle_t*) handle, UV_SIGNAL);
+  handle->pending_signum = 0;
+  handle->signum = 0;
+  handle->signal_cb = NULL;
+
+  req = &handle->signal_req;
+  uv_req_init(loop, req);
+  req->type = UV_SIGNAL_REQ;
+  req->data = handle;
+
+  return 0;
+}
+
+
+int uv_signal_stop(uv_signal_t* handle) {
+  uv_signal_t* removed_handle;
+
+  /* If the watcher wasn't started, this is a no-op. */
+  if (handle->signum == 0)
+    return 0;
+
+  EnterCriticalSection(&uv__signal_lock);
+
+  uv__signal_unregister(handle->signum);
+
+  removed_handle = RB_REMOVE(uv_signal_tree_s, &uv__signal_tree, handle);
+  assert(removed_handle == handle);
+
+  LeaveCriticalSection(&uv__signal_lock);
+
+  handle->signum = 0;
+  uv__handle_stop(handle);
+
+  return 0;
+}
+
+
+int uv_signal_start(uv_signal_t* handle, uv_signal_cb signal_cb, int signum) {
+  int err;
+
+  /* If the user supplies signum == 0, return an error immediately. If the */
+  /* signum is otherwise invalid, uv__signal_register will find out */
+  /* eventually. */
+  if (signum == 0) {
+    return UV_EINVAL;
+  }
+
+  /* Short circuit: if the signal watcher is already watching {signum} don't */
+  /* go through the process of deregistering and registering the handler. */
+  /* Additionally, this avoids pending signals getting lost in the (small) */
+  /* time frame that handle->signum == 0. */
+  if (signum == handle->signum) {
+    handle->signal_cb = signal_cb;
+    return 0;
+  }
+
+  /* If the signal handler was already active, stop it first. */
+  if (handle->signum != 0) {
+    int r = uv_signal_stop(handle);
+    /* uv_signal_stop is infallible. */
+    assert(r == 0);
+  }
+
+  EnterCriticalSection(&uv__signal_lock);
+
+  err = uv__signal_register(signum);
+  if (err) {
+    /* Uh-oh, didn't work. */
+    LeaveCriticalSection(&uv__signal_lock);
+    return uv_translate_sys_error(err);
+  }
+
+  handle->signum = signum;
+  RB_INSERT(uv_signal_tree_s, &uv__signal_tree, handle);
+
+  LeaveCriticalSection(&uv__signal_lock);
+
+  handle->signal_cb = signal_cb;
+  uv__handle_start(handle);
+
+  return 0;
+}
+
+
+void uv_process_signal_req(uv_loop_t* loop, uv_signal_t* handle,
+    uv_req_t* req) {
+  long dispatched_signum;
+
+  assert(handle->type == UV_SIGNAL);
+  assert(req->type == UV_SIGNAL_REQ);
+
+  dispatched_signum = InterlockedExchange(
+          (volatile LONG*) &handle->pending_signum, 0);
+  assert(dispatched_signum != 0);
+
+  /* Check if the pending signal equals the signum that we are watching for. */
+  /* These can get out of sync when the handler is stopped and restarted */
+  /* while the signal_req is pending. */
+  if (dispatched_signum == handle->signum)
+    handle->signal_cb(handle, dispatched_signum);
+
+  if (handle->flags & UV__HANDLE_CLOSING) {
+    /* When it is closing, it must be stopped at this point. */
+    assert(handle->signum == 0);
+    uv_want_endgame(loop, (uv_handle_t*) handle);
+  }
+}
+
+
+void uv_signal_close(uv_loop_t* loop, uv_signal_t* handle) {
+  uv_signal_stop(handle);
+  uv__handle_closing(handle);
+
+  if (handle->pending_signum == 0) {
+    uv_want_endgame(loop, (uv_handle_t*) handle);
+  }
+}
+
+
+void uv_signal_endgame(uv_loop_t* loop, uv_signal_t* handle) {
+  assert(handle->flags & UV__HANDLE_CLOSING);
+  assert(!(handle->flags & UV_HANDLE_CLOSED));
+
+  assert(handle->signum == 0);
+  assert(handle->pending_signum == 0);
+
+  handle->flags |= UV_HANDLE_CLOSED;
+
+  uv__handle_close(handle);
+}
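
For orientation, here is a minimal, hypothetical sketch (not part of this commit) of how a consumer of the public signal API implemented above might watch for Ctrl+C. On Windows, uv_signal_start() routes through uv__signal_register() and installs the console control handler defined in this file; the names on_sigint and the plain <uv.h> include are illustrative only (inside CMake the header is wrapped as cm_uv.h).

#include <stdio.h>
#include <signal.h>
#include <uv.h>

/* Runs on the loop thread when uv_process_signal_req() delivers SIGINT. */
static void on_sigint(uv_signal_t* handle, int signum) {
  printf("got signal %d, shutting down\n", signum);
  uv_signal_stop(handle);   /* infallible, see uv_signal_stop() above */
  uv_stop(handle->loop);
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_signal_t sig;

  uv_signal_init(loop, &sig);                /* sets up the UV_SIGNAL_REQ */
  uv_signal_start(&sig, on_sigint, SIGINT);  /* hooks SetConsoleCtrlHandler */

  uv_run(loop, UV_RUN_DEFAULT);

  /* Close the handle and let the loop process the endgame before exiting. */
  uv_close((uv_handle_t*) &sig, NULL);
  uv_run(loop, UV_RUN_DEFAULT);
  return uv_loop_close(loop);
}
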

+ 42 - 0
Utilities/cmlibuv/src/win/snprintf.c

@@ -0,0 +1,42 @@
+/* Copyright the libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#if defined(_MSC_VER) && _MSC_VER < 1900
+
+#include <stdio.h>
+#include <stdarg.h>
+
+/* Emulate snprintf() on MSVC < 2015; _snprintf() doesn't zero-terminate the
+ * buffer on overflow.
+ */
+int snprintf(char* buf, size_t len, const char* fmt, ...) {
+  int n;
+  va_list ap;
+  va_start(ap, fmt);
+
+  n = _vscprintf(fmt, ap);
+  vsnprintf_s(buf, len, _TRUNCATE, fmt, ap);
+
+  va_end(ap);
+  return n;
+}
+
+#endif

Some files were not shown because too many files changed in this diff