poll.c
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>
#include <io.h>
#include <string.h>  /* memset, memcmp */

#include "uv.h"
#include "internal.h"
#include "handle-inl.h"
#include "req-inl.h"


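/* Provider GUIDs of the MSAFD Winsock provider catalog entries. Sockets
 * backed by one of these providers expose an AFD base handle and can use the
 * fast poll mechanism below; anything else falls back to the slow,
 * select()-based path. */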
static const GUID uv_msafd_provider_ids[UV_MSAFD_PROVIDER_COUNT] = {
  {0xe70f1aa0, 0xab8b, 0x11cf,
      {0x8c, 0xa3, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92}},
  {0xf9eab0c0, 0x26d4, 0x11d0,
      {0xbb, 0xbf, 0x00, 0xaa, 0x00, 0x6c, 0x34, 0xe4}},
  {0x9fc48064, 0x7298, 0x43e4,
      {0xb7, 0xbd, 0x18, 0x1f, 0x20, 0x89, 0x79, 0x2a}}
};

/* Mirrors the layout of fd_set, but with room for exactly one socket. */
typedef struct uv_single_fd_set_s {
  unsigned int fd_count;
  SOCKET fd_array[1];
} uv_single_fd_set_t;


static OVERLAPPED overlapped_dummy_;
static uv_once_t overlapped_dummy_init_guard_ = UV_ONCE_INIT;

static AFD_POLL_INFO afd_poll_info_dummy_;


static void uv__init_overlapped_dummy(void) {
  HANDLE event;

  event = CreateEvent(NULL, TRUE, TRUE, NULL);
  if (event == NULL)
    uv_fatal_error(GetLastError(), "CreateEvent");

  memset(&overlapped_dummy_, 0, sizeof overlapped_dummy_);
  /* Setting the low-order bit of hEvent tells Windows not to queue a
   * completion packet to the I/O completion port when this overlapped
   * operation completes. */
  overlapped_dummy_.hEvent = (HANDLE) ((uintptr_t) event | 1);
}

static OVERLAPPED* uv__get_overlapped_dummy(void) {
  uv_once(&overlapped_dummy_init_guard_, uv__init_overlapped_dummy);
  return &overlapped_dummy_;
}


static AFD_POLL_INFO* uv__get_afd_poll_info_dummy(void) {
  return &afd_poll_info_dummy_;
}


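/* Fast poll mechanism: poll requests are issued directly against the MSAFD
 * driver (via uv_msafd_poll) on a dedicated "peer" socket, with completions
 * delivered through the loop's completion port. Up to two requests are kept
 * in flight per handle so that polling can be re-armed without a gap while
 * the previous request is still draining. */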
static void uv__fast_poll_submit_poll_req(uv_loop_t* loop, uv_poll_t* handle) {
  uv_req_t* req;
  AFD_POLL_INFO* afd_poll_info;
  int result;

  /* Find a poll req that has not been submitted yet. */
  if (handle->submitted_events_1 == 0) {
    req = &handle->poll_req_1;
    afd_poll_info = &handle->afd_poll_info_1;
    handle->submitted_events_1 = handle->events;
    handle->mask_events_1 = 0;
    handle->mask_events_2 = handle->events;
  } else if (handle->submitted_events_2 == 0) {
    req = &handle->poll_req_2;
    afd_poll_info = &handle->afd_poll_info_2;
    handle->submitted_events_2 = handle->events;
    handle->mask_events_1 = handle->events;
    handle->mask_events_2 = 0;
  } else {
    /* Just wait until there's an unsubmitted req. This will happen almost
     * immediately as one of the 2 outstanding requests is about to return.
     * When this happens, uv__fast_poll_process_poll_req will be called, and
     * the pending events, if needed, will be processed in a subsequent
     * request. */
    return;
  }

  /* Setting Exclusive to TRUE makes the other outstanding poll request
   * return early, if there is one. */
  afd_poll_info->Exclusive = TRUE;
  afd_poll_info->NumberOfHandles = 1;
  afd_poll_info->Timeout.QuadPart = INT64_MAX;
  afd_poll_info->Handles[0].Handle = (HANDLE) handle->socket;
  afd_poll_info->Handles[0].Status = 0;
  afd_poll_info->Handles[0].Events = 0;

  if (handle->events & UV_READABLE) {
    afd_poll_info->Handles[0].Events |= AFD_POLL_RECEIVE |
        AFD_POLL_DISCONNECT | AFD_POLL_ACCEPT | AFD_POLL_ABORT;
  } else {
    if (handle->events & UV_DISCONNECT) {
      afd_poll_info->Handles[0].Events |= AFD_POLL_DISCONNECT;
    }
  }

  if (handle->events & UV_WRITABLE) {
    afd_poll_info->Handles[0].Events |= AFD_POLL_SEND | AFD_POLL_CONNECT_FAIL;
  }

  memset(&req->u.io.overlapped, 0, sizeof req->u.io.overlapped);

  result = uv_msafd_poll((SOCKET) handle->peer_socket,
                         afd_poll_info,
                         afd_poll_info,
                         &req->u.io.overlapped);
  if (result != 0 && WSAGetLastError() != WSA_IO_PENDING) {
    /* Queue this req, reporting an error. */
    SET_REQ_ERROR(req, WSAGetLastError());
    uv_insert_pending_req(loop, req);
  }
}


static void uv__fast_poll_process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
    uv_req_t* req) {
  unsigned char mask_events;
  AFD_POLL_INFO* afd_poll_info;

  if (req == &handle->poll_req_1) {
    afd_poll_info = &handle->afd_poll_info_1;
    handle->submitted_events_1 = 0;
    mask_events = handle->mask_events_1;
  } else if (req == &handle->poll_req_2) {
    afd_poll_info = &handle->afd_poll_info_2;
    handle->submitted_events_2 = 0;
    mask_events = handle->mask_events_2;
  } else {
    assert(0);
    return;
  }

  /* Report an error unless the select was just interrupted. */
  if (!REQ_SUCCESS(req)) {
    DWORD error = GET_REQ_SOCK_ERROR(req);
    if (error != WSAEINTR && handle->events != 0) {
      handle->events = 0; /* Stop the watcher */
      handle->poll_cb(handle, uv_translate_sys_error(error), 0);
    }

  } else if (afd_poll_info->NumberOfHandles >= 1) {
    unsigned char events = 0;

    if ((afd_poll_info->Handles[0].Events & (AFD_POLL_RECEIVE |
        AFD_POLL_DISCONNECT | AFD_POLL_ACCEPT | AFD_POLL_ABORT)) != 0) {
      events |= UV_READABLE;
      if ((afd_poll_info->Handles[0].Events & AFD_POLL_DISCONNECT) != 0) {
        events |= UV_DISCONNECT;
      }
    }
    if ((afd_poll_info->Handles[0].Events & (AFD_POLL_SEND |
        AFD_POLL_CONNECT_FAIL)) != 0) {
      events |= UV_WRITABLE;
    }

    events &= handle->events & ~mask_events;

    if (afd_poll_info->Handles[0].Events & AFD_POLL_LOCAL_CLOSE) {
      /* Stop polling. */
      handle->events = 0;
      if (uv__is_active(handle))
        uv__handle_stop(handle);
    }

    if (events != 0) {
      handle->poll_cb(handle, 0, events);
    }
  }

  if ((handle->events & ~(handle->submitted_events_1 |
      handle->submitted_events_2)) != 0) {
    uv__fast_poll_submit_poll_req(loop, handle);
  } else if ((handle->flags & UV_HANDLE_CLOSING) &&
             handle->submitted_events_1 == 0 &&
             handle->submitted_events_2 == 0) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
  }
}


static SOCKET uv__fast_poll_create_peer_socket(HANDLE iocp,
    WSAPROTOCOL_INFOW* protocol_info) {
  SOCKET sock = 0;

  sock = WSASocketW(protocol_info->iAddressFamily,
                    protocol_info->iSocketType,
                    protocol_info->iProtocol,
                    protocol_info,
                    0,
                    WSA_FLAG_OVERLAPPED);
  if (sock == INVALID_SOCKET) {
    return INVALID_SOCKET;
  }

  if (!SetHandleInformation((HANDLE) sock, HANDLE_FLAG_INHERIT, 0)) {
    goto error;
  }

  if (CreateIoCompletionPort((HANDLE) sock,
                             iocp,
                             (ULONG_PTR) sock,
                             0) == NULL) {
    goto error;
  }

  return sock;

 error:
  closesocket(sock);
  return INVALID_SOCKET;
}


static SOCKET uv__fast_poll_get_peer_socket(uv_loop_t* loop,
    WSAPROTOCOL_INFOW* protocol_info) {
  int index, i;
  SOCKET peer_socket;

  index = -1;
  for (i = 0; (size_t) i < ARRAY_SIZE(uv_msafd_provider_ids); i++) {
    if (memcmp((void*) &protocol_info->ProviderId,
               (void*) &uv_msafd_provider_ids[i],
               sizeof protocol_info->ProviderId) == 0) {
      index = i;
    }
  }

  /* Check if the protocol uses an msafd socket. */
  if (index < 0) {
    return INVALID_SOCKET;
  }

  /* If we didn't try to create a peer socket yet, try to make one. Don't try
   * again if the peer socket creation failed earlier for the same protocol. */
  peer_socket = loop->poll_peer_sockets[index];
  if (peer_socket == 0) {
    peer_socket = uv__fast_poll_create_peer_socket(loop->iocp, protocol_info);
    loop->poll_peer_sockets[index] = peer_socket;
  }

  return peer_socket;
}


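/* Slow poll mechanism: when the socket's provider is not MSAFD-based, each
 * poll request is serviced by a select() call on a thread-pool thread
 * (QueueUserWorkItem); the result is posted back to the loop's completion
 * port when select() returns. */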
static DWORD WINAPI uv__slow_poll_thread_proc(void* arg) {
  uv_req_t* req = (uv_req_t*) arg;
  uv_poll_t* handle = (uv_poll_t*) req->data;
  unsigned char reported_events;
  int r;
  uv_single_fd_set_t rfds, wfds, efds;
  struct timeval timeout;

  assert(handle->type == UV_POLL);
  assert(req->type == UV_POLL_REQ);

  if (handle->events & UV_READABLE) {
    rfds.fd_count = 1;
    rfds.fd_array[0] = handle->socket;
  } else {
    rfds.fd_count = 0;
  }

  if (handle->events & UV_WRITABLE) {
    wfds.fd_count = 1;
    wfds.fd_array[0] = handle->socket;
    efds.fd_count = 1;
    efds.fd_array[0] = handle->socket;
  } else {
    wfds.fd_count = 0;
    efds.fd_count = 0;
  }

  /* Make the select() time out after 3 minutes. If select() hangs because the
   * user closed the socket, we will at least not hang indefinitely. */
  timeout.tv_sec = 3 * 60;
  timeout.tv_usec = 0;

  r = select(1, (fd_set*) &rfds, (fd_set*) &wfds, (fd_set*) &efds, &timeout);
  if (r == SOCKET_ERROR) {
    /* Queue this req, reporting an error. */
    SET_REQ_ERROR(req, WSAGetLastError());
    POST_COMPLETION_FOR_REQ(handle->loop, req);
    return 0;
  }

  reported_events = 0;

  if (r > 0) {
    if (rfds.fd_count > 0) {
      assert(rfds.fd_count == 1);
      assert(rfds.fd_array[0] == handle->socket);
      reported_events |= UV_READABLE;
    }

    if (wfds.fd_count > 0) {
      assert(wfds.fd_count == 1);
      assert(wfds.fd_array[0] == handle->socket);
      reported_events |= UV_WRITABLE;
    } else if (efds.fd_count > 0) {
      assert(efds.fd_count == 1);
      assert(efds.fd_array[0] == handle->socket);
      reported_events |= UV_WRITABLE;
    }
  }

  SET_REQ_SUCCESS(req);
  req->u.io.overlapped.InternalHigh = (DWORD) reported_events;
  POST_COMPLETION_FOR_REQ(handle->loop, req);

  return 0;
}


static void uv__slow_poll_submit_poll_req(uv_loop_t* loop, uv_poll_t* handle) {
  uv_req_t* req;

  /* Find a poll req that has not been submitted yet. */
  if (handle->submitted_events_1 == 0) {
    req = &handle->poll_req_1;
    handle->submitted_events_1 = handle->events;
    handle->mask_events_1 = 0;
    handle->mask_events_2 = handle->events;
  } else if (handle->submitted_events_2 == 0) {
    req = &handle->poll_req_2;
    handle->submitted_events_2 = handle->events;
    handle->mask_events_1 = handle->events;
    handle->mask_events_2 = 0;
  } else {
    assert(0);
    return;
  }

  if (!QueueUserWorkItem(uv__slow_poll_thread_proc,
                         (void*) req,
                         WT_EXECUTELONGFUNCTION)) {
    /* Make this req pending, reporting an error. */
    SET_REQ_ERROR(req, GetLastError());
    uv_insert_pending_req(loop, req);
  }
}


static void uv__slow_poll_process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
    uv_req_t* req) {
  unsigned char mask_events;
  int err;

  if (req == &handle->poll_req_1) {
    handle->submitted_events_1 = 0;
    mask_events = handle->mask_events_1;
  } else if (req == &handle->poll_req_2) {
    handle->submitted_events_2 = 0;
    mask_events = handle->mask_events_2;
  } else {
    assert(0);
    return;
  }

  if (!REQ_SUCCESS(req)) {
    /* Error. */
    if (handle->events != 0) {
      err = GET_REQ_ERROR(req);
      handle->events = 0; /* Stop the watcher */
      handle->poll_cb(handle, uv_translate_sys_error(err), 0);
    }
  } else {
    /* Got some events. */
    int events = req->u.io.overlapped.InternalHigh &
        handle->events & ~mask_events;
    if (events != 0) {
      handle->poll_cb(handle, 0, events);
    }
  }

  if ((handle->events & ~(handle->submitted_events_1 |
      handle->submitted_events_2)) != 0) {
    uv__slow_poll_submit_poll_req(loop, handle);
  } else if ((handle->flags & UV_HANDLE_CLOSING) &&
             handle->submitted_events_1 == 0 &&
             handle->submitted_events_2 == 0) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
  }
}


int uv_poll_init(uv_loop_t* loop, uv_poll_t* handle, int fd) {
  return uv_poll_init_socket(loop, handle, (SOCKET) uv__get_osfhandle(fd));
}


int uv_poll_init_socket(uv_loop_t* loop, uv_poll_t* handle,
    uv_os_sock_t socket) {
  WSAPROTOCOL_INFOW protocol_info;
  int len;
  SOCKET peer_socket, base_socket;
  DWORD bytes;
  DWORD yes = 1;

  /* Set the socket to nonblocking mode. */
  if (ioctlsocket(socket, FIONBIO, &yes) == SOCKET_ERROR)
    return uv_translate_sys_error(WSAGetLastError());

  /* Try to obtain a base handle for the socket. This increases the chances
   * that we find an AFD handle and are able to use the fast poll mechanism.
   * This will always fail on Windows XP/2k3, since they don't support the
   * SIO_BASE_HANDLE ioctl. */
#ifndef NDEBUG
  base_socket = INVALID_SOCKET;
#endif

  if (WSAIoctl(socket,
               SIO_BASE_HANDLE,
               NULL,
               0,
               &base_socket,
               sizeof base_socket,
               &bytes,
               NULL,
               NULL) == 0) {
    assert(base_socket != 0 && base_socket != INVALID_SOCKET);
    socket = base_socket;
  }

  uv__handle_init(loop, (uv_handle_t*) handle, UV_POLL);
  handle->socket = socket;
  handle->events = 0;

  /* Obtain protocol information about the socket. */
  len = sizeof protocol_info;
  if (getsockopt(socket,
                 SOL_SOCKET,
                 SO_PROTOCOL_INFOW,
                 (char*) &protocol_info,
                 &len) != 0) {
    return uv_translate_sys_error(WSAGetLastError());
  }

  /* Get the peer socket that is needed to enable fast poll. If the returned
   * value is INVALID_SOCKET, the protocol is not implemented by MSAFD and
   * we'll have to use slow mode. */
  peer_socket = uv__fast_poll_get_peer_socket(loop, &protocol_info);
  if (peer_socket != INVALID_SOCKET) {
    /* Initialize fast poll specific fields. */
    handle->peer_socket = peer_socket;
  } else {
    /* Initialize slow poll specific fields. */
    handle->flags |= UV_HANDLE_POLL_SLOW;
  }

  /* Initialize 2 poll reqs. */
  handle->submitted_events_1 = 0;
  UV_REQ_INIT(&handle->poll_req_1, UV_POLL_REQ);
  handle->poll_req_1.data = handle;

  handle->submitted_events_2 = 0;
  UV_REQ_INIT(&handle->poll_req_2, UV_POLL_REQ);
  handle->poll_req_2.data = handle;

  return 0;
}


static int uv__poll_set(uv_poll_t* handle, int events, uv_poll_cb cb) {
  int submitted_events;

  assert(handle->type == UV_POLL);
  assert(!(handle->flags & UV_HANDLE_CLOSING));
  assert((events & ~(UV_READABLE | UV_WRITABLE | UV_DISCONNECT)) == 0);

  handle->events = events;
  handle->poll_cb = cb;

  if (handle->events == 0) {
    uv__handle_stop(handle);
    return 0;
  }

  uv__handle_start(handle);
  submitted_events = handle->submitted_events_1 | handle->submitted_events_2;

  if (handle->events & ~submitted_events) {
    if (handle->flags & UV_HANDLE_POLL_SLOW) {
      uv__slow_poll_submit_poll_req(handle->loop, handle);
    } else {
      uv__fast_poll_submit_poll_req(handle->loop, handle);
    }
  }

  return 0;
}


int uv_poll_start(uv_poll_t* handle, int events, uv_poll_cb cb) {
  return uv__poll_set(handle, events, cb);
}


int uv_poll_stop(uv_poll_t* handle) {
  return uv__poll_set(handle, 0, handle->poll_cb);
}


void uv_process_poll_req(uv_loop_t* loop, uv_poll_t* handle, uv_req_t* req) {
  if (!(handle->flags & UV_HANDLE_POLL_SLOW)) {
    uv__fast_poll_process_poll_req(loop, handle, req);
  } else {
    uv__slow_poll_process_poll_req(loop, handle, req);
  }
}


int uv_poll_close(uv_loop_t* loop, uv_poll_t* handle) {
  AFD_POLL_INFO afd_poll_info;
  DWORD error;
  int result;

  handle->events = 0;
  uv__handle_closing(handle);

  if (handle->submitted_events_1 == 0 &&
      handle->submitted_events_2 == 0) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
    return 0;
  }

  if (handle->flags & UV_HANDLE_POLL_SLOW)
    return 0;

  /* Cancel outstanding poll requests by executing another, unique poll
   * request that forces the outstanding ones to return. */
  afd_poll_info.Exclusive = TRUE;
  afd_poll_info.NumberOfHandles = 1;
  afd_poll_info.Timeout.QuadPart = INT64_MAX;
  afd_poll_info.Handles[0].Handle = (HANDLE) handle->socket;
  afd_poll_info.Handles[0].Status = 0;
  afd_poll_info.Handles[0].Events = AFD_POLL_ALL;

  result = uv_msafd_poll(handle->socket,
                         &afd_poll_info,
                         uv__get_afd_poll_info_dummy(),
                         uv__get_overlapped_dummy());

  if (result == SOCKET_ERROR) {
    error = WSAGetLastError();
    if (error != WSA_IO_PENDING)
      return uv_translate_sys_error(error);
  }

  return 0;
}


void uv_poll_endgame(uv_loop_t* loop, uv_poll_t* handle) {
  assert(handle->flags & UV_HANDLE_CLOSING);
  assert(!(handle->flags & UV_HANDLE_CLOSED));

  assert(handle->submitted_events_1 == 0);
  assert(handle->submitted_events_2 == 0);

  uv__handle_close(handle);
}
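

/* Usage sketch (illustrative only, excluded from the build): a minimal
 * example of driving the public uv_poll API implemented above. `make_socket`
 * is a hypothetical helper returning a connected SOCKET; everything else is
 * the real libuv API. */
#if 0
#include "uv.h"

static void on_poll(uv_poll_t* handle, int status, int events) {
  if (status < 0) {
    /* The watcher has already been stopped; status is a libuv error code. */
    return;
  }
  if (events & UV_READABLE) {
    /* Socket is readable: recv() until it would block. */
  }
  if (events & UV_WRITABLE) {
    /* Socket is writable: flush any pending output. */
  }
}

int example(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_poll_t poll_handle;
  uv_os_sock_t sock = make_socket(); /* hypothetical helper */

  uv_poll_init_socket(loop, &poll_handle, sock);
  uv_poll_start(&poll_handle, UV_READABLE | UV_WRITABLE, on_poll);
  return uv_run(loop, UV_RUN_DEFAULT);
}
#endif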