/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "internal.h"

#include <stddef.h> /* NULL */
#include <stdio.h> /* printf */
#include <stdlib.h>
#include <string.h> /* strerror */
#include <errno.h>
#include <assert.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <limits.h> /* INT_MAX, PATH_MAX, IOV_MAX */
#include <sys/uio.h> /* writev */
#include <sys/resource.h> /* getrusage */
#include <pwd.h>

#ifdef __sun
# include <sys/filio.h>
# include <sys/types.h>
# include <sys/wait.h>
#endif

#ifdef __APPLE__
# include <mach-o/dyld.h> /* _NSGetExecutablePath */
# include <sys/filio.h>
# if defined(O_CLOEXEC)
#  define UV__O_CLOEXEC O_CLOEXEC
# endif
#endif

#if defined(__DragonFly__)      || \
    defined(__FreeBSD__)        || \
    defined(__FreeBSD_kernel__)
# include <sys/sysctl.h>
# include <sys/filio.h>
# include <sys/wait.h>
# define UV__O_CLOEXEC O_CLOEXEC
# if defined(__FreeBSD__) && __FreeBSD__ >= 10
#  define uv__accept4 accept4
#  define UV__SOCK_NONBLOCK SOCK_NONBLOCK
#  define UV__SOCK_CLOEXEC  SOCK_CLOEXEC
# endif
# if !defined(F_DUP2FD_CLOEXEC) && defined(_F_DUP2FD_CLOEXEC)
#  define F_DUP2FD_CLOEXEC _F_DUP2FD_CLOEXEC
# endif
#endif

#if defined(__ANDROID_API__) && __ANDROID_API__ < 21
# include <dlfcn.h> /* for dlsym */
#endif

#if defined(__MVS__)
#include <sys/ioctl.h>
#endif

static int uv__run_pending(uv_loop_t* loop);

/* Verify that uv_buf_t is ABI-compatible with struct iovec. */
STATIC_ASSERT(sizeof(uv_buf_t) == sizeof(struct iovec));
STATIC_ASSERT(sizeof(&((uv_buf_t*) 0)->base) ==
              sizeof(((struct iovec*) 0)->iov_base));
STATIC_ASSERT(sizeof(&((uv_buf_t*) 0)->len) ==
              sizeof(((struct iovec*) 0)->iov_len));
STATIC_ASSERT(offsetof(uv_buf_t, base) == offsetof(struct iovec, iov_base));
STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len));

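/* Illustrative note (not from the original source): the layout checks above
 * are what allow an array of uv_buf_t to be handed straight to writev(), e.g.
 *
 *   uv_buf_t bufs[2] = { uv_buf_init("hi", 2), uv_buf_init("\n", 1) };
 *   writev(fd, (struct iovec*) bufs, 2);
 *
 * so no per-call translation between the two structs is ever needed.
 */
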
uint64_t uv_hrtime(void) {
  return uv__hrtime(UV_CLOCK_PRECISE);
}

void uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
  assert(!uv__is_closing(handle));

  handle->flags |= UV_CLOSING;
  handle->close_cb = close_cb;

  switch (handle->type) {
  case UV_NAMED_PIPE:
    uv__pipe_close((uv_pipe_t*)handle);
    break;
  case UV_TTY:
    uv__stream_close((uv_stream_t*)handle);
    break;
  case UV_TCP:
    uv__tcp_close((uv_tcp_t*)handle);
    break;
  case UV_UDP:
    uv__udp_close((uv_udp_t*)handle);
    break;
  case UV_PREPARE:
    uv__prepare_close((uv_prepare_t*)handle);
    break;
  case UV_CHECK:
    uv__check_close((uv_check_t*)handle);
    break;
  case UV_IDLE:
    uv__idle_close((uv_idle_t*)handle);
    break;
  case UV_ASYNC:
    uv__async_close((uv_async_t*)handle);
    break;
  case UV_TIMER:
    uv__timer_close((uv_timer_t*)handle);
    break;
  case UV_PROCESS:
    uv__process_close((uv_process_t*)handle);
    break;
  case UV_FS_EVENT:
    uv__fs_event_close((uv_fs_event_t*)handle);
    break;
  case UV_POLL:
    uv__poll_close((uv_poll_t*)handle);
    break;
  case UV_FS_POLL:
    uv__fs_poll_close((uv_fs_poll_t*)handle);
    break;
  case UV_SIGNAL:
    uv__signal_close((uv_signal_t*) handle);
    /* Signal handles may not be closed immediately. The signal code will
     * itself call uv__make_close_pending() whenever appropriate. */
    return;
  default:
    assert(0);
  }

  uv__make_close_pending(handle);
}

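/* Usage sketch (illustrative, not part of the original file): the handle's
 * memory must stay valid until the close callback has run, e.g.
 *
 *   void on_close(uv_handle_t* h) { free(h); }
 *   ...
 *   uv_close((uv_handle_t*) tcp, on_close);
 *
 * Passing NULL as the callback is also allowed when no cleanup is needed.
 */
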
int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value) {
  int r;
  int fd;
  socklen_t len;

  if (handle == NULL || value == NULL)
    return -EINVAL;

  if (handle->type == UV_TCP || handle->type == UV_NAMED_PIPE)
    fd = uv__stream_fd((uv_stream_t*) handle);
  else if (handle->type == UV_UDP)
    fd = ((uv_udp_t *) handle)->io_watcher.fd;
  else
    return -ENOTSUP;

  len = sizeof(*value);

  if (*value == 0)
    r = getsockopt(fd, SOL_SOCKET, optname, value, &len);
  else
    r = setsockopt(fd, SOL_SOCKET, optname, (const void*) value, len);

  if (r < 0)
    return -errno;

  return 0;
}

void uv__make_close_pending(uv_handle_t* handle) {
  assert(handle->flags & UV_CLOSING);
  assert(!(handle->flags & UV_CLOSED));
  handle->next_closing = handle->loop->closing_handles;
  handle->loop->closing_handles = handle;
}

int uv__getiovmax(void) {
#if defined(IOV_MAX)
  return IOV_MAX;
#elif defined(_SC_IOV_MAX)
  static int iovmax = -1;
  if (iovmax == -1) {
    iovmax = sysconf(_SC_IOV_MAX);
    /* On some embedded devices (arm-linux-uclibc based ip cameras),
     * sysconf(_SC_IOV_MAX) cannot return the correct value. It returns -1
     * with errno set to EINPROGRESS. Degrade the value to 1 in that case.
     */
    if (iovmax == -1) iovmax = 1;
  }
  return iovmax;
#else
  return 1024;
#endif
}

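/* Illustrative note (not from the original source): callers of writev()-style
 * APIs typically clamp their vector count against this limit, e.g.
 *
 *   if (iovcnt > uv__getiovmax())
 *     iovcnt = uv__getiovmax();
 *
 * so a single call never passes more iovecs than the kernel accepts.
 */
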
static void uv__finish_close(uv_handle_t* handle) {
  /* Note: while the handle is in the UV_CLOSING state now, it's still possible
   * for it to be active in the sense that uv__is_active() returns true.
   * A good example is when the user calls uv_shutdown(), immediately followed
   * by uv_close(). The handle is considered active at this point because the
   * completion of the shutdown req is still pending.
   */
  assert(handle->flags & UV_CLOSING);
  assert(!(handle->flags & UV_CLOSED));
  handle->flags |= UV_CLOSED;

  switch (handle->type) {
  case UV_PREPARE:
  case UV_CHECK:
  case UV_IDLE:
  case UV_ASYNC:
  case UV_TIMER:
  case UV_PROCESS:
  case UV_FS_EVENT:
  case UV_FS_POLL:
  case UV_POLL:
  case UV_SIGNAL:
    break;

  case UV_NAMED_PIPE:
  case UV_TCP:
  case UV_TTY:
    uv__stream_destroy((uv_stream_t*)handle);
    break;

  case UV_UDP:
    uv__udp_finish_close((uv_udp_t*)handle);
    break;

  default:
    assert(0);
    break;
  }

  uv__handle_unref(handle);
  QUEUE_REMOVE(&handle->handle_queue);

  if (handle->close_cb) {
    handle->close_cb(handle);
  }
}

static void uv__run_closing_handles(uv_loop_t* loop) {
  uv_handle_t* p;
  uv_handle_t* q;

  p = loop->closing_handles;
  loop->closing_handles = NULL;

  while (p) {
    q = p->next_closing;
    uv__finish_close(p);
    p = q;
  }
}

int uv_is_closing(const uv_handle_t* handle) {
  return uv__is_closing(handle);
}

int uv_backend_fd(const uv_loop_t* loop) {
  return loop->backend_fd;
}

int uv_backend_timeout(const uv_loop_t* loop) {
  if (loop->stop_flag != 0)
    return 0;

  if (!uv__has_active_handles(loop) && !uv__has_active_reqs(loop))
    return 0;

  if (!QUEUE_EMPTY(&loop->idle_handles))
    return 0;

  if (!QUEUE_EMPTY(&loop->pending_queue))
    return 0;

  if (loop->closing_handles)
    return 0;

  return uv__next_timeout(loop);
}

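/* Illustrative sketch (not part of the original file): embedders that drive
 * libuv from their own event loop typically combine the two functions above,
 * e.g. with poll(2):
 *
 *   struct pollfd p = { uv_backend_fd(loop), POLLIN, 0 };
 *   poll(&p, 1, uv_backend_timeout(loop));
 *   uv_run(loop, UV_RUN_NOWAIT);
 *
 * A timeout of 0 means the loop already has work that must run without
 * blocking; -1 means it may block indefinitely.
 */
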
static int uv__loop_alive(const uv_loop_t* loop) {
  return uv__has_active_handles(loop) ||
         uv__has_active_reqs(loop) ||
         loop->closing_handles != NULL;
}

int uv_loop_alive(const uv_loop_t* loop) {
  return uv__loop_alive(loop);
}

int uv_run(uv_loop_t* loop, uv_run_mode mode) {
  int timeout;
  int r;
  int ran_pending;

  r = uv__loop_alive(loop);
  if (!r)
    uv__update_time(loop);

  while (r != 0 && loop->stop_flag == 0) {
    uv__update_time(loop);
    uv__run_timers(loop);
    ran_pending = uv__run_pending(loop);
    uv__run_idle(loop);
    uv__run_prepare(loop);

    timeout = 0;
    if ((mode == UV_RUN_ONCE && !ran_pending) || mode == UV_RUN_DEFAULT)
      timeout = uv_backend_timeout(loop);

    uv__io_poll(loop, timeout);
    uv__run_check(loop);
    uv__run_closing_handles(loop);

    if (mode == UV_RUN_ONCE) {
      /* UV_RUN_ONCE implies forward progress: at least one callback must have
       * been invoked when it returns. uv__io_poll() can return without doing
       * I/O (meaning: no callbacks) when its timeout expires - which means we
       * have pending timers that satisfy the forward progress constraint.
       *
       * UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from
       * the check.
       */
      uv__update_time(loop);
      uv__run_timers(loop);
    }

    r = uv__loop_alive(loop);
    if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT)
      break;
  }

  /* The if statement lets gcc compile it to a conditional store. Avoids
   * dirtying a cache line.
   */
  if (loop->stop_flag != 0)
    loop->stop_flag = 0;

  return r;
}

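/* Usage sketch (illustrative, not part of the original file): a typical
 * program blocks in a single call until no active handles or requests remain,
 *
 *   uv_loop_t* loop = uv_default_loop();
 *   ...register handles...
 *   uv_run(loop, UV_RUN_DEFAULT);
 *
 * while UV_RUN_ONCE and UV_RUN_NOWAIT run at most one iteration, blocking and
 * non-blocking respectively.
 */
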
void uv_update_time(uv_loop_t* loop) {
  uv__update_time(loop);
}

int uv_is_active(const uv_handle_t* handle) {
  return uv__is_active(handle);
}

/* Open a socket in non-blocking close-on-exec mode, atomically if possible. */
int uv__socket(int domain, int type, int protocol) {
  int sockfd;
  int err;

#if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC)
  sockfd = socket(domain, type | SOCK_NONBLOCK | SOCK_CLOEXEC, protocol);
  if (sockfd != -1)
    return sockfd;

  if (errno != EINVAL)
    return -errno;
#endif

  sockfd = socket(domain, type, protocol);
  if (sockfd == -1)
    return -errno;

  err = uv__nonblock(sockfd, 1);
  if (err == 0)
    err = uv__cloexec(sockfd, 1);

  if (err) {
    uv__close(sockfd);
    return err;
  }

#if defined(SO_NOSIGPIPE)
  {
    int on = 1;
    setsockopt(sockfd, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on));
  }
#endif

  return sockfd;
}

/* get a file pointer to a file in read-only and close-on-exec mode */
FILE* uv__open_file(const char* path) {
  int fd;
  FILE* fp;

  fd = uv__open_cloexec(path, O_RDONLY);
  if (fd < 0)
    return NULL;

  fp = fdopen(fd, "r");
  if (fp == NULL)
    uv__close(fd);

  return fp;
}

int uv__accept(int sockfd) {
  int peerfd;
  int err;

  assert(sockfd >= 0);

  while (1) {
#if defined(__linux__) || (defined(__FreeBSD__) && __FreeBSD__ >= 10)
    static int no_accept4;

    if (no_accept4)
      goto skip;

    peerfd = uv__accept4(sockfd,
                         NULL,
                         NULL,
                         UV__SOCK_NONBLOCK|UV__SOCK_CLOEXEC);
    if (peerfd != -1)
      return peerfd;

    if (errno == EINTR)
      continue;

    if (errno != ENOSYS)
      return -errno;

    no_accept4 = 1;
skip:
#endif

    peerfd = accept(sockfd, NULL, NULL);
    if (peerfd == -1) {
      if (errno == EINTR)
        continue;
      return -errno;
    }

    err = uv__cloexec(peerfd, 1);
    if (err == 0)
      err = uv__nonblock(peerfd, 1);

    if (err) {
      uv__close(peerfd);
      return err;
    }

    return peerfd;
  }
}

int uv__close_nocheckstdio(int fd) {
  int saved_errno;
  int rc;

  assert(fd > -1);  /* Catch uninitialized io_watcher.fd bugs. */

  saved_errno = errno;
  rc = close(fd);
  if (rc == -1) {
    rc = -errno;
    if (rc == -EINTR || rc == -EINPROGRESS)
      rc = 0;  /* The close is in progress, not an error. */
    errno = saved_errno;
  }

  return rc;
}

int uv__close(int fd) {
  assert(fd > STDERR_FILENO);  /* Catch stdio close bugs. */
#if defined(__MVS__)
  epoll_file_close(fd);
#endif
  return uv__close_nocheckstdio(fd);
}

int uv__nonblock_ioctl(int fd, int set) {
  int r;

  do
    r = ioctl(fd, FIONBIO, &set);
  while (r == -1 && errno == EINTR);

  if (r)
    return -errno;

  return 0;
}

int uv__cloexec_ioctl(int fd, int set) {
  int r;

  do
    r = ioctl(fd, set ? FIOCLEX : FIONCLEX);
  while (r == -1 && errno == EINTR);

  if (r)
    return -errno;

  return 0;
}

int uv__nonblock_fcntl(int fd, int set) {
  int flags;
  int r;

  do
    r = fcntl(fd, F_GETFL);
  while (r == -1 && errno == EINTR);

  if (r == -1)
    return -errno;

  /* Bail out now if already set/clear. */
  if (!!(r & O_NONBLOCK) == !!set)
    return 0;

  if (set)
    flags = r | O_NONBLOCK;
  else
    flags = r & ~O_NONBLOCK;

  do
    r = fcntl(fd, F_SETFL, flags);
  while (r == -1 && errno == EINTR);

  if (r)
    return -errno;

  return 0;
}

int uv__cloexec_fcntl(int fd, int set) {
  int flags;
  int r;

  do
    r = fcntl(fd, F_GETFD);
  while (r == -1 && errno == EINTR);

  if (r == -1)
    return -errno;

  /* Bail out now if already set/clear. */
  if (!!(r & FD_CLOEXEC) == !!set)
    return 0;

  if (set)
    flags = r | FD_CLOEXEC;
  else
    flags = r & ~FD_CLOEXEC;

  do
    r = fcntl(fd, F_SETFD, flags);
  while (r == -1 && errno == EINTR);

  if (r)
    return -errno;

  return 0;
}

/* This function is not execve-safe: there is a race window
 * between the call to dup() and fcntl(FD_CLOEXEC).
 */
int uv__dup(int fd) {
  int err;

  fd = dup(fd);

  if (fd == -1)
    return -errno;

  err = uv__cloexec(fd, 1);
  if (err) {
    uv__close(fd);
    return err;
  }

  return fd;
}

ssize_t uv__recvmsg(int fd, struct msghdr* msg, int flags) {
  struct cmsghdr* cmsg;
  ssize_t rc;
  int* pfd;
  int* end;
#if defined(__linux__)
  static int no_msg_cmsg_cloexec;
  if (no_msg_cmsg_cloexec == 0) {
    rc = recvmsg(fd, msg, flags | 0x40000000);  /* MSG_CMSG_CLOEXEC */
    if (rc != -1)
      return rc;
    if (errno != EINVAL)
      return -errno;
    rc = recvmsg(fd, msg, flags);
    if (rc == -1)
      return -errno;
    no_msg_cmsg_cloexec = 1;
  } else {
    rc = recvmsg(fd, msg, flags);
  }
#else
  rc = recvmsg(fd, msg, flags);
#endif
  if (rc == -1)
    return -errno;
  if (msg->msg_controllen == 0)
    return rc;
  for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg))
    if (cmsg->cmsg_type == SCM_RIGHTS)
      for (pfd = (int*) CMSG_DATA(cmsg),
           end = (int*) ((char*) cmsg + cmsg->cmsg_len);
           pfd < end;
           pfd += 1)
        uv__cloexec(*pfd, 1);
  return rc;
}

int uv_cwd(char* buffer, size_t* size) {
  if (buffer == NULL || size == NULL)
    return -EINVAL;

  if (getcwd(buffer, *size) == NULL)
    return -errno;

  *size = strlen(buffer);
  if (*size > 1 && buffer[*size - 1] == '/') {
    buffer[*size - 1] = '\0';
    (*size)--;
  }

  return 0;
}

int uv_chdir(const char* dir) {
  if (chdir(dir))
    return -errno;

  return 0;
}

void uv_disable_stdio_inheritance(void) {
  int fd;

  /* Set the CLOEXEC flag on all open descriptors. Unconditionally try the
   * first 16 file descriptors. After that, bail out after the first error.
   */
  for (fd = 0; ; fd++)
    if (uv__cloexec(fd, 1) && fd > 15)
      break;
}

int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) {
  int fd_out;

  switch (handle->type) {
  case UV_TCP:
  case UV_NAMED_PIPE:
  case UV_TTY:
    fd_out = uv__stream_fd((uv_stream_t*) handle);
    break;

  case UV_UDP:
    fd_out = ((uv_udp_t *) handle)->io_watcher.fd;
    break;

  case UV_POLL:
    fd_out = ((uv_poll_t *) handle)->io_watcher.fd;
    break;

  default:
    return -EINVAL;
  }

  if (uv__is_closing(handle) || fd_out == -1)
    return -EBADF;

  *fd = fd_out;
  return 0;
}

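/* Usage sketch (illustrative, not part of the original file): callers peek at
 * the underlying descriptor without taking ownership of it, e.g.
 *
 *   uv_os_fd_t fd;
 *   if (uv_fileno((uv_handle_t*) &tcp, &fd) == 0) {
 *     int on = 1;
 *     setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
 *   }
 *
 * The descriptor remains owned and eventually closed by libuv.
 */
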
static int uv__run_pending(uv_loop_t* loop) {
  QUEUE* q;
  QUEUE pq;
  uv__io_t* w;

  if (QUEUE_EMPTY(&loop->pending_queue))
    return 0;

  QUEUE_MOVE(&loop->pending_queue, &pq);

  while (!QUEUE_EMPTY(&pq)) {
    q = QUEUE_HEAD(&pq);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);
    w = QUEUE_DATA(q, uv__io_t, pending_queue);
    w->cb(loop, w, POLLOUT);
  }

  return 1;
}

static unsigned int next_power_of_two(unsigned int val) {
  val -= 1;
  val |= val >> 1;
  val |= val >> 2;
  val |= val >> 4;
  val |= val >> 8;
  val |= val >> 16;
  val += 1;
  return val;
}

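/* Worked example (illustrative, not from the original source): the shifts
 * smear the highest set bit of (val - 1) into every lower position, so
 *
 *   next_power_of_two(33) == 64
 *   next_power_of_two(64) == 64   (the initial "val -= 1" keeps exact powers)
 *
 * i.e. the result is the smallest power of two >= val.
 */
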
static void maybe_resize(uv_loop_t* loop, unsigned int len) {
  uv__io_t** watchers;
  void* fake_watcher_list;
  void* fake_watcher_count;
  unsigned int nwatchers;
  unsigned int i;

  if (len <= loop->nwatchers)
    return;

  /* Preserve fake watcher list and count at the end of the watchers */
  if (loop->watchers != NULL) {
    fake_watcher_list = loop->watchers[loop->nwatchers];
    fake_watcher_count = loop->watchers[loop->nwatchers + 1];
  } else {
    fake_watcher_list = NULL;
    fake_watcher_count = NULL;
  }

  nwatchers = next_power_of_two(len + 2) - 2;
  watchers = uv__realloc(loop->watchers,
                         (nwatchers + 2) * sizeof(loop->watchers[0]));

  if (watchers == NULL)
    abort();
  for (i = loop->nwatchers; i < nwatchers; i++)
    watchers[i] = NULL;
  watchers[nwatchers] = fake_watcher_list;
  watchers[nwatchers + 1] = fake_watcher_count;

  loop->watchers = watchers;
  loop->nwatchers = nwatchers;
}

void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) {
  assert(cb != NULL);
  assert(fd >= -1);
  QUEUE_INIT(&w->pending_queue);
  QUEUE_INIT(&w->watcher_queue);
  w->cb = cb;
  w->fd = fd;
  w->events = 0;
  w->pevents = 0;

#if defined(UV_HAVE_KQUEUE)
  w->rcount = 0;
  w->wcount = 0;
#endif /* defined(UV_HAVE_KQUEUE) */
}

void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP)));
  assert(0 != events);
  assert(w->fd >= 0);
  assert(w->fd < INT_MAX);

  w->pevents |= events;
  maybe_resize(loop, w->fd + 1);

#if !defined(__sun)
  /* The event ports backend needs to rearm all file descriptors on each and
   * every tick of the event loop but the other backends allow us to
   * short-circuit here if the event mask is unchanged.
   */
  if (w->events == w->pevents)
    return;
#endif

  if (QUEUE_EMPTY(&w->watcher_queue))
    QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);

  if (loop->watchers[w->fd] == NULL) {
    loop->watchers[w->fd] = w;
    loop->nfds++;
  }
}

void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP)));
  assert(0 != events);

  if (w->fd == -1)
    return;

  assert(w->fd >= 0);

  /* Happens when uv__io_stop() is called on a handle that was never started. */
  if ((unsigned) w->fd >= loop->nwatchers)
    return;

  w->pevents &= ~events;

  if (w->pevents == 0) {
    QUEUE_REMOVE(&w->watcher_queue);
    QUEUE_INIT(&w->watcher_queue);

    if (loop->watchers[w->fd] != NULL) {
      assert(loop->watchers[w->fd] == w);
      assert(loop->nfds > 0);
      loop->watchers[w->fd] = NULL;
      loop->nfds--;
      w->events = 0;
    }
  }
  else if (QUEUE_EMPTY(&w->watcher_queue))
    QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
}

void uv__io_close(uv_loop_t* loop, uv__io_t* w) {
  uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP);
  QUEUE_REMOVE(&w->pending_queue);

  /* Remove stale events for this file descriptor */
  uv__platform_invalidate_fd(loop, w->fd);
}

void uv__io_feed(uv_loop_t* loop, uv__io_t* w) {
  if (QUEUE_EMPTY(&w->pending_queue))
    QUEUE_INSERT_TAIL(&loop->pending_queue, &w->pending_queue);
}

int uv__io_active(const uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP)));
  assert(0 != events);
  return 0 != (w->pevents & events);
}

int uv_getrusage(uv_rusage_t* rusage) {
  struct rusage usage;

  if (getrusage(RUSAGE_SELF, &usage))
    return -errno;

  rusage->ru_utime.tv_sec = usage.ru_utime.tv_sec;
  rusage->ru_utime.tv_usec = usage.ru_utime.tv_usec;

  rusage->ru_stime.tv_sec = usage.ru_stime.tv_sec;
  rusage->ru_stime.tv_usec = usage.ru_stime.tv_usec;

#if !defined(__MVS__)
  rusage->ru_maxrss = usage.ru_maxrss;
  rusage->ru_ixrss = usage.ru_ixrss;
  rusage->ru_idrss = usage.ru_idrss;
  rusage->ru_isrss = usage.ru_isrss;
  rusage->ru_minflt = usage.ru_minflt;
  rusage->ru_majflt = usage.ru_majflt;
  rusage->ru_nswap = usage.ru_nswap;
  rusage->ru_inblock = usage.ru_inblock;
  rusage->ru_oublock = usage.ru_oublock;
  rusage->ru_msgsnd = usage.ru_msgsnd;
  rusage->ru_msgrcv = usage.ru_msgrcv;
  rusage->ru_nsignals = usage.ru_nsignals;
  rusage->ru_nvcsw = usage.ru_nvcsw;
  rusage->ru_nivcsw = usage.ru_nivcsw;
#endif

  return 0;
}

int uv__open_cloexec(const char* path, int flags) {
  int err;
  int fd;

#if defined(UV__O_CLOEXEC)
  static int no_cloexec;

  if (!no_cloexec) {
    fd = open(path, flags | UV__O_CLOEXEC);
    if (fd != -1)
      return fd;

    if (errno != EINVAL)
      return -errno;

    /* O_CLOEXEC not supported. */
    no_cloexec = 1;
  }
#endif

  fd = open(path, flags);
  if (fd == -1)
    return -errno;

  err = uv__cloexec(fd, 1);
  if (err) {
    uv__close(fd);
    return err;
  }

  return fd;
}

int uv__dup2_cloexec(int oldfd, int newfd) {
  int r;
#if defined(__FreeBSD__) && __FreeBSD__ >= 10
  r = dup3(oldfd, newfd, O_CLOEXEC);
  if (r == -1)
    return -errno;
  return r;
#elif defined(__FreeBSD__) && defined(F_DUP2FD_CLOEXEC)
  r = fcntl(oldfd, F_DUP2FD_CLOEXEC, newfd);
  if (r != -1)
    return r;
  if (errno != EINVAL)
    return -errno;
  /* Fall through. */
#elif defined(__linux__)
  static int no_dup3;
  if (!no_dup3) {
    do
      r = uv__dup3(oldfd, newfd, UV__O_CLOEXEC);
    while (r == -1 && errno == EBUSY);
    if (r != -1)
      return r;
    if (errno != ENOSYS)
      return -errno;
    /* Fall through. */
    no_dup3 = 1;
  }
#endif
  {
    int err;
    do
      r = dup2(oldfd, newfd);
#if defined(__linux__)
    while (r == -1 && errno == EBUSY);
#else
    while (0);  /* Never retry. */
#endif

    if (r == -1)
      return -errno;

    err = uv__cloexec(newfd, 1);
    if (err) {
      uv__close(newfd);
      return err;
    }

    return r;
  }
}

int uv_os_homedir(char* buffer, size_t* size) {
  uv_passwd_t pwd;
  char* buf;
  size_t len;
  int r;

  if (buffer == NULL || size == NULL || *size == 0)
    return -EINVAL;

  /* Check if the HOME environment variable is set first */
  buf = getenv("HOME");

  if (buf != NULL) {
    len = strlen(buf);

    if (len >= *size) {
      *size = len + 1;
      return -ENOBUFS;
    }

    memcpy(buffer, buf, len + 1);
    *size = len;

    return 0;
  }

  /* HOME is not set, so call uv__getpwuid_r() */
  r = uv__getpwuid_r(&pwd);

  if (r != 0) {
    return r;
  }

  len = strlen(pwd.homedir);

  if (len >= *size) {
    *size = len + 1;
    uv_os_free_passwd(&pwd);
    return -ENOBUFS;
  }

  memcpy(buffer, pwd.homedir, len + 1);
  *size = len;
  uv_os_free_passwd(&pwd);

  return 0;
}

int uv_os_tmpdir(char* buffer, size_t* size) {
  const char* buf;
  size_t len;

  if (buffer == NULL || size == NULL || *size == 0)
    return -EINVAL;

#define CHECK_ENV_VAR(name)                                                   \
  do {                                                                        \
    buf = getenv(name);                                                       \
    if (buf != NULL)                                                          \
      goto return_buffer;                                                     \
  }                                                                           \
  while (0)

  /* Check the TMPDIR, TMP, TEMP, and TEMPDIR environment variables in order */
  CHECK_ENV_VAR("TMPDIR");
  CHECK_ENV_VAR("TMP");
  CHECK_ENV_VAR("TEMP");
  CHECK_ENV_VAR("TEMPDIR");

#undef CHECK_ENV_VAR

  /* No temp environment variables defined */
#if defined(__ANDROID__)
  buf = "/data/local/tmp";
#else
  buf = "/tmp";
#endif

return_buffer:
  len = strlen(buf);

  if (len >= *size) {
    *size = len + 1;
    return -ENOBUFS;
  }

  /* The returned directory should not have a trailing slash. */
  if (len > 1 && buf[len - 1] == '/') {
    len--;
  }

  memcpy(buffer, buf, len + 1);
  buffer[len] = '\0';
  *size = len;

  return 0;
}

int uv__getpwuid_r(uv_passwd_t* pwd) {
  struct passwd pw;
  struct passwd* result;
  char* buf;
  uid_t uid;
  size_t bufsize;
  size_t name_size;
  size_t homedir_size;
  size_t shell_size;
  long initsize;
  int r;
#if defined(__ANDROID_API__) && __ANDROID_API__ < 21
  int (*getpwuid_r)(uid_t, struct passwd*, char*, size_t, struct passwd**);

  getpwuid_r = dlsym(RTLD_DEFAULT, "getpwuid_r");
  if (getpwuid_r == NULL)
    return -ENOSYS;
#endif

  if (pwd == NULL)
    return -EINVAL;

  initsize = sysconf(_SC_GETPW_R_SIZE_MAX);

  if (initsize <= 0)
    bufsize = 4096;
  else
    bufsize = (size_t) initsize;

  uid = geteuid();
  buf = NULL;

  for (;;) {
    uv__free(buf);
    buf = uv__malloc(bufsize);

    if (buf == NULL)
      return -ENOMEM;

    r = getpwuid_r(uid, &pw, buf, bufsize, &result);

    if (r != ERANGE)
      break;

    bufsize *= 2;
  }

  if (r != 0) {
    uv__free(buf);
    return -r;
  }

  if (result == NULL) {
    uv__free(buf);
    return -ENOENT;
  }

  /* Allocate memory for the username, shell, and home directory */
  name_size = strlen(pw.pw_name) + 1;
  homedir_size = strlen(pw.pw_dir) + 1;
  shell_size = strlen(pw.pw_shell) + 1;
  pwd->username = uv__malloc(name_size + homedir_size + shell_size);

  if (pwd->username == NULL) {
    uv__free(buf);
    return -ENOMEM;
  }

  /* Copy the username */
  memcpy(pwd->username, pw.pw_name, name_size);

  /* Copy the home directory */
  pwd->homedir = pwd->username + name_size;
  memcpy(pwd->homedir, pw.pw_dir, homedir_size);

  /* Copy the shell */
  pwd->shell = pwd->homedir + homedir_size;
  memcpy(pwd->shell, pw.pw_shell, shell_size);

  /* Copy the uid and gid */
  pwd->uid = pw.pw_uid;
  pwd->gid = pw.pw_gid;

  uv__free(buf);

  return 0;
}

void uv_os_free_passwd(uv_passwd_t* pwd) {
  if (pwd == NULL)
    return;

  /* The memory for name, shell, and homedir is allocated in a single
   * uv__malloc() call. The base of the allocation is stored in pwd->username,
   * so that is the only field that needs to be freed.
   */
  uv__free(pwd->username);
  pwd->username = NULL;
  pwd->shell = NULL;
  pwd->homedir = NULL;
}

int uv_os_get_passwd(uv_passwd_t* pwd) {
  return uv__getpwuid_r(pwd);
}

int uv_translate_sys_error(int sys_errno) {
  /* If < 0 then it's already a libuv error. */
  return sys_errno <= 0 ? sys_errno : -sys_errno;
}
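
/* Illustrative note (not from the original source): on Unix builds like this
 * one, libuv error codes are negated errno values, so for example
 *
 *   uv_translate_sys_error(EIO) == -EIO == UV_EIO
 *   uv_translate_sys_error(-EIO) == -EIO   (already translated, passed through)
 *
 * which is why the function only has to flip the sign of positive inputs.
 */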