/* fs.c — POSIX filesystem request implementations (libuv). */
  1. /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
  2. *
  3. * Permission is hereby granted, free of charge, to any person obtaining a copy
  4. * of this software and associated documentation files (the "Software"), to
  5. * deal in the Software without restriction, including without limitation the
  6. * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
  7. * sell copies of the Software, and to permit persons to whom the Software is
  8. * furnished to do so, subject to the following conditions:
  9. *
  10. * The above copyright notice and this permission notice shall be included in
  11. * all copies or substantial portions of the Software.
  12. *
  13. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  14. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  15. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  16. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  17. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  18. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  19. * IN THE SOFTWARE.
  20. */
  21. /* Caveat emptor: this file deviates from the libuv convention of returning
  22. * negated errno codes. Most uv_fs_*() functions map directly to the system
  23. * call of the same name. For more complex wrappers, it's easier to just
  24. * return -1 with errno set. The dispatcher in uv__fs_work() takes care of
  25. * getting the errno to the right place (req->result or as the return value.)
  26. */
  27. #include "uv.h"
  28. #include "internal.h"
  29. #include <errno.h>
  30. #include <dlfcn.h>
  31. #include <stdio.h>
  32. #include <stdlib.h>
  33. #include <string.h>
  34. #include <limits.h> /* PATH_MAX */
  35. #include <sys/types.h>
  36. #include <sys/socket.h>
  37. #include <sys/stat.h>
  38. #include <sys/time.h>
  39. #include <sys/uio.h>
  40. #include <pthread.h>
  41. #include <unistd.h>
  42. #include <fcntl.h>
  43. #include <poll.h>
  44. #if defined(__DragonFly__) || \
  45. defined(__FreeBSD__) || \
  46. defined(__FreeBSD_kernel__) || \
  47. defined(__OpenBSD__) || \
  48. defined(__NetBSD__)
  49. # define HAVE_PREADV 1
  50. #else
  51. # define HAVE_PREADV 0
  52. #endif
  53. #if defined(__linux__) || defined(__sun)
  54. # include <sys/sendfile.h>
  55. #endif
  56. #if defined(__APPLE__)
  57. # include <sys/sysctl.h>
  58. #elif defined(__linux__) && !defined(FICLONE)
  59. # include <sys/ioctl.h>
  60. # define FICLONE _IOW(0x94, 9, int)
  61. #endif
  62. #if defined(_AIX) && !defined(_AIX71)
  63. # include <utime.h>
  64. #endif
  65. #if defined(__APPLE__) || \
  66. defined(__DragonFly__) || \
  67. defined(__FreeBSD__) || \
  68. defined(__FreeBSD_kernel__) || \
  69. defined(__OpenBSD__) || \
  70. defined(__NetBSD__)
  71. # include <sys/param.h>
  72. # include <sys/mount.h>
  73. #elif defined(__sun) || \
  74. defined(__MVS__) || \
  75. defined(__NetBSD__) || \
  76. defined(__HAIKU__) || \
  77. defined(__QNX__)
  78. # include <sys/statvfs.h>
  79. #else
  80. # include <sys/statfs.h>
  81. #endif
  82. #if defined(_AIX) && _XOPEN_SOURCE <= 600
  83. extern char *mkdtemp(char *template); /* See issue #740 on AIX < 7 */
  84. #endif
/* Common prologue for every uv_fs_*() entry point: validate the request,
 * tag it with the operation subtype and reset all result/ownership fields.
 * Expects `req`, `loop` and `cb` to be in scope at the expansion site.
 */
#define INIT(subtype)                                                         \
  do {                                                                        \
    if (req == NULL)                                                          \
      return UV_EINVAL;                                                       \
    UV_REQ_INIT(req, UV_FS);                                                  \
    req->fs_type = UV_FS_ ## subtype;                                         \
    req->result = 0;                                                          \
    req->ptr = NULL;                                                          \
    req->loop = loop;                                                         \
    req->path = NULL;                                                         \
    req->new_path = NULL;                                                     \
    req->bufs = NULL;                                                         \
    req->cb = cb;                                                             \
  }                                                                           \
  while (0)

/* Record the request's path. Synchronous calls (cb == NULL) borrow the
 * caller's string; asynchronous calls must copy it because the caller's
 * buffer may be gone by the time the threadpool runs the request.
 */
#define PATH                                                                  \
  do {                                                                        \
    assert(path != NULL);                                                     \
    if (cb == NULL) {                                                         \
      req->path = path;                                                       \
    } else {                                                                  \
      req->path = uv__strdup(path);                                           \
      if (req->path == NULL)                                                  \
        return UV_ENOMEM;                                                     \
    }                                                                         \
  }                                                                           \
  while (0)

/* Two-path variant of PATH (rename, link, ...). The async copy packs both
 * strings into a single allocation; req->path owns it and req->new_path
 * points into its tail, so only req->path is ever freed.
 */
#define PATH2                                                                 \
  do {                                                                        \
    if (cb == NULL) {                                                         \
      req->path = path;                                                       \
      req->new_path = new_path;                                               \
    } else {                                                                  \
      size_t path_len;                                                        \
      size_t new_path_len;                                                    \
      path_len = strlen(path) + 1;                                            \
      new_path_len = strlen(new_path) + 1;                                    \
      req->path = uv__malloc(path_len + new_path_len);                        \
      if (req->path == NULL)                                                  \
        return UV_ENOMEM;                                                     \
      req->new_path = req->path + path_len;                                   \
      memcpy((void*) req->path, path, path_len);                              \
      memcpy((void*) req->new_path, new_path, new_path_len);                  \
    }                                                                         \
  }                                                                           \
  while (0)

/* Dispatch the prepared request: asynchronously via the threadpool when a
 * callback was supplied (returns 0 immediately), otherwise run the work
 * function inline and return its result.
 */
#define POST                                                                  \
  do {                                                                        \
    if (cb != NULL) {                                                         \
      uv__req_register(loop, req);                                            \
      uv__work_submit(loop,                                                   \
                      &req->work_req,                                         \
                      UV__WORK_FAST_IO,                                       \
                      uv__fs_work,                                            \
                      uv__fs_done);                                           \
      return 0;                                                               \
    }                                                                         \
    else {                                                                    \
      uv__fs_work(&req->work_req);                                            \
      return req->result;                                                     \
    }                                                                         \
  }                                                                           \
  while (0)
  148. static int uv__fs_close(int fd) {
  149. int rc;
  150. rc = uv__close_nocancel(fd);
  151. if (rc == -1)
  152. if (errno == EINTR || errno == EINPROGRESS)
  153. rc = 0; /* The close is in progress, not an error. */
  154. return rc;
  155. }
/* Flush req->file to stable storage. On Apple platforms this tries the
 * strongest primitive first and degrades gracefully; elsewhere it is a
 * plain fsync(). Returns 0 on success, -1 with errno set.
 */
static ssize_t uv__fs_fsync(uv_fs_t* req) {
#if defined(__APPLE__)
  /* Apple's fdatasync and fsync explicitly do NOT flush the drive write cache
   * to the drive platters. This is in contrast to Linux's fdatasync and fsync
   * which do, according to recent man pages. F_FULLFSYNC is Apple's equivalent
   * for flushing buffered data to permanent storage. If F_FULLFSYNC is not
   * supported by the file system we fall back to F_BARRIERFSYNC or fsync().
   * This is the same approach taken by sqlite, except sqlite does not issue
   * an F_BARRIERFSYNC call.
   */
  int r;

  r = fcntl(req->file, F_FULLFSYNC);
  if (r != 0)
    /* 85 is F_BARRIERFSYNC, spelled numerically so older SDKs that lack the
     * definition still compile. */
    r = fcntl(req->file, 85 /* F_BARRIERFSYNC */);  /* fsync + barrier */
  if (r != 0)
    r = fsync(req->file);
  return r;
#else
  return fsync(req->file);
#endif
}
/* Flush file *data* for req->file, using fdatasync() where it exists and
 * the safe superset fsync() elsewhere.
 */
static ssize_t uv__fs_fdatasync(uv_fs_t* req) {
#if defined(__linux__) || defined(__sun) || defined(__NetBSD__)
  return fdatasync(req->file);
#elif defined(__APPLE__)
  /* See the comment in uv__fs_fsync. */
  return uv__fs_fsync(req);
#else
  /* No fdatasync() on this platform; a full fsync() is stronger but correct. */
  return fsync(req->file);
#endif
}
/* Convert fractional seconds (double) to a timespec, truncating the
 * sub-second part to whole microseconds for consistency with the
 * timeval-based platforms.
 * NOTE(review): the cast-then-modulo trick yields a negative/incorrect
 * tv_nsec for negative timestamps — confirm callers never pass pre-epoch
 * times before relying on this for them.
 */
UV_UNUSED(static struct timespec uv__fs_to_timespec(double time)) {
  struct timespec ts;
  ts.tv_sec = time;
  ts.tv_nsec = (uint64_t)(time * 1000000) % 1000000 * 1000;
  return ts;
}
/* Convert fractional seconds (double) to a timeval with microsecond
 * resolution. Same negative-timestamp caveat as uv__fs_to_timespec.
 */
UV_UNUSED(static struct timeval uv__fs_to_timeval(double time)) {
  struct timeval tv;
  tv.tv_sec = time;
  tv.tv_usec = (uint64_t)(time * 1000000) % 1000000;
  return tv;
}
/* Set access/modification times (req->atime / req->mtime, fractional
 * seconds) on the open descriptor req->file, dispatching to whatever
 * primitive the platform offers. Returns 0 on success, -1 with errno set;
 * ENOSYS when the platform has no usable API.
 */
static ssize_t uv__fs_futime(uv_fs_t* req) {
#if defined(__linux__)                                                        \
    || defined(_AIX71)                                                        \
    || defined(__HAIKU__)
  /* utimesat() has nanosecond resolution but we stick to microseconds
   * for the sake of consistency with other platforms.
   */
  struct timespec ts[2];
  ts[0] = uv__fs_to_timespec(req->atime);
  ts[1] = uv__fs_to_timespec(req->mtime);
  return futimens(req->file, ts);
#elif defined(__APPLE__)                                                      \
    || defined(__DragonFly__)                                                 \
    || defined(__FreeBSD__)                                                   \
    || defined(__FreeBSD_kernel__)                                            \
    || defined(__NetBSD__)                                                    \
    || defined(__OpenBSD__)                                                   \
    || defined(__sun)
  struct timeval tv[2];
  tv[0] = uv__fs_to_timeval(req->atime);
  tv[1] = uv__fs_to_timeval(req->mtime);
# if defined(__sun)
  /* SunOS lacks futimes(); futimesat() with a NULL path acts on the fd. */
  return futimesat(req->file, NULL, tv);
# else
  return futimes(req->file, tv);
# endif
#elif defined(__MVS__)
  /* z/OS: change times through the __fchattr() attribute interface. */
  attrib_t atr;
  memset(&atr, 0, sizeof(atr));
  atr.att_mtimechg = 1;
  atr.att_atimechg = 1;
  atr.att_mtime = req->mtime;
  atr.att_atime = req->atime;
  return __fchattr(req->file, &atr, sizeof(atr));
#else
  errno = ENOSYS;
  return -1;
#endif
}
  238. static ssize_t uv__fs_mkdtemp(uv_fs_t* req) {
  239. return mkdtemp((char*) req->path) ? 0 : -1;
  240. }
/* Runtime-resolved pointer to mkostemp(3); stays NULL when the libc does
 * not provide it. Looked up with dlsym() so the binary also runs on
 * systems whose libc predates mkostemp.
 */
static int (*uv__mkostemp)(char*, int);

/* uv_once() initializer that performs the one-time dlsym() lookup. */
static void uv__mkostemp_initonce(void) {
  /* z/os doesn't have RTLD_DEFAULT but that's okay
   * because it doesn't have mkostemp(O_CLOEXEC) either.
   */
#ifdef RTLD_DEFAULT
  uv__mkostemp = (int (*)(char*, int)) dlsym(RTLD_DEFAULT, "mkostemp");

  /* We don't care about errors, but we do want to clean them up.
   * If there has been no error, then dlerror() will just return
   * NULL.
   */
  dlerror();
#endif  /* RTLD_DEFAULT */
}
/* Create and open a unique temporary file from the "...XXXXXX" template in
 * req->path (rewritten in place with the generated name). Prefers
 * mkostemp(O_CLOEXEC); falls back to mkstemp() + fcntl(FD_CLOEXEC) under
 * the loop's cloexec lock so a concurrent fork() cannot leak the fd.
 * Returns the open fd on success; on failure returns -1 with errno set and
 * truncates the template to "" so callers never see a half-built name.
 */
static int uv__fs_mkstemp(uv_fs_t* req) {
  static uv_once_t once = UV_ONCE_INIT;
  int r;
#ifdef O_CLOEXEC
  static int no_cloexec_support;
#endif
  static const char pattern[] = "XXXXXX";
  static const size_t pattern_size = sizeof(pattern) - 1;
  char* path;
  size_t path_length;

  path = (char*) req->path;
  path_length = strlen(path);

  /* EINVAL can be returned for 2 reasons:
      1. The template's last 6 characters were not XXXXXX
      2. open() didn't support O_CLOEXEC
     We want to avoid going to the fallback path in case
     of 1, so it's manually checked before. */
  if (path_length < pattern_size ||
      strcmp(path + path_length - pattern_size, pattern)) {
    errno = EINVAL;
    r = -1;
    goto clobber;
  }

  uv_once(&once, uv__mkostemp_initonce);

#ifdef O_CLOEXEC
  if (uv__load_relaxed(&no_cloexec_support) == 0 && uv__mkostemp != NULL) {
    r = uv__mkostemp(path, O_CLOEXEC);

    if (r >= 0)
      return r;

    /* If mkostemp() returns EINVAL, it means the kernel doesn't
       support O_CLOEXEC, so we just fallback to mkstemp() below. */
    if (errno != EINVAL)
      goto clobber;

    /* We set the static variable so that next calls don't even
       try to use mkostemp. */
    uv__store_relaxed(&no_cloexec_support, 1);
  }
#endif  /* O_CLOEXEC */

  /* Only async requests take the lock: sync calls run on the loop thread
   * where uv's fork handling already serializes with this. */
  if (req->cb != NULL)
    uv_rwlock_rdlock(&req->loop->cloexec_lock);

  r = mkstemp(path);

  /* In case of failure `uv__cloexec` will leave error in `errno`,
   * so it is enough to just set `r` to `-1`.
   */
  if (r >= 0 && uv__cloexec(r, 1) != 0) {
    r = uv__close(r);
    if (r != 0)
      abort();  /* close() of a fresh fd should never fail. */
    r = -1;
  }

  if (req->cb != NULL)
    uv_rwlock_rdunlock(&req->loop->cloexec_lock);

clobber:
  if (r < 0)
    path[0] = '\0';
  return r;
}
/* open(req->path) with req->flags|O_CLOEXEC. When the platform lacks
 * O_CLOEXEC, emulate it with fcntl(FD_CLOEXEC) while holding the loop's
 * cloexec read lock so a concurrent fork() cannot inherit the fd in the
 * window between open() and fcntl(). Returns the fd or -1 with errno set.
 */
static ssize_t uv__fs_open(uv_fs_t* req) {
#ifdef O_CLOEXEC
  return open(req->path, req->flags | O_CLOEXEC, req->mode);
#else  /* O_CLOEXEC */
  int r;

  /* Only async requests take the lock; see uv__fs_mkstemp. */
  if (req->cb != NULL)
    uv_rwlock_rdlock(&req->loop->cloexec_lock);

  r = open(req->path, req->flags, req->mode);

  /* In case of failure `uv__cloexec` will leave error in `errno`,
   * so it is enough to just set `r` to `-1`.
   */
  if (r >= 0 && uv__cloexec(r, 1) != 0) {
    r = uv__close(r);
    if (r != 0)
      abort();  /* close() of a fresh fd should never fail. */
    r = -1;
  }

  if (req->cb != NULL)
    uv_rwlock_rdunlock(&req->loop->cloexec_lock);

  return r;
#endif  /* O_CLOEXEC */
}
#if !HAVE_PREADV
/* Emulate preadv(2) with a loop of pread(2) calls: fill each buffer in
 * turn starting at file offset `off`. Stops at EOF or when all buffers
 * are full. Returns the total bytes read; if the very first read fails,
 * returns the negated errno (later errors are swallowed so the caller
 * still learns how much data arrived).
 */
static ssize_t uv__fs_preadv(uv_file fd,
                             uv_buf_t* bufs,
                             unsigned int nbufs,
                             off_t off) {
  uv_buf_t* buf;
  uv_buf_t* end;
  ssize_t result;
  ssize_t rc;
  size_t pos;   /* Fill position within the current buffer. */

  assert(nbufs > 0);

  result = 0;
  pos = 0;
  buf = bufs + 0;
  end = bufs + nbufs;

  for (;;) {
    do
      /* `off + result` keeps the file offset in sync with what we have
       * consumed so far; a short read simply retries at the new offset. */
      rc = pread(fd, buf->base + pos, buf->len - pos, off + result);
    while (rc == -1 && errno == EINTR);

    if (rc == 0)
      break;  /* EOF. */

    if (rc == -1 && result == 0)
      return UV__ERR(errno);

    if (rc == -1)
      break;  /* We read some data so return that, ignore the error. */

    pos += rc;
    result += rc;

    if (pos < buf->len)
      continue;  /* Current buffer not yet full; keep reading into it. */

    pos = 0;
    buf += 1;

    if (buf == end)
      break;  /* All buffers filled. */
  }

  return result;
}
#endif
/* Perform the read described by req: plain read()/readv() when
 * req->off < 0 (current file position), positional pread()/preadv()
 * otherwise. On Linux without compile-time preadv, probes the syscall
 * once and caches whether the kernel supports it. Frees req->bufs when
 * it was heap-allocated. Returns bytes read or -1 with errno set.
 */
static ssize_t uv__fs_read(uv_fs_t* req) {
#if defined(__linux__)
  static int no_preadv;  /* Sticky "kernel lacks preadv" flag. */
#endif
  unsigned int iovmax;
  ssize_t result;

  /* Clamp to the platform's IOV_MAX; callers get a short read instead of
   * an EINVAL from the kernel. */
  iovmax = uv__getiovmax();
  if (req->nbufs > iovmax)
    req->nbufs = iovmax;

  if (req->off < 0) {
    if (req->nbufs == 1)
      result = read(req->file, req->bufs[0].base, req->bufs[0].len);
    else
      result = readv(req->file, (struct iovec*) req->bufs, req->nbufs);
  } else {
    if (req->nbufs == 1) {
      result = pread(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
      goto done;
    }

#if HAVE_PREADV
    result = preadv(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
#else
    /* Deliberately unusual construct: the `retry:` label sits on the
     * if-branch so the ENOSYS path below can jump back into the
     * emulation block. Do not "clean this up" without understanding it. */
# if defined(__linux__)
    if (uv__load_relaxed(&no_preadv)) retry:
# endif
    {
      result = uv__fs_preadv(req->file, req->bufs, req->nbufs, req->off);
    }
# if defined(__linux__)
    else {
      result = uv__preadv(req->file,
                          (struct iovec*)req->bufs,
                          req->nbufs,
                          req->off);
      if (result == -1 && errno == ENOSYS) {
        uv__store_relaxed(&no_preadv, 1);
        goto retry;
      }
    }
# endif
#endif
  }

done:
  /* Early cleanup of bufs allocation, since we're done with it. */
  if (req->bufs != req->bufsml)
    uv__free(req->bufs);

  req->bufs = NULL;
  req->nbufs = 0;

#ifdef __PASE__
  /* PASE returns EOPNOTSUPP when reading a directory, convert to EISDIR */
  if (result == -1 && errno == EOPNOTSUPP) {
    struct stat buf;
    ssize_t rc;
    rc = fstat(req->file, &buf);
    if (rc == 0 && S_ISDIR(buf.st_mode)) {
      errno = EISDIR;
    }
  }
#endif

  return result;
}
/* Pre-10.8 macOS SDKs declare scandir()'s callbacks with non-const
 * dirent pointers; pick the matching qualifier so both build cleanly. */
#if defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_8)
#define UV_CONST_DIRENT uv__dirent_t
#else
#define UV_CONST_DIRENT const uv__dirent_t
#endif

/* scandir() filter: drop the "." and ".." pseudo-entries. */
static int uv__fs_scandir_filter(UV_CONST_DIRENT* dent) {
  return strcmp(dent->d_name, ".") != 0 && strcmp(dent->d_name, "..") != 0;
}

/* scandir() comparator: plain lexicographic order by entry name. */
static int uv__fs_scandir_sort(UV_CONST_DIRENT** a, UV_CONST_DIRENT** b) {
  return strcmp((*a)->d_name, (*b)->d_name);
}
/* List req->path with scandir(), sorted and with "."/".." filtered out.
 * Stores the entry array in req->ptr (freed later by uv_fs_req_cleanup)
 * and returns the entry count, or -1 with errno set.
 */
static ssize_t uv__fs_scandir(uv_fs_t* req) {
  uv__dirent_t** dents;
  int n;

  dents = NULL;
  n = scandir(req->path, &dents, uv__fs_scandir_filter, uv__fs_scandir_sort);

  /* NOTE: We will use nbufs as an index field */
  req->nbufs = 0;

  if (n == 0) {
    /* OS X still needs to deallocate some memory.
     * Memory was allocated using the system allocator, so use free() here.
     */
    free(dents);
    dents = NULL;
  } else if (n == -1) {
    return n;
  }

  req->ptr = dents;

  return n;
}
  462. static int uv__fs_opendir(uv_fs_t* req) {
  463. uv_dir_t* dir;
  464. dir = uv__malloc(sizeof(*dir));
  465. if (dir == NULL)
  466. goto error;
  467. dir->dir = opendir(req->path);
  468. if (dir->dir == NULL)
  469. goto error;
  470. req->ptr = dir;
  471. return 0;
  472. error:
  473. uv__free(dir);
  474. req->ptr = NULL;
  475. return -1;
  476. }
/* Fill the caller-provided dir->dirents array (capacity dir->nentries)
 * with the next batch of entries from the open directory in req->ptr,
 * skipping "." and "..". Entry names are strdup'ed (caller frees).
 * Returns the number of entries read (0 at end of directory) or -1 on
 * error, in which case any names copied so far are released.
 */
static int uv__fs_readdir(uv_fs_t* req) {
  uv_dir_t* dir;
  uv_dirent_t* dirent;
  struct dirent* res;
  unsigned int dirent_idx;
  unsigned int i;

  dir = req->ptr;
  dirent_idx = 0;

  while (dirent_idx < dir->nentries) {
    /* readdir() returns NULL on end of directory, as well as on error. errno
       is used to differentiate between the two conditions. */
    errno = 0;
    res = readdir(dir->dir);

    if (res == NULL) {
      if (errno != 0)
        goto error;
      break;
    }

    if (strcmp(res->d_name, ".") == 0 || strcmp(res->d_name, "..") == 0)
      continue;

    dirent = &dir->dirents[dirent_idx];
    dirent->name = uv__strdup(res->d_name);

    if (dirent->name == NULL)
      goto error;

    dirent->type = uv__fs_get_dirent_type(res);
    ++dirent_idx;
  }

  return dirent_idx;

error:
  /* Roll back: free every name duplicated in this call. */
  for (i = 0; i < dirent_idx; ++i) {
    uv__free((char*) dir->dirents[i].name);
    dir->dirents[i].name = NULL;
  }

  return -1;
}
  512. static int uv__fs_closedir(uv_fs_t* req) {
  513. uv_dir_t* dir;
  514. dir = req->ptr;
  515. if (dir->dir != NULL) {
  516. closedir(dir->dir);
  517. dir->dir = NULL;
  518. }
  519. uv__free(req->ptr);
  520. req->ptr = NULL;
  521. return 0;
  522. }
/* Query filesystem statistics for req->path via statfs()/statvfs()
 * (whichever the platform provides) and copy them into a heap-allocated
 * uv_statfs_t stored in req->ptr. Returns 0 on success, -1 with errno set.
 */
static int uv__fs_statfs(uv_fs_t* req) {
  uv_statfs_t* stat_fs;
  /* These platforms only have statvfs(); the rest use statfs(). */
#if defined(__sun)      || \
    defined(__MVS__)    || \
    defined(__NetBSD__) || \
    defined(__HAIKU__)  || \
    defined(__QNX__)
  struct statvfs buf;

  if (0 != statvfs(req->path, &buf))
#else
  struct statfs buf;

  if (0 != statfs(req->path, &buf))
#endif /* defined(__sun) */
    return -1;

  stat_fs = uv__malloc(sizeof(*stat_fs));
  if (stat_fs == NULL) {
    errno = ENOMEM;
    return -1;
  }

  /* statvfs has no f_type; OpenBSD's statfs also lacks it. */
#if defined(__sun)        || \
    defined(__MVS__)      || \
    defined(__OpenBSD__)  || \
    defined(__NetBSD__)   || \
    defined(__HAIKU__)    || \
    defined(__QNX__)
  stat_fs->f_type = 0;  /* f_type is not supported. */
#else
  stat_fs->f_type = buf.f_type;
#endif
  stat_fs->f_bsize = buf.f_bsize;
  stat_fs->f_blocks = buf.f_blocks;
  stat_fs->f_bfree = buf.f_bfree;
  stat_fs->f_bavail = buf.f_bavail;
  stat_fs->f_files = buf.f_files;
  stat_fs->f_ffree = buf.f_ffree;

  req->ptr = stat_fs;
  return 0;
}
  561. static ssize_t uv__fs_pathmax_size(const char* path) {
  562. ssize_t pathmax;
  563. pathmax = pathconf(path, _PC_PATH_MAX);
  564. if (pathmax == -1)
  565. pathmax = UV__PATH_MAX;
  566. return pathmax;
  567. }
/* Read the target of the symlink at req->path into a freshly allocated,
 * NUL-terminated string stored in req->ptr. Returns 0 on success, -1 with
 * errno set.
 */
static ssize_t uv__fs_readlink(uv_fs_t* req) {
  ssize_t maxlen;
  ssize_t len;
  char* buf;

#if defined(_POSIX_PATH_MAX) || defined(PATH_MAX)
  maxlen = uv__fs_pathmax_size(req->path);
#else
  /* We may not have a real PATH_MAX. Read size of link. */
  struct stat st;
  int ret;
  ret = lstat(req->path, &st);
  if (ret != 0)
    return -1;
  if (!S_ISLNK(st.st_mode)) {
    errno = EINVAL;
    return -1;
  }

  maxlen = st.st_size;

  /* According to readlink(2) lstat can report st_size == 0
     for some symlinks, such as those in /proc or /sys. */
  if (maxlen == 0)
    maxlen = uv__fs_pathmax_size(req->path);
#endif

  buf = uv__malloc(maxlen);

  if (buf == NULL) {
    errno = ENOMEM;
    return -1;
  }

#if defined(__MVS__)
  /* z/OS needs EBCDIC/ASCII conversion handling around readlink(). */
  len = os390_readlink(req->path, buf, maxlen);
#else
  len = readlink(req->path, buf, maxlen);
#endif

  if (len == -1) {
    uv__free(buf);
    return -1;
  }

  /* Uncommon case: resize to make room for the trailing nul byte. */
  if (len == maxlen) {
    buf = uv__reallocf(buf, len + 1);  /* reallocf frees buf on failure. */

    if (buf == NULL)
      return -1;
  }

  buf[len] = '\0';
  req->ptr = buf;

  return 0;
}
/* Canonicalize req->path with realpath(3) into a heap buffer stored in
 * req->ptr. POSIX.1-2008 lets realpath() allocate the buffer itself;
 * older systems get a pre-sized buffer. Returns 0 or -1 with errno set.
 */
static ssize_t uv__fs_realpath(uv_fs_t* req) {
  char* buf;

#if defined(_POSIX_VERSION) && _POSIX_VERSION >= 200809L
  buf = realpath(req->path, NULL);  /* Allocated with the system malloc. */
  if (buf == NULL)
    return -1;
#else
  ssize_t len;

  len = uv__fs_pathmax_size(req->path);
  buf = uv__malloc(len + 1);

  if (buf == NULL) {
    errno = ENOMEM;
    return -1;
  }

  if (realpath(req->path, buf) == NULL) {
    uv__free(buf);
    return -1;
  }
#endif

  req->ptr = buf;
  return 0;
}
/* Userspace sendfile fallback: copy up to req->bufsml[0].len bytes from
 * in_fd (stashed in req->flags) to out_fd (req->file) through an 8 KiB
 * stack buffer, starting at req->off. On success advances req->off and
 * returns the byte count; returns -1 only when nothing was sent.
 */
static ssize_t uv__fs_sendfile_emul(uv_fs_t* req) {
  struct pollfd pfd;
  int use_pread;
  off_t offset;
  ssize_t nsent;
  ssize_t nread;
  ssize_t nwritten;
  size_t buflen;
  size_t len;
  ssize_t n;
  int in_fd;
  int out_fd;
  char buf[8192];

  len = req->bufsml[0].len;
  in_fd = req->flags;   /* The sendfile path overloads req->flags as the source fd. */
  out_fd = req->file;
  offset = req->off;
  use_pread = 1;

  /* Here are the rules regarding errors:
   *
   * 1. Read errors are reported only if nsent==0, otherwise we return nsent.
   *    The user needs to know that some data has already been sent, to stop
   *    them from sending it twice.
   *
   * 2. Write errors are always reported. Write errors are bad because they
   *    mean data loss: we've read data but now we can't write it out.
   *
   * We try to use pread() and fall back to regular read() if the source fd
   * doesn't support positional reads, for example when it's a pipe fd.
   *
   * If we get EAGAIN when writing to the target fd, we poll() on it until
   * it becomes writable again.
   *
   * FIXME: If we get a write error when use_pread==1, it should be safe to
   *        return the number of sent bytes instead of an error because pread()
   *        is, in theory, idempotent. However, special files in /dev or /proc
   *        may support pread() but not necessarily return the same data on
   *        successive reads.
   *
   * FIXME: There is no way now to signal that we managed to send *some* data
   *        before a write error.
   */
  for (nsent = 0; (size_t) nsent < len; ) {
    buflen = len - nsent;

    if (buflen > sizeof(buf))
      buflen = sizeof(buf);

    /* Read a chunk, retrying on EINTR. */
    do
      if (use_pread)
        nread = pread(in_fd, buf, buflen, offset);
      else
        nread = read(in_fd, buf, buflen);
    while (nread == -1 && errno == EINTR);

    if (nread == 0)
      goto out;  /* EOF on the source. */

    if (nread == -1) {
      /* EIO/ESPIPE on the first pread: source is not seekable, switch to
       * sequential read() and try again. */
      if (use_pread && nsent == 0 && (errno == EIO || errno == ESPIPE)) {
        use_pread = 0;
        continue;
      }

      if (nsent == 0)
        nsent = -1;

      goto out;
    }

    /* Write the chunk out completely, polling through EAGAIN. */
    for (nwritten = 0; nwritten < nread; ) {
      do
        n = write(out_fd, buf + nwritten, nread - nwritten);
      while (n == -1 && errno == EINTR);

      if (n != -1) {
        nwritten += n;
        continue;
      }

      if (errno != EAGAIN && errno != EWOULDBLOCK) {
        nsent = -1;
        goto out;
      }

      pfd.fd = out_fd;
      pfd.events = POLLOUT;
      pfd.revents = 0;

      do
        n = poll(&pfd, 1, -1);
      while (n == -1 && errno == EINTR);

      /* Any revents bit other than POLLOUT means the fd is in a bad state. */
      if (n == -1 || (pfd.revents & ~POLLOUT) != 0) {
        errno = EIO;
        nsent = -1;
        goto out;
      }
    }

    offset += nread;
    nsent += nread;
  }

out:
  if (nsent != -1)
    req->off = offset;

  return nsent;
}
/* Zero-copy file transfer from in_fd (req->flags) to out_fd (req->file)
 * using the platform's sendfile()/copy_file_range(), falling back to the
 * userspace emulation when the kernel primitive rejects the fd pair.
 * Advances req->off by the number of bytes sent and returns that count,
 * or -1 with errno set.
 */
static ssize_t uv__fs_sendfile(uv_fs_t* req) {
  int in_fd;
  int out_fd;

  in_fd = req->flags;
  out_fd = req->file;

#if defined(__linux__) || defined(__sun)
  {
    off_t off;
    ssize_t r;

    off = req->off;

#ifdef __linux__
    {
      /* NOTE(review): plain static int read/written without atomics; at
       * worst, racing threads probe copy_file_range() more than once. */
      static int copy_file_range_support = 1;

      if (copy_file_range_support) {
        r = uv__fs_copy_file_range(in_fd, NULL, out_fd, &off, req->bufsml[0].len, 0);

        if (r == -1 && errno == ENOSYS) {
          /* Pre-4.5 kernel: remember and fall through to sendfile(). */
          errno = 0;
          copy_file_range_support = 0;
        } else {
          goto ok;
        }
      }
    }
#endif

    r = sendfile(out_fd, in_fd, &off, req->bufsml[0].len);

ok:
    /* sendfile() on SunOS returns EINVAL if the target fd is not a socket but
     * it still writes out data. Fortunately, we can detect it by checking if
     * the offset has been updated.
     */
    if (r != -1 || off > req->off) {
      r = off - req->off;
      req->off = off;
      return r;
    }

    if (errno == EINVAL ||
        errno == EIO ||
        errno == ENOTSOCK ||
        errno == EXDEV) {
      errno = 0;
      return uv__fs_sendfile_emul(req);
    }

    return -1;
  }
#elif defined(__APPLE__)           || \
      defined(__DragonFly__)       || \
      defined(__FreeBSD__)         || \
      defined(__FreeBSD_kernel__)
  {
    off_t len;
    ssize_t r;

    /* sendfile() on FreeBSD and Darwin returns EAGAIN if the target fd is in
     * non-blocking mode and not all data could be written. If a non-zero
     * number of bytes have been sent, we don't consider it an error.
     */

#if defined(__FreeBSD__) || defined(__DragonFly__)
    len = 0;
    r = sendfile(in_fd, out_fd, req->off, req->bufsml[0].len, NULL, &len, 0);
#elif defined(__FreeBSD_kernel__)
    len = 0;
    r = bsd_sendfile(in_fd,
                     out_fd,
                     req->off,
                     req->bufsml[0].len,
                     NULL,
                     &len,
                     0);
#else
    /* The darwin sendfile takes len as an input for the length to send,
     * so make sure to initialize it with the caller's value. */
    len = req->bufsml[0].len;
    r = sendfile(in_fd, out_fd, req->off, &len, NULL, 0);
#endif

    /*
     * The man page for sendfile(2) on DragonFly states that `len` contains
     * a meaningful value ONLY in case of EAGAIN and EINTR.
     * Nothing is said about it's value in case of other errors, so better
     * not depend on the potential wrong assumption that is was not modified
     * by the syscall.
     */
    if (r == 0 || ((errno == EAGAIN || errno == EINTR) && len != 0)) {
      req->off += len;
      return (ssize_t) len;
    }

    if (errno == EINVAL ||
        errno == EIO ||
        errno == ENOTSOCK ||
        errno == EXDEV) {
      errno = 0;
      return uv__fs_sendfile_emul(req);
    }

    return -1;
  }
#else
  /* Squelch compiler warnings. */
  (void) &in_fd;
  (void) &out_fd;

  return uv__fs_sendfile_emul(req);
#endif
}
/* Worker for UV_FS_UTIME: set the access and modification times of
 * req->path (following symlinks).  Returns 0 on success, -1 with errno set
 * on failure; platforms with no suitable syscall fail with ENOSYS.
 */
static ssize_t uv__fs_utime(uv_fs_t* req) {
#if defined(__linux__) \
    || defined(_AIX71) \
    || defined(__sun) \
    || defined(__HAIKU__)
  /* utimensat() has nanosecond resolution but we stick to microseconds
   * for the sake of consistency with other platforms.
   */
  struct timespec ts[2];
  ts[0] = uv__fs_to_timespec(req->atime);
  ts[1] = uv__fs_to_timespec(req->mtime);
  return utimensat(AT_FDCWD, req->path, ts, 0);
#elif defined(__APPLE__) \
    || defined(__DragonFly__) \
    || defined(__FreeBSD__) \
    || defined(__FreeBSD_kernel__) \
    || defined(__NetBSD__) \
    || defined(__OpenBSD__)
  struct timeval tv[2];
  tv[0] = uv__fs_to_timeval(req->atime);
  tv[1] = uv__fs_to_timeval(req->mtime);
  return utimes(req->path, tv);
#elif defined(_AIX) \
    && !defined(_AIX71)
  /* Pre-7.1 AIX: only whole-second resolution via utime(). */
  struct utimbuf buf;
  buf.actime = req->atime;
  buf.modtime = req->mtime;
  return utime(req->path, &buf);
#elif defined(__MVS__)
  /* z/OS: change file attributes directly.
   * NOTE(review): __lchattr operates on the link itself; for utime
   * (follow-symlink) semantics __chattr may be intended -- confirm. */
  attrib_t atr;
  memset(&atr, 0, sizeof(atr));
  atr.att_mtimechg = 1;
  atr.att_atimechg = 1;
  atr.att_mtime = req->mtime;
  atr.att_atime = req->atime;
  return __lchattr((char*) req->path, &atr, sizeof(atr));
#else
  errno = ENOSYS;
  return -1;
#endif
}
/* Worker for UV_FS_LUTIME: like uv__fs_utime() but sets the times of the
 * symlink itself rather than its target.  Fails with ENOSYS on platforms
 * lacking utimensat(AT_SYMLINK_NOFOLLOW) / lutimes().
 */
static ssize_t uv__fs_lutime(uv_fs_t* req) {
#if defined(__linux__) || \
    defined(_AIX71) || \
    defined(__sun) || \
    defined(__HAIKU__)
  struct timespec ts[2];
  ts[0] = uv__fs_to_timespec(req->atime);
  ts[1] = uv__fs_to_timespec(req->mtime);
  return utimensat(AT_FDCWD, req->path, ts, AT_SYMLINK_NOFOLLOW);
#elif defined(__APPLE__) || \
      defined(__DragonFly__) || \
      defined(__FreeBSD__) || \
      defined(__FreeBSD_kernel__) || \
      defined(__NetBSD__)
  struct timeval tv[2];
  tv[0] = uv__fs_to_timeval(req->atime);
  tv[1] = uv__fs_to_timeval(req->mtime);
  return lutimes(req->path, tv);
#else
  errno = ENOSYS;
  return -1;
#endif
}
/* Single write attempt for UV_FS_WRITE: write req->bufs at req->off, or at
 * the current file position when req->off < 0.  Returns the byte count or
 * -1 with errno set.  Called in a loop by uv__fs_write_all().
 */
static ssize_t uv__fs_write(uv_fs_t* req) {
#if defined(__linux__)
  static int no_pwritev;  /* Sticky "kernel lacks pwritev()" flag. */
#endif
  ssize_t r;

  /* Serialize writes on OS X, concurrent write() and pwrite() calls result in
   * data loss. We can't use a per-file descriptor lock, the descriptor may be
   * a dup().
   */
#if defined(__APPLE__)
  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

  if (pthread_mutex_lock(&lock))
    abort();
#endif

  if (req->off < 0) {
    /* No offset: write at the current file position. */
    if (req->nbufs == 1)
      r = write(req->file, req->bufs[0].base, req->bufs[0].len);
    else
      r = writev(req->file, (struct iovec*) req->bufs, req->nbufs);
  } else {
    if (req->nbufs == 1) {
      r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
      goto done;
    }
#if HAVE_PREADV
    r = pwritev(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
#else
    /* No compile-time pwritev().  On Linux, probe for it at run time via
     * uv__pwritev() and fall back to a single pwrite() when absent.  The
     * fallback only writes bufs[0]; uv__fs_write_all() keeps looping until
     * every buffer is drained.
     */
# if defined(__linux__)
    if (no_pwritev) retry:
# endif
    {
      r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
    }
# if defined(__linux__)
    else {
      r = uv__pwritev(req->file,
                      (struct iovec*) req->bufs,
                      req->nbufs,
                      req->off);
      if (r == -1 && errno == ENOSYS) {
        no_pwritev = 1;
        goto retry;
      }
    }
# endif
#endif
  }

done:
#if defined(__APPLE__)
  if (pthread_mutex_unlock(&lock))
    abort();
#endif

  return r;
}
/* Worker for UV_FS_COPYFILE: copy req->path to req->new_path.
 * Strategy: open both files, mirror the source mode onto the destination,
 * try an FICLONE reflink when requested, otherwise stream the bytes via
 * uv_fs_sendfile().  Returns 0 on success; on failure sets errno, unlinks
 * the destination, and returns -1.
 */
static ssize_t uv__fs_copyfile(uv_fs_t* req) {
  uv_fs_t fs_req;
  uv_file srcfd;
  uv_file dstfd;
  struct stat src_statsbuf;
  struct stat dst_statsbuf;
  int dst_flags;
  int result;
  int err;
  off_t bytes_to_send;
  off_t in_offset;
  off_t bytes_written;
  size_t bytes_chunk;

  dstfd = -1;
  err = 0;

  /* Open the source file. */
  srcfd = uv_fs_open(NULL, &fs_req, req->path, O_RDONLY, 0, NULL);
  uv_fs_req_cleanup(&fs_req);

  if (srcfd < 0)
    return srcfd;

  /* Get the source file's mode. */
  if (fstat(srcfd, &src_statsbuf)) {
    err = UV__ERR(errno);
    goto out;
  }

  dst_flags = O_WRONLY | O_CREAT;

  if (req->flags & UV_FS_COPYFILE_EXCL)
    dst_flags |= O_EXCL;

  /* Open the destination file. */
  dstfd = uv_fs_open(NULL,
                     &fs_req,
                     req->new_path,
                     dst_flags,
                     src_statsbuf.st_mode,
                     NULL);
  uv_fs_req_cleanup(&fs_req);

  if (dstfd < 0) {
    err = dstfd;
    goto out;
  }

  /* If the file is not being opened exclusively, verify that the source and
     destination are not the same file. If they are the same, bail out early. */
  if ((req->flags & UV_FS_COPYFILE_EXCL) == 0) {
    /* Get the destination file's mode. */
    if (fstat(dstfd, &dst_statsbuf)) {
      err = UV__ERR(errno);
      goto out;
    }

    /* Check if srcfd and dstfd refer to the same file */
    if (src_statsbuf.st_dev == dst_statsbuf.st_dev &&
        src_statsbuf.st_ino == dst_statsbuf.st_ino) {
      goto out;  /* Same file: succeed without copying anything. */
    }

    /* Truncate the file in case the destination already existed. */
    if (ftruncate(dstfd, 0) != 0) {
      err = UV__ERR(errno);
      goto out;
    }
  }

  if (fchmod(dstfd, src_statsbuf.st_mode) == -1) {
    err = UV__ERR(errno);
#ifdef __linux__
    if (err != UV_EPERM)
      goto out;

    {
      struct statfs s;

      /* fchmod() on CIFS shares always fails with EPERM unless the share is
       * mounted with "noperm". As fchmod() is a meaningless operation on such
       * shares anyway, detect that condition and squelch the error.
       */
      if (fstatfs(dstfd, &s) == -1)
        goto out;

      if (s.f_type != /* CIFS */ 0xFF534D42u)
        goto out;
    }

    err = 0;
#else  /* !__linux__ */
    goto out;
#endif  /* !__linux__ */
  }

#ifdef FICLONE
  if (req->flags & UV_FS_COPYFILE_FICLONE ||
      req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
    if (ioctl(dstfd, FICLONE, srcfd) == 0) {
      /* ioctl() with FICLONE succeeded. */
      goto out;
    }
    /* If an error occurred and force was set, return the error to the caller;
     * fall back to sendfile() when force was not set. */
    if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
      err = UV__ERR(errno);
      goto out;
    }
  }
#else
  if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
    err = UV_ENOSYS;
    goto out;
  }
#endif

  /* Stream the file contents in at most SSIZE_MAX-sized chunks. */
  bytes_to_send = src_statsbuf.st_size;
  in_offset = 0;
  while (bytes_to_send != 0) {
    bytes_chunk = SSIZE_MAX;
    if (bytes_to_send < (off_t) bytes_chunk)
      bytes_chunk = bytes_to_send;
    uv_fs_sendfile(NULL, &fs_req, dstfd, srcfd, in_offset, bytes_chunk, NULL);
    bytes_written = fs_req.result;
    uv_fs_req_cleanup(&fs_req);
    if (bytes_written < 0) {
      err = bytes_written;
      break;
    }
    bytes_to_send -= bytes_written;
    in_offset += bytes_written;
  }

out:
  if (err < 0)
    result = err;
  else
    result = 0;

  /* Close the source file. */
  err = uv__close_nocheckstdio(srcfd);

  /* Don't overwrite any existing errors. */
  if (err != 0 && result == 0)
    result = err;

  /* Close the destination file if it is open. */
  if (dstfd >= 0) {
    err = uv__close_nocheckstdio(dstfd);

    /* Don't overwrite any existing errors. */
    if (err != 0 && result == 0)
      result = err;

    /* Remove the destination file if something went wrong. */
    if (result != 0) {
      uv_fs_unlink(NULL, &fs_req, req->new_path, NULL);
      /* Ignore the unlink return value, as an error already happened. */
      uv_fs_req_cleanup(&fs_req);
    }
  }

  if (result == 0)
    return 0;

  errno = UV__ERR(result);
  return -1;
}
/* Copy a platform struct stat into libuv's portable uv_stat_t, normalizing
 * the timestamp fields to {tv_sec, tv_nsec} pairs.  Fields a platform does
 * not provide (birth time, flags, generation) fall back to st_ctim / zero.
 */
static void uv__to_stat(struct stat* src, uv_stat_t* dst) {
  dst->st_dev = src->st_dev;
  dst->st_mode = src->st_mode;
  dst->st_nlink = src->st_nlink;
  dst->st_uid = src->st_uid;
  dst->st_gid = src->st_gid;
  dst->st_rdev = src->st_rdev;
  dst->st_ino = src->st_ino;
  dst->st_size = src->st_size;
  dst->st_blksize = src->st_blksize;
  dst->st_blocks = src->st_blocks;

#if defined(__APPLE__)
  /* Darwin spells the timespec members st_*timespec and has a birth time. */
  dst->st_atim.tv_sec = src->st_atimespec.tv_sec;
  dst->st_atim.tv_nsec = src->st_atimespec.tv_nsec;
  dst->st_mtim.tv_sec = src->st_mtimespec.tv_sec;
  dst->st_mtim.tv_nsec = src->st_mtimespec.tv_nsec;
  dst->st_ctim.tv_sec = src->st_ctimespec.tv_sec;
  dst->st_ctim.tv_nsec = src->st_ctimespec.tv_nsec;
  dst->st_birthtim.tv_sec = src->st_birthtimespec.tv_sec;
  dst->st_birthtim.tv_nsec = src->st_birthtimespec.tv_nsec;
  dst->st_flags = src->st_flags;
  dst->st_gen = src->st_gen;
#elif defined(__ANDROID__)
  /* Bionic exposes separate st_*time / st_*timensec members. */
  dst->st_atim.tv_sec = src->st_atime;
  dst->st_atim.tv_nsec = src->st_atimensec;
  dst->st_mtim.tv_sec = src->st_mtime;
  dst->st_mtim.tv_nsec = src->st_mtimensec;
  dst->st_ctim.tv_sec = src->st_ctime;
  dst->st_ctim.tv_nsec = src->st_ctimensec;
  dst->st_birthtim.tv_sec = src->st_ctime;
  dst->st_birthtim.tv_nsec = src->st_ctimensec;
  dst->st_flags = 0;
  dst->st_gen = 0;
#elif !defined(_AIX) && ( \
    defined(__DragonFly__) || \
    defined(__FreeBSD__) || \
    defined(__OpenBSD__) || \
    defined(__NetBSD__) || \
    defined(_GNU_SOURCE) || \
    defined(_BSD_SOURCE) || \
    defined(_SVID_SOURCE) || \
    defined(_XOPEN_SOURCE) || \
    defined(_DEFAULT_SOURCE))
  /* Platforms exposing the POSIX.1-2008 st_*tim timespec members. */
  dst->st_atim.tv_sec = src->st_atim.tv_sec;
  dst->st_atim.tv_nsec = src->st_atim.tv_nsec;
  dst->st_mtim.tv_sec = src->st_mtim.tv_sec;
  dst->st_mtim.tv_nsec = src->st_mtim.tv_nsec;
  dst->st_ctim.tv_sec = src->st_ctim.tv_sec;
  dst->st_ctim.tv_nsec = src->st_ctim.tv_nsec;
# if defined(__FreeBSD__) || \
    defined(__NetBSD__)
  dst->st_birthtim.tv_sec = src->st_birthtim.tv_sec;
  dst->st_birthtim.tv_nsec = src->st_birthtim.tv_nsec;
  dst->st_flags = src->st_flags;
  dst->st_gen = src->st_gen;
# else
  dst->st_birthtim.tv_sec = src->st_ctim.tv_sec;
  dst->st_birthtim.tv_nsec = src->st_ctim.tv_nsec;
  dst->st_flags = 0;
  dst->st_gen = 0;
# endif
#else
  /* Fallback: only whole-second st_*time fields are available. */
  dst->st_atim.tv_sec = src->st_atime;
  dst->st_atim.tv_nsec = 0;
  dst->st_mtim.tv_sec = src->st_mtime;
  dst->st_mtim.tv_nsec = 0;
  dst->st_ctim.tv_sec = src->st_ctime;
  dst->st_ctim.tv_nsec = 0;
  dst->st_birthtim.tv_sec = src->st_ctime;
  dst->st_birthtim.tv_nsec = 0;
  dst->st_flags = 0;
  dst->st_gen = 0;
#endif
}
  1168. static int uv__fs_statx(int fd,
  1169. const char* path,
  1170. int is_fstat,
  1171. int is_lstat,
  1172. uv_stat_t* buf) {
  1173. STATIC_ASSERT(UV_ENOSYS != -1);
  1174. #ifdef __linux__
  1175. static int no_statx;
  1176. struct uv__statx statxbuf;
  1177. int dirfd;
  1178. int flags;
  1179. int mode;
  1180. int rc;
  1181. if (uv__load_relaxed(&no_statx))
  1182. return UV_ENOSYS;
  1183. dirfd = AT_FDCWD;
  1184. flags = 0; /* AT_STATX_SYNC_AS_STAT */
  1185. mode = 0xFFF; /* STATX_BASIC_STATS + STATX_BTIME */
  1186. if (is_fstat) {
  1187. dirfd = fd;
  1188. flags |= 0x1000; /* AT_EMPTY_PATH */
  1189. }
  1190. if (is_lstat)
  1191. flags |= AT_SYMLINK_NOFOLLOW;
  1192. rc = uv__statx(dirfd, path, flags, mode, &statxbuf);
  1193. switch (rc) {
  1194. case 0:
  1195. break;
  1196. case -1:
  1197. /* EPERM happens when a seccomp filter rejects the system call.
  1198. * Has been observed with libseccomp < 2.3.3 and docker < 18.04.
  1199. */
  1200. if (errno != EINVAL && errno != EPERM && errno != ENOSYS)
  1201. return -1;
  1202. /* Fall through. */
  1203. default:
  1204. /* Normally on success, zero is returned and On error, -1 is returned.
  1205. * Observed on S390 RHEL running in a docker container with statx not
  1206. * implemented, rc might return 1 with 0 set as the error code in which
  1207. * case we return ENOSYS.
  1208. */
  1209. uv__store_relaxed(&no_statx, 1);
  1210. return UV_ENOSYS;
  1211. }
  1212. buf->st_dev = 256 * statxbuf.stx_dev_major + statxbuf.stx_dev_minor;
  1213. buf->st_mode = statxbuf.stx_mode;
  1214. buf->st_nlink = statxbuf.stx_nlink;
  1215. buf->st_uid = statxbuf.stx_uid;
  1216. buf->st_gid = statxbuf.stx_gid;
  1217. buf->st_rdev = statxbuf.stx_rdev_major;
  1218. buf->st_ino = statxbuf.stx_ino;
  1219. buf->st_size = statxbuf.stx_size;
  1220. buf->st_blksize = statxbuf.stx_blksize;
  1221. buf->st_blocks = statxbuf.stx_blocks;
  1222. buf->st_atim.tv_sec = statxbuf.stx_atime.tv_sec;
  1223. buf->st_atim.tv_nsec = statxbuf.stx_atime.tv_nsec;
  1224. buf->st_mtim.tv_sec = statxbuf.stx_mtime.tv_sec;
  1225. buf->st_mtim.tv_nsec = statxbuf.stx_mtime.tv_nsec;
  1226. buf->st_ctim.tv_sec = statxbuf.stx_ctime.tv_sec;
  1227. buf->st_ctim.tv_nsec = statxbuf.stx_ctime.tv_nsec;
  1228. buf->st_birthtim.tv_sec = statxbuf.stx_btime.tv_sec;
  1229. buf->st_birthtim.tv_nsec = statxbuf.stx_btime.tv_nsec;
  1230. buf->st_flags = 0;
  1231. buf->st_gen = 0;
  1232. return 0;
  1233. #else
  1234. return UV_ENOSYS;
  1235. #endif /* __linux__ */
  1236. }
  1237. static int uv__fs_stat(const char *path, uv_stat_t *buf) {
  1238. struct stat pbuf;
  1239. int ret;
  1240. ret = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 0, buf);
  1241. if (ret != UV_ENOSYS)
  1242. return ret;
  1243. ret = stat(path, &pbuf);
  1244. if (ret == 0)
  1245. uv__to_stat(&pbuf, buf);
  1246. return ret;
  1247. }
  1248. static int uv__fs_lstat(const char *path, uv_stat_t *buf) {
  1249. struct stat pbuf;
  1250. int ret;
  1251. ret = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 1, buf);
  1252. if (ret != UV_ENOSYS)
  1253. return ret;
  1254. ret = lstat(path, &pbuf);
  1255. if (ret == 0)
  1256. uv__to_stat(&pbuf, buf);
  1257. return ret;
  1258. }
  1259. static int uv__fs_fstat(int fd, uv_stat_t *buf) {
  1260. struct stat pbuf;
  1261. int ret;
  1262. ret = uv__fs_statx(fd, "", /* is_fstat */ 1, /* is_lstat */ 0, buf);
  1263. if (ret != UV_ENOSYS)
  1264. return ret;
  1265. ret = fstat(fd, &pbuf);
  1266. if (ret == 0)
  1267. uv__to_stat(&pbuf, buf);
  1268. return ret;
  1269. }
  1270. static size_t uv__fs_buf_offset(uv_buf_t* bufs, size_t size) {
  1271. size_t offset;
  1272. /* Figure out which bufs are done */
  1273. for (offset = 0; size > 0 && bufs[offset].len <= size; ++offset)
  1274. size -= bufs[offset].len;
  1275. /* Fix a partial read/write */
  1276. if (size > 0) {
  1277. bufs[offset].base += size;
  1278. bufs[offset].len -= size;
  1279. }
  1280. return offset;
  1281. }
/* Worker for UV_FS_WRITE: repeatedly call uv__fs_write() until every buffer
 * is drained, chunking to the platform iovec limit and retrying on EINTR.
 * Frees a heap-allocated buffer array before returning the total number of
 * bytes written (or the first error if nothing was written).
 */
static ssize_t uv__fs_write_all(uv_fs_t* req) {
  unsigned int iovmax;
  unsigned int nbufs;
  uv_buf_t* bufs;
  ssize_t total;
  ssize_t result;

  iovmax = uv__getiovmax();
  nbufs = req->nbufs;
  bufs = req->bufs;
  total = 0;

  while (nbufs > 0) {
    /* Submit at most iovmax buffers per syscall. */
    req->nbufs = nbufs;
    if (req->nbufs > iovmax)
      req->nbufs = iovmax;

    do
      result = uv__fs_write(req);
    while (result < 0 && errno == EINTR);

    if (result <= 0) {
      /* Report the error only when no data was written at all. */
      if (total == 0)
        total = result;
      break;
    }

    if (req->off >= 0)
      req->off += result;

    /* Advance past fully-written buffers; a partial one is shrunk. */
    req->nbufs = uv__fs_buf_offset(req->bufs, result);
    req->bufs += req->nbufs;
    nbufs -= req->nbufs;
    total += result;
  }

  if (bufs != req->bufsml)
    uv__free(bufs);

  req->bufs = NULL;
  req->nbufs = 0;

  return total;
}
/* Threadpool entry point for every fs request: dispatch on req->fs_type,
 * retry on EINTR (except for close() and read(), which are never retried),
 * and store the result -- a negative errno on failure -- in req->result.
 */
static void uv__fs_work(struct uv__work* w) {
  int retry_on_eintr;
  uv_fs_t* req;
  ssize_t r;

  req = container_of(w, uv_fs_t, work_req);
  retry_on_eintr = !(req->fs_type == UV_FS_CLOSE ||
                     req->fs_type == UV_FS_READ);

  do {
    errno = 0;

/* Expands to one switch case per request type. */
#define X(type, action) \
  case UV_FS_ ## type: \
    r = action; \
    break;

    switch (req->fs_type) {
    X(ACCESS, access(req->path, req->flags));
    X(CHMOD, chmod(req->path, req->mode));
    X(CHOWN, chown(req->path, req->uid, req->gid));
    X(CLOSE, uv__fs_close(req->file));
    X(COPYFILE, uv__fs_copyfile(req));
    X(FCHMOD, fchmod(req->file, req->mode));
    X(FCHOWN, fchown(req->file, req->uid, req->gid));
    X(LCHOWN, lchown(req->path, req->uid, req->gid));
    X(FDATASYNC, uv__fs_fdatasync(req));
    X(FSTAT, uv__fs_fstat(req->file, &req->statbuf));
    X(FSYNC, uv__fs_fsync(req));
    X(FTRUNCATE, ftruncate(req->file, req->off));
    X(FUTIME, uv__fs_futime(req));
    X(LUTIME, uv__fs_lutime(req));
    X(LSTAT, uv__fs_lstat(req->path, &req->statbuf));
    X(LINK, link(req->path, req->new_path));
    X(MKDIR, mkdir(req->path, req->mode));
    X(MKDTEMP, uv__fs_mkdtemp(req));
    X(MKSTEMP, uv__fs_mkstemp(req));
    X(OPEN, uv__fs_open(req));
    X(READ, uv__fs_read(req));
    X(SCANDIR, uv__fs_scandir(req));
    X(OPENDIR, uv__fs_opendir(req));
    X(READDIR, uv__fs_readdir(req));
    X(CLOSEDIR, uv__fs_closedir(req));
    X(READLINK, uv__fs_readlink(req));
    X(REALPATH, uv__fs_realpath(req));
    X(RENAME, rename(req->path, req->new_path));
    X(RMDIR, rmdir(req->path));
    X(SENDFILE, uv__fs_sendfile(req));
    X(STAT, uv__fs_stat(req->path, &req->statbuf));
    X(STATFS, uv__fs_statfs(req));
    X(SYMLINK, symlink(req->path, req->new_path));
    X(UNLINK, unlink(req->path));
    X(UTIME, uv__fs_utime(req));
    X(WRITE, uv__fs_write_all(req));
    default: abort();
    }
#undef X
  } while (r == -1 && errno == EINTR && retry_on_eintr);

  if (r == -1)
    req->result = UV__ERR(errno);
  else
    req->result = r;

  /* The stat family returns its data through req->ptr -> req->statbuf. */
  if (r == 0 && (req->fs_type == UV_FS_STAT ||
                 req->fs_type == UV_FS_FSTAT ||
                 req->fs_type == UV_FS_LSTAT)) {
    req->ptr = &req->statbuf;
  }
}
  1381. static void uv__fs_done(struct uv__work* w, int status) {
  1382. uv_fs_t* req;
  1383. req = container_of(w, uv_fs_t, work_req);
  1384. uv__req_unregister(req->loop, req);
  1385. if (status == UV_ECANCELED) {
  1386. assert(req->result == 0);
  1387. req->result = UV_ECANCELED;
  1388. }
  1389. req->cb(req);
  1390. }
/* Public request constructors.  INIT()/PATH/PATH2/POST are helper macros
 * defined earlier in this file (not visible here); judging by
 * uv_fs_req_cleanup(), asynchronous requests (cb != NULL) copy their path
 * arguments while synchronous ones borrow the caller's memory.
 */
int uv_fs_access(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 int flags,
                 uv_fs_cb cb) {
  INIT(ACCESS);
  PATH;
  req->flags = flags;
  POST;
}


int uv_fs_chmod(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                int mode,
                uv_fs_cb cb) {
  INIT(CHMOD);
  PATH;
  req->mode = mode;
  POST;
}


int uv_fs_chown(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                uv_uid_t uid,
                uv_gid_t gid,
                uv_fs_cb cb) {
  INIT(CHOWN);
  PATH;
  req->uid = uid;
  req->gid = gid;
  POST;
}


int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(CLOSE);
  req->file = file;
  POST;
}


int uv_fs_fchmod(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 int mode,
                 uv_fs_cb cb) {
  INIT(FCHMOD);
  req->file = file;
  req->mode = mode;
  POST;
}


int uv_fs_fchown(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 uv_uid_t uid,
                 uv_gid_t gid,
                 uv_fs_cb cb) {
  INIT(FCHOWN);
  req->file = file;
  req->uid = uid;
  req->gid = gid;
  POST;
}


int uv_fs_lchown(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 uv_uid_t uid,
                 uv_gid_t gid,
                 uv_fs_cb cb) {
  INIT(LCHOWN);
  PATH;
  req->uid = uid;
  req->gid = gid;
  POST;
}
/* More public request constructors; same INIT/PATH/POST pattern as above. */
int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(FDATASYNC);
  req->file = file;
  POST;
}


int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(FSTAT);
  req->file = file;
  POST;
}


int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(FSYNC);
  req->file = file;
  POST;
}


int uv_fs_ftruncate(uv_loop_t* loop,
                    uv_fs_t* req,
                    uv_file file,
                    int64_t off,
                    uv_fs_cb cb) {
  INIT(FTRUNCATE);
  req->file = file;
  req->off = off;  /* New file length. */
  POST;
}


int uv_fs_futime(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 double atime,
                 double mtime,
                 uv_fs_cb cb) {
  INIT(FUTIME);
  req->file = file;
  req->atime = atime;
  req->mtime = mtime;
  POST;
}


int uv_fs_lutime(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 double atime,
                 double mtime,
                 uv_fs_cb cb) {
  INIT(LUTIME);
  PATH;
  req->atime = atime;
  req->mtime = mtime;
  POST;
}


int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(LSTAT);
  PATH;
  POST;
}


int uv_fs_link(uv_loop_t* loop,
               uv_fs_t* req,
               const char* path,
               const char* new_path,
               uv_fs_cb cb) {
  INIT(LINK);
  PATH2;
  POST;
}


int uv_fs_mkdir(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                int mode,
                uv_fs_cb cb) {
  INIT(MKDIR);
  PATH;
  req->mode = mode;
  POST;
}
/* mkdtemp/mkstemp always duplicate the template, even for synchronous
 * calls -- per uv_fs_req_cleanup(), these two request types always own
 * req->path.
 */
int uv_fs_mkdtemp(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* tpl,
                  uv_fs_cb cb) {
  INIT(MKDTEMP);
  req->path = uv__strdup(tpl);
  if (req->path == NULL)
    return UV_ENOMEM;
  POST;
}


int uv_fs_mkstemp(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* tpl,
                  uv_fs_cb cb) {
  INIT(MKSTEMP);
  req->path = uv__strdup(tpl);
  if (req->path == NULL)
    return UV_ENOMEM;
  POST;
}
/* open(2) request: `flags` are the O_* open flags, `mode` the creation mode. */
int uv_fs_open(uv_loop_t* loop,
               uv_fs_t* req,
               const char* path,
               int flags,
               int mode,
               uv_fs_cb cb) {
  INIT(OPEN);
  PATH;
  req->flags = flags;
  req->mode = mode;
  POST;
}
/* read(2)/preadv(2) request.  The caller's buffer descriptors are copied:
 * small counts fit in the embedded req->bufsml array, larger counts are
 * heap-allocated (freed by the worker or uv_fs_req_cleanup()).
 */
int uv_fs_read(uv_loop_t* loop, uv_fs_t* req,
               uv_file file,
               const uv_buf_t bufs[],
               unsigned int nbufs,
               int64_t off,
               uv_fs_cb cb) {
  INIT(READ);

  if (bufs == NULL || nbufs == 0)
    return UV_EINVAL;

  req->file = file;

  req->nbufs = nbufs;
  req->bufs = req->bufsml;
  if (nbufs > ARRAY_SIZE(req->bufsml))
    req->bufs = uv__malloc(nbufs * sizeof(*bufs));

  if (req->bufs == NULL)
    return UV_ENOMEM;

  memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));

  req->off = off;  /* Negative off means "current file position". */
  POST;
}
/* Directory and link request constructors; same INIT/PATH/POST pattern. */
int uv_fs_scandir(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  int flags,
                  uv_fs_cb cb) {
  INIT(SCANDIR);
  PATH;
  req->flags = flags;
  POST;
}


int uv_fs_opendir(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  uv_fs_cb cb) {
  INIT(OPENDIR);
  PATH;
  POST;
}


int uv_fs_readdir(uv_loop_t* loop,
                  uv_fs_t* req,
                  uv_dir_t* dir,
                  uv_fs_cb cb) {
  INIT(READDIR);

  /* The caller must supply an opened dir handle with a dirents array. */
  if (dir == NULL || dir->dir == NULL || dir->dirents == NULL)
    return UV_EINVAL;

  req->ptr = dir;
  POST;
}


int uv_fs_closedir(uv_loop_t* loop,
                   uv_fs_t* req,
                   uv_dir_t* dir,
                   uv_fs_cb cb) {
  INIT(CLOSEDIR);

  if (dir == NULL)
    return UV_EINVAL;

  req->ptr = dir;
  POST;
}


int uv_fs_readlink(uv_loop_t* loop,
                   uv_fs_t* req,
                   const char* path,
                   uv_fs_cb cb) {
  INIT(READLINK);
  PATH;
  POST;
}


int uv_fs_realpath(uv_loop_t* loop,
                   uv_fs_t* req,
                   const char * path,
                   uv_fs_cb cb) {
  INIT(REALPATH);
  PATH;
  POST;
}


int uv_fs_rename(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 const char* new_path,
                 uv_fs_cb cb) {
  INIT(RENAME);
  PATH2;
  POST;
}


int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(RMDIR);
  PATH;
  POST;
}
/* sendfile request: copy `len` bytes from in_fd (starting at `off`) to
 * out_fd.  uv_fs_t has no field for a second fd, so the input fd is
 * smuggled through req->flags; uv__fs_sendfile() reads it back from there.
 */
int uv_fs_sendfile(uv_loop_t* loop,
                   uv_fs_t* req,
                   uv_file out_fd,
                   uv_file in_fd,
                   int64_t off,
                   size_t len,
                   uv_fs_cb cb) {
  INIT(SENDFILE);
  req->flags = in_fd; /* hack */
  req->file = out_fd;
  req->off = off;
  req->bufsml[0].len = len;  /* Byte count also rides in bufsml[0]. */
  POST;
}
/* Remaining path-based request constructors; same INIT/PATH/POST pattern. */
int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(STAT);
  PATH;
  POST;
}


int uv_fs_symlink(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  const char* new_path,
                  int flags,
                  uv_fs_cb cb) {
  INIT(SYMLINK);
  PATH2;
  req->flags = flags;
  POST;
}


int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(UNLINK);
  PATH;
  POST;
}


int uv_fs_utime(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                double atime,
                double mtime,
                uv_fs_cb cb) {
  INIT(UTIME);
  PATH;
  req->atime = atime;
  req->mtime = mtime;
  POST;
}
/* write(2)/pwritev(2) request.  Mirrors uv_fs_read(): the caller's buffer
 * descriptors are copied into req->bufsml or a heap-allocated array (freed
 * by uv__fs_write_all() or uv_fs_req_cleanup()).
 */
int uv_fs_write(uv_loop_t* loop,
                uv_fs_t* req,
                uv_file file,
                const uv_buf_t bufs[],
                unsigned int nbufs,
                int64_t off,
                uv_fs_cb cb) {
  INIT(WRITE);

  if (bufs == NULL || nbufs == 0)
    return UV_EINVAL;

  req->file = file;

  req->nbufs = nbufs;
  req->bufs = req->bufsml;
  if (nbufs > ARRAY_SIZE(req->bufsml))
    req->bufs = uv__malloc(nbufs * sizeof(*bufs));

  if (req->bufs == NULL)
    return UV_ENOMEM;

  memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));

  req->off = off;  /* Negative off means "current file position". */
  POST;
}
/* Release any memory owned by the request.  Idempotent; req may be NULL. */
void uv_fs_req_cleanup(uv_fs_t* req) {
  if (req == NULL)
    return;

  /* Only necessary for asynchronous requests, i.e., requests with a callback.
   * Synchronous ones don't copy their arguments and have req->path and
   * req->new_path pointing to user-owned memory. UV_FS_MKDTEMP and
   * UV_FS_MKSTEMP are the exception to the rule, they always allocate memory.
   */
  if (req->path != NULL &&
      (req->cb != NULL ||
       req->fs_type == UV_FS_MKDTEMP || req->fs_type == UV_FS_MKSTEMP))
    uv__free((void*) req->path);  /* Memory is shared with req->new_path. */
  req->path = NULL;
  req->new_path = NULL;

  if (req->fs_type == UV_FS_READDIR && req->ptr != NULL)
    uv__fs_readdir_cleanup(req);

  if (req->fs_type == UV_FS_SCANDIR && req->ptr != NULL)
    uv__fs_scandir_cleanup(req);

  /* Free a heap-allocated buffer array (read/write with many buffers). */
  if (req->bufs != req->bufsml)
    uv__free(req->bufs);
  req->bufs = NULL;

  /* Never free the caller-visible dir handle (OPENDIR) or the embedded
   * statbuf; anything else hanging off req->ptr belongs to the request. */
  if (req->fs_type != UV_FS_OPENDIR && req->ptr != &req->statbuf)
    uv__free(req->ptr);
  req->ptr = NULL;
}
/* copyfile request constructor; rejects unknown flag bits up front. */
int uv_fs_copyfile(uv_loop_t* loop,
                   uv_fs_t* req,
                   const char* path,
                   const char* new_path,
                   int flags,
                   uv_fs_cb cb) {
  INIT(COPYFILE);

  if (flags & ~(UV_FS_COPYFILE_EXCL |
                UV_FS_COPYFILE_FICLONE |
                UV_FS_COPYFILE_FICLONE_FORCE)) {
    return UV_EINVAL;
  }

  PATH2;
  req->flags = flags;
  POST;
}
/* statfs request constructor. */
int uv_fs_statfs(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 uv_fs_cb cb) {
  INIT(STATFS);
  PATH;
  POST;
}


/* req->result holds a negative errno on failure; negate it to recover the
 * positive system error number (0 on success). */
int uv_fs_get_system_error(const uv_fs_t* req) {
  return -req->result;
}