/* linux-inotify.c */
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "uv.h"
#include "tree.h"
#include "internal.h"

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>

#include <sys/types.h>
#include <unistd.h>
/* One node per inotify watch descriptor.  All uv_fs_event_t handles watching
 * the same wd share a single watcher_list; the handles are linked through the
 * `watchers` queue.
 */
struct watcher_list {
  RB_ENTRY(watcher_list) entry;  /* red-black tree linkage, keyed on wd */
  QUEUE watchers;                /* uv_fs_event_t handles for this wd */
  int iterating;                 /* nonzero while the queue is being walked;
                                  * defers freeing — see maybe_free_watcher_list */
  char* path;                    /* watched path; points into this same
                                  * allocation (see uv_fs_event_start) */
  int wd;                        /* inotify watch descriptor (tree key) */
};
/* Typed root of the wd-keyed red-black tree.  The loop stores the tree root
 * opaquely (it is accessed as loop->inotify_watchers); CAST() recovers the
 * typed struct watcher_root* from that storage.
 */
struct watcher_root {
  struct watcher_list* rbh_root;
};

#define CAST(p) ((struct watcher_root*)(p))
  42. static int compare_watchers(const struct watcher_list* a,
  43. const struct watcher_list* b) {
  44. if (a->wd < b->wd) return -1;
  45. if (a->wd > b->wd) return 1;
  46. return 0;
  47. }
/* Generate file-scope red-black tree operations (RB_FIND, RB_INSERT,
 * RB_REMOVE, RB_FOREACH_SAFE, ...) for watcher_root, ordered by
 * compare_watchers.
 */
RB_GENERATE_STATIC(watcher_root, watcher_list, entry, compare_watchers)

/* Forward declarations for helpers used before their definitions. */
static void uv__inotify_read(uv_loop_t* loop,
                             uv__io_t* w,
                             unsigned int revents);
static void maybe_free_watcher_list(struct watcher_list* w,
                                    uv_loop_t* loop);
  54. static int new_inotify_fd(void) {
  55. int err;
  56. int fd;
  57. fd = uv__inotify_init1(UV__IN_NONBLOCK | UV__IN_CLOEXEC);
  58. if (fd != -1)
  59. return fd;
  60. if (errno != ENOSYS)
  61. return -errno;
  62. fd = uv__inotify_init();
  63. if (fd == -1)
  64. return -errno;
  65. err = uv__cloexec(fd, 1);
  66. if (err == 0)
  67. err = uv__nonblock(fd, 1);
  68. if (err) {
  69. uv__close(fd);
  70. return err;
  71. }
  72. return fd;
  73. }
  74. static int init_inotify(uv_loop_t* loop) {
  75. int err;
  76. if (loop->inotify_fd != -1)
  77. return 0;
  78. err = new_inotify_fd();
  79. if (err < 0)
  80. return err;
  81. loop->inotify_fd = err;
  82. uv__io_init(&loop->inotify_read_watcher, uv__inotify_read, loop->inotify_fd);
  83. uv__io_start(loop, &loop->inotify_read_watcher, POLLIN);
  84. return 0;
  85. }
/* Re-arm all inotify watchers in the child after fork(2).  `old_watchers` is
 * the parent's watcher tree (the pre-fork value of loop->inotify_watchers).
 * Every handle is stopped against the stale fd and restarted against the new
 * one.  Returns 0 on success or the first error from uv_fs_event_start().
 */
int uv__inotify_fork(uv_loop_t* loop, void* old_watchers) {
  /* Open the inotify_fd, and re-arm all the inotify watchers. */
  int err;
  struct watcher_list* tmp_watcher_list_iter;
  struct watcher_list* watcher_list;
  struct watcher_list tmp_watcher_list;
  QUEUE queue;
  QUEUE* q;
  uv_fs_event_t* handle;
  char* tmp_path;

  if (old_watchers != NULL) {
    /* We must restore the old watcher list to be able to close items
     * out of it.
     */
    loop->inotify_watchers = old_watchers;
    QUEUE_INIT(&tmp_watcher_list.watchers);
    /* Note that the queue we use is shared with the start and stop()
     * functions, making QUEUE_FOREACH unsafe to use. So we use the
     * QUEUE_MOVE trick to safely iterate. Also don't free the watcher
     * list until we're done iterating. c.f. uv__inotify_read.
     */
    RB_FOREACH_SAFE(watcher_list, watcher_root,
                    CAST(&old_watchers), tmp_watcher_list_iter) {
      watcher_list->iterating = 1;
      QUEUE_MOVE(&watcher_list->watchers, &queue);
      while (!QUEUE_EMPTY(&queue)) {
        q = QUEUE_HEAD(&queue);
        handle = QUEUE_DATA(q, uv_fs_event_t, watchers);
        /* It's critical to keep a copy of path here, because it
         * will be set to NULL by stop() and then deallocated by
         * maybe_free_watcher_list
         */
        tmp_path = uv__strdup(handle->path);
        assert(tmp_path != NULL);
        QUEUE_REMOVE(q);
        QUEUE_INSERT_TAIL(&watcher_list->watchers, q);
        uv_fs_event_stop(handle);
        /* Park the now-stopped handle on a temporary queue for pass two,
         * stashing the duplicated path on the handle for later restart. */
        QUEUE_INSERT_TAIL(&tmp_watcher_list.watchers, &handle->watchers);
        handle->path = tmp_path;
      }
      watcher_list->iterating = 0;
      maybe_free_watcher_list(watcher_list, loop);
    }

    /* Pass two: restart every parked handle; this re-creates the inotify fd
     * on first start and builds fresh watcher_list nodes. */
    QUEUE_MOVE(&tmp_watcher_list.watchers, &queue);
    while (!QUEUE_EMPTY(&queue)) {
      q = QUEUE_HEAD(&queue);
      QUEUE_REMOVE(q);
      handle = QUEUE_DATA(q, uv_fs_event_t, watchers);
      tmp_path = handle->path;
      /* Clear the borrowed copy before start() installs the watcher_list's
       * own path; we free our duplicate right after. */
      handle->path = NULL;
      err = uv_fs_event_start(handle, handle->cb, tmp_path, 0);
      uv__free(tmp_path);
      if (err)
        return err;
    }
  }

  return 0;
}
  144. static struct watcher_list* find_watcher(uv_loop_t* loop, int wd) {
  145. struct watcher_list w;
  146. w.wd = wd;
  147. return RB_FIND(watcher_root, CAST(&loop->inotify_watchers), &w);
  148. }
  149. static void maybe_free_watcher_list(struct watcher_list* w, uv_loop_t* loop) {
  150. /* if the watcher_list->watchers is being iterated over, we can't free it. */
  151. if ((!w->iterating) && QUEUE_EMPTY(&w->watchers)) {
  152. /* No watchers left for this path. Clean up. */
  153. RB_REMOVE(watcher_root, CAST(&loop->inotify_watchers), w);
  154. uv__inotify_rm_watch(loop->inotify_fd, w->wd);
  155. uv__free(w);
  156. }
  157. }
/* Drain the loop's inotify fd and dispatch each kernel event to every
 * uv_fs_event_t registered for the affected watch descriptor.  Called when
 * the fd polls readable.  `dummy` is unused; the incoming `events` argument
 * is ignored and reused as scratch for the UV_CHANGE/UV_RENAME mask.
 */
static void uv__inotify_read(uv_loop_t* loop,
                             uv__io_t* dummy,
                             unsigned int events) {
  const struct uv__inotify_event* e;
  struct watcher_list* w;
  uv_fs_event_t* h;
  QUEUE queue;
  QUEUE* q;
  const char* path;
  ssize_t size;
  const char *p;
  /* needs to be large enough for sizeof(inotify_event) + strlen(path) */
  /* NOTE(review): buf is not explicitly aligned for struct uv__inotify_event;
   * presumably fine on the ABIs targeted here — confirm before porting. */
  char buf[4096];

  while (1) {
    /* Retry reads interrupted by signals. */
    do
      size = read(loop->inotify_fd, buf, sizeof(buf));
    while (size == -1 && errno == EINTR);

    if (size == -1) {
      assert(errno == EAGAIN || errno == EWOULDBLOCK);
      break;  /* fd drained */
    }

    assert(size > 0); /* pre-2.6.21 thing, size=0 == read buffer too small */

    /* Now we have one or more inotify_event structs. */
    for (p = buf; p < buf + size; p += sizeof(*e) + e->len) {
      e = (const struct uv__inotify_event*)p;

      /* Map the inotify mask onto libuv's two-event model: attribute and
       * content changes are UV_CHANGE, everything else is UV_RENAME. */
      events = 0;
      if (e->mask & (UV__IN_ATTRIB|UV__IN_MODIFY))
        events |= UV_CHANGE;
      if (e->mask & ~(UV__IN_ATTRIB|UV__IN_MODIFY))
        events |= UV_RENAME;

      w = find_watcher(loop, e->wd);
      if (w == NULL)
        continue; /* Stale event, no watchers left. */

      /* inotify does not return the filename when monitoring a single file
       * for modifications. Repurpose the filename for API compatibility.
       * I'm not convinced this is a good thing, maybe it should go.
       */
      path = e->len ? (const char*) (e + 1) : uv__basename_r(w->path);

      /* We're about to iterate over the queue and call user's callbacks.
       * What can go wrong?
       * A callback could call uv_fs_event_stop()
       * and the queue can change under our feet.
       * So, we use QUEUE_MOVE() trick to safely iterate over the queue.
       * And we don't free the watcher_list until we're done iterating.
       *
       * First,
       * tell uv_fs_event_stop() (that could be called from a user's callback)
       * not to free watcher_list.
       */
      w->iterating = 1;
      QUEUE_MOVE(&w->watchers, &queue);
      while (!QUEUE_EMPTY(&queue)) {
        q = QUEUE_HEAD(&queue);
        h = QUEUE_DATA(q, uv_fs_event_t, watchers);
        /* Move the handle back onto the live queue BEFORE invoking the
         * callback, so a stop() from inside the callback unlinks it from
         * the right list. */
        QUEUE_REMOVE(q);
        QUEUE_INSERT_TAIL(&w->watchers, q);
        h->cb(h, path, events, 0);
      }
      /* done iterating, time to (maybe) free empty watcher_list */
      w->iterating = 0;
      maybe_free_watcher_list(w, loop);
    }
  }
}
  222. int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
  223. uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
  224. return 0;
  225. }
  226. int uv_fs_event_start(uv_fs_event_t* handle,
  227. uv_fs_event_cb cb,
  228. const char* path,
  229. unsigned int flags) {
  230. struct watcher_list* w;
  231. int events;
  232. int err;
  233. int wd;
  234. if (uv__is_active(handle))
  235. return -EINVAL;
  236. err = init_inotify(handle->loop);
  237. if (err)
  238. return err;
  239. events = UV__IN_ATTRIB
  240. | UV__IN_CREATE
  241. | UV__IN_MODIFY
  242. | UV__IN_DELETE
  243. | UV__IN_DELETE_SELF
  244. | UV__IN_MOVE_SELF
  245. | UV__IN_MOVED_FROM
  246. | UV__IN_MOVED_TO;
  247. wd = uv__inotify_add_watch(handle->loop->inotify_fd, path, events);
  248. if (wd == -1)
  249. return -errno;
  250. w = find_watcher(handle->loop, wd);
  251. if (w)
  252. goto no_insert;
  253. w = uv__malloc(sizeof(*w) + strlen(path) + 1);
  254. if (w == NULL)
  255. return -ENOMEM;
  256. w->wd = wd;
  257. w->path = strcpy((char*)(w + 1), path);
  258. QUEUE_INIT(&w->watchers);
  259. w->iterating = 0;
  260. RB_INSERT(watcher_root, CAST(&handle->loop->inotify_watchers), w);
  261. no_insert:
  262. uv__handle_start(handle);
  263. QUEUE_INSERT_TAIL(&w->watchers, &handle->watchers);
  264. handle->path = w->path;
  265. handle->cb = cb;
  266. handle->wd = wd;
  267. return 0;
  268. }
/* Stop watching: detach the handle from its watcher_list and deactivate it.
 * A no-op on an inactive handle.  Always returns 0.
 */
int uv_fs_event_stop(uv_fs_event_t* handle) {
  struct watcher_list* w;

  if (!uv__is_active(handle))
    return 0;

  /* Must look up the list before handle->wd is reset below. */
  w = find_watcher(handle->loop, handle->wd);
  assert(w != NULL);

  handle->wd = -1;
  handle->path = NULL;  /* path storage belongs to w, not the handle */
  uv__handle_stop(handle);
  QUEUE_REMOVE(&handle->watchers);

  /* Frees w (and removes the kernel watch) unless other handles still share
   * it or uv__inotify_read()/fork() is mid-iteration. */
  maybe_free_watcher_list(w, handle->loop);

  return 0;
}
  282. void uv__fs_event_close(uv_fs_event_t* handle) {
  283. uv_fs_event_stop(handle);
  284. }