/*
 * Facility for queueing callback functions to be run from the
 * top-level event loop after the current top-level activity finishes.
 */
#include <stddef.h>
#include <assert.h> // WINSCP
#include "putty.h"
  8. struct callback {
  9. struct callback *next;
  10. toplevel_callback_fn_t fn;
  11. void *ctx;
  12. };
  13. #ifdef MPEXT
  14. // PuTTY has one thread only, so run_toplevel_callbacks does not cater for multi threaded uses.
  15. // It would call callbacks registered any on thread from the thread that happens to call it.
  16. // We need to create separate callback queue for every SSH session.
  17. #define CALLBACK_SET_VAR callback_set_v
  18. #define cbcurr CALLBACK_SET_VAR->cbcurr
  19. #define cbhead CALLBACK_SET_VAR->cbhead
  20. #define cbtail CALLBACK_SET_VAR->cbtail
  21. #else
  22. static struct callback *cbcurr = NULL, *cbhead = NULL, *cbtail = NULL;
  23. #endif
  24. #ifndef MPEXT
  25. static toplevel_callback_notify_fn_t notify_frontend = NULL;
  26. static void *notify_ctx = NULL;
  27. void request_callback_notifications(toplevel_callback_notify_fn_t fn,
  28. void *ctx)
  29. {
  30. notify_frontend = fn;
  31. notify_ctx = ctx;
  32. }
  33. #endif
  34. static void run_idempotent_callback(void *ctx)
  35. {
  36. struct IdempotentCallback *ic = (struct IdempotentCallback *)ctx;
  37. ic->queued = false;
  38. ic->fn(ic->ctx);
  39. }
  40. void queue_idempotent_callback(struct IdempotentCallback *ic)
  41. {
  42. if (ic->queued)
  43. return;
  44. ic->queued = true;
  45. #ifdef MPEXT
  46. assert(ic->set != NULL);
  47. #endif
  48. queue_toplevel_callback(ic->set, run_idempotent_callback, ic);
  49. }
  50. void delete_callbacks_for_context(CALLBACK_SET void *ctx)
  51. {
  52. struct callback *newhead, *newtail;
  53. newhead = newtail = NULL;
  54. while (cbhead) {
  55. struct callback *cb = cbhead;
  56. cbhead = cbhead->next;
  57. if (cb->ctx == ctx ||
  58. (cb->fn == run_idempotent_callback &&
  59. ((struct IdempotentCallback *)cb->ctx)->ctx == ctx)) {
  60. sfree(cb);
  61. } else {
  62. if (!newhead)
  63. newhead = cb;
  64. else
  65. newtail->next = cb;
  66. newtail = cb;
  67. }
  68. }
  69. cbhead = newhead;
  70. cbtail = newtail;
  71. if (newtail)
  72. newtail->next = NULL;
  73. }
  74. void queue_toplevel_callback(CALLBACK_SET toplevel_callback_fn_t fn, void *ctx)
  75. {
  76. struct callback *cb;
  77. cb = snew(struct callback);
  78. cb->fn = fn;
  79. cb->ctx = ctx;
  80. #ifndef MPEXT
  81. /*
  82. * If the front end has requested notification of pending
  83. * callbacks, and we didn't already have one queued, let it know
  84. * we do have one now.
  85. *
  86. * If cbcurr is non-NULL, i.e. we are actually in the middle of
  87. * executing a callback right now, then we count that as the queue
  88. * already having been non-empty. That saves the front end getting
  89. * a constant stream of needless re-notifications if the last
  90. * callback keeps re-scheduling itself.
  91. */
  92. if (notify_frontend && !cbhead && !cbcurr)
  93. notify_frontend(notify_ctx);
  94. #endif
  95. if (cbtail)
  96. cbtail->next = cb;
  97. else
  98. cbhead = cb;
  99. cbtail = cb;
  100. cb->next = NULL;
  101. }
  102. bool run_toplevel_callbacks(CALLBACK_SET_ONLY)
  103. {
  104. bool done_something = false;
  105. if (cbhead) {
  106. /*
  107. * Transfer the head callback into cbcurr to indicate that
  108. * it's being executed. Then operations which transform the
  109. * queue, like delete_callbacks_for_context, can proceed as if
  110. * it's not there.
  111. */
  112. cbcurr = cbhead;
  113. cbhead = cbhead->next;
  114. if (!cbhead)
  115. cbtail = NULL;
  116. /*
  117. * Now run the callback, and then clear it out of cbcurr.
  118. */
  119. cbcurr->fn(cbcurr->ctx);
  120. sfree(cbcurr);
  121. // WINSCP: this does not happen, when exception (disconnect) occurs while the callback is called.
  122. // See also the comment in TSecureShell::FreeBackend().
  123. cbcurr = NULL;
  124. done_something = true;
  125. }
  126. return done_something;
  127. }
  128. bool toplevel_callback_pending(CALLBACK_SET_ONLY)
  129. {
  130. // MP does not have to be guarded
  131. return cbcurr != NULL || cbhead != NULL;
  132. }
  133. // WINSCP
  134. bool is_idempotent_callback_pending(CALLBACK_SET struct IdempotentCallback *ic)
  135. {
  136. return
  137. (cbhead != NULL) &&
  138. (cbhead->fn == run_idempotent_callback) &&
  139. (cbhead->ctx == ic);
  140. }