/***************************************************************************
 *                                  _   _ ____  _
 *  Project                     ___| | | |  _ \| |
 *                             / __| | | | |_) | |
 *                            | (__| |_| |  _ <| |___
 *                             \___|\___/|_| \_\_____|
 *
 * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
 *
 * This software is licensed as described in the file COPYING, which
 * you should have received as part of this distribution. The terms
 * are also available at https://curl.se/docs/copyright.html.
 *
 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
 * copies of the Software, and permit persons to whom the Software is
 * furnished to do so, under the terms of the COPYING file.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 * SPDX-License-Identifier: curl
 *
 ***************************************************************************/

#include "curl_setup.h"
#include "bufq.h"

/* The last 3 #include files should be in this order */
#include "curl_printf.h"
#include "curl_memory.h"
#include "memdebug.h"

static bool chunk_is_empty(const struct buf_chunk *chunk)
{
  return chunk->r_offset >= chunk->w_offset;
}

static bool chunk_is_full(const struct buf_chunk *chunk)
{
  return chunk->w_offset >= chunk->dlen;
}

static size_t chunk_len(const struct buf_chunk *chunk)
{
  return chunk->w_offset - chunk->r_offset;
}

static void chunk_reset(struct buf_chunk *chunk)
{
  chunk->next = NULL;
  chunk->r_offset = chunk->w_offset = 0;
}

static size_t chunk_append(struct buf_chunk *chunk,
                           const unsigned char *buf, size_t len)
{
  unsigned char *p = &chunk->x.data[chunk->w_offset];
  size_t n = chunk->dlen - chunk->w_offset;
  DEBUGASSERT(chunk->dlen >= chunk->w_offset);
  if(n) {
    n = CURLMIN(n, len);
    memcpy(p, buf, n);
    chunk->w_offset += n;
  }
  return n;
}

static size_t chunk_read(struct buf_chunk *chunk,
                         unsigned char *buf, size_t len)
{
  unsigned char *p = &chunk->x.data[chunk->r_offset];
  size_t n = chunk->w_offset - chunk->r_offset;
  DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
  if(!n) {
    return 0;
  }
  else if(n <= len) {
    memcpy(buf, p, n);
    chunk->r_offset = chunk->w_offset = 0;
    return n;
  }
  else {
    memcpy(buf, p, len);
    chunk->r_offset += len;
    return len;
  }
}

static size_t chunk_unwrite(struct buf_chunk *chunk, size_t len)
{
  size_t n = chunk->w_offset - chunk->r_offset;
  DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
  if(!n) {
    return 0;
  }
  else if(n <= len) {
    chunk->r_offset = chunk->w_offset = 0;
    return n;
  }
  else {
    chunk->w_offset -= len;
    return len;
  }
}

static ssize_t chunk_slurpn(struct buf_chunk *chunk, size_t max_len,
                            Curl_bufq_reader *reader,
                            void *reader_ctx, CURLcode *err)
{
  unsigned char *p = &chunk->x.data[chunk->w_offset];
  size_t n = chunk->dlen - chunk->w_offset; /* free amount */
  ssize_t nread;
  DEBUGASSERT(chunk->dlen >= chunk->w_offset);
  if(!n) {
    *err = CURLE_AGAIN;
    return -1;
  }
  if(max_len && n > max_len)
    n = max_len;
  nread = reader(reader_ctx, p, n, err);
  if(nread > 0) {
    DEBUGASSERT((size_t)nread <= n);
    chunk->w_offset += nread;
  }
  return nread;
}

static void chunk_peek(const struct buf_chunk *chunk,
                       const unsigned char **pbuf, size_t *plen)
{
  DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
  *pbuf = &chunk->x.data[chunk->r_offset];
  *plen = chunk->w_offset - chunk->r_offset;
}

static void chunk_peek_at(const struct buf_chunk *chunk, size_t offset,
                          const unsigned char **pbuf, size_t *plen)
{
  offset += chunk->r_offset;
  DEBUGASSERT(chunk->w_offset >= offset);
  *pbuf = &chunk->x.data[offset];
  *plen = chunk->w_offset - offset;
}

static size_t chunk_skip(struct buf_chunk *chunk, size_t amount)
{
  size_t n = chunk->w_offset - chunk->r_offset;
  DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
  if(n) {
    n = CURLMIN(n, amount);
    chunk->r_offset += n;
    if(chunk->r_offset == chunk->w_offset)
      chunk->r_offset = chunk->w_offset = 0;
  }
  return n;
}

static void chunk_list_free(struct buf_chunk **anchor)
{
  struct buf_chunk *chunk;
  while(*anchor) {
    chunk = *anchor;
    *anchor = chunk->next;
    free(chunk);
  }
}

void Curl_bufcp_init(struct bufc_pool *pool,
                     size_t chunk_size, size_t spare_max)
{
  DEBUGASSERT(chunk_size > 0);
  DEBUGASSERT(spare_max > 0);
  memset(pool, 0, sizeof(*pool));
  pool->chunk_size = chunk_size;
  pool->spare_max = spare_max;
}

static CURLcode bufcp_take(struct bufc_pool *pool,
                           struct buf_chunk **pchunk)
{
  struct buf_chunk *chunk = NULL;
  if(pool->spare) {
    chunk = pool->spare;
    pool->spare = chunk->next;
    --pool->spare_count;
    chunk_reset(chunk);
    *pchunk = chunk;
    return CURLE_OK;
  }
  chunk = calloc(1, sizeof(*chunk) + pool->chunk_size);
  if(!chunk) {
    *pchunk = NULL;
    return CURLE_OUT_OF_MEMORY;
  }
  chunk->dlen = pool->chunk_size;
  *pchunk = chunk;
  return CURLE_OK;
}

static void bufcp_put(struct bufc_pool *pool,
                      struct buf_chunk *chunk)
{
  if(pool->spare_count >= pool->spare_max) {
    free(chunk);
  }
  else {
    chunk_reset(chunk);
    chunk->next = pool->spare;
    pool->spare = chunk;
    ++pool->spare_count;
  }
}

void Curl_bufcp_free(struct bufc_pool *pool)
{
  chunk_list_free(&pool->spare);
  pool->spare_count = 0;
}

static void bufq_init(struct bufq *q, struct bufc_pool *pool,
                      size_t chunk_size, size_t max_chunks, int opts)
{
  DEBUGASSERT(chunk_size > 0);
  DEBUGASSERT(max_chunks > 0);
  memset(q, 0, sizeof(*q));
  q->chunk_size = chunk_size;
  q->max_chunks = max_chunks;
  q->pool = pool;
  q->opts = opts;
}

void Curl_bufq_init2(struct bufq *q, size_t chunk_size, size_t max_chunks,
                     int opts)
{
  bufq_init(q, NULL, chunk_size, max_chunks, opts);
}

void Curl_bufq_init(struct bufq *q, size_t chunk_size, size_t max_chunks)
{
  bufq_init(q, NULL, chunk_size, max_chunks, BUFQ_OPT_NONE);
}

void Curl_bufq_initp(struct bufq *q, struct bufc_pool *pool,
                     size_t max_chunks, int opts)
{
  bufq_init(q, pool, pool->chunk_size, max_chunks, opts);
}

void Curl_bufq_free(struct bufq *q)
{
  chunk_list_free(&q->head);
  chunk_list_free(&q->spare);
  q->tail = NULL;
  q->chunk_count = 0;
}

void Curl_bufq_reset(struct bufq *q)
{
  struct buf_chunk *chunk;
  while(q->head) {
    chunk = q->head;
    q->head = chunk->next;
    chunk->next = q->spare;
    q->spare = chunk;
  }
  q->tail = NULL;
}

size_t Curl_bufq_len(const struct bufq *q)
{
  const struct buf_chunk *chunk = q->head;
  size_t len = 0;
  while(chunk) {
    len += chunk_len(chunk);
    chunk = chunk->next;
  }
  return len;
}

bool Curl_bufq_is_empty(const struct bufq *q)
{
  return !q->head || chunk_is_empty(q->head);
}

bool Curl_bufq_is_full(const struct bufq *q)
{
  if(!q->tail || q->spare)
    return FALSE;
  if(q->chunk_count < q->max_chunks)
    return FALSE;
  if(q->chunk_count > q->max_chunks)
    return TRUE;
  /* we have no spares and cannot make more, is the tail full? */
  return chunk_is_full(q->tail);
}

static struct buf_chunk *get_spare(struct bufq *q)
{
  struct buf_chunk *chunk = NULL;
  if(q->spare) {
    chunk = q->spare;
    q->spare = chunk->next;
    chunk_reset(chunk);
    return chunk;
  }
  if(q->chunk_count >= q->max_chunks && (!(q->opts & BUFQ_OPT_SOFT_LIMIT)))
    return NULL;
  if(q->pool) {
    if(bufcp_take(q->pool, &chunk))
      return NULL;
    ++q->chunk_count;
    return chunk;
  }
  else {
    chunk = calloc(1, sizeof(*chunk) + q->chunk_size);
    if(!chunk)
      return NULL;
    chunk->dlen = q->chunk_size;
    ++q->chunk_count;
    return chunk;
  }
}

static void prune_head(struct bufq *q)
{
  struct buf_chunk *chunk;
  while(q->head && chunk_is_empty(q->head)) {
    chunk = q->head;
    q->head = chunk->next;
    if(q->tail == chunk)
      q->tail = q->head;
    if(q->pool) {
      bufcp_put(q->pool, chunk);
      --q->chunk_count;
    }
    else if((q->chunk_count > q->max_chunks) ||
            (q->opts & BUFQ_OPT_NO_SPARES)) {
      /* SOFT_LIMIT allowed us more than max. free spares until
       * we are at max again. Or free them if we are configured
       * to not use spares. */
      free(chunk);
      --q->chunk_count;
    }
    else {
      chunk->next = q->spare;
      q->spare = chunk;
    }
  }
}

static struct buf_chunk *chunk_prev(struct buf_chunk *head,
                                    struct buf_chunk *chunk)
{
  while(head) {
    if(head == chunk)
      return NULL;
    if(head->next == chunk)
      return head;
    head = head->next;
  }
  return NULL;
}

static void prune_tail(struct bufq *q)
{
  struct buf_chunk *chunk;
  while(q->tail && chunk_is_empty(q->tail)) {
    chunk = q->tail;
    q->tail = chunk_prev(q->head, chunk);
    if(q->tail)
      q->tail->next = NULL;
    if(q->head == chunk)
      q->head = q->tail;
    if(q->pool) {
      bufcp_put(q->pool, chunk);
      --q->chunk_count;
    }
    else if((q->chunk_count > q->max_chunks) ||
            (q->opts & BUFQ_OPT_NO_SPARES)) {
      /* SOFT_LIMIT allowed us more than max. free spares until
       * we are at max again. Or free them if we are configured
       * to not use spares. */
      free(chunk);
      --q->chunk_count;
    }
    else {
      chunk->next = q->spare;
      q->spare = chunk;
    }
  }
}

static struct buf_chunk *get_non_full_tail(struct bufq *q)
{
  struct buf_chunk *chunk;
  if(q->tail && !chunk_is_full(q->tail))
    return q->tail;
  chunk = get_spare(q);
  if(chunk) {
    /* new tail, and possibly new head */
    if(q->tail) {
      q->tail->next = chunk;
      q->tail = chunk;
    }
    else {
      DEBUGASSERT(!q->head);
      q->head = q->tail = chunk;
    }
  }
  return chunk;
}

ssize_t Curl_bufq_write(struct bufq *q,
                        const unsigned char *buf, size_t len,
                        CURLcode *err)
{
  struct buf_chunk *tail;
  ssize_t nwritten = 0;
  size_t n;
  DEBUGASSERT(q->max_chunks > 0);
  while(len) {
    tail = get_non_full_tail(q);
    if(!tail) {
      if((q->chunk_count < q->max_chunks) || (q->opts & BUFQ_OPT_SOFT_LIMIT)) {
        *err = CURLE_OUT_OF_MEMORY;
        return -1;
      }
      break;
    }
    n = chunk_append(tail, buf, len);
    if(!n)
      break;
    nwritten += n;
    buf += n;
    len -= n;
  }
  if(nwritten == 0 && len) {
    *err = CURLE_AGAIN;
    return -1;
  }
  *err = CURLE_OK;
  return nwritten;
}

CURLcode Curl_bufq_cwrite(struct bufq *q,
                          const char *buf, size_t len,
                          size_t *pnwritten)
{
  ssize_t n;
  CURLcode result;
  n = Curl_bufq_write(q, (const unsigned char *)buf, len, &result);
  *pnwritten = (n < 0) ? 0 : (size_t)n;
  return result;
}

CURLcode Curl_bufq_unwrite(struct bufq *q, size_t len)
{
  while(len && q->tail) {
    len -= chunk_unwrite(q->tail, len);
    prune_tail(q);
  }
  return len ? CURLE_AGAIN : CURLE_OK;
}

ssize_t Curl_bufq_read(struct bufq *q, unsigned char *buf, size_t len,
                       CURLcode *err)
{
  ssize_t nread = 0;
  size_t n;
  *err = CURLE_OK;
  while(len && q->head) {
    n = chunk_read(q->head, buf, len);
    if(n) {
      nread += n;
      buf += n;
      len -= n;
    }
    prune_head(q);
  }
  if(nread == 0) {
    *err = CURLE_AGAIN;
    return -1;
  }
  return nread;
}

CURLcode Curl_bufq_cread(struct bufq *q, char *buf, size_t len,
                         size_t *pnread)
{
  ssize_t n;
  CURLcode result;
  n = Curl_bufq_read(q, (unsigned char *)buf, len, &result);
  *pnread = (n < 0) ? 0 : (size_t)n;
  return result;
}
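
/* Usage sketch (illustrative only; the buffer contents, chunk size and
 * chunk count are arbitrary example values): a minimal write/read round
 * trip using the char-based convenience wrappers above.
 *
 *   struct bufq q;
 *   char out[32];
 *   size_t nwritten = 0, nread = 0;
 *   CURLcode result;
 *
 *   Curl_bufq_init(&q, 16, 4);
 *   result = Curl_bufq_cwrite(&q, "hello", 5, &nwritten);
 *   if(!result)
 *     result = Curl_bufq_cread(&q, out, sizeof(out), &nread);
 *   Curl_bufq_free(&q);
 */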

bool Curl_bufq_peek(struct bufq *q,
                    const unsigned char **pbuf, size_t *plen)
{
  if(q->head && chunk_is_empty(q->head)) {
    prune_head(q);
  }
  if(q->head && !chunk_is_empty(q->head)) {
    chunk_peek(q->head, pbuf, plen);
    return TRUE;
  }
  *pbuf = NULL;
  *plen = 0;
  return FALSE;
}

bool Curl_bufq_peek_at(struct bufq *q, size_t offset,
                       const unsigned char **pbuf, size_t *plen)
{
  struct buf_chunk *c = q->head;
  size_t clen;
  while(c) {
    clen = chunk_len(c);
    if(!clen)
      break;
    if(offset >= clen) {
      offset -= clen;
      c = c->next;
      continue;
    }
    chunk_peek_at(c, offset, pbuf, plen);
    return TRUE;
  }
  *pbuf = NULL;
  *plen = 0;
  return FALSE;
}

void Curl_bufq_skip(struct bufq *q, size_t amount)
{
  size_t n;
  while(amount && q->head) {
    n = chunk_skip(q->head, amount);
    amount -= n;
    prune_head(q);
  }
}

ssize_t Curl_bufq_pass(struct bufq *q, Curl_bufq_writer *writer,
                       void *writer_ctx, CURLcode *err)
{
  const unsigned char *buf;
  size_t blen;
  ssize_t nwritten = 0;
  while(Curl_bufq_peek(q, &buf, &blen)) {
    ssize_t chunk_written;
    chunk_written = writer(writer_ctx, buf, blen, err);
    if(chunk_written < 0) {
      if(!nwritten || *err != CURLE_AGAIN) {
        /* blocked on first write or real error, fail */
        nwritten = -1;
      }
      break;
    }
    if(!chunk_written) {
      if(!nwritten) {
        /* treat as blocked */
        *err = CURLE_AGAIN;
        nwritten = -1;
      }
      break;
    }
    Curl_bufq_skip(q, (size_t)chunk_written);
    nwritten += chunk_written;
  }
  return nwritten;
}

ssize_t Curl_bufq_write_pass(struct bufq *q,
                             const unsigned char *buf, size_t len,
                             Curl_bufq_writer *writer, void *writer_ctx,
                             CURLcode *err)
{
  ssize_t nwritten = 0, n;
  *err = CURLE_OK;
  while(len) {
    if(Curl_bufq_is_full(q)) {
      /* try to make room in case we are full */
      n = Curl_bufq_pass(q, writer, writer_ctx, err);
      if(n < 0) {
        if(*err != CURLE_AGAIN) {
          /* real error, fail */
          return -1;
        }
        /* would block, bufq is full, give up */
        break;
      }
    }
    /* Add whatever is remaining now to bufq */
    n = Curl_bufq_write(q, buf, len, err);
    if(n < 0) {
      if(*err != CURLE_AGAIN) {
        /* real error, fail */
        return -1;
      }
      /* no room in bufq */
      break;
    }
    /* edge case of writer returning 0 (and len is >0),
     * break or we might enter an infinite loop here */
    if(n == 0)
      break;
    /* Maybe only part of `buf` has been added, continue to loop */
    buf += (size_t)n;
    len -= (size_t)n;
    nwritten += (size_t)n;
  }
  if(!nwritten && len) {
    *err = CURLE_AGAIN;
    return -1;
  }
  *err = CURLE_OK;
  return nwritten;
}

ssize_t Curl_bufq_sipn(struct bufq *q, size_t max_len,
                       Curl_bufq_reader *reader, void *reader_ctx,
                       CURLcode *err)
{
  struct buf_chunk *tail = NULL;
  ssize_t nread;
  *err = CURLE_AGAIN;
  tail = get_non_full_tail(q);
  if(!tail) {
    if(q->chunk_count < q->max_chunks) {
      *err = CURLE_OUT_OF_MEMORY;
      return -1;
    }
    /* full, blocked */
    *err = CURLE_AGAIN;
    return -1;
  }
  nread = chunk_slurpn(tail, max_len, reader, reader_ctx, err);
  if(nread < 0) {
    return -1;
  }
  else if(nread == 0) {
    /* eof */
    *err = CURLE_OK;
  }
  return nread;
}

/**
 * Read up to `max_len` bytes and append them to the end of the buffer
 * queue. If `max_len` is 0, no limit is imposed and the call behaves
 * exactly the same as `Curl_bufq_slurp()`.
 * Returns the total number of bytes read (may be 0) or -1 on
 * reader errors.
 * Note that even when -1 is returned, chunks may already have been read
 * and the buffer queue may have a different length than before.
 */
static ssize_t bufq_slurpn(struct bufq *q, size_t max_len,
                           Curl_bufq_reader *reader, void *reader_ctx,
                           CURLcode *err)
{
  ssize_t nread = 0, n;
  *err = CURLE_AGAIN;
  while(1) {
    n = Curl_bufq_sipn(q, max_len, reader, reader_ctx, err);
    if(n < 0) {
      if(!nread || *err != CURLE_AGAIN) {
        /* blocked on first read or real error, fail */
        nread = -1;
      }
      else
        *err = CURLE_OK;
      break;
    }
    else if(n == 0) {
      /* eof */
      *err = CURLE_OK;
      break;
    }
    nread += (size_t)n;
    if(max_len) {
      DEBUGASSERT((size_t)n <= max_len);
      max_len -= (size_t)n;
      if(!max_len)
        break;
    }
    /* give up slurping when we get fewer bytes than we asked for */
    if(q->tail && !chunk_is_full(q->tail))
      break;
  }
  return nread;
}

ssize_t Curl_bufq_slurp(struct bufq *q, Curl_bufq_reader *reader,
                        void *reader_ctx, CURLcode *err)
{
  return bufq_slurpn(q, 0, reader, reader_ctx, err);
}
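
/* Usage sketch (illustrative only): filling a bufq from a reader callback
 * via Curl_bufq_slurp(). The callback signature mirrors how `reader` is
 * invoked in chunk_slurpn() above; `my_reader` and `struct my_source` (a
 * pointer into a caller-owned byte buffer plus a remaining count) are
 * hypothetical example code. Returning 0 signals EOF to the slurp loop,
 * and CURLE_AGAIN with -1 signals "would block".
 *
 *   struct my_source {
 *     const unsigned char *data;
 *     size_t remaining;
 *   };
 *
 *   static ssize_t my_reader(void *ctx, unsigned char *buf, size_t len,
 *                            CURLcode *err)
 *   {
 *     struct my_source *src = ctx;
 *     size_t n = CURLMIN(len, src->remaining);
 *     *err = CURLE_OK;
 *     if(!n)
 *       return 0;
 *     memcpy(buf, src->data, n);
 *     src->data += n;
 *     src->remaining -= n;
 *     return (ssize_t)n;
 *   }
 *
 *   CURLcode err;
 *   ssize_t nread = Curl_bufq_slurp(&q, my_reader, &src, &err);
 */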