950-0361-xhci-refactor-out-TRBS_PER_SEGMENT-define-in-runtime.patch 9.2 KB

  1. From 11527dad9862ba7e53654943fdacc3ffdad00ae2 Mon Sep 17 00:00:00 2001
  2. From: Jonathan Bell <[email protected]>
  3. Date: Mon, 13 Dec 2021 15:05:56 +0000
  4. Subject: [PATCH] xhci: refactor out TRBS_PER_SEGMENT define in runtime
  5. code
  6. In anticipation of adjusting the number of utilised TRBs in a ring
  7. segment, add trbs_per_seg to struct xhci_ring and use this instead
  8. of a compile-time define.
  9. Signed-off-by: Jonathan Bell <[email protected]>
  10. ---
  11. drivers/usb/host/xhci-mem.c | 48 +++++++++++++++++++-----------------
  12. drivers/usb/host/xhci-ring.c | 20 +++++++++------
  13. drivers/usb/host/xhci.c | 6 +++---
  14. drivers/usb/host/xhci.h | 1 +
  15. 4 files changed, 42 insertions(+), 33 deletions(-)
  16. --- a/drivers/usb/host/xhci-mem.c
  17. +++ b/drivers/usb/host/xhci-mem.c
  18. @@ -98,6 +98,7 @@ static void xhci_free_segments_for_ring(
  19. */
  20. static void xhci_link_segments(struct xhci_segment *prev,
  21. struct xhci_segment *next,
  22. + unsigned int trbs_per_seg,
  23. enum xhci_ring_type type, bool chain_links)
  24. {
  25. u32 val;
  26. @@ -106,16 +107,16 @@ static void xhci_link_segments(struct xh
  27. return;
  28. prev->next = next;
  29. if (type != TYPE_EVENT) {
  30. - prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
  31. + prev->trbs[trbs_per_seg - 1].link.segment_ptr =
  32. cpu_to_le64(next->dma);
  33. /* Set the last TRB in the segment to have a TRB type ID of Link TRB */
  34. - val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
  35. + val = le32_to_cpu(prev->trbs[trbs_per_seg - 1].link.control);
  36. val &= ~TRB_TYPE_BITMASK;
  37. val |= TRB_TYPE(TRB_LINK);
  38. if (chain_links)
  39. val |= TRB_CHAIN;
  40. - prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
  41. + prev->trbs[trbs_per_seg - 1].link.control = cpu_to_le32(val);
  42. }
  43. }
  44. @@ -139,15 +140,17 @@ static void xhci_link_rings(struct xhci_
  45. (xhci->quirks & XHCI_AMD_0x96_HOST)));
  46. next = ring->enq_seg->next;
  47. - xhci_link_segments(ring->enq_seg, first, ring->type, chain_links);
  48. - xhci_link_segments(last, next, ring->type, chain_links);
  49. + xhci_link_segments(ring->enq_seg, first, ring->trbs_per_seg,
  50. + ring->type, chain_links);
  51. + xhci_link_segments(last, next, ring->trbs_per_seg,
  52. + ring->type, chain_links);
  53. ring->num_segs += num_segs;
  54. - ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
  55. + ring->num_trbs_free += (ring->trbs_per_seg - 1) * num_segs;
  56. if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
  57. - ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
  58. + ring->last_seg->trbs[ring->trbs_per_seg - 1].link.control
  59. &= ~cpu_to_le32(LINK_TOGGLE);
  60. - last->trbs[TRBS_PER_SEGMENT-1].link.control
  61. + last->trbs[ring->trbs_per_seg - 1].link.control
  62. |= cpu_to_le32(LINK_TOGGLE);
  63. ring->last_seg = last;
  64. }
  65. @@ -314,14 +317,15 @@ void xhci_initialize_ring_info(struct xh
  66. * Each segment has a link TRB, and leave an extra TRB for SW
  67. * accounting purpose
  68. */
  69. - ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
  70. + ring->num_trbs_free = ring->num_segs * (ring->trbs_per_seg - 1) - 1;
  71. }
  72. /* Allocate segments and link them for a ring */
  73. static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
  74. struct xhci_segment **first, struct xhci_segment **last,
  75. - unsigned int num_segs, unsigned int cycle_state,
  76. - enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
  77. + unsigned int num_segs, unsigned int trbs_per_seg,
  78. + unsigned int cycle_state, enum xhci_ring_type type,
  79. + unsigned int max_packet, gfp_t flags)
  80. {
  81. struct xhci_segment *prev;
  82. bool chain_links;
  83. @@ -350,12 +354,12 @@ static int xhci_alloc_segments_for_ring(
  84. }
  85. return -ENOMEM;
  86. }
  87. - xhci_link_segments(prev, next, type, chain_links);
  88. + xhci_link_segments(prev, next, trbs_per_seg, type, chain_links);
  89. prev = next;
  90. num_segs--;
  91. }
  92. - xhci_link_segments(prev, *first, type, chain_links);
  93. + xhci_link_segments(prev, *first, trbs_per_seg, type, chain_links);
  94. *last = prev;
  95. return 0;
  96. @@ -387,16 +391,17 @@ struct xhci_ring *xhci_ring_alloc(struct
  97. if (num_segs == 0)
  98. return ring;
  99. + ring->trbs_per_seg = TRBS_PER_SEGMENT;
  100. ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
  101. - &ring->last_seg, num_segs, cycle_state, type,
  102. - max_packet, flags);
  103. + &ring->last_seg, num_segs, ring->trbs_per_seg,
  104. + cycle_state, type, max_packet, flags);
  105. if (ret)
  106. goto fail;
  107. /* Only event ring does not use link TRB */
  108. if (type != TYPE_EVENT) {
  109. /* See section 4.9.2.1 and 6.4.4.1 */
  110. - ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
  111. + ring->last_seg->trbs[ring->trbs_per_seg - 1].link.control |=
  112. cpu_to_le32(LINK_TOGGLE);
  113. }
  114. xhci_initialize_ring_info(ring, cycle_state);
  115. @@ -429,15 +434,14 @@ int xhci_ring_expansion(struct xhci_hcd
  116. unsigned int num_segs_needed;
  117. int ret;
  118. - num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
  119. - (TRBS_PER_SEGMENT - 1);
  120. -
  121. + num_segs_needed = (num_trbs + (ring->trbs_per_seg - 1) - 1) /
  122. + (ring->trbs_per_seg - 1);
  123. /* Allocate number of segments we needed, or double the ring size */
  124. num_segs = max(ring->num_segs, num_segs_needed);
  125. ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
  126. - num_segs, ring->cycle_state, ring->type,
  127. - ring->bounce_buf_len, flags);
  128. + num_segs, ring->trbs_per_seg, ring->cycle_state,
  129. + ring->type, ring->bounce_buf_len, flags);
  130. if (ret)
  131. return -ENOMEM;
  132. @@ -1811,7 +1815,7 @@ int xhci_alloc_erst(struct xhci_hcd *xhc
  133. for (val = 0; val < evt_ring->num_segs; val++) {
  134. entry = &erst->entries[val];
  135. entry->seg_addr = cpu_to_le64(seg->dma);
  136. - entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
  137. + entry->seg_size = cpu_to_le32(evt_ring->trbs_per_seg);
  138. entry->rsvd = 0;
  139. seg = seg->next;
  140. }
  141. --- a/drivers/usb/host/xhci-ring.c
  142. +++ b/drivers/usb/host/xhci-ring.c
  143. @@ -90,15 +90,16 @@ static bool trb_is_link(union xhci_trb *
  144. return TRB_TYPE_LINK_LE32(trb->link.control);
  145. }
  146. -static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
  147. +static bool last_trb_on_seg(struct xhci_segment *seg,
  148. + unsigned int trbs_per_seg, union xhci_trb *trb)
  149. {
  150. - return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
  151. + return trb == &seg->trbs[trbs_per_seg - 1];
  152. }
  153. static bool last_trb_on_ring(struct xhci_ring *ring,
  154. struct xhci_segment *seg, union xhci_trb *trb)
  155. {
  156. - return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
  157. + return last_trb_on_seg(seg, ring->trbs_per_seg, trb) && (seg->next == ring->first_seg);
  158. }
  159. static bool link_trb_toggles_cycle(union xhci_trb *trb)
  160. @@ -161,7 +162,8 @@ void inc_deq(struct xhci_hcd *xhci, stru
  161. /* event ring doesn't have link trbs, check for last trb */
  162. if (ring->type == TYPE_EVENT) {
  163. - if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
  164. + if (!last_trb_on_seg(ring->deq_seg, ring->trbs_per_seg,
  165. + ring->dequeue)) {
  166. ring->dequeue++;
  167. goto out;
  168. }
  169. @@ -174,7 +176,8 @@ void inc_deq(struct xhci_hcd *xhci, stru
  170. /* All other rings have link trbs */
  171. if (!trb_is_link(ring->dequeue)) {
  172. - if (last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
  173. + if (last_trb_on_seg(ring->deq_seg, ring->trbs_per_seg,
  174. + ring->dequeue)) {
  175. xhci_warn(xhci, "Missing link TRB at end of segment\n");
  176. } else {
  177. ring->dequeue++;
  178. @@ -225,7 +228,7 @@ static void inc_enq(struct xhci_hcd *xhc
  179. if (!trb_is_link(ring->enqueue))
  180. ring->num_trbs_free--;
  181. - if (last_trb_on_seg(ring->enq_seg, ring->enqueue)) {
  182. + if (last_trb_on_seg(ring->enq_seg, ring->trbs_per_seg, ring->enqueue)) {
  183. xhci_err(xhci, "Tried to move enqueue past ring segment\n");
  184. return;
  185. }
  186. @@ -3150,7 +3153,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd
  187. * that clears the EHB.
  188. */
  189. while (xhci_handle_event(xhci) > 0) {
  190. - if (event_loop++ < TRBS_PER_SEGMENT / 2)
  191. + if (event_loop++ < xhci->event_ring->trbs_per_seg / 2)
  192. continue;
  193. xhci_update_erst_dequeue(xhci, event_ring_deq);
  194. event_ring_deq = xhci->event_ring->dequeue;
  195. @@ -3292,7 +3295,8 @@ static int prepare_ring(struct xhci_hcd
  196. }
  197. }
  198. - if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue)) {
  199. + if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->trbs_per_seg,
  200. + ep_ring->enqueue)) {
  201. xhci_warn(xhci, "Missing link TRB at end of ring segment\n");
  202. return -EINVAL;
  203. }
  204. --- a/drivers/usb/host/xhci.c
  205. +++ b/drivers/usb/host/xhci.c
  206. @@ -895,8 +895,8 @@ static void xhci_clear_command_ring(stru
  207. seg = ring->deq_seg;
  208. do {
  209. memset(seg->trbs, 0,
  210. - sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
  211. - seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
  212. + sizeof(union xhci_trb) * (ring->trbs_per_seg - 1));
  213. + seg->trbs[ring->trbs_per_seg - 1].link.control &=
  214. cpu_to_le32(~TRB_CYCLE);
  215. seg = seg->next;
  216. } while (seg != ring->deq_seg);
  217. @@ -907,7 +907,7 @@ static void xhci_clear_command_ring(stru
  218. ring->enq_seg = ring->deq_seg;
  219. ring->enqueue = ring->dequeue;
  220. - ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
  221. + ring->num_trbs_free = ring->num_segs * (ring->trbs_per_seg - 1) - 1;
  222. /*
  223. * Ring is now zeroed, so the HW should look for change of ownership
  224. * when the cycle bit is set to 1.
  225. --- a/drivers/usb/host/xhci.h
  226. +++ b/drivers/usb/host/xhci.h
  227. @@ -1634,6 +1634,7 @@ struct xhci_ring {
  228. unsigned int num_trbs_free;
  229. unsigned int num_trbs_free_temp;
  230. unsigned int bounce_buf_len;
  231. + unsigned int trbs_per_seg;
  232. enum xhci_ring_type type;
  233. bool last_td_was_short;
  234. struct radix_tree_root *trb_address_map;