662-remove_pfifo_fast.patch

From b531d492d5ef1cf9dba0f4888eb5fd8624a6d762 Mon Sep 17 00:00:00 2001
From: Felix Fietkau <[email protected]>
Date: Fri, 7 Jul 2017 17:23:42 +0200
Subject: net: sched: switch default qdisc from pfifo_fast to fq_codel and remove pfifo_fast

Signed-off-by: Felix Fietkau <[email protected]>
---
 net/sched/sch_generic.c | 140 ------------------------------------------------
 1 file changed, 140 deletions(-)

--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -620,230 +620,6 @@ struct Qdisc_ops noqueue_qdisc_ops __rea
 	.owner		=	THIS_MODULE,
 };
 
-static const u8 prio2band[TC_PRIO_MAX + 1] = {
-	1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1
-};
-
-/* 3-band FIFO queue: old style, but should be a bit faster than
-   generic prio+fifo combination.
- */
-
-#define PFIFO_FAST_BANDS 3
-
-/*
- * Private data for a pfifo_fast scheduler containing:
- *	- rings for priority bands
- */
-struct pfifo_fast_priv {
-	struct skb_array q[PFIFO_FAST_BANDS];
-};
-
-static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
-					  int band)
-{
-	return &priv->q[band];
-}
-
-static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
-			      struct sk_buff **to_free)
-{
-	int band = prio2band[skb->priority & TC_PRIO_MAX];
-	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
-	struct skb_array *q = band2list(priv, band);
-	unsigned int pkt_len = qdisc_pkt_len(skb);
-	int err;
-
-	err = skb_array_produce(q, skb);
-
-	if (unlikely(err)) {
-		if (qdisc_is_percpu_stats(qdisc))
-			return qdisc_drop_cpu(skb, qdisc, to_free);
-		else
-			return qdisc_drop(skb, qdisc, to_free);
-	}
-
-	qdisc_update_stats_at_enqueue(qdisc, pkt_len);
-	return NET_XMIT_SUCCESS;
-}
-
-static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
-{
-	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
-	struct sk_buff *skb = NULL;
-	bool need_retry = true;
-	int band;
-
-retry:
-	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
-		struct skb_array *q = band2list(priv, band);
-
-		if (__skb_array_empty(q))
-			continue;
-
-		skb = __skb_array_consume(q);
-	}
-	if (likely(skb)) {
-		qdisc_update_stats_at_dequeue(qdisc, skb);
-	} else if (need_retry &&
-		   test_bit(__QDISC_STATE_MISSED, &qdisc->state)) {
-		/* Delay clearing the STATE_MISSED here to reduce
-		 * the overhead of the second spin_trylock() in
-		 * qdisc_run_begin() and __netif_schedule() calling
-		 * in qdisc_run_end().
-		 */
-		clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
-
-		/* Make sure dequeuing happens after clearing
-		 * STATE_MISSED.
-		 */
-		smp_mb__after_atomic();
-
-		need_retry = false;
-
-		goto retry;
-	} else {
-		WRITE_ONCE(qdisc->empty, true);
-	}
-
-	return skb;
-}
-
-static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
-{
-	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
-	struct sk_buff *skb = NULL;
-	int band;
-
-	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
-		struct skb_array *q = band2list(priv, band);
-
-		skb = __skb_array_peek(q);
-	}
-
-	return skb;
-}
-
-static void pfifo_fast_reset(struct Qdisc *qdisc)
-{
-	int i, band;
-	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
-
-	for (band = 0; band < PFIFO_FAST_BANDS; band++) {
-		struct skb_array *q = band2list(priv, band);
-		struct sk_buff *skb;
-
-		/* NULL ring is possible if destroy path is due to a failed
-		 * skb_array_init() in pfifo_fast_init() case.
-		 */
-		if (!q->ring.queue)
-			continue;
-
-		while ((skb = __skb_array_consume(q)) != NULL)
-			kfree_skb(skb);
-	}
-
-	if (qdisc_is_percpu_stats(qdisc)) {
-		for_each_possible_cpu(i) {
-			struct gnet_stats_queue *q;
-
-			q = per_cpu_ptr(qdisc->cpu_qstats, i);
-			q->backlog = 0;
-			q->qlen = 0;
-		}
-	}
-}
-
-static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
-{
-	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
-
-	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
-	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
-		goto nla_put_failure;
-	return skb->len;
-
-nla_put_failure:
-	return -1;
-}
-
-static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
-			   struct netlink_ext_ack *extack)
-{
-	unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
-	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
-	int prio;
-
-	/* guard against zero length rings */
-	if (!qlen)
-		return -EINVAL;
-
-	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
-		struct skb_array *q = band2list(priv, prio);
-		int err;
-
-		err = skb_array_init(q, qlen, GFP_KERNEL);
-		if (err)
-			return -ENOMEM;
-	}
-
-	/* Can by-pass the queue discipline */
-	qdisc->flags |= TCQ_F_CAN_BYPASS;
-	return 0;
-}
-
-static void pfifo_fast_destroy(struct Qdisc *sch)
-{
-	struct pfifo_fast_priv *priv = qdisc_priv(sch);
-	int prio;
-
-	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
-		struct skb_array *q = band2list(priv, prio);
-
-		/* NULL ring is possible if destroy path is due to a failed
-		 * skb_array_init() in pfifo_fast_init() case.
-		 */
-		if (!q->ring.queue)
-			continue;
-		/* Destroy ring but no need to kfree_skb because a call to
-		 * pfifo_fast_reset() has already done that work.
-		 */
-		ptr_ring_cleanup(&q->ring, NULL);
-	}
-}
-
-static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
-					  unsigned int new_len)
-{
-	struct pfifo_fast_priv *priv = qdisc_priv(sch);
-	struct skb_array *bands[PFIFO_FAST_BANDS];
-	int prio;
-
-	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
-		struct skb_array *q = band2list(priv, prio);
-
-		bands[prio] = q;
-	}
-
-	return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len,
-					 GFP_KERNEL);
-}
-
-struct Qdisc_ops pfifo_fast_ops __read_mostly = {
-	.id		=	"pfifo_fast",
-	.priv_size	=	sizeof(struct pfifo_fast_priv),
-	.enqueue	=	pfifo_fast_enqueue,
-	.dequeue	=	pfifo_fast_dequeue,
-	.peek		=	pfifo_fast_peek,
-	.init		=	pfifo_fast_init,
-	.destroy	=	pfifo_fast_destroy,
-	.reset		=	pfifo_fast_reset,
-	.dump		=	pfifo_fast_dump,
-	.change_tx_queue_len =	pfifo_fast_change_tx_queue_len,
-	.owner		=	THIS_MODULE,
-	.static_flags	=	TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
-};
-EXPORT_SYMBOL(pfifo_fast_ops);
-
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 			  const struct Qdisc_ops *ops,
 			  struct netlink_ext_ack *extack)