0715-v5.17-net-lantiq_xrx200-convert-to-build_skb.patch

From e015593573b3e3f74bd8a63c05fa92902194a354 Mon Sep 17 00:00:00 2001
From: Aleksander Jan Bajkowski <[email protected]>
Date: Tue, 4 Jan 2022 16:11:44 +0100
Subject: [PATCH 715/715] net: lantiq_xrx200: convert to build_skb

We can increase the efficiency of rx path by using buffers to receive
packets then build SKBs around them just before passing into the network
stack. In contrast, preallocating SKBs too early reduces CPU cache
efficiency.

NAT Performance results on BT Home Hub 5A (kernel 5.10.89, mtu 1500):

        Down      Up
Before  577 Mbps  648 Mbps
After   624 Mbps  695 Mbps

Signed-off-by: Aleksander Jan Bajkowski <[email protected]>
Signed-off-by: Jakub Kicinski <[email protected]>
---
 drivers/net/ethernet/lantiq_xrx200.c | 56 ++++++++++++++++++----------
 1 file changed, 36 insertions(+), 20 deletions(-)
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -63,7 +63,11 @@ struct xrx200_chan {
 
 	struct napi_struct napi;
 	struct ltq_dma_channel dma;
-	struct sk_buff *skb[LTQ_DESC_NUM];
+
+	union {
+		struct sk_buff *skb[LTQ_DESC_NUM];
+		void *rx_buff[LTQ_DESC_NUM];
+	};
 
 	struct sk_buff *skb_head;
 	struct sk_buff *skb_tail;
@@ -78,6 +82,7 @@ struct xrx200_priv {
 	struct xrx200_chan chan_rx;
 
 	u16 rx_buf_size;
+	u16 rx_skb_size;
 
 	struct net_device *net_dev;
 	struct device *dev;
@@ -115,6 +120,12 @@ static int xrx200_buffer_size(int mtu)
 	return round_up(xrx200_max_frame_len(mtu), 4 * XRX200_DMA_BURST_LEN);
 }
 
+static int xrx200_skb_size(u16 buf_size)
+{
+	return SKB_DATA_ALIGN(buf_size + NET_SKB_PAD + NET_IP_ALIGN) +
+	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+}
+
 /* drop all the packets from the DMA ring */
 static void xrx200_flush_dma(struct xrx200_chan *ch)
 {
@@ -173,30 +184,29 @@ static int xrx200_close(struct net_devic
 	return 0;
 }
 
-static int xrx200_alloc_skb(struct xrx200_chan *ch)
+static int xrx200_alloc_buf(struct xrx200_chan *ch, void *(*alloc)(unsigned int size))
 {
-	struct sk_buff *skb = ch->skb[ch->dma.desc];
+	void *buf = ch->rx_buff[ch->dma.desc];
 	struct xrx200_priv *priv = ch->priv;
 	dma_addr_t mapping;
 	int ret = 0;
 
-	ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(priv->net_dev,
-							  priv->rx_buf_size);
-	if (!ch->skb[ch->dma.desc]) {
+	ch->rx_buff[ch->dma.desc] = alloc(priv->rx_skb_size);
+	if (!ch->rx_buff[ch->dma.desc]) {
 		ret = -ENOMEM;
 		goto skip;
 	}
 
-	mapping = dma_map_single(priv->dev, ch->skb[ch->dma.desc]->data,
+	mapping = dma_map_single(priv->dev, ch->rx_buff[ch->dma.desc],
 				 priv->rx_buf_size, DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(priv->dev, mapping))) {
-		dev_kfree_skb_any(ch->skb[ch->dma.desc]);
-		ch->skb[ch->dma.desc] = skb;
+		skb_free_frag(ch->rx_buff[ch->dma.desc]);
+		ch->rx_buff[ch->dma.desc] = buf;
 		ret = -ENOMEM;
 		goto skip;
 	}
 
-	ch->dma.desc_base[ch->dma.desc].addr = mapping;
+	ch->dma.desc_base[ch->dma.desc].addr = mapping + NET_SKB_PAD + NET_IP_ALIGN;
 	/* Make sure the address is written before we give it to HW */
 	wmb();
 skip:
@@ -210,13 +220,14 @@ static int xrx200_hw_receive(struct xrx2
 {
 	struct xrx200_priv *priv = ch->priv;
 	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
-	struct sk_buff *skb = ch->skb[ch->dma.desc];
+	void *buf = ch->rx_buff[ch->dma.desc];
 	u32 ctl = desc->ctl;
 	int len = (ctl & LTQ_DMA_SIZE_MASK);
 	struct net_device *net_dev = priv->net_dev;
+	struct sk_buff *skb;
 	int ret;
 
-	ret = xrx200_alloc_skb(ch);
+	ret = xrx200_alloc_buf(ch, napi_alloc_frag);
 
 	ch->dma.desc++;
 	ch->dma.desc %= LTQ_DESC_NUM;
@@ -227,19 +238,21 @@ static int xrx200_hw_receive(struct xrx2
 		return ret;
 	}
 
+	skb = build_skb(buf, priv->rx_skb_size);
+	skb_reserve(skb, NET_SKB_PAD);
 	skb_put(skb, len);
 
 	/* add buffers to skb via skb->frag_list */
 	if (ctl & LTQ_DMA_SOP) {
 		ch->skb_head = skb;
 		ch->skb_tail = skb;
+		skb_reserve(skb, NET_IP_ALIGN);
 	} else if (ch->skb_head) {
 		if (ch->skb_head == ch->skb_tail)
 			skb_shinfo(ch->skb_tail)->frag_list = skb;
 		else
 			ch->skb_tail->next = skb;
 		ch->skb_tail = skb;
-		skb_reserve(ch->skb_tail, -NET_IP_ALIGN);
 		ch->skb_head->len += skb->len;
 		ch->skb_head->data_len += skb->len;
 		ch->skb_head->truesize += skb->truesize;
@@ -395,12 +408,13 @@ xrx200_change_mtu(struct net_device *net
 	struct xrx200_chan *ch_rx = &priv->chan_rx;
 	int old_mtu = net_dev->mtu;
 	bool running = false;
-	struct sk_buff *skb;
+	void *buff;
 	int curr_desc;
 	int ret = 0;
 
 	net_dev->mtu = new_mtu;
 	priv->rx_buf_size = xrx200_buffer_size(new_mtu);
+	priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);
 
 	if (new_mtu <= old_mtu)
 		return ret;
@@ -416,14 +430,15 @@ xrx200_change_mtu(struct net_device *net
 
 	for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
 	     ch_rx->dma.desc++) {
-		skb = ch_rx->skb[ch_rx->dma.desc];
-		ret = xrx200_alloc_skb(ch_rx);
+		buff = ch_rx->rx_buff[ch_rx->dma.desc];
+		ret = xrx200_alloc_buf(ch_rx, netdev_alloc_frag);
 		if (ret) {
 			net_dev->mtu = old_mtu;
 			priv->rx_buf_size = xrx200_buffer_size(old_mtu);
+			priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);
 			break;
 		}
-		dev_kfree_skb_any(skb);
+		skb_free_frag(buff);
 	}
 
 	ch_rx->dma.desc = curr_desc;
@@ -476,7 +491,7 @@ static int xrx200_dma_init(struct xrx200
 	ltq_dma_alloc_rx(&ch_rx->dma);
 	for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
 	     ch_rx->dma.desc++) {
-		ret = xrx200_alloc_skb(ch_rx);
+		ret = xrx200_alloc_buf(ch_rx, netdev_alloc_frag);
 		if (ret)
 			goto rx_free;
 	}
@@ -511,7 +526,7 @@ rx_ring_free:
 	/* free the allocated RX ring */
 	for (i = 0; i < LTQ_DESC_NUM; i++) {
 		if (priv->chan_rx.skb[i])
-			dev_kfree_skb_any(priv->chan_rx.skb[i]);
+			skb_free_frag(priv->chan_rx.rx_buff[i]);
 	}
 
 rx_free:
@@ -528,7 +543,7 @@ static void xrx200_hw_cleanup(struct xrx
 
 	/* free the allocated RX ring */
 	for (i = 0; i < LTQ_DESC_NUM; i++)
-		dev_kfree_skb_any(priv->chan_rx.skb[i]);
+		skb_free_frag(priv->chan_rx.rx_buff[i]);
 }
 
 static int xrx200_probe(struct platform_device *pdev)
@@ -553,6 +568,7 @@ static int xrx200_probe(struct platform_
 	net_dev->min_mtu = ETH_ZLEN;
 	net_dev->max_mtu = XRX200_DMA_DATA_LEN - xrx200_max_frame_len(0);
 	priv->rx_buf_size = xrx200_buffer_size(ETH_DATA_LEN);
+	priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);
 
 	/* load the memory ranges */
 	priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
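
The new xrx200_skb_size() helper exists because the fragment handed to build_skb() must be large enough for the headroom the driver reserves (NET_SKB_PAD + NET_IP_ALIGN), the received frame, and the struct skb_shared_info that build_skb() places at the end, while only rx_buf_size bytes are mapped for DMA and the hardware descriptor is pointed past the headroom (mapping + NET_SKB_PAD + NET_IP_ALIGN). The standalone C sketch below is not part of the patch; it re-derives that size budget for MTU 1500. The cache-line size, the resulting NET_SKB_PAD, and the stand-in for sizeof(struct skb_shared_info) are assumed values (they depend on the architecture and kernel config), and the helper names are local to the sketch.

/*
 * Userspace sketch of the RX size budget used by the patch above.
 * Not kernel code: the "assumed" constants are illustrative only.
 */
#include <stdio.h>

#define VLAN_ETH_HLEN         18  /* Ethernet header plus one VLAN tag */
#define ETH_FCS_LEN            4  /* trailing frame checksum */
#define XRX200_DMA_BURST_LEN   8  /* burst length the driver rounds the DMA area to */

/* Assumed, config/arch dependent values -- not taken from the patch. */
#define SMP_CACHE_BYTES       32  /* assumed L1 cache-line size */
#define NET_SKB_PAD (SMP_CACHE_BYTES > 32 ? SMP_CACHE_BYTES : 32) /* max(32, L1 line) */
#define NET_IP_ALIGN           2  /* keeps the IP header 4-byte aligned */
#define SKB_SHINFO_SIZE      320  /* stand-in for sizeof(struct skb_shared_info) */

static unsigned int round_up_to(unsigned int x, unsigned int a)
{
	return (x + a - 1) / a * a;
}

/* Mirrors SKB_DATA_ALIGN(): round up to a cache-line multiple. */
static unsigned int skb_data_align(unsigned int x)
{
	return round_up_to(x, SMP_CACHE_BYTES);
}

/* Mirrors xrx200_buffer_size(): max frame length rounded to a DMA burst multiple. */
static unsigned int buffer_size(unsigned int mtu)
{
	return round_up_to(VLAN_ETH_HLEN + mtu + ETH_FCS_LEN,
			   4 * XRX200_DMA_BURST_LEN);
}

/* Mirrors xrx200_skb_size(): headroom + DMA area, then room for skb_shared_info. */
static unsigned int skb_size(unsigned int buf_size)
{
	return skb_data_align(buf_size + NET_SKB_PAD + NET_IP_ALIGN) +
	       skb_data_align(SKB_SHINFO_SIZE);
}

int main(void)
{
	unsigned int mtu = 1500;
	unsigned int buf = buffer_size(mtu);

	printf("mtu %u: rx_buf_size (bytes mapped for DMA)        = %u\n", mtu, buf);
	printf("mtu %u: rx_skb_size (fragment given to build_skb) = %u\n",
	       mtu, skb_size(buf));
	return 0;
}

Compiled with a plain C compiler, this prints a 1536-byte DMA area and a 1920-byte per-descriptor fragment under those assumptions; the exact numbers on real hardware depend on the kernel configuration.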