004-net-mvneta-introduce-page-pool-API-for-sw-buffer-man.patch

From 568a3fa24a95476d40afe3f8616bafb543bc4182 Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <[email protected]>
Date: Sat, 19 Oct 2019 10:13:22 +0200
Subject: [PATCH 2/7] net: mvneta: introduce page pool API for sw buffer
 manager

Use the page_pool API for allocations and DMA handling instead of
__dev_alloc_page()/dma_map_page() and __free_page()/dma_unmap_page().
Pages are unmapped using page_pool_release_page() before packets
go into the network stack.

The page_pool API offers buffer recycling capabilities for XDP but
allocates one page per packet, unless the driver splits and manages
the allocated page.

This is a preliminary patch to add XDP support to the mvneta driver.

Signed-off-by: Ilias Apalodimas <[email protected]>
Signed-off-by: Jesper Dangaard Brouer <[email protected]>
Signed-off-by: Lorenzo Bianconi <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
---
 drivers/net/ethernet/marvell/Kconfig  |  1 +
 drivers/net/ethernet/marvell/mvneta.c | 83 +++++++++++++++++++++------
 2 files changed, 65 insertions(+), 19 deletions(-)
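
For readers new to page_pool, the buffer life cycle the driver adopts
below can be sketched as follows. The fragment is illustrative only:
page_pool_demo() and its "dev" argument are placeholders rather than
code from this patch; it simply strings together the same calls the
driver now makes (page_pool_create(), page_pool_alloc_pages(),
page_pool_get_dma_addr(), page_pool_release_page(),
page_pool_put_page(), page_pool_destroy()).

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <net/page_pool.h>

static int page_pool_demo(struct device *dev)
{
	struct page_pool_params params = {
		.order = 0,			/* one page per buffer */
		.flags = PP_FLAG_DMA_MAP,	/* pool maps pages for DMA */
		.pool_size = 128,		/* placeholder ring size */
		.nid = NUMA_NO_NODE,
		.dev = dev,			/* device doing the DMA */
		.dma_dir = DMA_FROM_DEVICE,
	};
	struct page_pool *pool;
	struct page *page;

	pool = page_pool_create(&params);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	/* refill path: the page comes back already DMA-mapped */
	page = page_pool_alloc_pages(pool, GFP_ATOMIC | __GFP_NOWARN);
	if (page) {
		dma_addr_t dma = page_pool_get_dma_addr(page);

		(void)dma;	/* would be written into an RX descriptor */

		/* when the frame goes to the stack: unmap, forget page */
		page_pool_release_page(pool, page);
		/* on a drop path, recycle it to the pool instead:
		 * page_pool_put_page(pool, page, false);
		 */
	}

	page_pool_destroy(pool);	/* all pages must be back by now */
	return 0;
}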
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -61,6 +61,7 @@ config MVNETA
 	depends on ARCH_MVEBU || COMPILE_TEST
 	select MVMDIO
 	select PHYLINK
+	select PAGE_POOL
 	---help---
 	  This driver supports the network interface units in the
 	  Marvell ARMADA XP, ARMADA 370, ARMADA 38x and
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -37,6 +37,7 @@
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/tso.h>
+#include <net/page_pool.h>
 
 /* Registers */
 #define MVNETA_RXQ_CONFIG_REG(q)                (0x1400 + ((q) << 2))
@@ -607,6 +608,10 @@ struct mvneta_rx_queue {
 	u32 pkts_coal;
 	u32 time_coal;
 
+	/* page_pool */
+	struct page_pool *page_pool;
+	struct xdp_rxq_info xdp_rxq;
+
 	/* Virtual address of the RX buffer */
 	void **buf_virt_addr;
@@ -1825,23 +1830,21 @@ static int mvneta_rx_refill(struct mvnet
 			    struct mvneta_rx_queue *rxq,
 			    gfp_t gfp_mask)
 {
+	enum dma_data_direction dma_dir;
 	dma_addr_t phys_addr;
 	struct page *page;
 
-	page = __dev_alloc_page(gfp_mask);
+	page = page_pool_alloc_pages(rxq->page_pool,
+				     gfp_mask | __GFP_NOWARN);
 	if (!page)
 		return -ENOMEM;
 
-	/* map page for use */
-	phys_addr = dma_map_page(pp->dev->dev.parent, page, 0, PAGE_SIZE,
-				 DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
-		__free_page(page);
-		return -ENOMEM;
-	}
-
-	phys_addr += pp->rx_offset_correction;
+	phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction;
+	dma_dir = page_pool_get_dma_dir(rxq->page_pool);
+	dma_sync_single_for_device(pp->dev->dev.parent, phys_addr,
+				   PAGE_SIZE, dma_dir);
 	mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
+
 	return 0;
 }
@@ -1907,10 +1910,12 @@ static void mvneta_rxq_drop_pkts(struct
 		if (!data || !(rx_desc->buf_phys_addr))
 			continue;
 
-		dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr,
-			       PAGE_SIZE, DMA_FROM_DEVICE);
-		__free_page(data);
+		page_pool_put_page(rxq->page_pool, data, false);
 	}
+	if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
+		xdp_rxq_info_unreg(&rxq->xdp_rxq);
+	page_pool_destroy(rxq->page_pool);
+	rxq->page_pool = NULL;
 }
 
 static void
@@ -2047,8 +2052,7 @@ static int mvneta_rx_swbm(struct napi_st
 			skb_add_rx_frag(rxq->skb, frag_num, page,
 					frag_offset, frag_size,
 					PAGE_SIZE);
-			dma_unmap_page(dev->dev.parent, phys_addr,
-				       PAGE_SIZE, DMA_FROM_DEVICE);
+			page_pool_release_page(rxq->page_pool, page);
 			rxq->left_size -= frag_size;
 		}
 	} else {
@@ -2078,9 +2082,7 @@ static int mvneta_rx_swbm(struct napi_st
 					frag_offset, frag_size,
 					PAGE_SIZE);
 
-			dma_unmap_page(dev->dev.parent, phys_addr,
-				       PAGE_SIZE, DMA_FROM_DEVICE);
-
+			page_pool_release_page(rxq->page_pool, page);
 			rxq->left_size -= frag_size;
 		}
 	} /* Middle or Last descriptor */
@@ -2847,11 +2849,54 @@ static int mvneta_poll(struct napi_struc
 	return rx_done;
 }
 
+static int mvneta_create_page_pool(struct mvneta_port *pp,
+				   struct mvneta_rx_queue *rxq, int size)
+{
+	struct page_pool_params pp_params = {
+		.order = 0,
+		.flags = PP_FLAG_DMA_MAP,
+		.pool_size = size,
+		.nid = cpu_to_node(0),
+		.dev = pp->dev->dev.parent,
+		.dma_dir = DMA_FROM_DEVICE,
+	};
+	int err;
+
+	rxq->page_pool = page_pool_create(&pp_params);
+	if (IS_ERR(rxq->page_pool)) {
+		err = PTR_ERR(rxq->page_pool);
+		rxq->page_pool = NULL;
+		return err;
+	}
+
+	err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id);
+	if (err < 0)
+		goto err_free_pp;
+
+	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
+					 rxq->page_pool);
+	if (err)
+		goto err_unregister_rxq;
+
+	return 0;
+
+err_unregister_rxq:
+	xdp_rxq_info_unreg(&rxq->xdp_rxq);
+err_free_pp:
+	page_pool_destroy(rxq->page_pool);
+	rxq->page_pool = NULL;
+	return err;
+}
+
 /* Handle rxq fill: allocates rxq skbs; called when initializing a port */
 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 			   int num)
 {
-	int i;
+	int i, err;
+
+	err = mvneta_create_page_pool(pp, rxq, num);
+	if (err < 0)
+		return err;
 
 	for (i = 0; i < num; i++) {
 		memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));