702-v5.19-00-net-ethernet-mtk_eth_soc-add-support-for-coherent-DM.patch

From: Felix Fietkau <[email protected]>
Date: Sat, 5 Feb 2022 17:59:07 +0100
Subject: [PATCH] net: ethernet: mtk_eth_soc: add support for coherent
 DMA

It improves performance by eliminating the need for a cache flush on rx and
tx.

In preparation for supporting WED (Wireless Ethernet Dispatch), also add a
function for disabling coherent DMA at runtime.

Signed-off-by: Felix Fietkau <[email protected]>
---
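Illustration (not part of the patch): the commit message says the new
mtk_eth_set_dma_device() helper lets a consumer switch the DMA device, and
thereby disable coherent DMA, at runtime. The sketch below shows how a later
WED driver might call it. Only the helper's signature comes from this patch;
the mtk_wed_hw structure, the example_wed_attach() function and the header
include are hypothetical names used purely for illustration.

#include <linux/platform_device.h>
#include "mtk_eth_soc.h"	/* assumed reachable; provides struct mtk_eth */

/* Hypothetical container tying a WED platform device to the ethernet
 * instance whose DMA mappings it should take over.
 */
struct mtk_wed_hw {
	struct platform_device *pdev;	/* WED device, possibly non-coherent */
	struct mtk_eth *eth;		/* ethernet driver state */
};

static void example_wed_attach(struct mtk_wed_hw *hw)
{
	/* mtk_eth_set_dma_device() closes any running netdevs, swaps
	 * eth->dma_dev and re-opens them, so rx/tx rings are re-allocated
	 * and re-mapped against the new device. If that device's DT node
	 * lacks "dma-coherent", this is the runtime path for disabling
	 * coherent DMA described in the commit message.
	 */
	mtk_eth_set_dma_device(hw->eth, &hw->pdev->dev);
}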
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -9,6 +9,7 @@
 #include <linux/of_device.h>
 #include <linux/of_mdio.h>
 #include <linux/of_net.h>
+#include <linux/of_address.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
 #include <linux/clk.h>
@@ -850,7 +851,7 @@ static int mtk_init_fq_dma(struct mtk_et
 	dma_addr_t dma_addr;
 	int i;
-	eth->scratch_ring = dma_alloc_coherent(eth->dev,
+	eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
 			cnt * sizeof(struct mtk_tx_dma),
 			&eth->phy_scratch_ring,
 			GFP_ATOMIC);
@@ -862,10 +863,10 @@ static int mtk_init_fq_dma(struct mtk_et
 	if (unlikely(!eth->scratch_head))
 		return -ENOMEM;
-	dma_addr = dma_map_single(eth->dev,
+	dma_addr = dma_map_single(eth->dma_dev,
 			eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
 			DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+	if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
 		return -ENOMEM;
 	phy_ring_tail = eth->phy_scratch_ring +
@@ -919,26 +920,26 @@ static void mtk_tx_unmap(struct mtk_eth
 {
 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
 		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
-			dma_unmap_single(eth->dev,
+			dma_unmap_single(eth->dma_dev,
 					dma_unmap_addr(tx_buf, dma_addr0),
 					dma_unmap_len(tx_buf, dma_len0),
 					DMA_TO_DEVICE);
 		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
-			dma_unmap_page(eth->dev,
+			dma_unmap_page(eth->dma_dev,
 					dma_unmap_addr(tx_buf, dma_addr0),
 					dma_unmap_len(tx_buf, dma_len0),
 					DMA_TO_DEVICE);
 		}
 	} else {
 		if (dma_unmap_len(tx_buf, dma_len0)) {
-			dma_unmap_page(eth->dev,
+			dma_unmap_page(eth->dma_dev,
 					dma_unmap_addr(tx_buf, dma_addr0),
 					dma_unmap_len(tx_buf, dma_len0),
 					DMA_TO_DEVICE);
 		}
 		if (dma_unmap_len(tx_buf, dma_len1)) {
-			dma_unmap_page(eth->dev,
+			dma_unmap_page(eth->dma_dev,
 					dma_unmap_addr(tx_buf, dma_addr1),
 					dma_unmap_len(tx_buf, dma_len1),
 					DMA_TO_DEVICE);
@@ -1016,9 +1017,9 @@ static int mtk_tx_map(struct sk_buff *sk
 	if (skb_vlan_tag_present(skb))
 		txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
-	mapped_addr = dma_map_single(eth->dev, skb->data,
+	mapped_addr = dma_map_single(eth->dma_dev, skb->data,
 			skb_headlen(skb), DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
+	if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr)))
 		return -ENOMEM;
 	WRITE_ONCE(itxd->txd1, mapped_addr);
@@ -1057,10 +1058,10 @@ static int mtk_tx_map(struct sk_buff *sk
 		frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
-		mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
+		mapped_addr = skb_frag_dma_map(eth->dma_dev, frag, offset,
 				frag_map_size,
 				DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
+		if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr)))
 			goto err_dma;
 		if (i == nr_frags - 1 &&
@@ -1341,18 +1342,18 @@ static int mtk_poll_rx(struct napi_struc
 			netdev->stats.rx_dropped++;
 			goto release_desc;
 		}
-		dma_addr = dma_map_single(eth->dev,
+		dma_addr = dma_map_single(eth->dma_dev,
 				new_data + NET_SKB_PAD +
 				eth->ip_align,
 				ring->buf_size,
 				DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
+		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
 			skb_free_frag(new_data);
 			netdev->stats.rx_dropped++;
 			goto release_desc;
 		}
-		dma_unmap_single(eth->dev, trxd.rxd1,
+		dma_unmap_single(eth->dma_dev, trxd.rxd1,
 				ring->buf_size, DMA_FROM_DEVICE);
 		/* receive data */
@@ -1625,7 +1626,7 @@ static int mtk_tx_alloc(struct mtk_eth *
 	if (!ring->buf)
 		goto no_tx_mem;
-	ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+	ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
 			&ring->phys, GFP_ATOMIC);
 	if (!ring->dma)
 		goto no_tx_mem;
@@ -1643,7 +1644,7 @@ static int mtk_tx_alloc(struct mtk_eth *
 	 * descriptors in ring->dma_pdma.
 	 */
 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
-		ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+		ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
 				&ring->phys_pdma,
 				GFP_ATOMIC);
 		if (!ring->dma_pdma)
@@ -1702,7 +1703,7 @@ static void mtk_tx_clean(struct mtk_eth
 	}
 	if (ring->dma) {
-		dma_free_coherent(eth->dev,
+		dma_free_coherent(eth->dma_dev,
 				MTK_DMA_SIZE * sizeof(*ring->dma),
 				ring->dma,
 				ring->phys);
@@ -1710,7 +1711,7 @@ static void mtk_tx_clean(struct mtk_eth
 	}
 	if (ring->dma_pdma) {
-		dma_free_coherent(eth->dev,
+		dma_free_coherent(eth->dma_dev,
 				MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
 				ring->dma_pdma,
 				ring->phys_pdma);
@@ -1758,18 +1759,18 @@ static int mtk_rx_alloc(struct mtk_eth *
 			return -ENOMEM;
 	}
-	ring->dma = dma_alloc_coherent(eth->dev,
+	ring->dma = dma_alloc_coherent(eth->dma_dev,
 			rx_dma_size * sizeof(*ring->dma),
 			&ring->phys, GFP_ATOMIC);
 	if (!ring->dma)
 		return -ENOMEM;
 	for (i = 0; i < rx_dma_size; i++) {
-		dma_addr_t dma_addr = dma_map_single(eth->dev,
+		dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
 				ring->data[i] + NET_SKB_PAD + eth->ip_align,
 				ring->buf_size,
 				DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
 			return -ENOMEM;
 		ring->dma[i].rxd1 = (unsigned int)dma_addr;
@@ -1805,7 +1806,7 @@ static void mtk_rx_clean(struct mtk_eth
 			continue;
 		if (!ring->dma[i].rxd1)
 			continue;
-		dma_unmap_single(eth->dev,
+		dma_unmap_single(eth->dma_dev,
 				ring->dma[i].rxd1,
 				ring->buf_size,
 				DMA_FROM_DEVICE);
@@ -1816,7 +1817,7 @@ static void mtk_rx_clean(struct mtk_eth
 	}
 	if (ring->dma) {
-		dma_free_coherent(eth->dev,
+		dma_free_coherent(eth->dma_dev,
 				ring->dma_size * sizeof(*ring->dma),
 				ring->dma,
 				ring->phys);
@@ -2175,7 +2176,7 @@ static void mtk_dma_free(struct mtk_eth
 		if (eth->netdev[i])
 			netdev_reset_queue(eth->netdev[i]);
 	if (eth->scratch_ring) {
-		dma_free_coherent(eth->dev,
+		dma_free_coherent(eth->dma_dev,
 				MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
 				eth->scratch_ring,
 				eth->phy_scratch_ring);
@@ -2527,6 +2528,8 @@ static void mtk_dim_tx(struct work_struc
 static int mtk_hw_init(struct mtk_eth *eth)
 {
+	u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
+		       ETHSYS_DMA_AG_MAP_PPE;
 	int i, val, ret;
 	if (test_and_set_bit(MTK_HW_INIT, &eth->state))
@@ -2539,6 +2542,10 @@ static int mtk_hw_init(struct mtk_eth *e
 	if (ret)
 		goto err_disable_pm;
+	if (eth->ethsys)
+		regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
+				   of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
+
 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
 		ret = device_reset(eth->dev);
 		if (ret) {
@@ -3085,6 +3092,35 @@ free_netdev:
 	return err;
 }
+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
+{
+	struct net_device *dev, *tmp;
+	LIST_HEAD(dev_list);
+	int i;
+
+	rtnl_lock();
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		dev = eth->netdev[i];
+
+		if (!dev || !(dev->flags & IFF_UP))
+			continue;
+
+		list_add_tail(&dev->close_list, &dev_list);
+	}
+
+	dev_close_many(&dev_list, false);
+
+	eth->dma_dev = dma_dev;
+
+	list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
+		list_del_init(&dev->close_list);
+		dev_open(dev, NULL);
+	}
+
+	rtnl_unlock();
+}
+
 static int mtk_probe(struct platform_device *pdev)
 {
 	struct device_node *mac_np;
@@ -3098,6 +3134,7 @@ static int mtk_probe(struct platform_dev
 	eth->soc = of_device_get_match_data(&pdev->dev);
 	eth->dev = &pdev->dev;
+	eth->dma_dev = &pdev->dev;
 	eth->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(eth->base))
 		return PTR_ERR(eth->base);
@@ -3146,6 +3183,16 @@ static int mtk_probe(struct platform_dev
 		}
 	}
+	if (of_dma_is_coherent(pdev->dev.of_node)) {
+		struct regmap *cci;
+
+		cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+						      "mediatek,cci-control");
+		/* enable CPU/bus coherency */
+		if (!IS_ERR(cci))
+			regmap_write(cci, 0, 3);
+	}
+
 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
 		eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
 				GFP_KERNEL);
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -463,6 +463,12 @@
 #define RSTCTRL_FE		BIT(6)
 #define RSTCTRL_PPE		BIT(31)
+/* ethernet dma channel agent map */
+#define ETHSYS_DMA_AG_MAP	0x408
+#define ETHSYS_DMA_AG_MAP_PDMA	BIT(0)
+#define ETHSYS_DMA_AG_MAP_QDMA	BIT(1)
+#define ETHSYS_DMA_AG_MAP_PPE	BIT(2)
+
 /* SGMII subsystem config registers */
 /* Register to auto-negotiation restart */
 #define SGMSYS_PCS_CONTROL_1	0x0
@@ -880,6 +886,7 @@ struct mtk_sgmii {
 /* struct mtk_eth -	This is the main datasructure for holding the state
  *			of the driver
  * @dev:		The device pointer
+ * @dma_dev:		The device pointer used for dma mapping/alloc
  * @base:		The mapped register i/o base
  * @page_lock:		Make sure that register operations are atomic
  * @tx_irq__lock:	Make sure that IRQ register operations are atomic
@@ -923,6 +930,7 @@ struct mtk_sgmii {
 struct mtk_eth {
 	struct device			*dev;
+	struct device			*dma_dev;
 	void __iomem			*base;
 	spinlock_t			page_lock;
 	spinlock_t			tx_irq_lock;
@@ -1021,6 +1029,7 @@ int mtk_gmac_rgmii_path_setup(struct mtk
 int mtk_eth_offload_init(struct mtk_eth *eth);
 int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
 		     void *type_data);
+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
 #endif /* MTK_ETH_H */
  296. #endif /* MTK_ETH_H */