002-0007-net-mediatek-stop-using-bitfileds-for-DMA-descriptor.patch 8.7 KB

  1. From 5f6f3600a334398e27802de33a6a8726aacbe88c Mon Sep 17 00:00:00 2001
  2. From: Weijie Gao <[email protected]>
  3. Date: Wed, 31 Aug 2022 19:04:23 +0800
  4. Subject: [PATCH 07/32] net: mediatek: stop using bitfields for DMA descriptors
  5. This patch is a preparation for adding a new version of PDMA in which the
  6. DMA descriptor fields have changed. Using bitfields would result in a complex
  7. modification. Converting the bitfields to u32 units solves this problem easily.
  8. Reviewed-by: Simon Glass <[email protected]>
  9. Signed-off-by: Weijie Gao <[email protected]>
  10. ---
  11. drivers/net/mtk_eth.c | 144 ++++++++++++++----------------------------
  12. drivers/net/mtk_eth.h | 32 ++++++++++
  13. 2 files changed, 80 insertions(+), 96 deletions(-)
  14. --- a/drivers/net/mtk_eth.c
  15. +++ b/drivers/net/mtk_eth.c
  16. @@ -65,77 +65,6 @@
  17. (DP_DISCARD << MC_DP_S) | \
  18. (DP_DISCARD << UN_DP_S))
  19. -struct pdma_rxd_info1 {
  20. - u32 PDP0;
  21. -};
  22. -
  23. -struct pdma_rxd_info2 {
  24. - u32 PLEN1 : 14;
  25. - u32 LS1 : 1;
  26. - u32 UN_USED : 1;
  27. - u32 PLEN0 : 14;
  28. - u32 LS0 : 1;
  29. - u32 DDONE : 1;
  30. -};
  31. -
  32. -struct pdma_rxd_info3 {
  33. - u32 PDP1;
  34. -};
  35. -
  36. -struct pdma_rxd_info4 {
  37. - u32 FOE_ENTRY : 14;
  38. - u32 CRSN : 5;
  39. - u32 SP : 3;
  40. - u32 L4F : 1;
  41. - u32 L4VLD : 1;
  42. - u32 TACK : 1;
  43. - u32 IP4F : 1;
  44. - u32 IP4 : 1;
  45. - u32 IP6 : 1;
  46. - u32 UN_USED : 4;
  47. -};
  48. -
  49. -struct pdma_rxdesc {
  50. - struct pdma_rxd_info1 rxd_info1;
  51. - struct pdma_rxd_info2 rxd_info2;
  52. - struct pdma_rxd_info3 rxd_info3;
  53. - struct pdma_rxd_info4 rxd_info4;
  54. -};
  55. -
  56. -struct pdma_txd_info1 {
  57. - u32 SDP0;
  58. -};
  59. -
  60. -struct pdma_txd_info2 {
  61. - u32 SDL1 : 14;
  62. - u32 LS1 : 1;
  63. - u32 BURST : 1;
  64. - u32 SDL0 : 14;
  65. - u32 LS0 : 1;
  66. - u32 DDONE : 1;
  67. -};
  68. -
  69. -struct pdma_txd_info3 {
  70. - u32 SDP1;
  71. -};
  72. -
  73. -struct pdma_txd_info4 {
  74. - u32 VLAN_TAG : 16;
  75. - u32 INS : 1;
  76. - u32 RESV : 2;
  77. - u32 UDF : 6;
  78. - u32 FPORT : 3;
  79. - u32 TSO : 1;
  80. - u32 TUI_CO : 3;
  81. -};
  82. -
  83. -struct pdma_txdesc {
  84. - struct pdma_txd_info1 txd_info1;
  85. - struct pdma_txd_info2 txd_info2;
  86. - struct pdma_txd_info3 txd_info3;
  87. - struct pdma_txd_info4 txd_info4;
  88. -};
  89. -
  90. enum mtk_switch {
  91. SW_NONE,
  92. SW_MT7530,
  93. @@ -151,13 +80,15 @@ enum mtk_switch {
  94. struct mtk_soc_data {
  95. u32 caps;
  96. u32 ana_rgc3;
  97. + u32 txd_size;
  98. + u32 rxd_size;
  99. };
  100. struct mtk_eth_priv {
  101. char pkt_pool[TOTAL_PKT_BUF_SIZE] __aligned(ARCH_DMA_MINALIGN);
  102. - struct pdma_txdesc *tx_ring_noc;
  103. - struct pdma_rxdesc *rx_ring_noc;
  104. + void *tx_ring_noc;
  105. + void *rx_ring_noc;
  106. int rx_dma_owner_idx0;
  107. int tx_cpu_owner_idx0;
  108. @@ -1202,14 +1133,16 @@ static void mtk_mac_init(struct mtk_eth_
  109. static void mtk_eth_fifo_init(struct mtk_eth_priv *priv)
  110. {
  111. char *pkt_base = priv->pkt_pool;
  112. + struct mtk_tx_dma *txd;
  113. + struct mtk_rx_dma *rxd;
  114. int i;
  115. mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0xffff0000, 0);
  116. udelay(500);
  117. - memset(priv->tx_ring_noc, 0, NUM_TX_DESC * sizeof(struct pdma_txdesc));
  118. - memset(priv->rx_ring_noc, 0, NUM_RX_DESC * sizeof(struct pdma_rxdesc));
  119. - memset(priv->pkt_pool, 0, TOTAL_PKT_BUF_SIZE);
  120. + memset(priv->tx_ring_noc, 0, NUM_TX_DESC * priv->soc->txd_size);
  121. + memset(priv->rx_ring_noc, 0, NUM_RX_DESC * priv->soc->rxd_size);
  122. + memset(priv->pkt_pool, 0xff, TOTAL_PKT_BUF_SIZE);
  123. flush_dcache_range((ulong)pkt_base,
  124. (ulong)(pkt_base + TOTAL_PKT_BUF_SIZE));
  125. @@ -1218,17 +1151,21 @@ static void mtk_eth_fifo_init(struct mtk
  126. priv->tx_cpu_owner_idx0 = 0;
  127. for (i = 0; i < NUM_TX_DESC; i++) {
  128. - priv->tx_ring_noc[i].txd_info2.LS0 = 1;
  129. - priv->tx_ring_noc[i].txd_info2.DDONE = 1;
  130. - priv->tx_ring_noc[i].txd_info4.FPORT = priv->gmac_id + 1;
  131. + txd = priv->tx_ring_noc + i * priv->soc->txd_size;
  132. +
  133. + txd->txd1 = virt_to_phys(pkt_base);
  134. + txd->txd2 = PDMA_TXD2_DDONE | PDMA_TXD2_LS0;
  135. + txd->txd4 = PDMA_TXD4_FPORT_SET(priv->gmac_id + 1);
  136. - priv->tx_ring_noc[i].txd_info1.SDP0 = virt_to_phys(pkt_base);
  137. pkt_base += PKTSIZE_ALIGN;
  138. }
  139. for (i = 0; i < NUM_RX_DESC; i++) {
  140. - priv->rx_ring_noc[i].rxd_info2.PLEN0 = PKTSIZE_ALIGN;
  141. - priv->rx_ring_noc[i].rxd_info1.PDP0 = virt_to_phys(pkt_base);
  142. + rxd = priv->rx_ring_noc + i * priv->soc->rxd_size;
  143. +
  144. + rxd->rxd1 = virt_to_phys(pkt_base);
  145. + rxd->rxd2 = PDMA_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
  146. +
  147. pkt_base += PKTSIZE_ALIGN;
  148. }
  149. @@ -1315,20 +1252,22 @@ static int mtk_eth_send(struct udevice *
  150. {
  151. struct mtk_eth_priv *priv = dev_get_priv(dev);
  152. u32 idx = priv->tx_cpu_owner_idx0;
  153. + struct mtk_tx_dma *txd;
  154. void *pkt_base;
  155. - if (!priv->tx_ring_noc[idx].txd_info2.DDONE) {
  156. + txd = priv->tx_ring_noc + idx * priv->soc->txd_size;
  157. +
  158. + if (!(txd->txd2 & PDMA_TXD2_DDONE)) {
  159. debug("mtk-eth: TX DMA descriptor ring is full\n");
  160. return -EPERM;
  161. }
  162. - pkt_base = (void *)phys_to_virt(priv->tx_ring_noc[idx].txd_info1.SDP0);
  163. + pkt_base = (void *)phys_to_virt(txd->txd1);
  164. memcpy(pkt_base, packet, length);
  165. flush_dcache_range((ulong)pkt_base, (ulong)pkt_base +
  166. roundup(length, ARCH_DMA_MINALIGN));
  167. - priv->tx_ring_noc[idx].txd_info2.SDL0 = length;
  168. - priv->tx_ring_noc[idx].txd_info2.DDONE = 0;
  169. + txd->txd2 = PDMA_TXD2_LS0 | PDMA_TXD2_SDL0_SET(length);
  170. priv->tx_cpu_owner_idx0 = (priv->tx_cpu_owner_idx0 + 1) % NUM_TX_DESC;
  171. mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);
  172. @@ -1340,16 +1279,20 @@ static int mtk_eth_recv(struct udevice *
  173. {
  174. struct mtk_eth_priv *priv = dev_get_priv(dev);
  175. u32 idx = priv->rx_dma_owner_idx0;
  176. + struct mtk_rx_dma *rxd;
  177. uchar *pkt_base;
  178. u32 length;
  179. - if (!priv->rx_ring_noc[idx].rxd_info2.DDONE) {
  180. + rxd = priv->rx_ring_noc + idx * priv->soc->rxd_size;
  181. +
  182. + if (!(rxd->rxd2 & PDMA_RXD2_DDONE)) {
  183. debug("mtk-eth: RX DMA descriptor ring is empty\n");
  184. return -EAGAIN;
  185. }
  186. - length = priv->rx_ring_noc[idx].rxd_info2.PLEN0;
  187. - pkt_base = (void *)phys_to_virt(priv->rx_ring_noc[idx].rxd_info1.PDP0);
  188. + length = PDMA_RXD2_PLEN0_GET(rxd->rxd2);
  189. +
  190. + pkt_base = (void *)phys_to_virt(rxd->rxd1);
  191. invalidate_dcache_range((ulong)pkt_base, (ulong)pkt_base +
  192. roundup(length, ARCH_DMA_MINALIGN));
  193. @@ -1363,10 +1306,11 @@ static int mtk_eth_free_pkt(struct udevi
  194. {
  195. struct mtk_eth_priv *priv = dev_get_priv(dev);
  196. u32 idx = priv->rx_dma_owner_idx0;
  197. + struct mtk_rx_dma *rxd;
  198. +
  199. + rxd = priv->rx_ring_noc + idx * priv->soc->rxd_size;
  200. - priv->rx_ring_noc[idx].rxd_info2.DDONE = 0;
  201. - priv->rx_ring_noc[idx].rxd_info2.LS0 = 0;
  202. - priv->rx_ring_noc[idx].rxd_info2.PLEN0 = PKTSIZE_ALIGN;
  203. + rxd->rxd2 = PDMA_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
  204. mtk_pdma_write(priv, RX_CRX_IDX_REG(0), idx);
  205. priv->rx_dma_owner_idx0 = (priv->rx_dma_owner_idx0 + 1) % NUM_RX_DESC;
  206. @@ -1393,11 +1337,11 @@ static int mtk_eth_probe(struct udevice
  207. return ret;
  208. /* Prepare for tx/rx rings */
  209. - priv->tx_ring_noc = (struct pdma_txdesc *)
  210. - noncached_alloc(sizeof(struct pdma_txdesc) * NUM_TX_DESC,
  211. + priv->tx_ring_noc = (void *)
  212. + noncached_alloc(priv->soc->txd_size * NUM_TX_DESC,
  213. ARCH_DMA_MINALIGN);
  214. - priv->rx_ring_noc = (struct pdma_rxdesc *)
  215. - noncached_alloc(sizeof(struct pdma_rxdesc) * NUM_RX_DESC,
  216. + priv->rx_ring_noc = (void *)
  217. + noncached_alloc(priv->soc->rxd_size * NUM_RX_DESC,
  218. ARCH_DMA_MINALIGN);
  219. /* Set MAC mode */
  220. @@ -1554,18 +1498,26 @@ static int mtk_eth_of_to_plat(struct ude
  221. static const struct mtk_soc_data mt7629_data = {
  222. .ana_rgc3 = 0x128,
  223. + .txd_size = sizeof(struct mtk_tx_dma),
  224. + .rxd_size = sizeof(struct mtk_rx_dma),
  225. };
  226. static const struct mtk_soc_data mt7623_data = {
  227. .caps = MT7623_CAPS,
  228. + .txd_size = sizeof(struct mtk_tx_dma),
  229. + .rxd_size = sizeof(struct mtk_rx_dma),
  230. };
  231. static const struct mtk_soc_data mt7622_data = {
  232. .ana_rgc3 = 0x2028,
  233. + .txd_size = sizeof(struct mtk_tx_dma),
  234. + .rxd_size = sizeof(struct mtk_rx_dma),
  235. };
  236. static const struct mtk_soc_data mt7621_data = {
  237. .caps = MT7621_CAPS,
  238. + .txd_size = sizeof(struct mtk_tx_dma),
  239. + .rxd_size = sizeof(struct mtk_rx_dma),
  240. };
  241. static const struct udevice_id mtk_eth_ids[] = {
  242. --- a/drivers/net/mtk_eth.h
  243. +++ b/drivers/net/mtk_eth.h
  244. @@ -10,6 +10,7 @@
  245. #define _MTK_ETH_H_
  246. #include <linux/bitops.h>
  247. +#include <linux/bitfield.h>
  248. enum mkt_eth_capabilities {
  249. MTK_TRGMII_BIT,
  250. @@ -435,4 +436,35 @@ enum mkt_eth_capabilities {
  251. #define PHY_POWER_SAVING_M 0x300
  252. #define PHY_POWER_SAVING_TX 0x0
  253. +/* PDMA descriptors */
  254. +struct mtk_rx_dma {
  255. + unsigned int rxd1;
  256. + unsigned int rxd2;
  257. + unsigned int rxd3;
  258. + unsigned int rxd4;
  259. +} __packed __aligned(4);
  260. +
  261. +struct mtk_tx_dma {
  262. + unsigned int txd1;
  263. + unsigned int txd2;
  264. + unsigned int txd3;
  265. + unsigned int txd4;
  266. +} __packed __aligned(4);
  267. +
  268. +/* PDMA TXD fields */
  269. +#define PDMA_TXD2_DDONE BIT(31)
  270. +#define PDMA_TXD2_LS0 BIT(30)
  271. +#define PDMA_TXD2_SDL0_M GENMASK(29, 16)
  272. +#define PDMA_TXD2_SDL0_SET(_v) FIELD_PREP(PDMA_TXD2_SDL0_M, (_v))
  273. +
  274. +#define PDMA_TXD4_FPORT_M GENMASK(27, 25)
  275. +#define PDMA_TXD4_FPORT_SET(_v) FIELD_PREP(PDMA_TXD4_FPORT_M, (_v))
  276. +
  277. +/* PDMA RXD fields */
  278. +#define PDMA_RXD2_DDONE BIT(31)
  279. +#define PDMA_RXD2_LS0 BIT(30)
  280. +#define PDMA_RXD2_PLEN0_M GENMASK(29, 16)
  281. +#define PDMA_RXD2_PLEN0_GET(_v) FIELD_GET(PDMA_RXD2_PLEN0_M, (_v))
  282. +#define PDMA_RXD2_PLEN0_SET(_v) FIELD_PREP(PDMA_RXD2_PLEN0_M, (_v))
  283. +
  284. #endif /* _MTK_ETH_H_ */