2
0

702-v5.19-18-net-ethernet-mtk_eth_soc-move-tx-dma-desc-configurat.patch 6.2 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206
  1. From: Lorenzo Bianconi <[email protected]>
  2. Date: Fri, 20 May 2022 20:11:27 +0200
  3. Subject: [PATCH] net: ethernet: mtk_eth_soc: move tx dma desc configuration in
  4. mtk_tx_set_dma_desc
  5. Move tx dma descriptor configuration into the mtk_tx_set_dma_desc routine.
  6. This is a preliminary patch to introduce mt7986 ethernet support since
  7. it relies on a different tx dma descriptor layout.
  8. Tested-by: Sam Shih <[email protected]>
  9. Signed-off-by: Lorenzo Bianconi <[email protected]>
  10. Signed-off-by: David S. Miller <[email protected]>
  11. ---
  12. --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
  13. +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
  14. @@ -982,18 +982,51 @@ static void setup_tx_buf(struct mtk_eth
  15. }
  16. }
  17. +static void mtk_tx_set_dma_desc(struct net_device *dev, struct mtk_tx_dma *desc,
  18. + struct mtk_tx_dma_desc_info *info)
  19. +{
  20. + struct mtk_mac *mac = netdev_priv(dev);
  21. + u32 data;
  22. +
  23. + WRITE_ONCE(desc->txd1, info->addr);
  24. +
  25. + data = TX_DMA_SWC | TX_DMA_PLEN0(info->size);
  26. + if (info->last)
  27. + data |= TX_DMA_LS0;
  28. + WRITE_ONCE(desc->txd3, data);
  29. +
  30. + data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
  31. + if (info->first) {
  32. + if (info->gso)
  33. + data |= TX_DMA_TSO;
  34. + /* tx checksum offload */
  35. + if (info->csum)
  36. + data |= TX_DMA_CHKSUM;
  37. + /* vlan header offload */
  38. + if (info->vlan)
  39. + data |= TX_DMA_INS_VLAN | info->vlan_tci;
  40. + }
  41. + WRITE_ONCE(desc->txd4, data);
  42. +}
  43. +
  44. static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
  45. int tx_num, struct mtk_tx_ring *ring, bool gso)
  46. {
  47. + struct mtk_tx_dma_desc_info txd_info = {
  48. + .size = skb_headlen(skb),
  49. + .gso = gso,
  50. + .csum = skb->ip_summed == CHECKSUM_PARTIAL,
  51. + .vlan = skb_vlan_tag_present(skb),
  52. + .vlan_tci = skb_vlan_tag_get(skb),
  53. + .first = true,
  54. + .last = !skb_is_nonlinear(skb),
  55. + };
  56. struct mtk_mac *mac = netdev_priv(dev);
  57. struct mtk_eth *eth = mac->hw;
  58. struct mtk_tx_dma *itxd, *txd;
  59. struct mtk_tx_dma *itxd_pdma, *txd_pdma;
  60. struct mtk_tx_buf *itx_buf, *tx_buf;
  61. - dma_addr_t mapped_addr;
  62. - unsigned int nr_frags;
  63. int i, n_desc = 1;
  64. - u32 txd4 = 0, fport;
  65. int k = 0;
  66. itxd = ring->next_free;
  67. @@ -1001,49 +1034,32 @@ static int mtk_tx_map(struct sk_buff *sk
  68. if (itxd == ring->last_free)
  69. return -ENOMEM;
  70. - /* set the forward port */
  71. - fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
  72. - txd4 |= fport;
  73. -
  74. itx_buf = mtk_desc_to_tx_buf(ring, itxd);
  75. memset(itx_buf, 0, sizeof(*itx_buf));
  76. - if (gso)
  77. - txd4 |= TX_DMA_TSO;
  78. -
  79. - /* TX Checksum offload */
  80. - if (skb->ip_summed == CHECKSUM_PARTIAL)
  81. - txd4 |= TX_DMA_CHKSUM;
  82. -
  83. - /* VLAN header offload */
  84. - if (skb_vlan_tag_present(skb))
  85. - txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
  86. -
  87. - mapped_addr = dma_map_single(eth->dma_dev, skb->data,
  88. - skb_headlen(skb), DMA_TO_DEVICE);
  89. - if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr)))
  90. + txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
  91. + DMA_TO_DEVICE);
  92. + if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
  93. return -ENOMEM;
  94. - WRITE_ONCE(itxd->txd1, mapped_addr);
  95. + mtk_tx_set_dma_desc(dev, itxd, &txd_info);
  96. +
  97. itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
  98. itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
  99. MTK_TX_FLAGS_FPORT1;
  100. - setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb),
  101. + setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
  102. k++);
  103. /* TX SG offload */
  104. txd = itxd;
  105. txd_pdma = qdma_to_pdma(ring, txd);
  106. - nr_frags = skb_shinfo(skb)->nr_frags;
  107. - for (i = 0; i < nr_frags; i++) {
  108. + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  109. skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  110. unsigned int offset = 0;
  111. int frag_size = skb_frag_size(frag);
  112. while (frag_size) {
  113. - bool last_frag = false;
  114. - unsigned int frag_map_size;
  115. bool new_desc = true;
  116. if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
  117. @@ -1058,23 +1074,17 @@ static int mtk_tx_map(struct sk_buff *sk
  118. new_desc = false;
  119. }
  120. -
  121. - frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
  122. - mapped_addr = skb_frag_dma_map(eth->dma_dev, frag, offset,
  123. - frag_map_size,
  124. - DMA_TO_DEVICE);
  125. - if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr)))
  126. + memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
  127. + txd_info.size = min(frag_size, MTK_TX_DMA_BUF_LEN);
  128. + txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
  129. + !(frag_size - txd_info.size);
  130. + txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
  131. + offset, txd_info.size,
  132. + DMA_TO_DEVICE);
  133. + if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
  134. goto err_dma;
  135. - if (i == nr_frags - 1 &&
  136. - (frag_size - frag_map_size) == 0)
  137. - last_frag = true;
  138. -
  139. - WRITE_ONCE(txd->txd1, mapped_addr);
  140. - WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
  141. - TX_DMA_PLEN0(frag_map_size) |
  142. - last_frag * TX_DMA_LS0));
  143. - WRITE_ONCE(txd->txd4, fport);
  144. + mtk_tx_set_dma_desc(dev, txd, &txd_info);
  145. tx_buf = mtk_desc_to_tx_buf(ring, txd);
  146. if (new_desc)
  147. @@ -1084,20 +1094,17 @@ static int mtk_tx_map(struct sk_buff *sk
  148. tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
  149. MTK_TX_FLAGS_FPORT1;
  150. - setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr,
  151. - frag_map_size, k++);
  152. + setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
  153. + txd_info.size, k++);
  154. - frag_size -= frag_map_size;
  155. - offset += frag_map_size;
  156. + frag_size -= txd_info.size;
  157. + offset += txd_info.size;
  158. }
  159. }
  160. /* store skb to cleanup */
  161. itx_buf->skb = skb;
  162. - WRITE_ONCE(itxd->txd4, txd4);
  163. - WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
  164. - (!nr_frags * TX_DMA_LS0)));
  165. if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
  166. if (k & 0x1)
  167. txd_pdma->txd2 |= TX_DMA_LS0;
  168. --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
  169. +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
  170. @@ -843,6 +843,17 @@ enum mkt_eth_capabilities {
  171. MTK_MUX_U3_GMAC2_TO_QPHY | \
  172. MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA)
  173. +struct mtk_tx_dma_desc_info {
  174. + dma_addr_t addr;
  175. + u32 size;
  176. + u16 vlan_tci;
  177. + u8 gso:1;
  178. + u8 csum:1;
  179. + u8 vlan:1;
  180. + u8 first:1;
  181. + u8 last:1;
  182. +};
  183. +
  184. /* struct mtk_eth_data - This is the structure holding all differences
  185. * among various plaforms
  186. * @ana_rgc3: The offset for register ANA_RGC3 related to