0047-net-next-mediatek-split-IRQ-register-locking-into-TX.patch

From 5afceece38fa30e3c71e7ed9ac62aa70ba8cfbb1 Mon Sep 17 00:00:00 2001
From: John Crispin <[email protected]>
Date: Fri, 16 Jun 2017 10:00:30 +0200
Subject: [PATCH 47/57] net-next: mediatek: split IRQ register locking into TX
 and RX

Originally the driver only utilized the new QDMA engine. The current code
still assumes this is the case when locking the IRQ mask register. Since
RX now runs on the old-style PDMA engine, we can add a second lock. This
patch reduces IRQ latency, as the TX and RX paths no longer need to wait
on each other under heavy load.

Signed-off-by: John Crispin <[email protected]>
---
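The change is the standard fine-grained locking move: the single irq_lock
that serialized both interrupt mask registers becomes one spinlock per
register, so a TX-side read-modify-write of the QDMA mask never blocks an
RX-side update of the PDMA mask. A minimal sketch of the pattern, using
hypothetical names (demo_dev, demo_rx_irq_disable, DEMO_PDMA_INT_MASK)
rather than the driver's real definitions:

	#include <linux/io.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	/* Sketch only: the struct, function, and register offset below are
	 * illustrative stand-ins, not the mtk_eth_soc definitions.
	 */
	#define DEMO_PDMA_INT_MASK	0x0a28	/* hypothetical RX mask offset */

	struct demo_dev {
		void __iomem *base;
		spinlock_t tx_irq_lock;	/* guards the TX (QDMA) mask register */
		spinlock_t rx_irq_lock;	/* guards the RX (PDMA) mask register */
	};

	static void demo_rx_irq_disable(struct demo_dev *dev, u32 mask)
	{
		unsigned long flags;
		u32 val;

		/* Only RX-path callers take this lock, so a concurrent TX
		 * completion IRQ on another CPU proceeds without waiting.
		 */
		spin_lock_irqsave(&dev->rx_irq_lock, flags);
		val = readl(dev->base + DEMO_PDMA_INT_MASK);
		writel(val & ~mask, dev->base + DEMO_PDMA_INT_MASK);
		spin_unlock_irqrestore(&dev->rx_irq_lock, flags);
	}

The split is safe because the two mask registers are independent; the only
invariant is that each register is read-modified-written exclusively under
its own lock, which the four helpers in the diff below preserve.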
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 79 ++++++++++++++++++-----------
 drivers/net/ethernet/mediatek/mtk_eth_soc.h |  5 +-
 2 files changed, 54 insertions(+), 30 deletions(-)
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -372,28 +372,48 @@ static void mtk_mdio_cleanup(struct mtk_
 	mdiobus_unregister(eth->mii_bus);
 }
 
-static inline void mtk_irq_disable(struct mtk_eth *eth,
-				   unsigned reg, u32 mask)
+static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
 {
 	unsigned long flags;
 	u32 val;
 
-	spin_lock_irqsave(&eth->irq_lock, flags);
-	val = mtk_r32(eth, reg);
-	mtk_w32(eth, val & ~mask, reg);
-	spin_unlock_irqrestore(&eth->irq_lock, flags);
+	spin_lock_irqsave(&eth->tx_irq_lock, flags);
+	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
+	mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
+	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
 }
 
-static inline void mtk_irq_enable(struct mtk_eth *eth,
-				  unsigned reg, u32 mask)
+static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
 {
 	unsigned long flags;
 	u32 val;
 
-	spin_lock_irqsave(&eth->irq_lock, flags);
-	val = mtk_r32(eth, reg);
-	mtk_w32(eth, val | mask, reg);
-	spin_unlock_irqrestore(&eth->irq_lock, flags);
+	spin_lock_irqsave(&eth->tx_irq_lock, flags);
+	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
+	mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
+	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
+}
+
+static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
+{
+	unsigned long flags;
+	u32 val;
+
+	spin_lock_irqsave(&eth->rx_irq_lock, flags);
+	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
+	mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
+	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
+}
+
+static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
+{
+	unsigned long flags;
+	u32 val;
+
+	spin_lock_irqsave(&eth->rx_irq_lock, flags);
+	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
+	mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
+	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
 }
 
 static int mtk_set_mac_address(struct net_device *dev, void *p)
@@ -1116,7 +1136,7 @@ static int mtk_napi_tx(struct napi_struc
 		return budget;
 
 	napi_complete(napi);
-	mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
+	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
 
 	return tx_done;
 }
@@ -1150,7 +1170,7 @@ poll_again:
 		goto poll_again;
 	}
 	napi_complete(napi);
-	mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
 
 	return rx_done + budget - remain_budget;
 }
@@ -1699,7 +1719,7 @@ static irqreturn_t mtk_handle_irq_rx(int
 
 	if (likely(napi_schedule_prep(&eth->rx_napi))) {
 		__napi_schedule(&eth->rx_napi);
-		mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+		mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
 	}
 
 	return IRQ_HANDLED;
@@ -1711,7 +1731,7 @@ static irqreturn_t mtk_handle_irq_tx(int
 
 	if (likely(napi_schedule_prep(&eth->tx_napi))) {
 		__napi_schedule(&eth->tx_napi);
-		mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
+		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
 	}
 
 	return IRQ_HANDLED;
@@ -1723,11 +1743,11 @@ static void mtk_poll_controller(struct n
 	struct mtk_mac *mac = netdev_priv(dev);
 	struct mtk_eth *eth = mac->hw;
 
-	mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
-	mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
 	mtk_handle_irq_rx(eth->irq[2], dev);
-	mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
-	mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
 }
 
 #endif
@@ -1770,8 +1790,8 @@ static int mtk_open(struct net_device *d
 
 		napi_enable(&eth->tx_napi);
 		napi_enable(&eth->rx_napi);
-		mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
-		mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+		mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
 	}
 
 	atomic_inc(&eth->dma_refcnt);
@@ -1816,8 +1836,8 @@ static int mtk_stop(struct net_device *d
 	if (!atomic_dec_and_test(&eth->dma_refcnt))
 		return 0;
 
-	mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
-	mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
 	napi_disable(&eth->tx_napi);
 	napi_disable(&eth->rx_napi);
 
@@ -1911,8 +1931,8 @@ static int mtk_hw_init(struct mtk_eth *e
 	mtk_w32(eth, 0, MTK_PDMA_DELAY_INT);
 	mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
 #endif
-	mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
-	mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
+	mtk_tx_irq_disable(eth, ~0);
+	mtk_rx_irq_disable(eth, ~0);
 	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
 	mtk_w32(eth, 0, MTK_RST_GL);
 
@@ -1983,8 +2003,8 @@ static void mtk_uninit(struct net_device
 	phy_disconnect(dev->phydev);
 	if (of_phy_is_fixed_link(mac->of_node))
 		of_phy_deregister_fixed_link(mac->of_node);
-	mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
-	mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
+	mtk_tx_irq_disable(eth, ~0);
+	mtk_rx_irq_disable(eth, ~0);
 }
 
 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -2442,7 +2462,8 @@ static int mtk_probe(struct platform_dev
 		return PTR_ERR(eth->base);
 
 	spin_lock_init(&eth->page_lock);
-	spin_lock_init(&eth->irq_lock);
+	spin_lock_init(&eth->tx_irq_lock);
+	spin_lock_init(&eth->rx_irq_lock);
 
 	eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
 						      "mediatek,ethsys");
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -526,6 +526,8 @@ struct mtk_rx_ring {
  * @dev:		The device pointer
  * @base:		The mapped register i/o base
  * @page_lock:		Make sure that register operations are atomic
+ * @tx_irq_lock:	Make sure that IRQ register operations are atomic
+ * @rx_irq_lock:	Make sure that IRQ register operations are atomic
  * @dummy_dev:		we run 2 netdevs on 1 physical DMA ring and need a
  *			dummy for NAPI to work
  * @netdev:		The netdev instances
@@ -555,7 +557,8 @@ struct mtk_eth {
 	struct device			*dev;
 	void __iomem			*base;
 	spinlock_t			page_lock;
-	spinlock_t			irq_lock;
+	spinlock_t			tx_irq_lock;
+	spinlock_t			rx_irq_lock;
 	struct net_device		dummy_dev;
 	struct net_device		*netdev[MTK_MAX_DEVS];
 	struct mtk_mac			*mac[MTK_MAX_DEVS];