0073-net-mediatek-fix-stop-and-wakeup-of-queue.patch 2.6 KB

  1. From 283001c04f395f32e55345632d8129f5395cde33 Mon Sep 17 00:00:00 2001
  2. From: John Crispin <[email protected]>
  3. Date: Tue, 29 Mar 2016 16:41:07 +0200
  4. Subject: [PATCH 73/90] net: mediatek: fix stop and wakeup of queue
  5. The driver supports 2 MACs. Both run on the same DMA ring. If we go
  6. above/below the TX rings threshold value, we always need to wake/stop
  7. the queue of both devices. Not doing so can cause TX stalls and packet
  8. drops on one of the devices.
  9. Signed-off-by: John Crispin <[email protected]>
  10. ---
  11. drivers/net/ethernet/mediatek/mtk_eth_soc.c | 37 +++++++++++++++++++--------
  12. 1 file changed, 27 insertions(+), 10 deletions(-)
  13. diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
  14. index 293ea59..04bdb9d 100644
  15. --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
  16. +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
  17. @@ -684,6 +684,28 @@ static inline int mtk_cal_txd_req(struct sk_buff *skb)
  18. return nfrags;
  19. }
  20. +static void mtk_wake_queue(struct mtk_eth *eth)
  21. +{
  22. + int i;
  23. +
  24. + for (i = 0; i < MTK_MAC_COUNT; i++) {
  25. + if (!eth->netdev[i])
  26. + continue;
  27. + netif_wake_queue(eth->netdev[i]);
  28. + }
  29. +}
  30. +
  31. +static void mtk_stop_queue(struct mtk_eth *eth)
  32. +{
  33. + int i;
  34. +
  35. + for (i = 0; i < MTK_MAC_COUNT; i++) {
  36. + if (!eth->netdev[i])
  37. + continue;
  38. + netif_stop_queue(eth->netdev[i]);
  39. + }
  40. +}
  41. +
  42. static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
  43. {
  44. struct mtk_mac *mac = netdev_priv(dev);
  45. @@ -695,7 +717,7 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
  46. tx_num = mtk_cal_txd_req(skb);
  47. if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
  48. - netif_stop_queue(dev);
  49. + mtk_stop_queue(eth);
  50. netif_err(eth, tx_queued, dev,
  51. "Tx Ring full when queue awake!\n");
  52. return NETDEV_TX_BUSY;
  53. @@ -720,10 +742,10 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
  54. goto drop;
  55. if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) {
  56. - netif_stop_queue(dev);
  57. + mtk_stop_queue(eth);
  58. if (unlikely(atomic_read(&ring->free_count) >
  59. ring->thresh))
  60. - netif_wake_queue(dev);
  61. + mtk_wake_queue(eth);
  62. }
  63. return NETDEV_TX_OK;
  64. @@ -897,13 +919,8 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
  65. if (!total)
  66. return 0;
  67. - for (i = 0; i < MTK_MAC_COUNT; i++) {
  68. - if (!eth->netdev[i] ||
  69. - unlikely(!netif_queue_stopped(eth->netdev[i])))
  70. - continue;
  71. - if (atomic_read(&ring->free_count) > ring->thresh)
  72. - netif_wake_queue(eth->netdev[i]);
  73. - }
  74. + if (atomic_read(&ring->free_count) > ring->thresh)
  75. + mtk_wake_queue(eth);
  76. return total;
  77. }
  78. --
  79. 1.7.10.4