- From: Felix Fietkau <[email protected]>
- Date: Thu, 3 Nov 2022 12:38:49 +0100
- Subject: [PATCH] net: ethernet: mtk_eth_soc: work around issue with
- sending small fragments
- When frames are sent with very small fragments, the DMA engine appears to
- lock up and transmit attempts time out. Fix this by detecting the presence
- of small fragments and using skb_gso_segment + skb_linearize to deal with
- them.
- Signed-off-by: Felix Fietkau <[email protected]>
- ---
- --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
- +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
- @@ -1396,12 +1396,28 @@ static void mtk_wake_queue(struct mtk_et
- }
- }
-
- +static bool mtk_skb_has_small_frag(struct sk_buff *skb)
- +{
- + int min_size = 16;
- + int i;
- +
- + if (skb_headlen(skb) < min_size)
- + return true;
- +
- + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- + if (skb_frag_size(&skb_shinfo(skb)->frags[i]) < min_size)
- + return true;
- +
- + return false;
- +}
- +
- static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
- {
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
- struct mtk_tx_ring *ring = ð->tx_ring;
- struct net_device_stats *stats = &dev->stats;
- + struct sk_buff *segs, *next;
- bool gso = false;
- int tx_num;
-
- @@ -1423,6 +1439,17 @@ static netdev_tx_t mtk_start_xmit(struct
- return NETDEV_TX_BUSY;
- }
-
- + if (skb_is_gso(skb) && mtk_skb_has_small_frag(skb)) {
- + segs = skb_gso_segment(skb, dev->features & ~NETIF_F_ALL_TSO);
- + if (IS_ERR(segs))
- + goto drop;
- +
- + if (segs) {
- + consume_skb(skb);
- + skb = segs;
- + }
- + }
- +
- /* TSO: fill MSS info in tcp checksum field */
- if (skb_is_gso(skb)) {
- if (skb_cow_head(skb, 0)) {
- @@ -1438,8 +1465,13 @@ static netdev_tx_t mtk_start_xmit(struct
- }
- }
-
- - if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
- - goto drop;
- + skb_list_walk_safe(skb, skb, next) {
- + if ((mtk_skb_has_small_frag(skb) && skb_linearize(skb)) ||
- + mtk_tx_map(skb, dev, tx_num, ring, gso) < 0) {
- + stats->tx_dropped++;
- + dev_kfree_skb_any(skb);
- + }
- + }
-
- if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
- netif_tx_stop_all_queues(dev);