@@ -0,0 +1,197 @@
+From 72c050acbc425ef99313d5c2e4c866e25567e569 Mon Sep 17 00:00:00 2001
+From: Rakesh Nair <[email protected]>
+Date: Thu, 8 Jun 2017 14:29:20 +0530
+Subject: [PATCH] CHROMIUM: net: qualcomm: Add fix for memory allocation issues
+
+Added ethtool counters to account for memory allocation failures.
+Added support to track, in an rx descriptor ring field, the number
+of buffer allocations that could not be fulfilled in the current
+iteration, and to use that count to replenish the shortfall in a
+subsequent iteration.
+
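+For illustration only (not part of this change): a minimal user-space
+sketch of the deficit-carrying refill idea. The names (struct ring,
+ring_refill) are hypothetical and only mirror the concept; allocation
+failures are simulated with rand().
+
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  struct ring {
+          unsigned int count;        /* total descriptors */
+          unsigned int pending_fill; /* deficit carried forward */
+  };
+
+  /* Try to fill 'wanted' buffers; return how many were NOT filled. */
+  static unsigned int ring_refill(struct ring *r, unsigned int wanted)
+  {
+          unsigned int filled = 0;
+
+          /* Clamp, as the driver does when the request exceeds the ring. */
+          if (wanted > r->count)
+                  wanted = r->count;
+
+          while (filled < wanted) {
+                  if (rand() % 4 == 0) /* simulated allocation failure */
+                          break;
+                  filled++;
+          }
+          return wanted - filled;
+  }
+
+  int main(void)
+  {
+          struct ring r = { .count = 16, .pending_fill = 0 };
+          unsigned int iter;
+
+          for (iter = 0; iter < 5; iter++) {
+                  /* Carry the previous deficit into this iteration. */
+                  unsigned int wanted = r.pending_fill + 4;
+
+                  r.pending_fill = ring_refill(&r, wanted);
+                  printf("iter %u: wanted %u, pending %u\n",
+                         iter, wanted, r.pending_fill);
+          }
+          return 0;
+  }
+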
+Change-Id: Ie4fd3b6cf25304e5db2c9247a498791e7e9bb4aa
+Signed-off-by: Rakesh Nair <[email protected]>
+Signed-off-by: Kan Yan <[email protected]>
+Reviewed-on: https://chromium-review.googlesource.com/535419
+Reviewed-by: Grant Grundler <[email protected]>
+---
+ drivers/net/ethernet/qualcomm/essedma/edma.c | 54 ++++++++++++++-----
+ drivers/net/ethernet/qualcomm/essedma/edma.h | 2 +
+ .../ethernet/qualcomm/essedma/edma_ethtool.c | 1 +
+ 3 files changed, 43 insertions(+), 14 deletions(-)
+
+--- a/drivers/net/ethernet/qualcomm/essedma/edma.c
++++ b/drivers/net/ethernet/qualcomm/essedma/edma.c
+@@ -103,6 +103,9 @@ static int edma_alloc_rx_ring(struct edm
+ return -ENOMEM;
+ }
+
++ /* Initialize pending_fill */
++ erxd->pending_fill = 0;
++
+ return 0;
+ }
+
+@@ -185,11 +188,8 @@ static int edma_alloc_rx_buf(struct edma
+ u16 prod_idx, length;
+ u32 reg_data;
+
+- if (cleaned_count > erdr->count) {
+- dev_err(&pdev->dev, "Incorrect cleaned_count %d",
+- cleaned_count);
+- return -1;
+- }
++ if (cleaned_count > erdr->count)
++ cleaned_count = erdr->count - 1;
+
+ i = erdr->sw_next_to_fill;
+
+@@ -199,6 +199,9 @@ static int edma_alloc_rx_buf(struct edma
+
+ if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_REUSE) {
+ skb = sw_desc->skb;
++
++ /* Clear REUSE Flag */
++ sw_desc->flags &= ~EDMA_SW_DESC_FLAG_SKB_REUSE;
+ } else {
+ /* alloc skb */
+ skb = netdev_alloc_skb_ip_align(edma_netdev[0], length);
+@@ -264,6 +267,13 @@ static int edma_alloc_rx_buf(struct edma
+ reg_data &= ~EDMA_RFD_PROD_IDX_BITS;
+ reg_data |= prod_idx;
+ edma_write_reg(EDMA_REG_RFD_IDX_Q(queue_id), reg_data);
++
++ /* If we couldn't allocate all the buffers,
++ * increment the alloc failure counter.
++ */
++ if (cleaned_count)
++ edma_cinfo->edma_ethstats.rx_alloc_fail_ctr++;
++
+ return cleaned_count;
+ }
+
+@@ -534,7 +544,7 @@ static int edma_rx_complete_paged(struct
+ * edma_rx_complete()
+ * Main api called from the poll function to process rx packets.
+ */
+-static void edma_rx_complete(struct edma_common_info *edma_cinfo,
++static u16 edma_rx_complete(struct edma_common_info *edma_cinfo,
+ int *work_done, int work_to_do, int queue_id,
+ struct napi_struct *napi)
+ {
+@@ -554,6 +564,7 @@ static void edma_rx_complete(struct edma
+ u16 count = erdr->count, rfd_avail;
+ u8 queue_to_rxid[8] = {0, 0, 1, 1, 2, 2, 3, 3};
+
++ cleaned_count = erdr->pending_fill;
+ sw_next_to_clean = erdr->sw_next_to_clean;
+
+ edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data);
+@@ -652,12 +663,13 @@ static void edma_rx_complete(struct edma
+ (*work_done)++;
+ drop_count = 0;
+ }
+- if (cleaned_count == EDMA_RX_BUFFER_WRITE) {
++ if (cleaned_count >= EDMA_RX_BUFFER_WRITE) {
+ /* If buffer clean count reaches 16, we replenish HW buffers. */
+ ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
+ edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
+ sw_next_to_clean);
+ cleaned_count = ret_count;
++ erdr->pending_fill = ret_count;
+ }
+ continue;
+ }
+@@ -730,11 +742,12 @@ static void edma_rx_complete(struct edma
+ adapter->stats.rx_bytes += length;
+
+ /* Check if we reached refill threshold */
+- if (cleaned_count == EDMA_RX_BUFFER_WRITE) {
++ if (cleaned_count >= EDMA_RX_BUFFER_WRITE) {
+ ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
+ edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
+ sw_next_to_clean);
+ cleaned_count = ret_count;
++ erdr->pending_fill = ret_count;
+ }
+
+ /* At this point skb should go to stack */
+@@ -756,11 +769,17 @@ static void edma_rx_complete(struct edma
+ /* Refill here in case refill threshold wasn't reached */
+ if (likely(cleaned_count)) {
+ ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id);
+- if (ret_count)
+- dev_dbg(&pdev->dev, "Not all buffers was reallocated");
++ erdr->pending_fill = ret_count;
++ if (ret_count) {
++ if (net_ratelimit())
++ dev_dbg(&pdev->dev, "Not all buffers were reallocated");
++ }
++
+ edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id),
+ erdr->sw_next_to_clean);
+ }
++
++ return erdr->pending_fill;
+ }
+
+ /* edma_delete_rfs_filter()
+@@ -2064,6 +2083,7 @@ int edma_poll(struct napi_struct *napi,
+ u32 shadow_rx_status, shadow_tx_status;
+ int queue_id;
+ int i, work_done = 0;
++ u16 rx_pending_fill;
+
+ /* Store the Rx/Tx status by ANDing it with
+ * appropriate CPU RX?TX mask
+@@ -2097,13 +2117,19 @@ int edma_poll(struct napi_struct *napi,
+ */
+ while (edma_percpu_info->rx_status) {
+ queue_id = ffs(edma_percpu_info->rx_status) - 1;
+- edma_rx_complete(edma_cinfo, &work_done,
+- budget, queue_id, napi);
++ rx_pending_fill = edma_rx_complete(edma_cinfo, &work_done,
++ budget, queue_id, napi);
+
+- if (likely(work_done < budget))
++ if (likely(work_done < budget)) {
++ if (rx_pending_fill) {
++ /* reschedule poll() to refill rx buffer deficit */
++ work_done = budget;
++ break;
++ }
+ edma_percpu_info->rx_status &= ~(1 << queue_id);
+- else
++ } else {
+ break;
++ }
+ }
+
+ /* Clear the status register, to avoid the interrupts to
+--- a/drivers/net/ethernet/qualcomm/essedma/edma.h
++++ b/drivers/net/ethernet/qualcomm/essedma/edma.h
+@@ -225,6 +225,7 @@ struct edma_ethtool_statistics {
+ u32 rx_q6_byte;
+ u32 rx_q7_byte;
+ u32 tx_desc_error;
++ u32 rx_alloc_fail_ctr;
+ };
+
+ struct edma_mdio_data {
+@@ -362,6 +363,7 @@ struct edma_rfd_desc_ring {
+ dma_addr_t dma; /* descriptor ring physical address */
+ u16 sw_next_to_fill; /* next descriptor to fill */
+ u16 sw_next_to_clean; /* next descriptor to clean */
++ u16 pending_fill; /* fill pending from previous iteration */
+ };
+
+ /* edma_rfs_flter_node - rfs filter node in hash table */
+--- a/drivers/net/ethernet/qualcomm/essedma/edma_ethtool.c
++++ b/drivers/net/ethernet/qualcomm/essedma/edma_ethtool.c
+@@ -78,6 +78,7 @@ static const struct edma_ethtool_stats e
+ {"rx_q6_byte", EDMA_STAT(rx_q6_byte)},
+ {"rx_q7_byte", EDMA_STAT(rx_q7_byte)},
+ {"tx_desc_error", EDMA_STAT(tx_desc_error)},
++ {"rx_alloc_fail_ctr", EDMA_STAT(rx_alloc_fail_ctr)},
+ };
+
+ #define EDMA_STATS_LEN ARRAY_SIZE(edma_gstrings_stats)