From 00cf2a1d7d58631ba137b9acabe1de1d542625a8 Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <[email protected]>
Date: Sat, 19 Oct 2019 10:13:23 +0200
Subject: [PATCH 3/7] net: mvneta: rely on build_skb in mvneta_rx_swbm poll
 routine

Refactor the mvneta_rx_swbm code, introducing the mvneta_swbm_rx_frame
and mvneta_swbm_add_rx_fragment routines. Rely on build_skb in order to
allocate the skb, since the previous patch introduced buffer recycling
through the page_pool API.
This patch also fixes an issue in the original driver where DMA buffers
were accessed before the DMA sync.
The mvneta driver can run on non-cache-coherent devices, so it is
necessary to sync DMA buffers before handing them to the device in
order to avoid memory corruption. A perf analysis shows a performance
cost associated with this DMA sync (it was already present in the
original driver code). Follow-up patches will add more logic to reduce
the DMA sync as much as possible.

Signed-off-by: Ilias Apalodimas <[email protected]>
Signed-off-by: Jesper Dangaard Brouer <[email protected]>
Signed-off-by: Lorenzo Bianconi <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
---
 drivers/net/ethernet/marvell/mvneta.c | 185 +++++++++++++-------------
 1 file changed, 95 insertions(+), 90 deletions(-)

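For orientation before the diff: the patch replaces the old
copy-into-a-small-skb receive path with build_skb() wrapped around the
page_pool page itself. A minimal sketch of that pattern follows; it is an
illustration only, not code from this patch, the helper name is
hypothetical, and it assumes the page was refilled with NET_SKB_PAD bytes
of headroom (which the rx_offset_correction change at the end of this
patch provides):

	/* Hypothetical helper, illustrative only: wrap a NIC-filled page
	 * in an skb without copying. build_skb() places struct
	 * skb_shared_info at the end of the buffer, which is why the NIC
	 * may only use MVNETA_MAX_RX_BUF_SIZE bytes of the page (see the
	 * new macros in the first hunk below). */
	static struct sk_buff *rx_wrap_page_sketch(struct page *page, int pkt_len)
	{
		struct sk_buff *skb = build_skb(page_address(page), PAGE_SIZE);

		if (unlikely(!skb))
			return NULL;
		skb_reserve(skb, NET_SKB_PAD);	/* skip the reserved headroom */
		skb_put(skb, pkt_len);		/* expose the received bytes */
		return skb;
	}
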
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -323,6 +323,11 @@
 	      ETH_HLEN + ETH_FCS_LEN,			     \
 	      cache_line_size())
 
+#define MVNETA_SKB_PAD	(SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
+			 NET_SKB_PAD))
+#define MVNETA_SKB_SIZE(len)	(SKB_DATA_ALIGN(len) + MVNETA_SKB_PAD)
+#define MVNETA_MAX_RX_BUF_SIZE	(PAGE_SIZE - MVNETA_SKB_PAD)
+
 #define IS_TSO_HEADER(txq, addr) \
 	((addr >= txq->tso_hdrs_phys) && \
 	 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
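
To see what the new macros evaluate to, here is a userspace rederivation
under illustrative assumptions (64-bit build, 4 KiB pages, 64-byte cache
lines, NET_SKB_PAD = 64, sizeof(struct skb_shared_info) = 320; the kernel
headers are authoritative):

	#include <stdio.h>

	#define SMP_CACHE_BYTES		64
	#define SKB_DATA_ALIGN(x)	(((x) + (SMP_CACHE_BYTES - 1)) & \
					 ~(SMP_CACHE_BYTES - 1))
	#define NET_SKB_PAD		64
	#define SHINFO_SIZE		320	/* assumed sizeof(struct skb_shared_info) */
	#define PAGE_SIZE		4096

	#define MVNETA_SKB_PAD	(SKB_DATA_ALIGN(SHINFO_SIZE + NET_SKB_PAD))
	#define MVNETA_SKB_SIZE(len)	(SKB_DATA_ALIGN(len) + MVNETA_SKB_PAD)
	#define MVNETA_MAX_RX_BUF_SIZE	(PAGE_SIZE - MVNETA_SKB_PAD)

	int main(void)
	{
		printf("MVNETA_SKB_PAD         = %d\n", MVNETA_SKB_PAD);	  /* 384 */
		printf("MVNETA_MAX_RX_BUF_SIZE = %d\n", MVNETA_MAX_RX_BUF_SIZE); /* 3712 */
		printf("MVNETA_SKB_SIZE(1518)  = %d\n", MVNETA_SKB_SIZE(1518));  /* 1920 */
		return 0;
	}

Under these assumptions MVNETA_MAX_RX_BUF_SIZE (3712) is both what the
refill path below syncs and what the hardware is told in
mvneta_rxq_buf_size_set(), so the NIC can never scribble over the
skb_shared_info area at the end of the page.
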
@@ -648,7 +653,6 @@ static int txq_number = 8;
 static int rxq_def;
 
 static int rx_copybreak __read_mostly = 256;
-static int rx_header_size __read_mostly = 128;
 
 /* HW BM need that each port be identify by a unique ID */
 static int global_port_id;
@@ -1840,7 +1844,7 @@ static int mvneta_rx_refill(struct mvnet
 	phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction;
 	dma_dir = page_pool_get_dma_dir(rxq->page_pool);
 	dma_sync_single_for_device(pp->dev->dev.parent, phys_addr,
-				   PAGE_SIZE, dma_dir);
+				   MVNETA_MAX_RX_BUF_SIZE, dma_dir);
 	mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
 
 	return 0;
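
The shrunk sync above pairs with the dma_sync_single_for_cpu() calls added
in the next hunk. As a hedged illustration (not code from this patch; the
helper names are hypothetical) of the ownership discipline that makes
streaming DMA safe on a non-cache-coherent SoC:

	/* Illustrative helpers only: the two halves of the streaming-DMA
	 * hand-off. CPU reads between give_to_device() and
	 * take_from_device() are exactly the pre-existing bug the commit
	 * message mentions. */
	static void give_to_device(struct device *dma_dev, dma_addr_t addr,
				   enum dma_data_direction dir)
	{
		/* Sync only what the NIC may write: the page tail holds
		 * struct skb_shared_info and is never touched by DMA. */
		dma_sync_single_for_device(dma_dev, addr,
					   MVNETA_MAX_RX_BUF_SIZE, dir);
	}

	static void take_from_device(struct device *dma_dev, dma_addr_t addr,
				     int len, enum dma_data_direction dir)
	{
		/* The CPU may read the received bytes only after this. */
		dma_sync_single_for_cpu(dma_dev, addr, len, dir);
	}
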
@@ -1958,30 +1962,102 @@ int mvneta_rx_refill_queue(struct mvneta
 	return i;
 }
 
+static int
+mvneta_swbm_rx_frame(struct mvneta_port *pp,
+		     struct mvneta_rx_desc *rx_desc,
+		     struct mvneta_rx_queue *rxq,
+		     struct page *page)
+{
+	unsigned char *data = page_address(page);
+	int data_len = -MVNETA_MH_SIZE, len;
+	struct net_device *dev = pp->dev;
+	enum dma_data_direction dma_dir;
+
+	if (MVNETA_SKB_SIZE(rx_desc->data_size) > PAGE_SIZE) {
+		len = MVNETA_MAX_RX_BUF_SIZE;
+		data_len += len;
+	} else {
+		len = rx_desc->data_size;
+		data_len += len - ETH_FCS_LEN;
+	}
+
+	dma_dir = page_pool_get_dma_dir(rxq->page_pool);
+	dma_sync_single_for_cpu(dev->dev.parent,
+				rx_desc->buf_phys_addr,
+				len, dma_dir);
+
+	rxq->skb = build_skb(data, PAGE_SIZE);
+	if (unlikely(!rxq->skb)) {
+		netdev_err(dev,
+			   "Can't allocate skb on queue %d\n",
+			   rxq->id);
+		dev->stats.rx_dropped++;
+		rxq->skb_alloc_err++;
+		return -ENOMEM;
+	}
+	page_pool_release_page(rxq->page_pool, page);
+
+	skb_reserve(rxq->skb, MVNETA_MH_SIZE + NET_SKB_PAD);
+	skb_put(rxq->skb, data_len);
+	mvneta_rx_csum(pp, rx_desc->status, rxq->skb);
+
+	rxq->left_size = rx_desc->data_size - len;
+	rx_desc->buf_phys_addr = 0;
+
+	return 0;
+}
+
+static void
+mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
+			    struct mvneta_rx_desc *rx_desc,
+			    struct mvneta_rx_queue *rxq,
+			    struct page *page)
+{
+	struct net_device *dev = pp->dev;
+	enum dma_data_direction dma_dir;
+	int data_len, len;
+
+	if (rxq->left_size > MVNETA_MAX_RX_BUF_SIZE) {
+		len = MVNETA_MAX_RX_BUF_SIZE;
+		data_len = len;
+	} else {
+		len = rxq->left_size;
+		data_len = len - ETH_FCS_LEN;
+	}
+	dma_dir = page_pool_get_dma_dir(rxq->page_pool);
+	dma_sync_single_for_cpu(dev->dev.parent,
+				rx_desc->buf_phys_addr,
+				len, dma_dir);
+	if (data_len > 0) {
+		/* refill descriptor with new buffer later */
+		skb_add_rx_frag(rxq->skb,
+				skb_shinfo(rxq->skb)->nr_frags,
+				page, NET_SKB_PAD, data_len,
+				PAGE_SIZE);
+	}
+	page_pool_release_page(rxq->page_pool, page);
+	rx_desc->buf_phys_addr = 0;
+	rxq->left_size -= len;
+}
+
 /* Main rx processing when using software buffer management */
 static int mvneta_rx_swbm(struct napi_struct *napi,
 			  struct mvneta_port *pp, int budget,
 			  struct mvneta_rx_queue *rxq)
 {
+	int rcvd_pkts = 0, rcvd_bytes = 0, rx_proc = 0;
 	struct net_device *dev = pp->dev;
-	int rx_todo, rx_proc;
-	int refill = 0;
-	u32 rcvd_pkts = 0;
-	u32 rcvd_bytes = 0;
+	int rx_todo, refill;
 
 	/* Get number of received packets */
 	rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
-	rx_proc = 0;
 
 	/* Fairness NAPI loop */
-	while ((rcvd_pkts < budget) && (rx_proc < rx_todo)) {
+	while (rx_proc < budget && rx_proc < rx_todo) {
 		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
+		u32 rx_status, index;
 		unsigned char *data;
 		struct page *page;
-		dma_addr_t phys_addr;
-		u32 rx_status, index;
-		int rx_bytes, skb_size, copy_size;
-		int frag_num, frag_size, frag_offset;
 
 		index = rx_desc - rxq->descs;
 		page = (struct page *)rxq->buf_virt_addr[index];
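
To make the new length bookkeeping concrete, here is a small userspace
simulation of how mvneta_swbm_rx_frame() and mvneta_swbm_add_rx_fragment()
split a frame across descriptors. It is an illustration only: the frame
size is hypothetical, and it assumes, as the new code implies, that the
first descriptor's data_size carries the full frame length including the
two-byte Marvell header (MVNETA_MH_SIZE) and the four-byte FCS, both of
which are excluded from the resulting skb. The real first-descriptor test,
MVNETA_SKB_SIZE(data_size) > PAGE_SIZE, is equivalent (for a
cache-line-aligned PAGE_SIZE) to data_size > MVNETA_MAX_RX_BUF_SIZE, which
the sketch uses:

	#include <stdio.h>

	#define MVNETA_MH_SIZE		2
	#define ETH_FCS_LEN		4
	#define MVNETA_MAX_RX_BUF_SIZE	3712	/* 4 KiB page, see macro sketch */

	int main(void)
	{
		int data_size = 8996;	/* hypothetical jumbo frame */
		int len, data_len, left_size;

		/* first descriptor: mvneta_swbm_rx_frame() */
		if (data_size > MVNETA_MAX_RX_BUF_SIZE) {
			len = MVNETA_MAX_RX_BUF_SIZE;
			data_len = -MVNETA_MH_SIZE + len;
		} else {
			len = data_size;
			data_len = -MVNETA_MH_SIZE + len - ETH_FCS_LEN;
		}
		left_size = data_size - len;
		printf("first: len=%d data_len=%d left=%d\n", len, data_len, left_size);

		/* middle/last descriptors: mvneta_swbm_add_rx_fragment() */
		while (left_size > 0) {
			if (left_size > MVNETA_MAX_RX_BUF_SIZE) {
				len = MVNETA_MAX_RX_BUF_SIZE;
				data_len = len;
			} else {
				len = left_size;
				data_len = len - ETH_FCS_LEN; /* FCS-only tail gives 0 */
			}
			left_size -= len;
			printf("frag:  len=%d data_len=%d left=%d\n", len, data_len, left_size);
		}
		return 0;	/* skb chunks of 3710, 3712 and 1568 bytes */
	}
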
@@ -1989,100 +2065,30 @@ static int mvneta_rx_swbm(struct napi_st
 		/* Prefetch header */
 		prefetch(data);
 
-		phys_addr = rx_desc->buf_phys_addr;
 		rx_status = rx_desc->status;
 		rx_proc++;
 		rxq->refill_num++;
 
 		if (rx_status & MVNETA_RXD_FIRST_DESC) {
+			int err;
+
 			/* Check errors only for FIRST descriptor */
 			if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
 				mvneta_rx_error(pp, rx_desc);
 				/* leave the descriptor untouched */
 				continue;
 			}
-			rx_bytes = rx_desc->data_size -
-				   (ETH_FCS_LEN + MVNETA_MH_SIZE);
-
-			/* Allocate small skb for each new packet */
-			skb_size = max(rx_copybreak, rx_header_size);
-			rxq->skb = netdev_alloc_skb_ip_align(dev, skb_size);
-			if (unlikely(!rxq->skb)) {
-				struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
-
-				netdev_err(dev,
-					   "Can't allocate skb on queue %d\n",
-					   rxq->id);
-
-				rxq->skb_alloc_err++;
 
-				u64_stats_update_begin(&stats->syncp);
-				stats->rx_dropped++;
-				u64_stats_update_end(&stats->syncp);
+			err = mvneta_swbm_rx_frame(pp, rx_desc, rxq, page);
+			if (err)
 				continue;
-			}
-			copy_size = min(skb_size, rx_bytes);
-
-			/* Copy data from buffer to SKB, skip Marvell header */
-			memcpy(rxq->skb->data, data + MVNETA_MH_SIZE,
-			       copy_size);
-			skb_put(rxq->skb, copy_size);
-			rxq->left_size = rx_bytes - copy_size;
-
-			mvneta_rx_csum(pp, rx_status, rxq->skb);
-			if (rxq->left_size == 0) {
-				int size = copy_size + MVNETA_MH_SIZE;
-
-				dma_sync_single_range_for_cpu(dev->dev.parent,
-							      phys_addr, 0,
-							      size,
-							      DMA_FROM_DEVICE);
-
-				/* leave the descriptor and buffer untouched */
-			} else {
-				/* refill descriptor with new buffer later */
-				rx_desc->buf_phys_addr = 0;
-
-				frag_num = 0;
-				frag_offset = copy_size + MVNETA_MH_SIZE;
-				frag_size = min(rxq->left_size,
-						(int)(PAGE_SIZE - frag_offset));
-				skb_add_rx_frag(rxq->skb, frag_num, page,
-						frag_offset, frag_size,
-						PAGE_SIZE);
-				page_pool_release_page(rxq->page_pool, page);
-				rxq->left_size -= frag_size;
-			}
 		} else {
-			/* Middle or Last descriptor */
 			if (unlikely(!rxq->skb)) {
 				pr_debug("no skb for rx_status 0x%x\n",
 					 rx_status);
 				continue;
 			}
-			if (!rxq->left_size) {
-				/* last descriptor has only FCS */
-				/* and can be discarded */
-				dma_sync_single_range_for_cpu(dev->dev.parent,
-							      phys_addr, 0,
-							      ETH_FCS_LEN,
-							      DMA_FROM_DEVICE);
-				/* leave the descriptor and buffer untouched */
-			} else {
-				/* refill descriptor with new buffer later */
-				rx_desc->buf_phys_addr = 0;
-
-				frag_num = skb_shinfo(rxq->skb)->nr_frags;
-				frag_offset = 0;
-				frag_size = min(rxq->left_size,
						(int)(PAGE_SIZE - frag_offset));
-				skb_add_rx_frag(rxq->skb, frag_num, page,
-						frag_offset, frag_size,
-						PAGE_SIZE);
-
-				page_pool_release_page(rxq->page_pool, page);
-				rxq->left_size -= frag_size;
-			}
+			mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, page);
 		} /* Middle or Last descriptor */
 
 		if (!(rx_status & MVNETA_RXD_LAST_DESC))
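
After this hunk the per-descriptor dispatch reduces to the following shape
(an editor's condensation of the code above, with refill and statistics
bookkeeping elided; illustrative only):

	/* Condensed, illustrative view of the rewritten loop body. */
	while (rx_proc < budget && rx_proc < rx_todo) {
		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
		struct page *page;
		u32 rx_status;

		page = (struct page *)rxq->buf_virt_addr[rx_desc - rxq->descs];
		rx_status = rx_desc->status;
		rx_proc++;
		rxq->refill_num++;

		if (rx_status & MVNETA_RXD_FIRST_DESC) {
			/* builds rxq->skb around the page via build_skb() */
			if (mvneta_swbm_rx_frame(pp, rx_desc, rxq, page))
				continue;
		} else {
			/* appends the page as a frag of rxq->skb */
			mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, page);
		}

		if (!(rx_status & MVNETA_RXD_LAST_DESC))
			continue;	/* frame continues in the next descriptor */

		/* ... deliver rxq->skb to the stack, then rxq->skb = NULL ... */
	}
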
@@ -2107,7 +2113,6 @@ static int mvneta_rx_swbm(struct napi_st
 
 		/* clean uncomplete skb pointer in queue */
 		rxq->skb = NULL;
-		rxq->left_size = 0;
 	}
 
 	if (rcvd_pkts)
@@ -2968,7 +2973,7 @@ static void mvneta_rxq_hw_init(struct mv
 		/* Set Offset */
 		mvneta_rxq_offset_set(pp, rxq, 0);
 		mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
-					PAGE_SIZE :
+					MVNETA_MAX_RX_BUF_SIZE :
 					MVNETA_RX_BUF_SIZE(pp->pkt_size));
 		mvneta_rxq_bm_disable(pp, rxq);
 		mvneta_rxq_fill(pp, rxq, rxq->size);
@@ -4678,7 +4683,7 @@ static int mvneta_probe(struct platform_
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
 	pp->id = global_port_id++;
-	pp->rx_offset_correction = 0; /* not relevant for SW BM */
+	pp->rx_offset_correction = NET_SKB_PAD;
 
 	/* Obtain access to BM resources if enabled and already initialized */
 	bm_node = of_parse_phandle(dn, "buffer-manager", 0);
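
Putting the pieces together: rx_offset_correction = NET_SKB_PAD supplies
the headroom that mvneta_swbm_rx_frame() later reclaims with
skb_reserve(), so after this patch the RX page is tiled as headroom, then
the NIC-writable DMA area, then skb_shared_info. The userspace check below
is illustrative only and reuses the assumptions of the macro sketch above
(4 KiB page, NET_SKB_PAD = 64, sizeof(struct skb_shared_info) = 320,
64-byte cache lines):

	#include <assert.h>

	int main(void)
	{
		int page_size = 4096;
		int headroom = 64;	/* NET_SKB_PAD = rx_offset_correction */
		int shinfo = 320;	/* assumed skb_shared_info size */
		int skb_pad = 384;	/* MVNETA_SKB_PAD = align(320 + 64, 64) */
		int dma_area = page_size - skb_pad; /* MVNETA_MAX_RX_BUF_SIZE */

		/* headroom + NIC-writable area + skb_shared_info tile the
		 * page: [0,64) headroom | [64,3776) DMA | [3776,4096) shinfo */
		assert(headroom + dma_area + shinfo == page_size);
		return 0;
	}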