- From 8531c80800c10e8ef7952022326c2f983e1314bf Mon Sep 17 00:00:00 2001
- From: Christian Marangi <[email protected]>
- Date: Sat, 23 Jul 2022 16:29:31 +0200
- Subject: [PATCH 3/5] net: ethernet: stmicro: stmmac: move dma conf to
- dedicated struct
- Move dma buf conf to a dedicated struct. This is in preparation for a code
- rework that will permit allocating a separate dma_conf without affecting
- the priv struct.
- Signed-off-by: Christian Marangi <[email protected]>
- Signed-off-by: Jakub Kicinski <[email protected]>
- ---
- .../net/ethernet/stmicro/stmmac/chain_mode.c | 6 +-
- .../net/ethernet/stmicro/stmmac/ring_mode.c | 4 +-
- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 21 +-
- .../ethernet/stmicro/stmmac/stmmac_ethtool.c | 4 +-
- .../net/ethernet/stmicro/stmmac/stmmac_main.c | 286 +++++++++---------
- .../stmicro/stmmac/stmmac_selftests.c | 8 +-
- .../net/ethernet/stmicro/stmmac/stmmac_tc.c | 6 +-
- 7 files changed, 172 insertions(+), 163 deletions(-)
- --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
- +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
- @@ -46,7 +46,7 @@ static int jumbo_frm(void *p, struct sk_
-
- while (len != 0) {
- tx_q->tx_skbuff[entry] = NULL;
- - entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
- + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
- desc = tx_q->dma_tx + entry;
-
- if (len > bmax) {
- @@ -137,7 +137,7 @@ static void refill_desc3(void *priv_ptr,
- */
- p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy +
- (((rx_q->dirty_rx) + 1) %
- - priv->dma_rx_size) *
- + priv->dma_conf.dma_rx_size) *
- sizeof(struct dma_desc)));
- }
-
- @@ -155,7 +155,7 @@ static void clean_desc3(void *priv_ptr,
- */
- p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy +
- ((tx_q->dirty_tx + 1) %
- - priv->dma_tx_size))
- + priv->dma_conf.dma_tx_size))
- * sizeof(struct dma_desc)));
- }
-
- --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
- +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
- @@ -51,7 +51,7 @@ static int jumbo_frm(void *p, struct sk_
- stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
- STMMAC_RING_MODE, 0, false, skb->len);
- tx_q->tx_skbuff[entry] = NULL;
- - entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
- + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
-
- if (priv->extend_desc)
- desc = (struct dma_desc *)(tx_q->dma_etx + entry);
- @@ -107,7 +107,7 @@ static void refill_desc3(void *priv_ptr,
- struct stmmac_priv *priv = rx_q->priv_data;
-
- /* Fill DES3 in case of RING mode */
- - if (priv->dma_buf_sz == BUF_SIZE_16KiB)
- + if (priv->dma_conf.dma_buf_sz == BUF_SIZE_16KiB)
- p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
- }
-
- --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
- +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
- @@ -185,6 +185,18 @@ struct stmmac_rfs_entry {
- int tc;
- };
-
- +struct stmmac_dma_conf {
- + unsigned int dma_buf_sz;
- +
- + /* RX Queue */
- + struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];
- + unsigned int dma_rx_size;
- +
- + /* TX Queue */
- + struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
- + unsigned int dma_tx_size;
- +};
- +
- struct stmmac_priv {
- /* Frequently used values are kept adjacent for cache effect */
- u32 tx_coal_frames[MTL_MAX_TX_QUEUES];
- @@ -199,7 +211,6 @@ struct stmmac_priv {
- int sph_cap;
- u32 sarc_type;
-
- - unsigned int dma_buf_sz;
- unsigned int rx_copybreak;
- u32 rx_riwt[MTL_MAX_TX_QUEUES];
- int hwts_rx_en;
- @@ -211,13 +222,7 @@ struct stmmac_priv {
- int (*hwif_quirks)(struct stmmac_priv *priv);
- struct mutex lock;
-
- - /* RX Queue */
- - struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];
- - unsigned int dma_rx_size;
- -
- - /* TX Queue */
- - struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
- - unsigned int dma_tx_size;
- + struct stmmac_dma_conf dma_conf;
-
- /* Generic channel for NAPI */
- struct stmmac_channel channel[STMMAC_CH_MAX];
- --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
- +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
- @@ -484,8 +484,8 @@ static void stmmac_get_ringparam(struct
-
- ring->rx_max_pending = DMA_MAX_RX_SIZE;
- ring->tx_max_pending = DMA_MAX_TX_SIZE;
- - ring->rx_pending = priv->dma_rx_size;
- - ring->tx_pending = priv->dma_tx_size;
- + ring->rx_pending = priv->dma_conf.dma_rx_size;
- + ring->tx_pending = priv->dma_conf.dma_tx_size;
- }
-
- static int stmmac_set_ringparam(struct net_device *netdev,
- --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
- +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
- @@ -74,8 +74,8 @@ static int phyaddr = -1;
- module_param(phyaddr, int, 0444);
- MODULE_PARM_DESC(phyaddr, "Physical device address");
-
- -#define STMMAC_TX_THRESH(x) ((x)->dma_tx_size / 4)
- -#define STMMAC_RX_THRESH(x) ((x)->dma_rx_size / 4)
- +#define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
- +#define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4)
-
- /* Limit to make sure XDP TX and slow path can coexist */
- #define STMMAC_XSK_TX_BUDGET_MAX 256
- @@ -232,7 +232,7 @@ static void stmmac_disable_all_queues(st
-
- /* synchronize_rcu() needed for pending XDP buffers to drain */
- for (queue = 0; queue < rx_queues_cnt; queue++) {
- - rx_q = &priv->rx_queue[queue];
- + rx_q = &priv->dma_conf.rx_queue[queue];
- if (rx_q->xsk_pool) {
- synchronize_rcu();
- break;
- @@ -358,13 +358,13 @@ static void print_pkt(unsigned char *buf
-
- static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
- {
- - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
- + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
- u32 avail;
-
- if (tx_q->dirty_tx > tx_q->cur_tx)
- avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
- else
- - avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
- + avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
-
- return avail;
- }
- @@ -376,13 +376,13 @@ static inline u32 stmmac_tx_avail(struct
- */
- static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
- {
- - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
- u32 dirty;
-
- if (rx_q->dirty_rx <= rx_q->cur_rx)
- dirty = rx_q->cur_rx - rx_q->dirty_rx;
- else
- - dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
- + dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
-
- return dirty;
- }
- @@ -410,7 +410,7 @@ static int stmmac_enable_eee_mode(struct
-
- /* check if all TX queues have the work finished */
- for (queue = 0; queue < tx_cnt; queue++) {
- - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
- + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
-
- if (tx_q->dirty_tx != tx_q->cur_tx)
- return -EBUSY; /* still unfinished work */
- @@ -1310,7 +1310,7 @@ static void stmmac_display_rx_rings(stru
-
- /* Display RX rings */
- for (queue = 0; queue < rx_cnt; queue++) {
- - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
-
- pr_info("\tRX Queue %u rings\n", queue);
-
- @@ -1323,7 +1323,7 @@ static void stmmac_display_rx_rings(stru
- }
-
- /* Display RX ring */
- - stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
- + stmmac_display_ring(priv, head_rx, priv->dma_conf.dma_rx_size, true,
- rx_q->dma_rx_phy, desc_size);
- }
- }
- @@ -1337,7 +1337,7 @@ static void stmmac_display_tx_rings(stru
-
- /* Display TX rings */
- for (queue = 0; queue < tx_cnt; queue++) {
- - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
- + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
-
- pr_info("\tTX Queue %d rings\n", queue);
-
- @@ -1352,7 +1352,7 @@ static void stmmac_display_tx_rings(stru
- desc_size = sizeof(struct dma_desc);
- }
-
- - stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
- + stmmac_display_ring(priv, head_tx, priv->dma_conf.dma_tx_size, false,
- tx_q->dma_tx_phy, desc_size);
- }
- }
- @@ -1393,21 +1393,21 @@ static int stmmac_set_bfsize(int mtu, in
- */
- static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
- {
- - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
- int i;
-
- /* Clear the RX descriptors */
- - for (i = 0; i < priv->dma_rx_size; i++)
- + for (i = 0; i < priv->dma_conf.dma_rx_size; i++)
- if (priv->extend_desc)
- stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
- priv->use_riwt, priv->mode,
- - (i == priv->dma_rx_size - 1),
- - priv->dma_buf_sz);
- + (i == priv->dma_conf.dma_rx_size - 1),
- + priv->dma_conf.dma_buf_sz);
- else
- stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
- priv->use_riwt, priv->mode,
- - (i == priv->dma_rx_size - 1),
- - priv->dma_buf_sz);
- + (i == priv->dma_conf.dma_rx_size - 1),
- + priv->dma_conf.dma_buf_sz);
- }
-
- /**
- @@ -1419,12 +1419,12 @@ static void stmmac_clear_rx_descriptors(
- */
- static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
- {
- - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
- + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
- int i;
-
- /* Clear the TX descriptors */
- - for (i = 0; i < priv->dma_tx_size; i++) {
- - int last = (i == (priv->dma_tx_size - 1));
- + for (i = 0; i < priv->dma_conf.dma_tx_size; i++) {
- + int last = (i == (priv->dma_conf.dma_tx_size - 1));
- struct dma_desc *p;
-
- if (priv->extend_desc)
- @@ -1472,7 +1472,7 @@ static void stmmac_clear_descriptors(str
- static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
- int i, gfp_t flags, u32 queue)
- {
- - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
- struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
-
- if (!buf->page) {
- @@ -1497,7 +1497,7 @@ static int stmmac_init_rx_buffers(struct
- buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
-
- stmmac_set_desc_addr(priv, p, buf->addr);
- - if (priv->dma_buf_sz == BUF_SIZE_16KiB)
- + if (priv->dma_conf.dma_buf_sz == BUF_SIZE_16KiB)
- stmmac_init_desc3(priv, p);
-
- return 0;
- @@ -1511,7 +1511,7 @@ static int stmmac_init_rx_buffers(struct
- */
- static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
- {
- - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
- struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
-
- if (buf->page)
- @@ -1531,7 +1531,7 @@ static void stmmac_free_rx_buffer(struct
- */
- static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
- {
- - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
- + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
-
- if (tx_q->tx_skbuff_dma[i].buf &&
- tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
- @@ -1576,17 +1576,17 @@ static void dma_free_rx_skbufs(struct st
- {
- int i;
-
- - for (i = 0; i < priv->dma_rx_size; i++)
- + for (i = 0; i < priv->dma_conf.dma_rx_size; i++)
- stmmac_free_rx_buffer(priv, queue, i);
- }
-
- static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
- gfp_t flags)
- {
- - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
- int i;
-
- - for (i = 0; i < priv->dma_rx_size; i++) {
- + for (i = 0; i < priv->dma_conf.dma_rx_size; i++) {
- struct dma_desc *p;
- int ret;
-
- @@ -1613,10 +1613,10 @@ static int stmmac_alloc_rx_buffers(struc
- */
- static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
- {
- - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
- int i;
-
- - for (i = 0; i < priv->dma_rx_size; i++) {
- + for (i = 0; i < priv->dma_conf.dma_rx_size; i++) {
- struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
-
- if (!buf->xdp)
- @@ -1629,10 +1629,10 @@ static void dma_free_rx_xskbufs(struct s
-
- static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
- {
- - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
- int i;
-
- - for (i = 0; i < priv->dma_rx_size; i++) {
- + for (i = 0; i < priv->dma_conf.dma_rx_size; i++) {
- struct stmmac_rx_buffer *buf;
- dma_addr_t dma_addr;
- struct dma_desc *p;
- @@ -1675,7 +1675,7 @@ static struct xsk_buff_pool *stmmac_get_
- */
- static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
- {
- - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
- int ret;
-
- netif_dbg(priv, probe, priv->dev,
- @@ -1721,11 +1721,11 @@ static int __init_dma_rx_desc_rings(stru
- if (priv->extend_desc)
- stmmac_mode_init(priv, rx_q->dma_erx,
- rx_q->dma_rx_phy,
- - priv->dma_rx_size, 1);
- + priv->dma_conf.dma_rx_size, 1);
- else
- stmmac_mode_init(priv, rx_q->dma_rx,
- rx_q->dma_rx_phy,
- - priv->dma_rx_size, 0);
- + priv->dma_conf.dma_rx_size, 0);
- }
-
- return 0;
- @@ -1752,7 +1752,7 @@ static int init_dma_rx_desc_rings(struct
-
- err_init_rx_buffers:
- while (queue >= 0) {
- - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
-
- if (rx_q->xsk_pool)
- dma_free_rx_xskbufs(priv, queue);
- @@ -1781,7 +1781,7 @@ err_init_rx_buffers:
- */
- static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
- {
- - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
- + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
- int i;
-
- netif_dbg(priv, probe, priv->dev,
- @@ -1793,16 +1793,16 @@ static int __init_dma_tx_desc_rings(stru
- if (priv->extend_desc)
- stmmac_mode_init(priv, tx_q->dma_etx,
- tx_q->dma_tx_phy,
- - priv->dma_tx_size, 1);
- + priv->dma_conf.dma_tx_size, 1);
- else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
- stmmac_mode_init(priv, tx_q->dma_tx,
- tx_q->dma_tx_phy,
- - priv->dma_tx_size, 0);
- + priv->dma_conf.dma_tx_size, 0);
- }
-
- tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
-
- - for (i = 0; i < priv->dma_tx_size; i++) {
- + for (i = 0; i < priv->dma_conf.dma_tx_size; i++) {
- struct dma_desc *p;
-
- if (priv->extend_desc)
- @@ -1872,12 +1872,12 @@ static int init_dma_desc_rings(struct ne
- */
- static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
- {
- - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
- + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
- int i;
-
- tx_q->xsk_frames_done = 0;
-
- - for (i = 0; i < priv->dma_tx_size; i++)
- + for (i = 0; i < priv->dma_conf.dma_tx_size; i++)
- stmmac_free_tx_buffer(priv, queue, i);
-
- if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
- @@ -1907,7 +1907,7 @@ static void stmmac_free_tx_skbufs(struct
- */
- static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
- {
- - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
-
- /* Release the DMA RX socket buffers */
- if (rx_q->xsk_pool)
- @@ -1920,11 +1920,11 @@ static void __free_dma_rx_desc_resources
-
- /* Free DMA regions of consistent memory previously allocated */
- if (!priv->extend_desc)
- - dma_free_coherent(priv->device, priv->dma_rx_size *
- + dma_free_coherent(priv->device, priv->dma_conf.dma_rx_size *
- sizeof(struct dma_desc),
- rx_q->dma_rx, rx_q->dma_rx_phy);
- else
- - dma_free_coherent(priv->device, priv->dma_rx_size *
- + dma_free_coherent(priv->device, priv->dma_conf.dma_rx_size *
- sizeof(struct dma_extended_desc),
- rx_q->dma_erx, rx_q->dma_rx_phy);
-
- @@ -1953,7 +1953,7 @@ static void free_dma_rx_desc_resources(s
- */
- static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
- {
- - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
- + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
- size_t size;
- void *addr;
-
- @@ -1971,7 +1971,7 @@ static void __free_dma_tx_desc_resources
- addr = tx_q->dma_tx;
- }
-
- - size *= priv->dma_tx_size;
- + size *= priv->dma_conf.dma_tx_size;
-
- dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
-
- @@ -2000,7 +2000,7 @@ static void free_dma_tx_desc_resources(s
- */
- static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
- {
- - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
- struct stmmac_channel *ch = &priv->channel[queue];
- bool xdp_prog = stmmac_xdp_is_enabled(priv);
- struct page_pool_params pp_params = { 0 };
- @@ -2012,8 +2012,8 @@ static int __alloc_dma_rx_desc_resources
- rx_q->priv_data = priv;
-
- pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
- - pp_params.pool_size = priv->dma_rx_size;
- - num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
- + pp_params.pool_size = priv->dma_conf.dma_rx_size;
- + num_pages = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE);
- pp_params.order = ilog2(num_pages);
- pp_params.nid = dev_to_node(priv->device);
- pp_params.dev = priv->device;
- @@ -2028,7 +2028,7 @@ static int __alloc_dma_rx_desc_resources
- return ret;
- }
-
- - rx_q->buf_pool = kcalloc(priv->dma_rx_size,
- + rx_q->buf_pool = kcalloc(priv->dma_conf.dma_rx_size,
- sizeof(*rx_q->buf_pool),
- GFP_KERNEL);
- if (!rx_q->buf_pool)
- @@ -2036,7 +2036,7 @@ static int __alloc_dma_rx_desc_resources
-
- if (priv->extend_desc) {
- rx_q->dma_erx = dma_alloc_coherent(priv->device,
- - priv->dma_rx_size *
- + priv->dma_conf.dma_rx_size *
- sizeof(struct dma_extended_desc),
- &rx_q->dma_rx_phy,
- GFP_KERNEL);
- @@ -2045,7 +2045,7 @@ static int __alloc_dma_rx_desc_resources
-
- } else {
- rx_q->dma_rx = dma_alloc_coherent(priv->device,
- - priv->dma_rx_size *
- + priv->dma_conf.dma_rx_size *
- sizeof(struct dma_desc),
- &rx_q->dma_rx_phy,
- GFP_KERNEL);
- @@ -2102,20 +2102,20 @@ err_dma:
- */
- static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
- {
- - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
- + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
- size_t size;
- void *addr;
-
- tx_q->queue_index = queue;
- tx_q->priv_data = priv;
-
- - tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
- + tx_q->tx_skbuff_dma = kcalloc(priv->dma_conf.dma_tx_size,
- sizeof(*tx_q->tx_skbuff_dma),
- GFP_KERNEL);
- if (!tx_q->tx_skbuff_dma)
- return -ENOMEM;
-
- - tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
- + tx_q->tx_skbuff = kcalloc(priv->dma_conf.dma_tx_size,
- sizeof(struct sk_buff *),
- GFP_KERNEL);
- if (!tx_q->tx_skbuff)
- @@ -2128,7 +2128,7 @@ static int __alloc_dma_tx_desc_resources
- else
- size = sizeof(struct dma_desc);
-
- - size *= priv->dma_tx_size;
- + size *= priv->dma_conf.dma_tx_size;
-
- addr = dma_alloc_coherent(priv->device, size,
- &tx_q->dma_tx_phy, GFP_KERNEL);
- @@ -2372,7 +2372,7 @@ static void stmmac_dma_operation_mode(st
-
- /* configure all channels */
- for (chan = 0; chan < rx_channels_count; chan++) {
- - struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
- + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
- u32 buf_size;
-
- qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
- @@ -2387,7 +2387,7 @@ static void stmmac_dma_operation_mode(st
- chan);
- } else {
- stmmac_set_dma_bfsize(priv, priv->ioaddr,
- - priv->dma_buf_sz,
- + priv->dma_conf.dma_buf_sz,
- chan);
- }
- }
- @@ -2403,7 +2403,7 @@ static void stmmac_dma_operation_mode(st
- static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
- {
- struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
- - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
- + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
- struct xsk_buff_pool *pool = tx_q->xsk_pool;
- unsigned int entry = tx_q->cur_tx;
- struct dma_desc *tx_desc = NULL;
- @@ -2478,7 +2478,7 @@ static bool stmmac_xdp_xmit_zc(struct st
-
- stmmac_enable_dma_transmission(priv, priv->ioaddr);
-
- - tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
- + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
- entry = tx_q->cur_tx;
- }
-
- @@ -2504,7 +2504,7 @@ static bool stmmac_xdp_xmit_zc(struct st
- */
- static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
- {
- - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
- + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
- unsigned int bytes_compl = 0, pkts_compl = 0;
- unsigned int entry, xmits = 0, count = 0;
-
- @@ -2517,7 +2517,7 @@ static int stmmac_tx_clean(struct stmmac
- entry = tx_q->dirty_tx;
-
- /* Try to clean all TX complete frame in 1 shot */
- - while ((entry != tx_q->cur_tx) && count < priv->dma_tx_size) {
- + while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
- struct xdp_frame *xdpf;
- struct sk_buff *skb;
- struct dma_desc *p;
- @@ -2617,7 +2617,7 @@ static int stmmac_tx_clean(struct stmmac
-
- stmmac_release_tx_desc(priv, p, priv->mode);
-
- - entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
- + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
- }
- tx_q->dirty_tx = entry;
-
- @@ -2682,7 +2682,7 @@ static int stmmac_tx_clean(struct stmmac
- */
- static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
- {
- - struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
- + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
-
- netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
-
- @@ -2749,8 +2749,8 @@ static int stmmac_napi_check(struct stmm
- {
- int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
- &priv->xstats, chan, dir);
- - struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
- - struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
- + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
- + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
- struct stmmac_channel *ch = &priv->channel[chan];
- struct napi_struct *rx_napi;
- struct napi_struct *tx_napi;
- @@ -2926,7 +2926,7 @@ static int stmmac_init_dma_engine(struct
-
- /* DMA RX Channel Configuration */
- for (chan = 0; chan < rx_channels_count; chan++) {
- - rx_q = &priv->rx_queue[chan];
- + rx_q = &priv->dma_conf.rx_queue[chan];
-
- stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
- rx_q->dma_rx_phy, chan);
- @@ -2940,7 +2940,7 @@ static int stmmac_init_dma_engine(struct
-
- /* DMA TX Channel Configuration */
- for (chan = 0; chan < tx_channels_count; chan++) {
- - tx_q = &priv->tx_queue[chan];
- + tx_q = &priv->dma_conf.tx_queue[chan];
-
- stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
- tx_q->dma_tx_phy, chan);
- @@ -2955,7 +2955,7 @@ static int stmmac_init_dma_engine(struct
-
- static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
- {
- - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
- + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
-
- hrtimer_start(&tx_q->txtimer,
- STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
- @@ -3005,7 +3005,7 @@ static void stmmac_init_coalesce(struct
- u32 chan;
-
- for (chan = 0; chan < tx_channel_count; chan++) {
- - struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
- + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
-
- priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
- priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
- @@ -3027,12 +3027,12 @@ static void stmmac_set_rings_length(stru
- /* set TX ring length */
- for (chan = 0; chan < tx_channels_count; chan++)
- stmmac_set_tx_ring_len(priv, priv->ioaddr,
- - (priv->dma_tx_size - 1), chan);
- + (priv->dma_conf.dma_tx_size - 1), chan);
-
- /* set RX ring length */
- for (chan = 0; chan < rx_channels_count; chan++)
- stmmac_set_rx_ring_len(priv, priv->ioaddr,
- - (priv->dma_rx_size - 1), chan);
- + (priv->dma_conf.dma_rx_size - 1), chan);
- }
-
- /**
- @@ -3367,7 +3367,7 @@ static int stmmac_hw_setup(struct net_de
- /* Enable TSO */
- if (priv->tso) {
- for (chan = 0; chan < tx_cnt; chan++) {
- - struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
- + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
-
- /* TSO and TBS cannot co-exist */
- if (tx_q->tbs & STMMAC_TBS_AVAIL)
- @@ -3389,7 +3389,7 @@ static int stmmac_hw_setup(struct net_de
-
- /* TBS */
- for (chan = 0; chan < tx_cnt; chan++) {
- - struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
- + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
- int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
-
- stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
- @@ -3433,7 +3433,7 @@ static void stmmac_free_irq(struct net_d
- for (j = irq_idx - 1; j >= 0; j--) {
- if (priv->tx_irq[j] > 0) {
- irq_set_affinity_hint(priv->tx_irq[j], NULL);
- - free_irq(priv->tx_irq[j], &priv->tx_queue[j]);
- + free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
- }
- }
- irq_idx = priv->plat->rx_queues_to_use;
- @@ -3442,7 +3442,7 @@ static void stmmac_free_irq(struct net_d
- for (j = irq_idx - 1; j >= 0; j--) {
- if (priv->rx_irq[j] > 0) {
- irq_set_affinity_hint(priv->rx_irq[j], NULL);
- - free_irq(priv->rx_irq[j], &priv->rx_queue[j]);
- + free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
- }
- }
-
- @@ -3575,7 +3575,7 @@ static int stmmac_request_irq_multi_msi(
- sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
- ret = request_irq(priv->rx_irq[i],
- stmmac_msi_intr_rx,
- - 0, int_name, &priv->rx_queue[i]);
- + 0, int_name, &priv->dma_conf.rx_queue[i]);
- if (unlikely(ret < 0)) {
- netdev_err(priv->dev,
- "%s: alloc rx-%d MSI %d (error: %d)\n",
- @@ -3598,7 +3598,7 @@ static int stmmac_request_irq_multi_msi(
- sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
- ret = request_irq(priv->tx_irq[i],
- stmmac_msi_intr_tx,
- - 0, int_name, &priv->tx_queue[i]);
- + 0, int_name, &priv->dma_conf.tx_queue[i]);
- if (unlikely(ret < 0)) {
- netdev_err(priv->dev,
- "%s: alloc tx-%d MSI %d (error: %d)\n",
- @@ -3729,21 +3729,21 @@ static int stmmac_open(struct net_device
- bfsize = 0;
-
- if (bfsize < BUF_SIZE_16KiB)
- - bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
- + bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_conf.dma_buf_sz);
-
- - priv->dma_buf_sz = bfsize;
- + priv->dma_conf.dma_buf_sz = bfsize;
- buf_sz = bfsize;
-
- priv->rx_copybreak = STMMAC_RX_COPYBREAK;
-
- - if (!priv->dma_tx_size)
- - priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
- - if (!priv->dma_rx_size)
- - priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
- + if (!priv->dma_conf.dma_tx_size)
- + priv->dma_conf.dma_tx_size = DMA_DEFAULT_TX_SIZE;
- + if (!priv->dma_conf.dma_rx_size)
- + priv->dma_conf.dma_rx_size = DMA_DEFAULT_RX_SIZE;
-
- /* Earlier check for TBS */
- for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
- - struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
- + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
- int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
-
- /* Setup per-TXQ tbs flag before TX descriptor alloc */
- @@ -3801,7 +3801,7 @@ irq_error:
- phylink_stop(priv->phylink);
-
- for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
- - hrtimer_cancel(&priv->tx_queue[chan].txtimer);
- + hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
-
- stmmac_hw_teardown(dev);
- init_error:
- @@ -3843,7 +3843,7 @@ static int stmmac_release(struct net_dev
- stmmac_disable_all_queues(priv);
-
- for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
- - hrtimer_cancel(&priv->tx_queue[chan].txtimer);
- + hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
-
- netif_tx_disable(dev);
-
- @@ -3907,7 +3907,7 @@ static bool stmmac_vlan_insert(struct st
- return false;
-
- stmmac_set_tx_owner(priv, p);
- - tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
- + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
- return true;
- }
-
- @@ -3925,7 +3925,7 @@ static bool stmmac_vlan_insert(struct st
- static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
- int total_len, bool last_segment, u32 queue)
- {
- - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
- + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
- struct dma_desc *desc;
- u32 buff_size;
- int tmp_len;
- @@ -3936,7 +3936,7 @@ static void stmmac_tso_allocator(struct
- dma_addr_t curr_addr;
-
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
- - priv->dma_tx_size);
- + priv->dma_conf.dma_tx_size);
- WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
-
- if (tx_q->tbs & STMMAC_TBS_AVAIL)
- @@ -3964,7 +3964,7 @@ static void stmmac_tso_allocator(struct
-
- static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
- {
- - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
- + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
- int desc_size;
-
- if (likely(priv->extend_desc))
- @@ -4026,7 +4026,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
- dma_addr_t des;
- int i;
-
- - tx_q = &priv->tx_queue[queue];
- + tx_q = &priv->dma_conf.tx_queue[queue];
- first_tx = tx_q->cur_tx;
-
- /* Compute header lengths */
- @@ -4066,7 +4066,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
- stmmac_set_mss(priv, mss_desc, mss);
- tx_q->mss = mss;
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
- - priv->dma_tx_size);
- + priv->dma_conf.dma_tx_size);
- WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
- }
-
- @@ -4178,7 +4178,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
- * ndo_start_xmit will fill this descriptor the next time it's
- * called and stmmac_tx_clean may clean up to this descriptor.
- */
- - tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
- + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
-
- if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
- netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
- @@ -4266,7 +4266,7 @@ static netdev_tx_t stmmac_xmit(struct sk
- int entry, first_tx;
- dma_addr_t des;
-
- - tx_q = &priv->tx_queue[queue];
- + tx_q = &priv->dma_conf.tx_queue[queue];
- first_tx = tx_q->cur_tx;
-
- if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
- @@ -4329,7 +4329,7 @@ static netdev_tx_t stmmac_xmit(struct sk
- int len = skb_frag_size(frag);
- bool last_segment = (i == (nfrags - 1));
-
- - entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
- + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
- WARN_ON(tx_q->tx_skbuff[entry]);
-
- if (likely(priv->extend_desc))
- @@ -4400,7 +4400,7 @@ static netdev_tx_t stmmac_xmit(struct sk
- * ndo_start_xmit will fill this descriptor the next time it's
- * called and stmmac_tx_clean may clean up to this descriptor.
- */
- - entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
- + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
- tx_q->cur_tx = entry;
-
- if (netif_msg_pktdata(priv)) {
- @@ -4515,7 +4515,7 @@ static void stmmac_rx_vlan(struct net_de
- */
- static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
- {
- - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
- int dirty = stmmac_rx_dirty(priv, queue);
- unsigned int entry = rx_q->dirty_rx;
-
- @@ -4565,7 +4565,7 @@ static inline void stmmac_rx_refill(stru
- dma_wmb();
- stmmac_set_rx_owner(priv, p, use_rx_wd);
-
- - entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
- + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
- }
- rx_q->dirty_rx = entry;
- rx_q->rx_tail_addr = rx_q->dma_rx_phy +
- @@ -4593,12 +4593,12 @@ static unsigned int stmmac_rx_buf1_len(s
-
- /* First descriptor, not last descriptor and not split header */
- if (status & rx_not_ls)
- - return priv->dma_buf_sz;
- + return priv->dma_conf.dma_buf_sz;
-
- plen = stmmac_get_rx_frame_len(priv, p, coe);
-
- /* First descriptor and last descriptor and not split header */
- - return min_t(unsigned int, priv->dma_buf_sz, plen);
- + return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
- }
-
- static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
- @@ -4614,7 +4614,7 @@ static unsigned int stmmac_rx_buf2_len(s
-
- /* Not last descriptor */
- if (status & rx_not_ls)
- - return priv->dma_buf_sz;
- + return priv->dma_conf.dma_buf_sz;
-
- plen = stmmac_get_rx_frame_len(priv, p, coe);
-
- @@ -4625,7 +4625,7 @@ static unsigned int stmmac_rx_buf2_len(s
- static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
- struct xdp_frame *xdpf, bool dma_map)
- {
- - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
- + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
- unsigned int entry = tx_q->cur_tx;
- struct dma_desc *tx_desc;
- dma_addr_t dma_addr;
- @@ -4688,7 +4688,7 @@ static int stmmac_xdp_xmit_xdpf(struct s
-
- stmmac_enable_dma_transmission(priv, priv->ioaddr);
-
- - entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
- + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
- tx_q->cur_tx = entry;
-
- return STMMAC_XDP_TX;
- @@ -4862,7 +4862,7 @@ static void stmmac_dispatch_skb_zc(struc
-
- static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
- {
- - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
- unsigned int entry = rx_q->dirty_rx;
- struct dma_desc *rx_desc = NULL;
- bool ret = true;
- @@ -4905,7 +4905,7 @@ static bool stmmac_rx_refill_zc(struct s
- dma_wmb();
- stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
-
- - entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
- + entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
- }
-
- if (rx_desc) {
- @@ -4920,7 +4920,7 @@ static bool stmmac_rx_refill_zc(struct s
-
- static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
- {
- - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
- unsigned int count = 0, error = 0, len = 0;
- int dirty = stmmac_rx_dirty(priv, queue);
- unsigned int next_entry = rx_q->cur_rx;
- @@ -4942,7 +4942,7 @@ static int stmmac_rx_zc(struct stmmac_pr
- desc_size = sizeof(struct dma_desc);
- }
-
- - stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
- + stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
- rx_q->dma_rx_phy, desc_size);
- }
- while (count < limit) {
- @@ -4989,7 +4989,7 @@ read_again:
-
- /* Prefetch the next RX descriptor */
- rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
- - priv->dma_rx_size);
- + priv->dma_conf.dma_rx_size);
- next_entry = rx_q->cur_rx;
-
- if (priv->extend_desc)
- @@ -5110,7 +5110,7 @@ read_again:
- */
- static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
- {
- - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
- struct stmmac_channel *ch = &priv->channel[queue];
- unsigned int count = 0, error = 0, len = 0;
- int status = 0, coe = priv->hw->rx_csum;
- @@ -5123,7 +5123,7 @@ static int stmmac_rx(struct stmmac_priv
- int buf_sz;
-
- dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
- - buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
- + buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
-
- if (netif_msg_rx_status(priv)) {
- void *rx_head;
- @@ -5137,7 +5137,7 @@ static int stmmac_rx(struct stmmac_priv
- desc_size = sizeof(struct dma_desc);
- }
-
- - stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
- + stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
- rx_q->dma_rx_phy, desc_size);
- }
- while (count < limit) {
- @@ -5181,7 +5181,7 @@ read_again:
- break;
-
- rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
- - priv->dma_rx_size);
- + priv->dma_conf.dma_rx_size);
- next_entry = rx_q->cur_rx;
-
- if (priv->extend_desc)
- @@ -5315,7 +5315,7 @@ read_again:
- buf1_len, dma_dir);
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- buf->page, buf->page_offset, buf1_len,
- - priv->dma_buf_sz);
- + priv->dma_conf.dma_buf_sz);
-
- /* Data payload appended into SKB */
- page_pool_release_page(rx_q->page_pool, buf->page);
- @@ -5327,7 +5327,7 @@ read_again:
- buf2_len, dma_dir);
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- buf->sec_page, 0, buf2_len,
- - priv->dma_buf_sz);
- + priv->dma_conf.dma_buf_sz);
-
- /* Data payload appended into SKB */
- page_pool_release_page(rx_q->page_pool, buf->sec_page);
- @@ -5770,11 +5770,13 @@ static irqreturn_t stmmac_safety_interru
- static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
- {
- struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
- + struct stmmac_dma_conf *dma_conf;
- int chan = tx_q->queue_index;
- struct stmmac_priv *priv;
- int status;
-
- - priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]);
- + dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
- + priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
-
- if (unlikely(!data)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- @@ -5814,10 +5816,12 @@ static irqreturn_t stmmac_msi_intr_tx(in
- static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
- {
- struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
- + struct stmmac_dma_conf *dma_conf;
- int chan = rx_q->queue_index;
- struct stmmac_priv *priv;
-
- - priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]);
- + dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
- + priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
-
- if (unlikely(!data)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- @@ -5848,10 +5852,10 @@ static void stmmac_poll_controller(struc
-
- if (priv->plat->multi_msi_en) {
- for (i = 0; i < priv->plat->rx_queues_to_use; i++)
- - stmmac_msi_intr_rx(0, &priv->rx_queue[i]);
- + stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]);
-
- for (i = 0; i < priv->plat->tx_queues_to_use; i++)
- - stmmac_msi_intr_tx(0, &priv->tx_queue[i]);
- + stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]);
- } else {
- disable_irq(dev->irq);
- stmmac_interrupt(dev->irq, dev);
- @@ -6032,34 +6036,34 @@ static int stmmac_rings_status_show(stru
- return 0;
-
- for (queue = 0; queue < rx_count; queue++) {
- - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
-
- seq_printf(seq, "RX Queue %d:\n", queue);
-
- if (priv->extend_desc) {
- seq_printf(seq, "Extended descriptor ring:\n");
- sysfs_display_ring((void *)rx_q->dma_erx,
- - priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
- + priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
- } else {
- seq_printf(seq, "Descriptor ring:\n");
- sysfs_display_ring((void *)rx_q->dma_rx,
- - priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
- + priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
- }
- }
-
- for (queue = 0; queue < tx_count; queue++) {
- - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
- + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
-
- seq_printf(seq, "TX Queue %d:\n", queue);
-
- if (priv->extend_desc) {
- seq_printf(seq, "Extended descriptor ring:\n");
- sysfs_display_ring((void *)tx_q->dma_etx,
- - priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
- + priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
- } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
- seq_printf(seq, "Descriptor ring:\n");
- sysfs_display_ring((void *)tx_q->dma_tx,
- - priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
- + priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
- }
- }
-
- @@ -6406,7 +6410,7 @@ void stmmac_disable_rx_queue(struct stmm
-
- void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
- {
- - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
- struct stmmac_channel *ch = &priv->channel[queue];
- unsigned long flags;
- u32 buf_size;
- @@ -6443,7 +6447,7 @@ void stmmac_enable_rx_queue(struct stmma
- rx_q->queue_index);
- } else {
- stmmac_set_dma_bfsize(priv, priv->ioaddr,
- - priv->dma_buf_sz,
- + priv->dma_conf.dma_buf_sz,
- rx_q->queue_index);
- }
-
- @@ -6469,7 +6473,7 @@ void stmmac_disable_tx_queue(struct stmm
-
- void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
- {
- - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
- + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
- struct stmmac_channel *ch = &priv->channel[queue];
- unsigned long flags;
- int ret;
- @@ -6519,7 +6523,7 @@ void stmmac_xdp_release(struct net_devic
- stmmac_disable_all_queues(priv);
-
- for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
- - hrtimer_cancel(&priv->tx_queue[chan].txtimer);
- + hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
-
- /* Free the IRQ lines */
- stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
- @@ -6578,7 +6582,7 @@ int stmmac_xdp_open(struct net_device *d
-
- /* DMA RX Channel Configuration */
- for (chan = 0; chan < rx_cnt; chan++) {
- - rx_q = &priv->rx_queue[chan];
- + rx_q = &priv->dma_conf.rx_queue[chan];
-
- stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
- rx_q->dma_rx_phy, chan);
- @@ -6596,7 +6600,7 @@ int stmmac_xdp_open(struct net_device *d
- rx_q->queue_index);
- } else {
- stmmac_set_dma_bfsize(priv, priv->ioaddr,
- - priv->dma_buf_sz,
- + priv->dma_conf.dma_buf_sz,
- rx_q->queue_index);
- }
-
- @@ -6605,7 +6609,7 @@ int stmmac_xdp_open(struct net_device *d
-
- /* DMA TX Channel Configuration */
- for (chan = 0; chan < tx_cnt; chan++) {
- - tx_q = &priv->tx_queue[chan];
- + tx_q = &priv->dma_conf.tx_queue[chan];
-
- stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
- tx_q->dma_tx_phy, chan);
- @@ -6638,7 +6642,7 @@ int stmmac_xdp_open(struct net_device *d
-
- irq_error:
- for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
- - hrtimer_cancel(&priv->tx_queue[chan].txtimer);
- + hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
-
- stmmac_hw_teardown(dev);
- init_error:
- @@ -6665,8 +6669,8 @@ int stmmac_xsk_wakeup(struct net_device
- queue >= priv->plat->tx_queues_to_use)
- return -EINVAL;
-
- - rx_q = &priv->rx_queue[queue];
- - tx_q = &priv->tx_queue[queue];
- + rx_q = &priv->dma_conf.rx_queue[queue];
- + tx_q = &priv->dma_conf.tx_queue[queue];
- ch = &priv->channel[queue];
-
- if (!rx_q->xsk_pool && !tx_q->xsk_pool)
- @@ -6926,8 +6930,8 @@ int stmmac_reinit_ringparam(struct net_d
- if (netif_running(dev))
- stmmac_release(dev);
-
- - priv->dma_rx_size = rx_size;
- - priv->dma_tx_size = tx_size;
- + priv->dma_conf.dma_rx_size = rx_size;
- + priv->dma_conf.dma_tx_size = tx_size;
-
- if (netif_running(dev))
- ret = stmmac_open(dev);
- @@ -7362,7 +7366,7 @@ int stmmac_suspend(struct device *dev)
- stmmac_disable_all_queues(priv);
-
- for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
- - hrtimer_cancel(&priv->tx_queue[chan].txtimer);
- + hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
-
- if (priv->eee_enabled) {
- priv->tx_path_in_lpi_mode = false;
- @@ -7414,7 +7418,7 @@ EXPORT_SYMBOL_GPL(stmmac_suspend);
-
- static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
- {
- - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- + struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
-
- rx_q->cur_rx = 0;
- rx_q->dirty_rx = 0;
- @@ -7422,7 +7426,7 @@ static void stmmac_reset_rx_queue(struct
-
- static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
- {
- - struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
- + struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
-
- tx_q->cur_tx = 0;
- tx_q->dirty_tx = 0;
- --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
- +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
- @@ -795,8 +795,8 @@ static int stmmac_test_flowctrl(struct s
- struct stmmac_channel *ch = &priv->channel[i];
- u32 tail;
-
- - tail = priv->rx_queue[i].dma_rx_phy +
- - (priv->dma_rx_size * sizeof(struct dma_desc));
- + tail = priv->dma_conf.rx_queue[i].dma_rx_phy +
- + (priv->dma_conf.dma_rx_size * sizeof(struct dma_desc));
-
- stmmac_set_rx_tail_ptr(priv, priv->ioaddr, tail, i);
- stmmac_start_rx(priv, priv->ioaddr, i);
- @@ -1684,7 +1684,7 @@ cleanup:
- static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue)
- {
- struct stmmac_packet_attrs attr = { };
- - int size = priv->dma_buf_sz;
- + int size = priv->dma_conf.dma_buf_sz;
-
- attr.dst = priv->dev->dev_addr;
- attr.max_size = size - ETH_FCS_LEN;
- @@ -1767,7 +1767,7 @@ static int stmmac_test_tbs(struct stmmac
-
- /* Find first TBS enabled Queue, if any */
- for (i = 0; i < priv->plat->tx_queues_to_use; i++)
- - if (priv->tx_queue[i].tbs & STMMAC_TBS_AVAIL)
- + if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_AVAIL)
- break;
-
- if (i >= priv->plat->tx_queues_to_use)
- --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
- +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
- @@ -971,13 +971,13 @@ static int tc_setup_etf(struct stmmac_pr
- return -EOPNOTSUPP;
- if (qopt->queue >= priv->plat->tx_queues_to_use)
- return -EINVAL;
- - if (!(priv->tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL))
- + if (!(priv->dma_conf.tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL))
- return -EINVAL;
-
- if (qopt->enable)
- - priv->tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN;
- + priv->dma_conf.tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN;
- else
- - priv->tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN;
- + priv->dma_conf.tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN;
-
- netdev_info(priv->dev, "%s ETF for Queue %d\n",
- qopt->enable ? "enabled" : "disabled", qopt->queue);
|