- From ba39b344e9240a4a5fd4ab8178200b85cd1809da Mon Sep 17 00:00:00 2001
- From: Christian Marangi <[email protected]>
- Date: Sat, 23 Jul 2022 16:29:32 +0200
- Subject: [PATCH 4/5] net: ethernet: stmicro: stmmac: generate stmmac dma conf
- before open
- Rework the driver to generate the stmmac dma_conf before stmmac_open.
- This permits a function to first check if it's possible to allocate a
- new dma_config and then pass it directly to __stmmac_open and "open" the
- interface with the new configuration.
- Signed-off-by: Christian Marangi <[email protected]>
- Signed-off-by: Jakub Kicinski <[email protected]>
- ---
- .../net/ethernet/stmicro/stmmac/stmmac_main.c | 462 +++++++++++-------
- 1 file changed, 289 insertions(+), 173 deletions(-)
- --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
- +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
- @@ -1301,7 +1301,8 @@ static int stmmac_phy_setup(struct stmma
- return 0;
- }
-
- -static void stmmac_display_rx_rings(struct stmmac_priv *priv)
- +static void stmmac_display_rx_rings(struct stmmac_priv *priv,
- + struct stmmac_dma_conf *dma_conf)
- {
- u32 rx_cnt = priv->plat->rx_queues_to_use;
- unsigned int desc_size;
- @@ -1310,7 +1311,7 @@ static void stmmac_display_rx_rings(stru
-
- /* Display RX rings */
- for (queue = 0; queue < rx_cnt; queue++) {
- - struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
- + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
-
- pr_info("\tRX Queue %u rings\n", queue);
-
- @@ -1323,12 +1324,13 @@ static void stmmac_display_rx_rings(stru
- }
-
- /* Display RX ring */
- - stmmac_display_ring(priv, head_rx, priv->dma_conf.dma_rx_size, true,
- + stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
- rx_q->dma_rx_phy, desc_size);
- }
- }
-
- -static void stmmac_display_tx_rings(struct stmmac_priv *priv)
- +static void stmmac_display_tx_rings(struct stmmac_priv *priv,
- + struct stmmac_dma_conf *dma_conf)
- {
- u32 tx_cnt = priv->plat->tx_queues_to_use;
- unsigned int desc_size;
- @@ -1337,7 +1339,7 @@ static void stmmac_display_tx_rings(stru
-
- /* Display TX rings */
- for (queue = 0; queue < tx_cnt; queue++) {
- - struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
- + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
-
- pr_info("\tTX Queue %d rings\n", queue);
-
- @@ -1352,18 +1354,19 @@ static void stmmac_display_tx_rings(stru
- desc_size = sizeof(struct dma_desc);
- }
-
- - stmmac_display_ring(priv, head_tx, priv->dma_conf.dma_tx_size, false,
- + stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
- tx_q->dma_tx_phy, desc_size);
- }
- }
-
- -static void stmmac_display_rings(struct stmmac_priv *priv)
- +static void stmmac_display_rings(struct stmmac_priv *priv,
- + struct stmmac_dma_conf *dma_conf)
- {
- /* Display RX ring */
- - stmmac_display_rx_rings(priv);
- + stmmac_display_rx_rings(priv, dma_conf);
-
- /* Display TX ring */
- - stmmac_display_tx_rings(priv);
- + stmmac_display_tx_rings(priv, dma_conf);
- }
-
- static int stmmac_set_bfsize(int mtu, int bufsize)
- @@ -1387,44 +1390,50 @@ static int stmmac_set_bfsize(int mtu, in
- /**
- * stmmac_clear_rx_descriptors - clear RX descriptors
- * @priv: driver private structure
- + * @dma_conf: structure to take the dma data
- * @queue: RX queue index
- * Description: this function is called to clear the RX descriptors
- * in case of both basic and extended descriptors are used.
- */
- -static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
- +static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
- + struct stmmac_dma_conf *dma_conf,
- + u32 queue)
- {
- - struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
- + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
- int i;
-
- /* Clear the RX descriptors */
- - for (i = 0; i < priv->dma_conf.dma_rx_size; i++)
- + for (i = 0; i < dma_conf->dma_rx_size; i++)
- if (priv->extend_desc)
- stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
- priv->use_riwt, priv->mode,
- - (i == priv->dma_conf.dma_rx_size - 1),
- - priv->dma_conf.dma_buf_sz);
- + (i == dma_conf->dma_rx_size - 1),
- + dma_conf->dma_buf_sz);
- else
- stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
- priv->use_riwt, priv->mode,
- - (i == priv->dma_conf.dma_rx_size - 1),
- - priv->dma_conf.dma_buf_sz);
- + (i == dma_conf->dma_rx_size - 1),
- + dma_conf->dma_buf_sz);
- }
-
- /**
- * stmmac_clear_tx_descriptors - clear tx descriptors
- * @priv: driver private structure
- + * @dma_conf: structure to take the dma data
- * @queue: TX queue index.
- * Description: this function is called to clear the TX descriptors
- * in case of both basic and extended descriptors are used.
- */
- -static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
- +static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
- + struct stmmac_dma_conf *dma_conf,
- + u32 queue)
- {
- - struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
- + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
- int i;
-
- /* Clear the TX descriptors */
- - for (i = 0; i < priv->dma_conf.dma_tx_size; i++) {
- - int last = (i == (priv->dma_conf.dma_tx_size - 1));
- + for (i = 0; i < dma_conf->dma_tx_size; i++) {
- + int last = (i == (dma_conf->dma_tx_size - 1));
- struct dma_desc *p;
-
- if (priv->extend_desc)
- @@ -1441,10 +1450,12 @@ static void stmmac_clear_tx_descriptors(
- /**
- * stmmac_clear_descriptors - clear descriptors
- * @priv: driver private structure
- + * @dma_conf: structure to take the dma data
- * Description: this function is called to clear the TX and RX descriptors
- * in case of both basic and extended descriptors are used.
- */
- -static void stmmac_clear_descriptors(struct stmmac_priv *priv)
- +static void stmmac_clear_descriptors(struct stmmac_priv *priv,
- + struct stmmac_dma_conf *dma_conf)
- {
- u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
- u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
- @@ -1452,16 +1463,17 @@ static void stmmac_clear_descriptors(str
-
- /* Clear the RX descriptors */
- for (queue = 0; queue < rx_queue_cnt; queue++)
- - stmmac_clear_rx_descriptors(priv, queue);
- + stmmac_clear_rx_descriptors(priv, dma_conf, queue);
-
- /* Clear the TX descriptors */
- for (queue = 0; queue < tx_queue_cnt; queue++)
- - stmmac_clear_tx_descriptors(priv, queue);
- + stmmac_clear_tx_descriptors(priv, dma_conf, queue);
- }
-
- /**
- * stmmac_init_rx_buffers - init the RX descriptor buffer.
- * @priv: driver private structure
- + * @dma_conf: structure to take the dma data
- * @p: descriptor pointer
- * @i: descriptor index
- * @flags: gfp flag
- @@ -1469,10 +1481,12 @@ static void stmmac_clear_descriptors(str
- * Description: this function is called to allocate a receive buffer, perform
- * the DMA mapping and init the descriptor.
- */
- -static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
- +static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
- + struct stmmac_dma_conf *dma_conf,
- + struct dma_desc *p,
- int i, gfp_t flags, u32 queue)
- {
- - struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
- + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
- struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
-
- if (!buf->page) {
- @@ -1497,7 +1511,7 @@ static int stmmac_init_rx_buffers(struct
- buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
-
- stmmac_set_desc_addr(priv, p, buf->addr);
- - if (priv->dma_conf.dma_buf_sz == BUF_SIZE_16KiB)
- + if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
- stmmac_init_desc3(priv, p);
-
- return 0;
- @@ -1506,12 +1520,13 @@ static int stmmac_init_rx_buffers(struct
- /**
- * stmmac_free_rx_buffer - free RX dma buffers
- * @priv: private structure
- - * @queue: RX queue index
- + * @rx_q: RX queue
- * @i: buffer index.
- */
- -static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
- +static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
- + struct stmmac_rx_queue *rx_q,
- + int i)
- {
- - struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
- struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
-
- if (buf->page)
- @@ -1526,12 +1541,15 @@ static void stmmac_free_rx_buffer(struct
- /**
- * stmmac_free_tx_buffer - free RX dma buffers
- * @priv: private structure
- + * @dma_conf: structure to take the dma data
- * @queue: RX queue index
- * @i: buffer index.
- */
- -static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
- +static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
- + struct stmmac_dma_conf *dma_conf,
- + u32 queue, int i)
- {
- - struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
- + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
-
- if (tx_q->tx_skbuff_dma[i].buf &&
- tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
- @@ -1570,23 +1588,28 @@ static void stmmac_free_tx_buffer(struct
- /**
- * dma_free_rx_skbufs - free RX dma buffers
- * @priv: private structure
- + * @dma_conf: structure to take the dma data
- * @queue: RX queue index
- */
- -static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
- +static void dma_free_rx_skbufs(struct stmmac_priv *priv,
- + struct stmmac_dma_conf *dma_conf,
- + u32 queue)
- {
- + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
- int i;
-
- - for (i = 0; i < priv->dma_conf.dma_rx_size; i++)
- - stmmac_free_rx_buffer(priv, queue, i);
- + for (i = 0; i < dma_conf->dma_rx_size; i++)
- + stmmac_free_rx_buffer(priv, rx_q, i);
- }
-
- -static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
- - gfp_t flags)
- +static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
- + struct stmmac_dma_conf *dma_conf,
- + u32 queue, gfp_t flags)
- {
- - struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
- + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
- int i;
-
- - for (i = 0; i < priv->dma_conf.dma_rx_size; i++) {
- + for (i = 0; i < dma_conf->dma_rx_size; i++) {
- struct dma_desc *p;
- int ret;
-
- @@ -1595,7 +1618,7 @@ static int stmmac_alloc_rx_buffers(struc
- else
- p = rx_q->dma_rx + i;
-
- - ret = stmmac_init_rx_buffers(priv, p, i, flags,
- + ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
- queue);
- if (ret)
- return ret;
- @@ -1609,14 +1632,17 @@ static int stmmac_alloc_rx_buffers(struc
- /**
- * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
- * @priv: private structure
- + * @dma_conf: structure to take the dma data
- * @queue: RX queue index
- */
- -static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
- +static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
- + struct stmmac_dma_conf *dma_conf,
- + u32 queue)
- {
- - struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
- + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
- int i;
-
- - for (i = 0; i < priv->dma_conf.dma_rx_size; i++) {
- + for (i = 0; i < dma_conf->dma_rx_size; i++) {
- struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
-
- if (!buf->xdp)
- @@ -1627,12 +1653,14 @@ static void dma_free_rx_xskbufs(struct s
- }
- }
-
- -static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
- +static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
- + struct stmmac_dma_conf *dma_conf,
- + u32 queue)
- {
- - struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
- + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
- int i;
-
- - for (i = 0; i < priv->dma_conf.dma_rx_size; i++) {
- + for (i = 0; i < dma_conf->dma_rx_size; i++) {
- struct stmmac_rx_buffer *buf;
- dma_addr_t dma_addr;
- struct dma_desc *p;
- @@ -1667,22 +1695,25 @@ static struct xsk_buff_pool *stmmac_get_
- /**
- * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
- * @priv: driver private structure
- + * @dma_conf: structure to take the dma data
- * @queue: RX queue index
- * @flags: gfp flag.
- * Description: this function initializes the DMA RX descriptors
- * and allocates the socket buffers. It supports the chained and ring
- * modes.
- */
- -static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
- +static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
- + struct stmmac_dma_conf *dma_conf,
- + u32 queue, gfp_t flags)
- {
- - struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
- + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
- int ret;
-
- netif_dbg(priv, probe, priv->dev,
- "(%s) dma_rx_phy=0x%08x\n", __func__,
- (u32)rx_q->dma_rx_phy);
-
- - stmmac_clear_rx_descriptors(priv, queue);
- + stmmac_clear_rx_descriptors(priv, dma_conf, queue);
-
- xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
-
- @@ -1709,9 +1740,9 @@ static int __init_dma_rx_desc_rings(stru
- /* RX XDP ZC buffer pool may not be populated, e.g.
- * xdpsock TX-only.
- */
- - stmmac_alloc_rx_buffers_zc(priv, queue);
- + stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
- } else {
- - ret = stmmac_alloc_rx_buffers(priv, queue, flags);
- + ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
- if (ret < 0)
- return -ENOMEM;
- }
- @@ -1721,17 +1752,19 @@ static int __init_dma_rx_desc_rings(stru
- if (priv->extend_desc)
- stmmac_mode_init(priv, rx_q->dma_erx,
- rx_q->dma_rx_phy,
- - priv->dma_conf.dma_rx_size, 1);
- + dma_conf->dma_rx_size, 1);
- else
- stmmac_mode_init(priv, rx_q->dma_rx,
- rx_q->dma_rx_phy,
- - priv->dma_conf.dma_rx_size, 0);
- + dma_conf->dma_rx_size, 0);
- }
-
- return 0;
- }
-
- -static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
- +static int init_dma_rx_desc_rings(struct net_device *dev,
- + struct stmmac_dma_conf *dma_conf,
- + gfp_t flags)
- {
- struct stmmac_priv *priv = netdev_priv(dev);
- u32 rx_count = priv->plat->rx_queues_to_use;
- @@ -1743,7 +1776,7 @@ static int init_dma_rx_desc_rings(struct
- "SKB addresses:\nskb\t\tskb data\tdma data\n");
-
- for (queue = 0; queue < rx_count; queue++) {
- - ret = __init_dma_rx_desc_rings(priv, queue, flags);
- + ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
- if (ret)
- goto err_init_rx_buffers;
- }
- @@ -1752,12 +1785,12 @@ static int init_dma_rx_desc_rings(struct
-
- err_init_rx_buffers:
- while (queue >= 0) {
- - struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
- + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
-
- if (rx_q->xsk_pool)
- - dma_free_rx_xskbufs(priv, queue);
- + dma_free_rx_xskbufs(priv, dma_conf, queue);
- else
- - dma_free_rx_skbufs(priv, queue);
- + dma_free_rx_skbufs(priv, dma_conf, queue);
-
- rx_q->buf_alloc_num = 0;
- rx_q->xsk_pool = NULL;
- @@ -1774,14 +1807,17 @@ err_init_rx_buffers:
- /**
- * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
- * @priv: driver private structure
- - * @queue : TX queue index
- + * @dma_conf: structure to take the dma data
- + * @queue: TX queue index
- * Description: this function initializes the DMA TX descriptors
- * and allocates the socket buffers. It supports the chained and ring
- * modes.
- */
- -static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
- +static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
- + struct stmmac_dma_conf *dma_conf,
- + u32 queue)
- {
- - struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
- + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
- int i;
-
- netif_dbg(priv, probe, priv->dev,
- @@ -1793,16 +1829,16 @@ static int __init_dma_tx_desc_rings(stru
- if (priv->extend_desc)
- stmmac_mode_init(priv, tx_q->dma_etx,
- tx_q->dma_tx_phy,
- - priv->dma_conf.dma_tx_size, 1);
- + dma_conf->dma_tx_size, 1);
- else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
- stmmac_mode_init(priv, tx_q->dma_tx,
- tx_q->dma_tx_phy,
- - priv->dma_conf.dma_tx_size, 0);
- + dma_conf->dma_tx_size, 0);
- }
-
- tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
-
- - for (i = 0; i < priv->dma_conf.dma_tx_size; i++) {
- + for (i = 0; i < dma_conf->dma_tx_size; i++) {
- struct dma_desc *p;
-
- if (priv->extend_desc)
- @@ -1824,7 +1860,8 @@ static int __init_dma_tx_desc_rings(stru
- return 0;
- }
-
- -static int init_dma_tx_desc_rings(struct net_device *dev)
- +static int init_dma_tx_desc_rings(struct net_device *dev,
- + struct stmmac_dma_conf *dma_conf)
- {
- struct stmmac_priv *priv = netdev_priv(dev);
- u32 tx_queue_cnt;
- @@ -1833,7 +1870,7 @@ static int init_dma_tx_desc_rings(struct
- tx_queue_cnt = priv->plat->tx_queues_to_use;
-
- for (queue = 0; queue < tx_queue_cnt; queue++)
- - __init_dma_tx_desc_rings(priv, queue);
- + __init_dma_tx_desc_rings(priv, dma_conf, queue);
-
- return 0;
- }
- @@ -1841,26 +1878,29 @@ static int init_dma_tx_desc_rings(struct
- /**
- * init_dma_desc_rings - init the RX/TX descriptor rings
- * @dev: net device structure
- + * @dma_conf: structure to take the dma data
- * @flags: gfp flag.
- * Description: this function initializes the DMA RX/TX descriptors
- * and allocates the socket buffers. It supports the chained and ring
- * modes.
- */
- -static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
- +static int init_dma_desc_rings(struct net_device *dev,
- + struct stmmac_dma_conf *dma_conf,
- + gfp_t flags)
- {
- struct stmmac_priv *priv = netdev_priv(dev);
- int ret;
-
- - ret = init_dma_rx_desc_rings(dev, flags);
- + ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
- if (ret)
- return ret;
-
- - ret = init_dma_tx_desc_rings(dev);
- + ret = init_dma_tx_desc_rings(dev, dma_conf);
-
- - stmmac_clear_descriptors(priv);
- + stmmac_clear_descriptors(priv, dma_conf);
-
- if (netif_msg_hw(priv))
- - stmmac_display_rings(priv);
- + stmmac_display_rings(priv, dma_conf);
-
- return ret;
- }
- @@ -1868,17 +1908,20 @@ static int init_dma_desc_rings(struct ne
- /**
- * dma_free_tx_skbufs - free TX dma buffers
- * @priv: private structure
- + * @dma_conf: structure to take the dma data
- * @queue: TX queue index
- */
- -static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
- +static void dma_free_tx_skbufs(struct stmmac_priv *priv,
- + struct stmmac_dma_conf *dma_conf,
- + u32 queue)
- {
- - struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
- + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
- int i;
-
- tx_q->xsk_frames_done = 0;
-
- - for (i = 0; i < priv->dma_conf.dma_tx_size; i++)
- - stmmac_free_tx_buffer(priv, queue, i);
- + for (i = 0; i < dma_conf->dma_tx_size; i++)
- + stmmac_free_tx_buffer(priv, dma_conf, queue, i);
-
- if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
- xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
- @@ -1897,34 +1940,37 @@ static void stmmac_free_tx_skbufs(struct
- u32 queue;
-
- for (queue = 0; queue < tx_queue_cnt; queue++)
- - dma_free_tx_skbufs(priv, queue);
- + dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
- }
-
- /**
- * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
- * @priv: private structure
- + * @dma_conf: structure to take the dma data
- * @queue: RX queue index
- */
- -static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
- +static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
- + struct stmmac_dma_conf *dma_conf,
- + u32 queue)
- {
- - struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
- + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
-
- /* Release the DMA RX socket buffers */
- if (rx_q->xsk_pool)
- - dma_free_rx_xskbufs(priv, queue);
- + dma_free_rx_xskbufs(priv, dma_conf, queue);
- else
- - dma_free_rx_skbufs(priv, queue);
- + dma_free_rx_skbufs(priv, dma_conf, queue);
-
- rx_q->buf_alloc_num = 0;
- rx_q->xsk_pool = NULL;
-
- /* Free DMA regions of consistent memory previously allocated */
- if (!priv->extend_desc)
- - dma_free_coherent(priv->device, priv->dma_conf.dma_rx_size *
- + dma_free_coherent(priv->device, dma_conf->dma_rx_size *
- sizeof(struct dma_desc),
- rx_q->dma_rx, rx_q->dma_rx_phy);
- else
- - dma_free_coherent(priv->device, priv->dma_conf.dma_rx_size *
- + dma_free_coherent(priv->device, dma_conf->dma_rx_size *
- sizeof(struct dma_extended_desc),
- rx_q->dma_erx, rx_q->dma_rx_phy);
-
- @@ -1936,29 +1982,33 @@ static void __free_dma_rx_desc_resources
- page_pool_destroy(rx_q->page_pool);
- }
-
- -static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
- +static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
- + struct stmmac_dma_conf *dma_conf)
- {
- u32 rx_count = priv->plat->rx_queues_to_use;
- u32 queue;
-
- /* Free RX queue resources */
- for (queue = 0; queue < rx_count; queue++)
- - __free_dma_rx_desc_resources(priv, queue);
- + __free_dma_rx_desc_resources(priv, dma_conf, queue);
- }
-
- /**
- * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
- * @priv: private structure
- + * @dma_conf: structure to take the dma data
- * @queue: TX queue index
- */
- -static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
- +static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
- + struct stmmac_dma_conf *dma_conf,
- + u32 queue)
- {
- - struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
- + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
- size_t size;
- void *addr;
-
- /* Release the DMA TX socket buffers */
- - dma_free_tx_skbufs(priv, queue);
- + dma_free_tx_skbufs(priv, dma_conf, queue);
-
- if (priv->extend_desc) {
- size = sizeof(struct dma_extended_desc);
- @@ -1971,7 +2021,7 @@ static void __free_dma_tx_desc_resources
- addr = tx_q->dma_tx;
- }
-
- - size *= priv->dma_conf.dma_tx_size;
- + size *= dma_conf->dma_tx_size;
-
- dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
-
- @@ -1979,28 +2029,32 @@ static void __free_dma_tx_desc_resources
- kfree(tx_q->tx_skbuff);
- }
-
- -static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
- +static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
- + struct stmmac_dma_conf *dma_conf)
- {
- u32 tx_count = priv->plat->tx_queues_to_use;
- u32 queue;
-
- /* Free TX queue resources */
- for (queue = 0; queue < tx_count; queue++)
- - __free_dma_tx_desc_resources(priv, queue);
- + __free_dma_tx_desc_resources(priv, dma_conf, queue);
- }
-
- /**
- * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
- * @priv: private structure
- + * @dma_conf: structure to take the dma data
- * @queue: RX queue index
- * Description: according to which descriptor can be used (extend or basic)
- * this function allocates the resources for TX and RX paths. In case of
- * reception, for example, it pre-allocated the RX socket buffer in order to
- * allow zero-copy mechanism.
- */
- -static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
- +static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
- + struct stmmac_dma_conf *dma_conf,
- + u32 queue)
- {
- - struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
- + struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
- struct stmmac_channel *ch = &priv->channel[queue];
- bool xdp_prog = stmmac_xdp_is_enabled(priv);
- struct page_pool_params pp_params = { 0 };
- @@ -2012,8 +2066,8 @@ static int __alloc_dma_rx_desc_resources
- rx_q->priv_data = priv;
-
- pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
- - pp_params.pool_size = priv->dma_conf.dma_rx_size;
- - num_pages = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE);
- + pp_params.pool_size = dma_conf->dma_rx_size;
- + num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
- pp_params.order = ilog2(num_pages);
- pp_params.nid = dev_to_node(priv->device);
- pp_params.dev = priv->device;
- @@ -2028,7 +2082,7 @@ static int __alloc_dma_rx_desc_resources
- return ret;
- }
-
- - rx_q->buf_pool = kcalloc(priv->dma_conf.dma_rx_size,
- + rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
- sizeof(*rx_q->buf_pool),
- GFP_KERNEL);
- if (!rx_q->buf_pool)
- @@ -2036,7 +2090,7 @@ static int __alloc_dma_rx_desc_resources
-
- if (priv->extend_desc) {
- rx_q->dma_erx = dma_alloc_coherent(priv->device,
- - priv->dma_conf.dma_rx_size *
- + dma_conf->dma_rx_size *
- sizeof(struct dma_extended_desc),
- &rx_q->dma_rx_phy,
- GFP_KERNEL);
- @@ -2045,7 +2099,7 @@ static int __alloc_dma_rx_desc_resources
-
- } else {
- rx_q->dma_rx = dma_alloc_coherent(priv->device,
- - priv->dma_conf.dma_rx_size *
- + dma_conf->dma_rx_size *
- sizeof(struct dma_desc),
- &rx_q->dma_rx_phy,
- GFP_KERNEL);
- @@ -2070,7 +2124,8 @@ static int __alloc_dma_rx_desc_resources
- return 0;
- }
-
- -static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
- +static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
- + struct stmmac_dma_conf *dma_conf)
- {
- u32 rx_count = priv->plat->rx_queues_to_use;
- u32 queue;
- @@ -2078,7 +2133,7 @@ static int alloc_dma_rx_desc_resources(s
-
- /* RX queues buffers and DMA */
- for (queue = 0; queue < rx_count; queue++) {
- - ret = __alloc_dma_rx_desc_resources(priv, queue);
- + ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
- if (ret)
- goto err_dma;
- }
- @@ -2086,7 +2141,7 @@ static int alloc_dma_rx_desc_resources(s
- return 0;
-
- err_dma:
- - free_dma_rx_desc_resources(priv);
- + free_dma_rx_desc_resources(priv, dma_conf);
-
- return ret;
- }
- @@ -2094,28 +2149,31 @@ err_dma:
- /**
- * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
- * @priv: private structure
- + * @dma_conf: structure to take the dma data
- * @queue: TX queue index
- * Description: according to which descriptor can be used (extend or basic)
- * this function allocates the resources for TX and RX paths. In case of
- * reception, for example, it pre-allocated the RX socket buffer in order to
- * allow zero-copy mechanism.
- */
- -static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
- +static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
- + struct stmmac_dma_conf *dma_conf,
- + u32 queue)
- {
- - struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
- + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
- size_t size;
- void *addr;
-
- tx_q->queue_index = queue;
- tx_q->priv_data = priv;
-
- - tx_q->tx_skbuff_dma = kcalloc(priv->dma_conf.dma_tx_size,
- + tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
- sizeof(*tx_q->tx_skbuff_dma),
- GFP_KERNEL);
- if (!tx_q->tx_skbuff_dma)
- return -ENOMEM;
-
- - tx_q->tx_skbuff = kcalloc(priv->dma_conf.dma_tx_size,
- + tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
- sizeof(struct sk_buff *),
- GFP_KERNEL);
- if (!tx_q->tx_skbuff)
- @@ -2128,7 +2186,7 @@ static int __alloc_dma_tx_desc_resources
- else
- size = sizeof(struct dma_desc);
-
- - size *= priv->dma_conf.dma_tx_size;
- + size *= dma_conf->dma_tx_size;
-
- addr = dma_alloc_coherent(priv->device, size,
- &tx_q->dma_tx_phy, GFP_KERNEL);
- @@ -2145,7 +2203,8 @@ static int __alloc_dma_tx_desc_resources
- return 0;
- }
-
- -static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
- +static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
- + struct stmmac_dma_conf *dma_conf)
- {
- u32 tx_count = priv->plat->tx_queues_to_use;
- u32 queue;
- @@ -2153,7 +2212,7 @@ static int alloc_dma_tx_desc_resources(s
-
- /* TX queues buffers and DMA */
- for (queue = 0; queue < tx_count; queue++) {
- - ret = __alloc_dma_tx_desc_resources(priv, queue);
- + ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
- if (ret)
- goto err_dma;
- }
- @@ -2161,27 +2220,29 @@ static int alloc_dma_tx_desc_resources(s
- return 0;
-
- err_dma:
- - free_dma_tx_desc_resources(priv);
- + free_dma_tx_desc_resources(priv, dma_conf);
- return ret;
- }
-
- /**
- * alloc_dma_desc_resources - alloc TX/RX resources.
- * @priv: private structure
- + * @dma_conf: structure to take the dma data
- * Description: according to which descriptor can be used (extend or basic)
- * this function allocates the resources for TX and RX paths. In case of
- * reception, for example, it pre-allocated the RX socket buffer in order to
- * allow zero-copy mechanism.
- */
- -static int alloc_dma_desc_resources(struct stmmac_priv *priv)
- +static int alloc_dma_desc_resources(struct stmmac_priv *priv,
- + struct stmmac_dma_conf *dma_conf)
- {
- /* RX Allocation */
- - int ret = alloc_dma_rx_desc_resources(priv);
- + int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
-
- if (ret)
- return ret;
-
- - ret = alloc_dma_tx_desc_resources(priv);
- + ret = alloc_dma_tx_desc_resources(priv, dma_conf);
-
- return ret;
- }
- @@ -2189,16 +2250,18 @@ static int alloc_dma_desc_resources(stru
- /**
- * free_dma_desc_resources - free dma desc resources
- * @priv: private structure
- + * @dma_conf: structure to take the dma data
- */
- -static void free_dma_desc_resources(struct stmmac_priv *priv)
- +static void free_dma_desc_resources(struct stmmac_priv *priv,
- + struct stmmac_dma_conf *dma_conf)
- {
- /* Release the DMA TX socket buffers */
- - free_dma_tx_desc_resources(priv);
- + free_dma_tx_desc_resources(priv, dma_conf);
-
- /* Release the DMA RX socket buffers later
- * to ensure all pending XDP_TX buffers are returned.
- */
- - free_dma_rx_desc_resources(priv);
- + free_dma_rx_desc_resources(priv, dma_conf);
- }
-
- /**
- @@ -2687,8 +2750,8 @@ static void stmmac_tx_err(struct stmmac_
- netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
-
- stmmac_stop_tx_dma(priv, chan);
- - dma_free_tx_skbufs(priv, chan);
- - stmmac_clear_tx_descriptors(priv, chan);
- + dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
- + stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
- stmmac_reset_tx_queue(priv, chan);
- stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
- tx_q->dma_tx_phy, chan);
- @@ -3686,19 +3749,93 @@ static int stmmac_request_irq(struct net
- }
-
- /**
- - * stmmac_open - open entry point of the driver
- + * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
- + * @priv: driver private structure
- + * @mtu: MTU to setup the dma queue and buf with
- + * Description: Allocate and generate a dma_conf based on the provided MTU.
- + * Allocate the Tx/Rx DMA queue and init them.
- + * Return value:
- + * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
- + */
- +static struct stmmac_dma_conf *
- +stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
- +{
- + struct stmmac_dma_conf *dma_conf;
- + int chan, bfsize, ret;
- +
- + dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
- + if (!dma_conf) {
- + netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
- + __func__);
- + return ERR_PTR(-ENOMEM);
- + }
- +
- + bfsize = stmmac_set_16kib_bfsize(priv, mtu);
- + if (bfsize < 0)
- + bfsize = 0;
- +
- + if (bfsize < BUF_SIZE_16KiB)
- + bfsize = stmmac_set_bfsize(mtu, 0);
- +
- + dma_conf->dma_buf_sz = bfsize;
- + /* Chose the tx/rx size from the already defined one in the
- + * priv struct. (if defined)
- + */
- + dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
- + dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
- +
- + if (!dma_conf->dma_tx_size)
- + dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
- + if (!dma_conf->dma_rx_size)
- + dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
- +
- + /* Earlier check for TBS */
- + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
- + struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
- + int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
- +
- + /* Setup per-TXQ tbs flag before TX descriptor alloc */
- + tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
- + }
- +
- + ret = alloc_dma_desc_resources(priv, dma_conf);
- + if (ret < 0) {
- + netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
- + __func__);
- + goto alloc_error;
- + }
- +
- + ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
- + if (ret < 0) {
- + netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
- + __func__);
- + goto init_error;
- + }
- +
- + return dma_conf;
- +
- +init_error:
- + free_dma_desc_resources(priv, dma_conf);
- +alloc_error:
- + kfree(dma_conf);
- + return ERR_PTR(ret);
- +}
- +
- +/**
- + * __stmmac_open - open entry point of the driver
- * @dev : pointer to the device structure.
- + * @dma_conf : structure to take the dma data
- * Description:
- * This function is the open entry point of the driver.
- * Return value:
- * 0 on success and an appropriate (-)ve integer as defined in errno.h
- * file on failure.
- */
- -static int stmmac_open(struct net_device *dev)
- +static int __stmmac_open(struct net_device *dev,
- + struct stmmac_dma_conf *dma_conf)
- {
- struct stmmac_priv *priv = netdev_priv(dev);
- int mode = priv->plat->phy_interface;
- - int bfsize = 0;
- u32 chan;
- int ret;
-
- @@ -3725,45 +3862,10 @@ static int stmmac_open(struct net_device
- memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
- priv->xstats.threshold = tc;
-
- - bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
- - if (bfsize < 0)
- - bfsize = 0;
- -
- - if (bfsize < BUF_SIZE_16KiB)
- - bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_conf.dma_buf_sz);
- -
- - priv->dma_conf.dma_buf_sz = bfsize;
- - buf_sz = bfsize;
- -
- priv->rx_copybreak = STMMAC_RX_COPYBREAK;
-
- - if (!priv->dma_conf.dma_tx_size)
- - priv->dma_conf.dma_tx_size = DMA_DEFAULT_TX_SIZE;
- - if (!priv->dma_conf.dma_rx_size)
- - priv->dma_conf.dma_rx_size = DMA_DEFAULT_RX_SIZE;
- -
- - /* Earlier check for TBS */
- - for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
- - struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
- - int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
- -
- - /* Setup per-TXQ tbs flag before TX descriptor alloc */
- - tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
- - }
- -
- - ret = alloc_dma_desc_resources(priv);
- - if (ret < 0) {
- - netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
- - __func__);
- - goto dma_desc_error;
- - }
- -
- - ret = init_dma_desc_rings(dev, GFP_KERNEL);
- - if (ret < 0) {
- - netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
- - __func__);
- - goto init_error;
- - }
- + buf_sz = dma_conf->dma_buf_sz;
- + memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
-
- if (priv->plat->serdes_powerup) {
- ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
- @@ -3806,14 +3908,28 @@ irq_error:
-
- stmmac_hw_teardown(dev);
- init_error:
- - free_dma_desc_resources(priv);
- -dma_desc_error:
- + free_dma_desc_resources(priv, &priv->dma_conf);
- phylink_disconnect_phy(priv->phylink);
- init_phy_error:
- pm_runtime_put(priv->device);
- return ret;
- }
-
- +static int stmmac_open(struct net_device *dev)
- +{
- + struct stmmac_priv *priv = netdev_priv(dev);
- + struct stmmac_dma_conf *dma_conf;
- + int ret;
- +
- + dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
- + if (IS_ERR(dma_conf))
- + return PTR_ERR(dma_conf);
- +
- + ret = __stmmac_open(dev, dma_conf);
- + kfree(dma_conf);
- + return ret;
- +}
- +
- static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
- {
- set_bit(__FPE_REMOVING, &priv->fpe_task_state);
- @@ -3860,7 +3976,7 @@ static int stmmac_release(struct net_dev
- stmmac_stop_all_dma(priv);
-
- /* Release and free the Rx/Tx resources */
- - free_dma_desc_resources(priv);
- + free_dma_desc_resources(priv, &priv->dma_conf);
-
- /* Disable the MAC Rx/Tx */
- stmmac_mac_set(priv, priv->ioaddr, false);
- @@ -6403,7 +6519,7 @@ void stmmac_disable_rx_queue(struct stmm
- spin_unlock_irqrestore(&ch->lock, flags);
-
- stmmac_stop_rx_dma(priv, queue);
- - __free_dma_rx_desc_resources(priv, queue);
- + __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
- }
-
- void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
- @@ -6414,21 +6530,21 @@ void stmmac_enable_rx_queue(struct stmma
- u32 buf_size;
- int ret;
-
- - ret = __alloc_dma_rx_desc_resources(priv, queue);
- + ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
- if (ret) {
- netdev_err(priv->dev, "Failed to alloc RX desc.\n");
- return;
- }
-
- - ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL);
- + ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
- if (ret) {
- - __free_dma_rx_desc_resources(priv, queue);
- + __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
- netdev_err(priv->dev, "Failed to init RX desc.\n");
- return;
- }
-
- stmmac_reset_rx_queue(priv, queue);
- - stmmac_clear_rx_descriptors(priv, queue);
- + stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
-
- stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
- rx_q->dma_rx_phy, rx_q->queue_index);
- @@ -6466,7 +6582,7 @@ void stmmac_disable_tx_queue(struct stmm
- spin_unlock_irqrestore(&ch->lock, flags);
-
- stmmac_stop_tx_dma(priv, queue);
- - __free_dma_tx_desc_resources(priv, queue);
- + __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
- }
-
- void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
- @@ -6476,21 +6592,21 @@ void stmmac_enable_tx_queue(struct stmma
- unsigned long flags;
- int ret;
-
- - ret = __alloc_dma_tx_desc_resources(priv, queue);
- + ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
- if (ret) {
- netdev_err(priv->dev, "Failed to alloc TX desc.\n");
- return;
- }
-
- - ret = __init_dma_tx_desc_rings(priv, queue);
- + ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
- if (ret) {
- - __free_dma_tx_desc_resources(priv, queue);
- + __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
- netdev_err(priv->dev, "Failed to init TX desc.\n");
- return;
- }
-
- stmmac_reset_tx_queue(priv, queue);
- - stmmac_clear_tx_descriptors(priv, queue);
- + stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
-
- stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
- tx_q->dma_tx_phy, tx_q->queue_index);
- @@ -6530,7 +6646,7 @@ void stmmac_xdp_release(struct net_devic
- stmmac_stop_all_dma(priv);
-
- /* Release and free the Rx/Tx resources */
- - free_dma_desc_resources(priv);
- + free_dma_desc_resources(priv, &priv->dma_conf);
-
- /* Disable the MAC Rx/Tx */
- stmmac_mac_set(priv, priv->ioaddr, false);
- @@ -6555,14 +6671,14 @@ int stmmac_xdp_open(struct net_device *d
- u32 chan;
- int ret;
-
- - ret = alloc_dma_desc_resources(priv);
- + ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
- if (ret < 0) {
- netdev_err(dev, "%s: DMA descriptors allocation failed\n",
- __func__);
- goto dma_desc_error;
- }
-
- - ret = init_dma_desc_rings(dev, GFP_KERNEL);
- + ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
- if (ret < 0) {
- netdev_err(dev, "%s: DMA descriptors initialization failed\n",
- __func__);
- @@ -6644,7 +6760,7 @@ irq_error:
-
- stmmac_hw_teardown(dev);
- init_error:
- - free_dma_desc_resources(priv);
- + free_dma_desc_resources(priv, &priv->dma_conf);
- dma_desc_error:
- return ret;
- }
- @@ -7507,7 +7623,7 @@ int stmmac_resume(struct device *dev)
- stmmac_reset_queues_param(priv);
-
- stmmac_free_tx_skbufs(priv);
- - stmmac_clear_descriptors(priv);
- + stmmac_clear_descriptors(priv, &priv->dma_conf);
-
- stmmac_hw_setup(ndev, false);
- stmmac_init_coalesce(priv);
|