775-v6.0-04-net-ethernet-stmicro-stmmac-generate-stmmac-dma-conf.patch

From ba39b344e9240a4a5fd4ab8178200b85cd1809da Mon Sep 17 00:00:00 2001
From: Christian Marangi <[email protected]>
Date: Sat, 23 Jul 2022 16:29:32 +0200
Subject: [PATCH 4/5] net: ethernet: stmicro: stmmac: generate stmmac dma conf
 before open
Rework the driver to generate the stmmac dma_conf before stmmac_open.
This permits a caller to first check whether it is possible to allocate
a new dma_conf, and only then pass it directly to __stmmac_open to
"open" the interface with the new configuration.
Signed-off-by: Christian Marangi <[email protected]>
Signed-off-by: Jakub Kicinski <[email protected]>
---
 .../net/ethernet/stmicro/stmmac/stmmac_main.c | 462 +++++++++++-------
 1 file changed, 289 insertions(+), 173 deletions(-)
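
The point of the split is that the DMA buffers for a new configuration can
be allocated, and the allocation can fail, before any running state is torn
down. A minimal sketch of a caller built on the two new helpers, assuming a
hypothetical stmmac_reconfigure() wrapper (stmmac_setup_dma_desc() and
__stmmac_open() are introduced by this patch; stmmac_release() is the
driver's existing close path):

/* Hypothetical illustration, not part of this patch. */
static int stmmac_reconfigure(struct net_device *dev, unsigned int new_mtu)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct stmmac_dma_conf *dma_conf;
	int ret;

	/* Allocate the new DMA configuration first; a failure here
	 * leaves the currently running interface untouched.
	 */
	dma_conf = stmmac_setup_dma_desc(priv, new_mtu);
	if (IS_ERR(dma_conf))
		return PTR_ERR(dma_conf);

	/* Only now tear down the old state and reopen with the new
	 * conf; __stmmac_open() copies *dma_conf into priv->dma_conf,
	 * so the temporary allocation is freed unconditionally.
	 */
	stmmac_release(dev);
	ret = __stmmac_open(dev, dma_conf);
	kfree(dma_conf);
	return ret;
}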
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1285,7 +1285,8 @@ static int stmmac_phy_setup(struct stmma
return 0;
}
-static void stmmac_display_rx_rings(struct stmmac_priv *priv)
+static void stmmac_display_rx_rings(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 rx_cnt = priv->plat->rx_queues_to_use;
unsigned int desc_size;
@@ -1294,7 +1295,7 @@ static void stmmac_display_rx_rings(stru
/* Display RX rings */
for (queue = 0; queue < rx_cnt; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
pr_info("\tRX Queue %u rings\n", queue);
@@ -1307,12 +1308,13 @@ static void stmmac_display_rx_rings(stru
}
/* Display RX ring */
- stmmac_display_ring(priv, head_rx, priv->dma_conf.dma_rx_size, true,
+ stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
rx_q->dma_rx_phy, desc_size);
}
}
-static void stmmac_display_tx_rings(struct stmmac_priv *priv)
+static void stmmac_display_tx_rings(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
unsigned int desc_size;
@@ -1321,7 +1323,7 @@ static void stmmac_display_tx_rings(stru
/* Display TX rings */
for (queue = 0; queue < tx_cnt; queue++) {
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
pr_info("\tTX Queue %d rings\n", queue);
@@ -1336,18 +1338,19 @@ static void stmmac_display_tx_rings(stru
desc_size = sizeof(struct dma_desc);
}
- stmmac_display_ring(priv, head_tx, priv->dma_conf.dma_tx_size, false,
+ stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
tx_q->dma_tx_phy, desc_size);
}
}
-static void stmmac_display_rings(struct stmmac_priv *priv)
+static void stmmac_display_rings(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
/* Display RX ring */
- stmmac_display_rx_rings(priv);
+ stmmac_display_rx_rings(priv, dma_conf);
/* Display TX ring */
- stmmac_display_tx_rings(priv);
+ stmmac_display_tx_rings(priv, dma_conf);
}
static int stmmac_set_bfsize(int mtu, int bufsize)
@@ -1371,44 +1374,50 @@ static int stmmac_set_bfsize(int mtu, in
/**
* stmmac_clear_rx_descriptors - clear RX descriptors
* @priv: driver private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
* Description: this function is called to clear the RX descriptors
* in case of both basic and extended descriptors are used.
*/
-static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
+static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i;
/* Clear the RX descriptors */
- for (i = 0; i < priv->dma_conf.dma_rx_size; i++)
+ for (i = 0; i < dma_conf->dma_rx_size; i++)
if (priv->extend_desc)
stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
priv->use_riwt, priv->mode,
- (i == priv->dma_conf.dma_rx_size - 1),
- priv->dma_conf.dma_buf_sz);
+ (i == dma_conf->dma_rx_size - 1),
+ dma_conf->dma_buf_sz);
else
stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
priv->use_riwt, priv->mode,
- (i == priv->dma_conf.dma_rx_size - 1),
- priv->dma_conf.dma_buf_sz);
+ (i == dma_conf->dma_rx_size - 1),
+ dma_conf->dma_buf_sz);
}
/**
* stmmac_clear_tx_descriptors - clear tx descriptors
* @priv: driver private structure
+ * @dma_conf: structure to take the dma data
* @queue: TX queue index.
* Description: this function is called to clear the TX descriptors
* in case of both basic and extended descriptors are used.
*/
-static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
+static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
int i;
/* Clear the TX descriptors */
- for (i = 0; i < priv->dma_conf.dma_tx_size; i++) {
- int last = (i == (priv->dma_conf.dma_tx_size - 1));
+ for (i = 0; i < dma_conf->dma_tx_size; i++) {
+ int last = (i == (dma_conf->dma_tx_size - 1));
struct dma_desc *p;
if (priv->extend_desc)
@@ -1425,10 +1434,12 @@ static void stmmac_clear_tx_descriptors(
/**
* stmmac_clear_descriptors - clear descriptors
* @priv: driver private structure
+ * @dma_conf: structure to take the dma data
* Description: this function is called to clear the TX and RX descriptors
* in case of both basic and extended descriptors are used.
*/
-static void stmmac_clear_descriptors(struct stmmac_priv *priv)
+static void stmmac_clear_descriptors(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
@@ -1436,16 +1447,17 @@ static void stmmac_clear_descriptors(str
/* Clear the RX descriptors */
for (queue = 0; queue < rx_queue_cnt; queue++)
- stmmac_clear_rx_descriptors(priv, queue);
+ stmmac_clear_rx_descriptors(priv, dma_conf, queue);
/* Clear the TX descriptors */
for (queue = 0; queue < tx_queue_cnt; queue++)
- stmmac_clear_tx_descriptors(priv, queue);
+ stmmac_clear_tx_descriptors(priv, dma_conf, queue);
}
/**
* stmmac_init_rx_buffers - init the RX descriptor buffer.
* @priv: driver private structure
+ * @dma_conf: structure to take the dma data
* @p: descriptor pointer
* @i: descriptor index
* @flags: gfp flag
@@ -1453,10 +1465,12 @@ static void stmmac_clear_descriptors(str
* Description: this function is called to allocate a receive buffer, perform
* the DMA mapping and init the descriptor.
*/
-static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
+static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ struct dma_desc *p,
int i, gfp_t flags, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
if (!buf->page) {
@@ -1481,7 +1495,7 @@ static int stmmac_init_rx_buffers(struct
buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
stmmac_set_desc_addr(priv, p, buf->addr);
- if (priv->dma_conf.dma_buf_sz == BUF_SIZE_16KiB)
+ if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
stmmac_init_desc3(priv, p);
return 0;
@@ -1490,12 +1504,13 @@ static int stmmac_init_rx_buffers(struct
/**
* stmmac_free_rx_buffer - free RX dma buffers
* @priv: private structure
- * @queue: RX queue index
+ * @rx_q: RX queue
* @i: buffer index.
*/
-static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
+static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
+ struct stmmac_rx_queue *rx_q,
+ int i)
{
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
if (buf->page)
@@ -1510,12 +1525,15 @@ static void stmmac_free_rx_buffer(struct
/**
* stmmac_free_tx_buffer - free RX dma buffers
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
* @i: buffer index.
*/
-static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
+static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue, int i)
{
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
if (tx_q->tx_skbuff_dma[i].buf &&
tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
@@ -1554,23 +1572,28 @@ static void stmmac_free_tx_buffer(struct
/**
* dma_free_rx_skbufs - free RX dma buffers
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
*/
-static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
+static void dma_free_rx_skbufs(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i;
- for (i = 0; i < priv->dma_conf.dma_rx_size; i++)
- stmmac_free_rx_buffer(priv, queue, i);
+ for (i = 0; i < dma_conf->dma_rx_size; i++)
+ stmmac_free_rx_buffer(priv, rx_q, i);
}
-static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
- gfp_t flags)
+static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue, gfp_t flags)
{
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i;
- for (i = 0; i < priv->dma_conf.dma_rx_size; i++) {
+ for (i = 0; i < dma_conf->dma_rx_size; i++) {
struct dma_desc *p;
int ret;
@@ -1579,7 +1602,7 @@ static int stmmac_alloc_rx_buffers(struc
else
p = rx_q->dma_rx + i;
- ret = stmmac_init_rx_buffers(priv, p, i, flags,
+ ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
queue);
if (ret)
return ret;
@@ -1593,14 +1616,17 @@ static int stmmac_alloc_rx_buffers(struc
/**
* dma_free_rx_xskbufs - free RX dma buffers from XSK pool
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
*/
-static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
+static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i;
- for (i = 0; i < priv->dma_conf.dma_rx_size; i++) {
+ for (i = 0; i < dma_conf->dma_rx_size; i++) {
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
if (!buf->xdp)
@@ -1611,12 +1637,14 @@ static void dma_free_rx_xskbufs(struct s
}
}
-static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
+static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i;
- for (i = 0; i < priv->dma_conf.dma_rx_size; i++) {
+ for (i = 0; i < dma_conf->dma_rx_size; i++) {
struct stmmac_rx_buffer *buf;
dma_addr_t dma_addr;
struct dma_desc *p;
@@ -1651,22 +1679,25 @@ static struct xsk_buff_pool *stmmac_get_
/**
* __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
* @priv: driver private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
* @flags: gfp flag.
* Description: this function initializes the DMA RX descriptors
* and allocates the socket buffers. It supports the chained and ring
* modes.
*/
-static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
+static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue, gfp_t flags)
{
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int ret;
netif_dbg(priv, probe, priv->dev,
"(%s) dma_rx_phy=0x%08x\n", __func__,
(u32)rx_q->dma_rx_phy);
- stmmac_clear_rx_descriptors(priv, queue);
+ stmmac_clear_rx_descriptors(priv, dma_conf, queue);
xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
@@ -1693,9 +1724,9 @@ static int __init_dma_rx_desc_rings(stru
/* RX XDP ZC buffer pool may not be populated, e.g.
* xdpsock TX-only.
*/
- stmmac_alloc_rx_buffers_zc(priv, queue);
+ stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
} else {
- ret = stmmac_alloc_rx_buffers(priv, queue, flags);
+ ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
if (ret < 0)
return -ENOMEM;
}
@@ -1705,17 +1736,19 @@ static int __init_dma_rx_desc_rings(stru
if (priv->extend_desc)
stmmac_mode_init(priv, rx_q->dma_erx,
rx_q->dma_rx_phy,
- priv->dma_conf.dma_rx_size, 1);
+ dma_conf->dma_rx_size, 1);
else
stmmac_mode_init(priv, rx_q->dma_rx,
rx_q->dma_rx_phy,
- priv->dma_conf.dma_rx_size, 0);
+ dma_conf->dma_rx_size, 0);
}
return 0;
}
-static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
+static int init_dma_rx_desc_rings(struct net_device *dev,
+ struct stmmac_dma_conf *dma_conf,
+ gfp_t flags)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 rx_count = priv->plat->rx_queues_to_use;
@@ -1727,7 +1760,7 @@ static int init_dma_rx_desc_rings(struct
"SKB addresses:\nskb\t\tskb data\tdma data\n");
for (queue = 0; queue < rx_count; queue++) {
- ret = __init_dma_rx_desc_rings(priv, queue, flags);
+ ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
if (ret)
goto err_init_rx_buffers;
}
@@ -1736,12 +1769,12 @@ static int init_dma_rx_desc_rings(struct
err_init_rx_buffers:
while (queue >= 0) {
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
if (rx_q->xsk_pool)
- dma_free_rx_xskbufs(priv, queue);
+ dma_free_rx_xskbufs(priv, dma_conf, queue);
else
- dma_free_rx_skbufs(priv, queue);
+ dma_free_rx_skbufs(priv, dma_conf, queue);
rx_q->buf_alloc_num = 0;
rx_q->xsk_pool = NULL;
@@ -1758,14 +1791,17 @@ err_init_rx_buffers:
/**
* __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
* @priv: driver private structure
- * @queue : TX queue index
+ * @dma_conf: structure to take the dma data
+ * @queue: TX queue index
* Description: this function initializes the DMA TX descriptors
* and allocates the socket buffers. It supports the chained and ring
* modes.
*/
-static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
+static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
int i;
netif_dbg(priv, probe, priv->dev,
@@ -1777,16 +1813,16 @@ static int __init_dma_tx_desc_rings(stru
if (priv->extend_desc)
stmmac_mode_init(priv, tx_q->dma_etx,
tx_q->dma_tx_phy,
- priv->dma_conf.dma_tx_size, 1);
+ dma_conf->dma_tx_size, 1);
else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
stmmac_mode_init(priv, tx_q->dma_tx,
tx_q->dma_tx_phy,
- priv->dma_conf.dma_tx_size, 0);
+ dma_conf->dma_tx_size, 0);
}
tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
- for (i = 0; i < priv->dma_conf.dma_tx_size; i++) {
+ for (i = 0; i < dma_conf->dma_tx_size; i++) {
struct dma_desc *p;
if (priv->extend_desc)
@@ -1808,7 +1844,8 @@ static int __init_dma_tx_desc_rings(stru
return 0;
}
-static int init_dma_tx_desc_rings(struct net_device *dev)
+static int init_dma_tx_desc_rings(struct net_device *dev,
+ struct stmmac_dma_conf *dma_conf)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 tx_queue_cnt;
@@ -1817,7 +1854,7 @@ static int init_dma_tx_desc_rings(struct
tx_queue_cnt = priv->plat->tx_queues_to_use;
for (queue = 0; queue < tx_queue_cnt; queue++)
- __init_dma_tx_desc_rings(priv, queue);
+ __init_dma_tx_desc_rings(priv, dma_conf, queue);
return 0;
}
@@ -1825,26 +1862,29 @@ static int init_dma_tx_desc_rings(struct
/**
* init_dma_desc_rings - init the RX/TX descriptor rings
* @dev: net device structure
+ * @dma_conf: structure to take the dma data
* @flags: gfp flag.
* Description: this function initializes the DMA RX/TX descriptors
* and allocates the socket buffers. It supports the chained and ring
* modes.
*/
-static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
+static int init_dma_desc_rings(struct net_device *dev,
+ struct stmmac_dma_conf *dma_conf,
+ gfp_t flags)
{
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
- ret = init_dma_rx_desc_rings(dev, flags);
+ ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
if (ret)
return ret;
- ret = init_dma_tx_desc_rings(dev);
+ ret = init_dma_tx_desc_rings(dev, dma_conf);
- stmmac_clear_descriptors(priv);
+ stmmac_clear_descriptors(priv, dma_conf);
if (netif_msg_hw(priv))
- stmmac_display_rings(priv);
+ stmmac_display_rings(priv, dma_conf);
return ret;
}
@@ -1852,17 +1892,20 @@ static int init_dma_desc_rings(struct ne
/**
* dma_free_tx_skbufs - free TX dma buffers
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: TX queue index
*/
-static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
+static void dma_free_tx_skbufs(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
int i;
tx_q->xsk_frames_done = 0;
- for (i = 0; i < priv->dma_conf.dma_tx_size; i++)
- stmmac_free_tx_buffer(priv, queue, i);
+ for (i = 0; i < dma_conf->dma_tx_size; i++)
+ stmmac_free_tx_buffer(priv, dma_conf, queue, i);
if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
@@ -1881,34 +1924,37 @@ static void stmmac_free_tx_skbufs(struct
u32 queue;
for (queue = 0; queue < tx_queue_cnt; queue++)
- dma_free_tx_skbufs(priv, queue);
+ dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
}
/**
* __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
*/
-static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
+static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
/* Release the DMA RX socket buffers */
if (rx_q->xsk_pool)
- dma_free_rx_xskbufs(priv, queue);
+ dma_free_rx_xskbufs(priv, dma_conf, queue);
else
- dma_free_rx_skbufs(priv, queue);
+ dma_free_rx_skbufs(priv, dma_conf, queue);
rx_q->buf_alloc_num = 0;
rx_q->xsk_pool = NULL;
/* Free DMA regions of consistent memory previously allocated */
if (!priv->extend_desc)
- dma_free_coherent(priv->device, priv->dma_conf.dma_rx_size *
+ dma_free_coherent(priv->device, dma_conf->dma_rx_size *
sizeof(struct dma_desc),
rx_q->dma_rx, rx_q->dma_rx_phy);
else
- dma_free_coherent(priv->device, priv->dma_conf.dma_rx_size *
+ dma_free_coherent(priv->device, dma_conf->dma_rx_size *
sizeof(struct dma_extended_desc),
rx_q->dma_erx, rx_q->dma_rx_phy);
@@ -1920,29 +1966,33 @@ static void __free_dma_rx_desc_resources
page_pool_destroy(rx_q->page_pool);
}
-static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
+static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 rx_count = priv->plat->rx_queues_to_use;
u32 queue;
/* Free RX queue resources */
for (queue = 0; queue < rx_count; queue++)
- __free_dma_rx_desc_resources(priv, queue);
+ __free_dma_rx_desc_resources(priv, dma_conf, queue);
}
/**
* __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: TX queue index
*/
-static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
+static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
size_t size;
void *addr;
/* Release the DMA TX socket buffers */
- dma_free_tx_skbufs(priv, queue);
+ dma_free_tx_skbufs(priv, dma_conf, queue);
if (priv->extend_desc) {
size = sizeof(struct dma_extended_desc);
@@ -1955,7 +2005,7 @@ static void __free_dma_tx_desc_resources
addr = tx_q->dma_tx;
}
- size *= priv->dma_conf.dma_tx_size;
+ size *= dma_conf->dma_tx_size;
dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
@@ -1963,28 +2013,32 @@ static void __free_dma_tx_desc_resources
kfree(tx_q->tx_skbuff);
}
-static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
+static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 tx_count = priv->plat->tx_queues_to_use;
u32 queue;
/* Free TX queue resources */
for (queue = 0; queue < tx_count; queue++)
- __free_dma_tx_desc_resources(priv, queue);
+ __free_dma_tx_desc_resources(priv, dma_conf, queue);
}
/**
* __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
* Description: according to which descriptor can be used (extend or basic)
* this function allocates the resources for TX and RX paths. In case of
* reception, for example, it pre-allocated the RX socket buffer in order to
* allow zero-copy mechanism.
*/
-static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
+static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
bool xdp_prog = stmmac_xdp_is_enabled(priv);
struct page_pool_params pp_params = { 0 };
@@ -1996,8 +2050,8 @@ static int __alloc_dma_rx_desc_resources
rx_q->priv_data = priv;
pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
- pp_params.pool_size = priv->dma_conf.dma_rx_size;
- num_pages = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE);
+ pp_params.pool_size = dma_conf->dma_rx_size;
+ num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
pp_params.order = ilog2(num_pages);
pp_params.nid = dev_to_node(priv->device);
pp_params.dev = priv->device;
@@ -2012,7 +2066,7 @@ static int __alloc_dma_rx_desc_resources
return ret;
}
- rx_q->buf_pool = kcalloc(priv->dma_conf.dma_rx_size,
+ rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
sizeof(*rx_q->buf_pool),
GFP_KERNEL);
if (!rx_q->buf_pool)
@@ -2020,7 +2074,7 @@ static int __alloc_dma_rx_desc_resources
if (priv->extend_desc) {
rx_q->dma_erx = dma_alloc_coherent(priv->device,
- priv->dma_conf.dma_rx_size *
+ dma_conf->dma_rx_size *
sizeof(struct dma_extended_desc),
&rx_q->dma_rx_phy,
GFP_KERNEL);
@@ -2029,7 +2083,7 @@ static int __alloc_dma_rx_desc_resources
} else {
rx_q->dma_rx = dma_alloc_coherent(priv->device,
- priv->dma_conf.dma_rx_size *
+ dma_conf->dma_rx_size *
sizeof(struct dma_desc),
&rx_q->dma_rx_phy,
GFP_KERNEL);
@@ -2054,7 +2108,8 @@ static int __alloc_dma_rx_desc_resources
return 0;
}
-static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
+static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 rx_count = priv->plat->rx_queues_to_use;
u32 queue;
@@ -2062,7 +2117,7 @@ static int alloc_dma_rx_desc_resources(s
/* RX queues buffers and DMA */
for (queue = 0; queue < rx_count; queue++) {
- ret = __alloc_dma_rx_desc_resources(priv, queue);
+ ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
if (ret)
goto err_dma;
}
@@ -2070,7 +2125,7 @@ static int alloc_dma_rx_desc_resources(s
return 0;
err_dma:
- free_dma_rx_desc_resources(priv);
+ free_dma_rx_desc_resources(priv, dma_conf);
return ret;
}
@@ -2078,28 +2133,31 @@ err_dma:
/**
* __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: TX queue index
* Description: according to which descriptor can be used (extend or basic)
* this function allocates the resources for TX and RX paths. In case of
* reception, for example, it pre-allocated the RX socket buffer in order to
* allow zero-copy mechanism.
*/
-static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
+static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
size_t size;
void *addr;
tx_q->queue_index = queue;
tx_q->priv_data = priv;
- tx_q->tx_skbuff_dma = kcalloc(priv->dma_conf.dma_tx_size,
+ tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
sizeof(*tx_q->tx_skbuff_dma),
GFP_KERNEL);
if (!tx_q->tx_skbuff_dma)
return -ENOMEM;
- tx_q->tx_skbuff = kcalloc(priv->dma_conf.dma_tx_size,
+ tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
sizeof(struct sk_buff *),
GFP_KERNEL);
if (!tx_q->tx_skbuff)
@@ -2112,7 +2170,7 @@ static int __alloc_dma_tx_desc_resources
else
size = sizeof(struct dma_desc);
- size *= priv->dma_conf.dma_tx_size;
+ size *= dma_conf->dma_tx_size;
addr = dma_alloc_coherent(priv->device, size,
&tx_q->dma_tx_phy, GFP_KERNEL);
@@ -2129,7 +2187,8 @@ static int __alloc_dma_tx_desc_resources
return 0;
}
-static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
+static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 tx_count = priv->plat->tx_queues_to_use;
u32 queue;
@@ -2137,7 +2196,7 @@ static int alloc_dma_tx_desc_resources(s
/* TX queues buffers and DMA */
for (queue = 0; queue < tx_count; queue++) {
- ret = __alloc_dma_tx_desc_resources(priv, queue);
+ ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
if (ret)
goto err_dma;
}
@@ -2145,27 +2204,29 @@ static int alloc_dma_tx_desc_resources(s
return 0;
err_dma:
- free_dma_tx_desc_resources(priv);
+ free_dma_tx_desc_resources(priv, dma_conf);
return ret;
}
/**
* alloc_dma_desc_resources - alloc TX/RX resources.
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* Description: according to which descriptor can be used (extend or basic)
* this function allocates the resources for TX and RX paths. In case of
* reception, for example, it pre-allocated the RX socket buffer in order to
* allow zero-copy mechanism.
*/
-static int alloc_dma_desc_resources(struct stmmac_priv *priv)
+static int alloc_dma_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
/* RX Allocation */
- int ret = alloc_dma_rx_desc_resources(priv);
+ int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
if (ret)
return ret;
- ret = alloc_dma_tx_desc_resources(priv);
+ ret = alloc_dma_tx_desc_resources(priv, dma_conf);
return ret;
}
@@ -2173,16 +2234,18 @@ static int alloc_dma_desc_resources(stru
/**
* free_dma_desc_resources - free dma desc resources
* @priv: private structure
+ * @dma_conf: structure to take the dma data
*/
-static void free_dma_desc_resources(struct stmmac_priv *priv)
+static void free_dma_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
/* Release the DMA TX socket buffers */
- free_dma_tx_desc_resources(priv);
+ free_dma_tx_desc_resources(priv, dma_conf);
/* Release the DMA RX socket buffers later
* to ensure all pending XDP_TX buffers are returned.
*/
- free_dma_rx_desc_resources(priv);
+ free_dma_rx_desc_resources(priv, dma_conf);
}
/**
@@ -2671,8 +2734,8 @@ static void stmmac_tx_err(struct stmmac_
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
stmmac_stop_tx_dma(priv, chan);
- dma_free_tx_skbufs(priv, chan);
- stmmac_clear_tx_descriptors(priv, chan);
+ dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
+ stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
stmmac_reset_tx_queue(priv, chan);
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, chan);
@@ -3669,19 +3732,93 @@ static int stmmac_request_irq(struct net
}
/**
- * stmmac_open - open entry point of the driver
+ * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
+ * @priv: driver private structure
+ * @mtu: MTU to setup the dma queue and buf with
+ * Description: Allocate and generate a dma_conf based on the provided MTU.
+ * Allocate the Tx/Rx DMA queue and init them.
+ * Return value:
+ * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
+ */
+static struct stmmac_dma_conf *
+stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
+{
+ struct stmmac_dma_conf *dma_conf;
+ int chan, bfsize, ret;
+
+ dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
+ if (!dma_conf) {
+ netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
+ __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ bfsize = stmmac_set_16kib_bfsize(priv, mtu);
+ if (bfsize < 0)
+ bfsize = 0;
+
+ if (bfsize < BUF_SIZE_16KiB)
+ bfsize = stmmac_set_bfsize(mtu, 0);
+
+ dma_conf->dma_buf_sz = bfsize;
+ /* Chose the tx/rx size from the already defined one in the
+ * priv struct. (if defined)
+ */
+ dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
+ dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
+
+ if (!dma_conf->dma_tx_size)
+ dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
+ if (!dma_conf->dma_rx_size)
+ dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
+
+ /* Earlier check for TBS */
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
+ int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
+
+ /* Setup per-TXQ tbs flag before TX descriptor alloc */
+ tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
+ }
+
+ ret = alloc_dma_desc_resources(priv, dma_conf);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
+ __func__);
+ goto alloc_error;
+ }
+
+ ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
+ __func__);
+ goto init_error;
+ }
+
+ return dma_conf;
+
+init_error:
+ free_dma_desc_resources(priv, dma_conf);
+alloc_error:
+ kfree(dma_conf);
+ return ERR_PTR(ret);
+}
+
+/**
+ * __stmmac_open - open entry point of the driver
* @dev : pointer to the device structure.
+ * @dma_conf : structure to take the dma data
* Description:
* This function is the open entry point of the driver.
* Return value:
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
-static int stmmac_open(struct net_device *dev)
+static int __stmmac_open(struct net_device *dev,
+ struct stmmac_dma_conf *dma_conf)
{
struct stmmac_priv *priv = netdev_priv(dev);
int mode = priv->plat->phy_interface;
- int bfsize = 0;
u32 chan;
int ret;
@@ -3708,45 +3845,10 @@ static int stmmac_open(struct net_device
memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
priv->xstats.threshold = tc;
- bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
- if (bfsize < 0)
- bfsize = 0;
-
- if (bfsize < BUF_SIZE_16KiB)
- bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_conf.dma_buf_sz);
-
- priv->dma_conf.dma_buf_sz = bfsize;
- buf_sz = bfsize;
-
priv->rx_copybreak = STMMAC_RX_COPYBREAK;
- if (!priv->dma_conf.dma_tx_size)
- priv->dma_conf.dma_tx_size = DMA_DEFAULT_TX_SIZE;
- if (!priv->dma_conf.dma_rx_size)
- priv->dma_conf.dma_rx_size = DMA_DEFAULT_RX_SIZE;
-
- /* Earlier check for TBS */
- for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
- int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
-
- /* Setup per-TXQ tbs flag before TX descriptor alloc */
- tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
- }
-
- ret = alloc_dma_desc_resources(priv);
- if (ret < 0) {
- netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
- __func__);
- goto dma_desc_error;
- }
-
- ret = init_dma_desc_rings(dev, GFP_KERNEL);
- if (ret < 0) {
- netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
- __func__);
- goto init_error;
- }
+ buf_sz = dma_conf->dma_buf_sz;
+ memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
if (priv->plat->serdes_powerup) {
ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
@@ -3789,14 +3891,28 @@ irq_error:
stmmac_hw_teardown(dev);
init_error:
- free_dma_desc_resources(priv);
-dma_desc_error:
+ free_dma_desc_resources(priv, &priv->dma_conf);
phylink_disconnect_phy(priv->phylink);
init_phy_error:
pm_runtime_put(priv->device);
return ret;
}
+static int stmmac_open(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct stmmac_dma_conf *dma_conf;
+ int ret;
+
+ dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
+ if (IS_ERR(dma_conf))
+ return PTR_ERR(dma_conf);
+
+ ret = __stmmac_open(dev, dma_conf);
+ kfree(dma_conf);
+ return ret;
+}
+
static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
{
set_bit(__FPE_REMOVING, &priv->fpe_task_state);
@@ -3843,7 +3959,7 @@ static int stmmac_release(struct net_dev
stmmac_stop_all_dma(priv);
/* Release and free the Rx/Tx resources */
- free_dma_desc_resources(priv);
+ free_dma_desc_resources(priv, &priv->dma_conf);
/* Disable the MAC Rx/Tx */
stmmac_mac_set(priv, priv->ioaddr, false);
@@ -6382,7 +6498,7 @@ void stmmac_disable_rx_queue(struct stmm
spin_unlock_irqrestore(&ch->lock, flags);
stmmac_stop_rx_dma(priv, queue);
- __free_dma_rx_desc_resources(priv, queue);
+ __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
}
void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
@@ -6393,21 +6509,21 @@ void stmmac_enable_rx_queue(struct stmma
u32 buf_size;
int ret;
- ret = __alloc_dma_rx_desc_resources(priv, queue);
+ ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
if (ret) {
netdev_err(priv->dev, "Failed to alloc RX desc.\n");
return;
}
- ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL);
+ ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
if (ret) {
- __free_dma_rx_desc_resources(priv, queue);
+ __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
netdev_err(priv->dev, "Failed to init RX desc.\n");
return;
}
stmmac_reset_rx_queue(priv, queue);
- stmmac_clear_rx_descriptors(priv, queue);
+ stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
rx_q->dma_rx_phy, rx_q->queue_index);
@@ -6445,7 +6561,7 @@ void stmmac_disable_tx_queue(struct stmm
spin_unlock_irqrestore(&ch->lock, flags);
stmmac_stop_tx_dma(priv, queue);
- __free_dma_tx_desc_resources(priv, queue);
+ __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
}
void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
@@ -6455,21 +6571,21 @@ void stmmac_enable_tx_queue(struct stmma
unsigned long flags;
int ret;
- ret = __alloc_dma_tx_desc_resources(priv, queue);
+ ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
if (ret) {
netdev_err(priv->dev, "Failed to alloc TX desc.\n");
return;
}
- ret = __init_dma_tx_desc_rings(priv, queue);
+ ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
if (ret) {
- __free_dma_tx_desc_resources(priv, queue);
+ __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
netdev_err(priv->dev, "Failed to init TX desc.\n");
return;
}
stmmac_reset_tx_queue(priv, queue);
- stmmac_clear_tx_descriptors(priv, queue);
+ stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, tx_q->queue_index);
@@ -6506,7 +6622,7 @@ void stmmac_xdp_release(struct net_devic
stmmac_stop_all_dma(priv);
/* Release and free the Rx/Tx resources */
- free_dma_desc_resources(priv);
+ free_dma_desc_resources(priv, &priv->dma_conf);
/* Disable the MAC Rx/Tx */
stmmac_mac_set(priv, priv->ioaddr, false);
@@ -6531,14 +6647,14 @@ int stmmac_xdp_open(struct net_device *d
u32 chan;
int ret;
- ret = alloc_dma_desc_resources(priv);
+ ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
if (ret < 0) {
netdev_err(dev, "%s: DMA descriptors allocation failed\n",
__func__);
goto dma_desc_error;
}
- ret = init_dma_desc_rings(dev, GFP_KERNEL);
+ ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
if (ret < 0) {
netdev_err(dev, "%s: DMA descriptors initialization failed\n",
__func__);
@@ -6620,7 +6736,7 @@ irq_error:
stmmac_hw_teardown(dev);
init_error:
- free_dma_desc_resources(priv);
+ free_dma_desc_resources(priv, &priv->dma_conf);
dma_desc_error:
return ret;
}
@@ -7479,7 +7595,7 @@ int stmmac_resume(struct device *dev)
stmmac_reset_queues_param(priv);
stmmac_free_tx_skbufs(priv);
- stmmac_clear_descriptors(priv);
+ stmmac_clear_descriptors(priv, &priv->dma_conf);
stmmac_hw_setup(ndev, false);
stmmac_init_coalesce(priv);