775-v6.0-03-net-ethernet-stmicro-stmmac-move-dma-conf-to-dedicat.patch
From 8531c80800c10e8ef7952022326c2f983e1314bf Mon Sep 17 00:00:00 2001
From: Christian Marangi <[email protected]>
Date: Sat, 23 Jul 2022 16:29:31 +0200
Subject: [PATCH 3/5] net: ethernet: stmicro: stmmac: move dma conf to
 dedicated struct
Move the DMA buffer configuration to a dedicated struct. This is in
preparation for a code rework that will permit allocating a separate
dma_conf without affecting the priv struct.
Signed-off-by: Christian Marangi <[email protected]>
Signed-off-by: Jakub Kicinski <[email protected]>
---
 .../net/ethernet/stmicro/stmmac/chain_mode.c | 6 +-
 .../net/ethernet/stmicro/stmmac/ring_mode.c | 4 +-
 drivers/net/ethernet/stmicro/stmmac/stmmac.h | 21 +-
 .../ethernet/stmicro/stmmac/stmmac_ethtool.c | 4 +-
 .../net/ethernet/stmicro/stmmac/stmmac_main.c | 286 +++++++++---------
 .../stmicro/stmmac/stmmac_selftests.c | 8 +-
 .../net/ethernet/stmicro/stmmac/stmmac_tc.c | 6 +-
 7 files changed, 172 insertions(+), 163 deletions(-)
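
[Editorial note, not part of the patch] The net effect of the refactor is that
the ring sizes, buffer size and per-queue state that used to sit directly in
struct stmmac_priv are grouped under one member, priv->dma_conf, so call sites
change from priv->dma_tx_size to priv->dma_conf.dma_tx_size and from
&priv->tx_queue[q] to &priv->dma_conf.tx_queue[q]. Below is a minimal
standalone sketch of that access pattern, using simplified placeholder types
(not the kernel structs); tx_avail() mirrors the stmmac_tx_avail() logic seen
in the diff.

	/* Illustrative sketch only -- simplified, hypothetical types. */
	#include <stdio.h>

	#define MTL_MAX_RX_QUEUES 8
	#define MTL_MAX_TX_QUEUES 8

	struct rx_queue { unsigned int cur_rx, dirty_rx; };
	struct tx_queue { unsigned int cur_tx, dirty_tx; };

	struct dma_conf {
		unsigned int dma_buf_sz;
		struct rx_queue rx_queue[MTL_MAX_RX_QUEUES];
		unsigned int dma_rx_size;
		struct tx_queue tx_queue[MTL_MAX_TX_QUEUES];
		unsigned int dma_tx_size;
	};

	struct priv {
		/* before the patch the fields above sat directly in priv */
		struct dma_conf dma_conf;
	};

	/* mirrors stmmac_tx_avail(): free slots left in a TX ring */
	static unsigned int tx_avail(struct priv *priv, unsigned int queue)
	{
		struct tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];

		if (tx_q->dirty_tx > tx_q->cur_tx)
			return tx_q->dirty_tx - tx_q->cur_tx - 1;
		return priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
	}

	int main(void)
	{
		struct priv p = { .dma_conf = { .dma_tx_size = 512 } };

		p.dma_conf.tx_queue[0].cur_tx = 10;
		p.dma_conf.tx_queue[0].dirty_tx = 4;
		printf("tx avail: %u\n", tx_avail(&p, 0));
		return 0;
	}
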
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -46,7 +46,7 @@ static int jumbo_frm(void *p, struct sk_
while (len != 0) {
tx_q->tx_skbuff[entry] = NULL;
- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
desc = tx_q->dma_tx + entry;
if (len > bmax) {
@@ -137,7 +137,7 @@ static void refill_desc3(void *priv_ptr,
*/
p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy +
(((rx_q->dirty_rx) + 1) %
- priv->dma_rx_size) *
+ priv->dma_conf.dma_rx_size) *
sizeof(struct dma_desc)));
}
@@ -155,7 +155,7 @@ static void clean_desc3(void *priv_ptr,
*/
p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy +
((tx_q->dirty_tx + 1) %
- priv->dma_tx_size))
+ priv->dma_conf.dma_tx_size))
* sizeof(struct dma_desc)));
}
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -51,7 +51,7 @@ static int jumbo_frm(void *p, struct sk_
stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
STMMAC_RING_MODE, 0, false, skb->len);
tx_q->tx_skbuff[entry] = NULL;
- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
if (priv->extend_desc)
desc = (struct dma_desc *)(tx_q->dma_etx + entry);
@@ -107,7 +107,7 @@ static void refill_desc3(void *priv_ptr,
struct stmmac_priv *priv = rx_q->priv_data;
/* Fill DES3 in case of RING mode */
- if (priv->dma_buf_sz == BUF_SIZE_16KiB)
+ if (priv->dma_conf.dma_buf_sz == BUF_SIZE_16KiB)
p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
}
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -185,6 +185,18 @@ struct stmmac_rfs_entry {
int tc;
};
+struct stmmac_dma_conf {
+ unsigned int dma_buf_sz;
+
+ /* RX Queue */
+ struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];
+ unsigned int dma_rx_size;
+
+ /* TX Queue */
+ struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
+ unsigned int dma_tx_size;
+};
+
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
u32 tx_coal_frames[MTL_MAX_TX_QUEUES];
@@ -199,7 +211,6 @@ struct stmmac_priv {
int sph_cap;
u32 sarc_type;
- unsigned int dma_buf_sz;
unsigned int rx_copybreak;
u32 rx_riwt[MTL_MAX_TX_QUEUES];
int hwts_rx_en;
@@ -211,13 +222,7 @@ struct stmmac_priv {
int (*hwif_quirks)(struct stmmac_priv *priv);
struct mutex lock;
- /* RX Queue */
- struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];
- unsigned int dma_rx_size;
-
- /* TX Queue */
- struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
- unsigned int dma_tx_size;
+ struct stmmac_dma_conf dma_conf;
/* Generic channel for NAPI */
struct stmmac_channel channel[STMMAC_CH_MAX];
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -484,8 +484,8 @@ static void stmmac_get_ringparam(struct
ring->rx_max_pending = DMA_MAX_RX_SIZE;
ring->tx_max_pending = DMA_MAX_TX_SIZE;
- ring->rx_pending = priv->dma_rx_size;
- ring->tx_pending = priv->dma_tx_size;
+ ring->rx_pending = priv->dma_conf.dma_rx_size;
+ ring->tx_pending = priv->dma_conf.dma_tx_size;
}
static int stmmac_set_ringparam(struct net_device *netdev,
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -74,8 +74,8 @@ static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");
-#define STMMAC_TX_THRESH(x) ((x)->dma_tx_size / 4)
-#define STMMAC_RX_THRESH(x) ((x)->dma_rx_size / 4)
+#define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
+#define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4)
/* Limit to make sure XDP TX and slow path can coexist */
#define STMMAC_XSK_TX_BUDGET_MAX 256
@@ -232,7 +232,7 @@ static void stmmac_disable_all_queues(st
/* synchronize_rcu() needed for pending XDP buffers to drain */
for (queue = 0; queue < rx_queues_cnt; queue++) {
- rx_q = &priv->rx_queue[queue];
+ rx_q = &priv->dma_conf.rx_queue[queue];
if (rx_q->xsk_pool) {
synchronize_rcu();
break;
@@ -358,13 +358,13 @@ static void print_pkt(unsigned char *buf
static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
u32 avail;
if (tx_q->dirty_tx > tx_q->cur_tx)
avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
else
- avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
+ avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
return avail;
}
@@ -376,13 +376,13 @@ static inline u32 stmmac_tx_avail(struct
*/
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
u32 dirty;
if (rx_q->dirty_rx <= rx_q->cur_rx)
dirty = rx_q->cur_rx - rx_q->dirty_rx;
else
- dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
+ dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
return dirty;
}
@@ -410,7 +410,7 @@ static int stmmac_enable_eee_mode(struct
/* check if all TX queues have the work finished */
for (queue = 0; queue < tx_cnt; queue++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
if (tx_q->dirty_tx != tx_q->cur_tx)
return -EBUSY; /* still unfinished work */
@@ -1309,7 +1309,7 @@ static void stmmac_display_rx_rings(stru
/* Display RX rings */
for (queue = 0; queue < rx_cnt; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
pr_info("\tRX Queue %u rings\n", queue);
@@ -1322,7 +1322,7 @@ static void stmmac_display_rx_rings(stru
}
/* Display RX ring */
- stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
+ stmmac_display_ring(priv, head_rx, priv->dma_conf.dma_rx_size, true,
rx_q->dma_rx_phy, desc_size);
}
}
@@ -1336,7 +1336,7 @@ static void stmmac_display_tx_rings(stru
/* Display TX rings */
for (queue = 0; queue < tx_cnt; queue++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
pr_info("\tTX Queue %d rings\n", queue);
@@ -1351,7 +1351,7 @@ static void stmmac_display_tx_rings(stru
desc_size = sizeof(struct dma_desc);
}
- stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
+ stmmac_display_ring(priv, head_tx, priv->dma_conf.dma_tx_size, false,
tx_q->dma_tx_phy, desc_size);
}
}
@@ -1392,21 +1392,21 @@ static int stmmac_set_bfsize(int mtu, in
*/
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
int i;
/* Clear the RX descriptors */
- for (i = 0; i < priv->dma_rx_size; i++)
+ for (i = 0; i < priv->dma_conf.dma_rx_size; i++)
if (priv->extend_desc)
stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
priv->use_riwt, priv->mode,
- (i == priv->dma_rx_size - 1),
- priv->dma_buf_sz);
+ (i == priv->dma_conf.dma_rx_size - 1),
+ priv->dma_conf.dma_buf_sz);
else
stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
priv->use_riwt, priv->mode,
- (i == priv->dma_rx_size - 1),
- priv->dma_buf_sz);
+ (i == priv->dma_conf.dma_rx_size - 1),
+ priv->dma_conf.dma_buf_sz);
}
/**
@@ -1418,12 +1418,12 @@ static void stmmac_clear_rx_descriptors(
*/
static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
int i;
/* Clear the TX descriptors */
- for (i = 0; i < priv->dma_tx_size; i++) {
- int last = (i == (priv->dma_tx_size - 1));
+ for (i = 0; i < priv->dma_conf.dma_tx_size; i++) {
+ int last = (i == (priv->dma_conf.dma_tx_size - 1));
struct dma_desc *p;
if (priv->extend_desc)
@@ -1471,7 +1471,7 @@ static void stmmac_clear_descriptors(str
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
int i, gfp_t flags, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
if (!buf->page) {
@@ -1496,7 +1496,7 @@ static int stmmac_init_rx_buffers(struct
buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
stmmac_set_desc_addr(priv, p, buf->addr);
- if (priv->dma_buf_sz == BUF_SIZE_16KiB)
+ if (priv->dma_conf.dma_buf_sz == BUF_SIZE_16KiB)
stmmac_init_desc3(priv, p);
return 0;
@@ -1510,7 +1510,7 @@ static int stmmac_init_rx_buffers(struct
*/
static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
if (buf->page)
@@ -1530,7 +1530,7 @@ static void stmmac_free_rx_buffer(struct
*/
static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
if (tx_q->tx_skbuff_dma[i].buf &&
tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
@@ -1575,17 +1575,17 @@ static void dma_free_rx_skbufs(struct st
{
int i;
- for (i = 0; i < priv->dma_rx_size; i++)
+ for (i = 0; i < priv->dma_conf.dma_rx_size; i++)
stmmac_free_rx_buffer(priv, queue, i);
}
static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
gfp_t flags)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
int i;
- for (i = 0; i < priv->dma_rx_size; i++) {
+ for (i = 0; i < priv->dma_conf.dma_rx_size; i++) {
struct dma_desc *p;
int ret;
@@ -1612,10 +1612,10 @@ static int stmmac_alloc_rx_buffers(struc
*/
static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
int i;
- for (i = 0; i < priv->dma_rx_size; i++) {
+ for (i = 0; i < priv->dma_conf.dma_rx_size; i++) {
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
if (!buf->xdp)
@@ -1628,10 +1628,10 @@ static void dma_free_rx_xskbufs(struct s
static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
int i;
- for (i = 0; i < priv->dma_rx_size; i++) {
+ for (i = 0; i < priv->dma_conf.dma_rx_size; i++) {
struct stmmac_rx_buffer *buf;
dma_addr_t dma_addr;
struct dma_desc *p;
@@ -1674,7 +1674,7 @@ static struct xsk_buff_pool *stmmac_get_
*/
static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
int ret;
netif_dbg(priv, probe, priv->dev,
@@ -1720,11 +1720,11 @@ static int __init_dma_rx_desc_rings(stru
if (priv->extend_desc)
stmmac_mode_init(priv, rx_q->dma_erx,
rx_q->dma_rx_phy,
- priv->dma_rx_size, 1);
+ priv->dma_conf.dma_rx_size, 1);
else
stmmac_mode_init(priv, rx_q->dma_rx,
rx_q->dma_rx_phy,
- priv->dma_rx_size, 0);
+ priv->dma_conf.dma_rx_size, 0);
}
return 0;
@@ -1751,7 +1751,7 @@ static int init_dma_rx_desc_rings(struct
err_init_rx_buffers:
while (queue >= 0) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
if (rx_q->xsk_pool)
dma_free_rx_xskbufs(priv, queue);
@@ -1780,7 +1780,7 @@ err_init_rx_buffers:
*/
static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
int i;
netif_dbg(priv, probe, priv->dev,
@@ -1792,16 +1792,16 @@ static int __init_dma_tx_desc_rings(stru
if (priv->extend_desc)
stmmac_mode_init(priv, tx_q->dma_etx,
tx_q->dma_tx_phy,
- priv->dma_tx_size, 1);
+ priv->dma_conf.dma_tx_size, 1);
else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
stmmac_mode_init(priv, tx_q->dma_tx,
tx_q->dma_tx_phy,
- priv->dma_tx_size, 0);
+ priv->dma_conf.dma_tx_size, 0);
}
tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
- for (i = 0; i < priv->dma_tx_size; i++) {
+ for (i = 0; i < priv->dma_conf.dma_tx_size; i++) {
struct dma_desc *p;
if (priv->extend_desc)
@@ -1871,12 +1871,12 @@ static int init_dma_desc_rings(struct ne
*/
static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
int i;
tx_q->xsk_frames_done = 0;
- for (i = 0; i < priv->dma_tx_size; i++)
+ for (i = 0; i < priv->dma_conf.dma_tx_size; i++)
stmmac_free_tx_buffer(priv, queue, i);
if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
@@ -1906,7 +1906,7 @@ static void stmmac_free_tx_skbufs(struct
*/
static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
/* Release the DMA RX socket buffers */
if (rx_q->xsk_pool)
@@ -1919,11 +1919,11 @@ static void __free_dma_rx_desc_resources
/* Free DMA regions of consistent memory previously allocated */
if (!priv->extend_desc)
- dma_free_coherent(priv->device, priv->dma_rx_size *
+ dma_free_coherent(priv->device, priv->dma_conf.dma_rx_size *
sizeof(struct dma_desc),
rx_q->dma_rx, rx_q->dma_rx_phy);
else
- dma_free_coherent(priv->device, priv->dma_rx_size *
+ dma_free_coherent(priv->device, priv->dma_conf.dma_rx_size *
sizeof(struct dma_extended_desc),
rx_q->dma_erx, rx_q->dma_rx_phy);
@@ -1952,7 +1952,7 @@ static void free_dma_rx_desc_resources(s
*/
static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
size_t size;
void *addr;
@@ -1970,7 +1970,7 @@ static void __free_dma_tx_desc_resources
addr = tx_q->dma_tx;
}
- size *= priv->dma_tx_size;
+ size *= priv->dma_conf.dma_tx_size;
dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
@@ -1999,7 +1999,7 @@ static void free_dma_tx_desc_resources(s
*/
static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
bool xdp_prog = stmmac_xdp_is_enabled(priv);
struct page_pool_params pp_params = { 0 };
@@ -2011,8 +2011,8 @@ static int __alloc_dma_rx_desc_resources
rx_q->priv_data = priv;
pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
- pp_params.pool_size = priv->dma_rx_size;
- num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
+ pp_params.pool_size = priv->dma_conf.dma_rx_size;
+ num_pages = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE);
pp_params.order = ilog2(num_pages);
pp_params.nid = dev_to_node(priv->device);
pp_params.dev = priv->device;
@@ -2027,7 +2027,7 @@ static int __alloc_dma_rx_desc_resources
return ret;
}
- rx_q->buf_pool = kcalloc(priv->dma_rx_size,
+ rx_q->buf_pool = kcalloc(priv->dma_conf.dma_rx_size,
sizeof(*rx_q->buf_pool),
GFP_KERNEL);
if (!rx_q->buf_pool)
@@ -2035,7 +2035,7 @@ static int __alloc_dma_rx_desc_resources
if (priv->extend_desc) {
rx_q->dma_erx = dma_alloc_coherent(priv->device,
- priv->dma_rx_size *
+ priv->dma_conf.dma_rx_size *
sizeof(struct dma_extended_desc),
&rx_q->dma_rx_phy,
GFP_KERNEL);
@@ -2044,7 +2044,7 @@ static int __alloc_dma_rx_desc_resources
} else {
rx_q->dma_rx = dma_alloc_coherent(priv->device,
- priv->dma_rx_size *
+ priv->dma_conf.dma_rx_size *
sizeof(struct dma_desc),
&rx_q->dma_rx_phy,
GFP_KERNEL);
@@ -2101,20 +2101,20 @@ err_dma:
*/
static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
size_t size;
void *addr;
tx_q->queue_index = queue;
tx_q->priv_data = priv;
- tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
+ tx_q->tx_skbuff_dma = kcalloc(priv->dma_conf.dma_tx_size,
sizeof(*tx_q->tx_skbuff_dma),
GFP_KERNEL);
if (!tx_q->tx_skbuff_dma)
return -ENOMEM;
- tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
+ tx_q->tx_skbuff = kcalloc(priv->dma_conf.dma_tx_size,
sizeof(struct sk_buff *),
GFP_KERNEL);
if (!tx_q->tx_skbuff)
@@ -2127,7 +2127,7 @@ static int __alloc_dma_tx_desc_resources
else
size = sizeof(struct dma_desc);
- size *= priv->dma_tx_size;
+ size *= priv->dma_conf.dma_tx_size;
addr = dma_alloc_coherent(priv->device, size,
&tx_q->dma_tx_phy, GFP_KERNEL);
@@ -2371,7 +2371,7 @@ static void stmmac_dma_operation_mode(st
/* configure all channels */
for (chan = 0; chan < rx_channels_count; chan++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
u32 buf_size;
qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
@@ -2386,7 +2386,7 @@ static void stmmac_dma_operation_mode(st
chan);
} else {
stmmac_set_dma_bfsize(priv, priv->ioaddr,
- priv->dma_buf_sz,
+ priv->dma_conf.dma_buf_sz,
chan);
}
}
@@ -2402,7 +2402,7 @@ static void stmmac_dma_operation_mode(st
static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
struct xsk_buff_pool *pool = tx_q->xsk_pool;
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc = NULL;
@@ -2477,7 +2477,7 @@ static bool stmmac_xdp_xmit_zc(struct st
stmmac_enable_dma_transmission(priv, priv->ioaddr);
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
entry = tx_q->cur_tx;
}
@@ -2503,7 +2503,7 @@ static bool stmmac_xdp_xmit_zc(struct st
*/
static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
unsigned int bytes_compl = 0, pkts_compl = 0;
unsigned int entry, xmits = 0, count = 0;
@@ -2516,7 +2516,7 @@ static int stmmac_tx_clean(struct stmmac
entry = tx_q->dirty_tx;
/* Try to clean all TX complete frame in 1 shot */
- while ((entry != tx_q->cur_tx) && count < priv->dma_tx_size) {
+ while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
struct xdp_frame *xdpf;
struct sk_buff *skb;
struct dma_desc *p;
@@ -2616,7 +2616,7 @@ static int stmmac_tx_clean(struct stmmac
stmmac_release_tx_desc(priv, p, priv->mode);
- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
}
tx_q->dirty_tx = entry;
@@ -2681,7 +2681,7 @@ static int stmmac_tx_clean(struct stmmac
*/
static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
@@ -2748,8 +2748,8 @@ static int stmmac_napi_check(struct stmm
{
int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
&priv->xstats, chan, dir);
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
struct stmmac_channel *ch = &priv->channel[chan];
struct napi_struct *rx_napi;
struct napi_struct *tx_napi;
@@ -2925,7 +2925,7 @@ static int stmmac_init_dma_engine(struct
/* DMA RX Channel Configuration */
for (chan = 0; chan < rx_channels_count; chan++) {
- rx_q = &priv->rx_queue[chan];
+ rx_q = &priv->dma_conf.rx_queue[chan];
stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
rx_q->dma_rx_phy, chan);
@@ -2939,7 +2939,7 @@ static int stmmac_init_dma_engine(struct
/* DMA TX Channel Configuration */
for (chan = 0; chan < tx_channels_count; chan++) {
- tx_q = &priv->tx_queue[chan];
+ tx_q = &priv->dma_conf.tx_queue[chan];
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, chan);
@@ -2954,7 +2954,7 @@ static int stmmac_init_dma_engine(struct
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
hrtimer_start(&tx_q->txtimer,
STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
@@ -3004,7 +3004,7 @@ static void stmmac_init_coalesce(struct
u32 chan;
for (chan = 0; chan < tx_channel_count; chan++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
@@ -3026,12 +3026,12 @@ static void stmmac_set_rings_length(stru
/* set TX ring length */
for (chan = 0; chan < tx_channels_count; chan++)
stmmac_set_tx_ring_len(priv, priv->ioaddr,
- (priv->dma_tx_size - 1), chan);
+ (priv->dma_conf.dma_tx_size - 1), chan);
/* set RX ring length */
for (chan = 0; chan < rx_channels_count; chan++)
stmmac_set_rx_ring_len(priv, priv->ioaddr,
- (priv->dma_rx_size - 1), chan);
+ (priv->dma_conf.dma_rx_size - 1), chan);
}
/**
@@ -3366,7 +3366,7 @@ static int stmmac_hw_setup(struct net_de
/* Enable TSO */
if (priv->tso) {
for (chan = 0; chan < tx_cnt; chan++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
/* TSO and TBS cannot co-exist */
if (tx_q->tbs & STMMAC_TBS_AVAIL)
@@ -3388,7 +3388,7 @@ static int stmmac_hw_setup(struct net_de
/* TBS */
for (chan = 0; chan < tx_cnt; chan++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
@@ -3432,7 +3432,7 @@ static void stmmac_free_irq(struct net_d
for (j = irq_idx - 1; j >= 0; j--) {
if (priv->tx_irq[j] > 0) {
irq_set_affinity_hint(priv->tx_irq[j], NULL);
- free_irq(priv->tx_irq[j], &priv->tx_queue[j]);
+ free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
}
}
irq_idx = priv->plat->rx_queues_to_use;
@@ -3441,7 +3441,7 @@ static void stmmac_free_irq(struct net_d
for (j = irq_idx - 1; j >= 0; j--) {
if (priv->rx_irq[j] > 0) {
irq_set_affinity_hint(priv->rx_irq[j], NULL);
- free_irq(priv->rx_irq[j], &priv->rx_queue[j]);
+ free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
}
}
@@ -3574,7 +3574,7 @@ static int stmmac_request_irq_multi_msi(
sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
ret = request_irq(priv->rx_irq[i],
stmmac_msi_intr_rx,
- 0, int_name, &priv->rx_queue[i]);
+ 0, int_name, &priv->dma_conf.rx_queue[i]);
if (unlikely(ret < 0)) {
netdev_err(priv->dev,
"%s: alloc rx-%d MSI %d (error: %d)\n",
@@ -3597,7 +3597,7 @@ static int stmmac_request_irq_multi_msi(
sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
ret = request_irq(priv->tx_irq[i],
stmmac_msi_intr_tx,
- 0, int_name, &priv->tx_queue[i]);
+ 0, int_name, &priv->dma_conf.tx_queue[i]);
if (unlikely(ret < 0)) {
netdev_err(priv->dev,
"%s: alloc tx-%d MSI %d (error: %d)\n",
@@ -3728,21 +3728,21 @@ static int stmmac_open(struct net_device
bfsize = 0;
if (bfsize < BUF_SIZE_16KiB)
- bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
+ bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_conf.dma_buf_sz);
- priv->dma_buf_sz = bfsize;
+ priv->dma_conf.dma_buf_sz = bfsize;
buf_sz = bfsize;
priv->rx_copybreak = STMMAC_RX_COPYBREAK;
- if (!priv->dma_tx_size)
- priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
- if (!priv->dma_rx_size)
- priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
+ if (!priv->dma_conf.dma_tx_size)
+ priv->dma_conf.dma_tx_size = DMA_DEFAULT_TX_SIZE;
+ if (!priv->dma_conf.dma_rx_size)
+ priv->dma_conf.dma_rx_size = DMA_DEFAULT_RX_SIZE;
/* Earlier check for TBS */
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
/* Setup per-TXQ tbs flag before TX descriptor alloc */
@@ -3800,7 +3800,7 @@ irq_error:
phylink_stop(priv->phylink);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
- hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
stmmac_hw_teardown(dev);
init_error:
@@ -3842,7 +3842,7 @@ static int stmmac_release(struct net_dev
stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
- hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
netif_tx_disable(dev);
@@ -3906,7 +3906,7 @@ static bool stmmac_vlan_insert(struct st
return false;
stmmac_set_tx_owner(priv, p);
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
return true;
}
@@ -3924,7 +3924,7 @@ static bool stmmac_vlan_insert(struct st
static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
int total_len, bool last_segment, u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
struct dma_desc *desc;
u32 buff_size;
int tmp_len;
@@ -3935,7 +3935,7 @@ static void stmmac_tso_allocator(struct
dma_addr_t curr_addr;
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
- priv->dma_tx_size);
+ priv->dma_conf.dma_tx_size);
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
if (tx_q->tbs & STMMAC_TBS_AVAIL)
@@ -3963,7 +3963,7 @@ static void stmmac_tso_allocator(struct
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
int desc_size;
if (likely(priv->extend_desc))
@@ -4025,7 +4025,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
dma_addr_t des;
int i;
- tx_q = &priv->tx_queue[queue];
+ tx_q = &priv->dma_conf.tx_queue[queue];
first_tx = tx_q->cur_tx;
/* Compute header lengths */
@@ -4065,7 +4065,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
stmmac_set_mss(priv, mss_desc, mss);
tx_q->mss = mss;
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
- priv->dma_tx_size);
+ priv->dma_conf.dma_tx_size);
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
}
@@ -4177,7 +4177,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
* ndo_start_xmit will fill this descriptor the next time it's
* called and stmmac_tx_clean may clean up to this descriptor.
*/
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
@@ -4265,7 +4265,7 @@ static netdev_tx_t stmmac_xmit(struct sk
int entry, first_tx;
dma_addr_t des;
- tx_q = &priv->tx_queue[queue];
+ tx_q = &priv->dma_conf.tx_queue[queue];
first_tx = tx_q->cur_tx;
if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
@@ -4328,7 +4328,7 @@ static netdev_tx_t stmmac_xmit(struct sk
int len = skb_frag_size(frag);
bool last_segment = (i == (nfrags - 1));
- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
WARN_ON(tx_q->tx_skbuff[entry]);
if (likely(priv->extend_desc))
@@ -4399,7 +4399,7 @@ static netdev_tx_t stmmac_xmit(struct sk
* ndo_start_xmit will fill this descriptor the next time it's
* called and stmmac_tx_clean may clean up to this descriptor.
*/
- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
tx_q->cur_tx = entry;
if (netif_msg_pktdata(priv)) {
@@ -4514,7 +4514,7 @@ static void stmmac_rx_vlan(struct net_de
*/
static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
int dirty = stmmac_rx_dirty(priv, queue);
unsigned int entry = rx_q->dirty_rx;
@@ -4564,7 +4564,7 @@ static inline void stmmac_rx_refill(stru
dma_wmb();
stmmac_set_rx_owner(priv, p, use_rx_wd);
- entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
}
rx_q->dirty_rx = entry;
rx_q->rx_tail_addr = rx_q->dma_rx_phy +
@@ -4592,12 +4592,12 @@ static unsigned int stmmac_rx_buf1_len(s
/* First descriptor, not last descriptor and not split header */
if (status & rx_not_ls)
- return priv->dma_buf_sz;
+ return priv->dma_conf.dma_buf_sz;
plen = stmmac_get_rx_frame_len(priv, p, coe);
/* First descriptor and last descriptor and not split header */
- return min_t(unsigned int, priv->dma_buf_sz, plen);
+ return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
}
static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
@@ -4613,7 +4613,7 @@ static unsigned int stmmac_rx_buf2_len(s
/* Not last descriptor */
if (status & rx_not_ls)
- return priv->dma_buf_sz;
+ return priv->dma_conf.dma_buf_sz;
plen = stmmac_get_rx_frame_len(priv, p, coe);
@@ -4624,7 +4624,7 @@ static unsigned int stmmac_rx_buf2_len(s
static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
struct xdp_frame *xdpf, bool dma_map)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc;
dma_addr_t dma_addr;
@@ -4687,7 +4687,7 @@ static int stmmac_xdp_xmit_xdpf(struct s
stmmac_enable_dma_transmission(priv, priv->ioaddr);
- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
tx_q->cur_tx = entry;
return STMMAC_XDP_TX;
@@ -4861,7 +4861,7 @@ static void stmmac_dispatch_skb_zc(struc
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
unsigned int entry = rx_q->dirty_rx;
struct dma_desc *rx_desc = NULL;
bool ret = true;
@@ -4904,7 +4904,7 @@ static bool stmmac_rx_refill_zc(struct s
dma_wmb();
stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
- entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
}
if (rx_desc) {
@@ -4919,7 +4919,7 @@ static bool stmmac_rx_refill_zc(struct s
static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
unsigned int count = 0, error = 0, len = 0;
int dirty = stmmac_rx_dirty(priv, queue);
unsigned int next_entry = rx_q->cur_rx;
@@ -4941,7 +4941,7 @@ static int stmmac_rx_zc(struct stmmac_pr
desc_size = sizeof(struct dma_desc);
}
- stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
+ stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
rx_q->dma_rx_phy, desc_size);
}
while (count < limit) {
@@ -4988,7 +4988,7 @@ read_again:
/* Prefetch the next RX descriptor */
rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
- priv->dma_rx_size);
+ priv->dma_conf.dma_rx_size);
next_entry = rx_q->cur_rx;
if (priv->extend_desc)
@@ -5109,7 +5109,7 @@ read_again:
*/
static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
unsigned int count = 0, error = 0, len = 0;
int status = 0, coe = priv->hw->rx_csum;
@@ -5122,7 +5122,7 @@ static int stmmac_rx(struct stmmac_priv
int buf_sz;
dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
- buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
+ buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
if (netif_msg_rx_status(priv)) {
void *rx_head;
@@ -5136,7 +5136,7 @@ static int stmmac_rx(struct stmmac_priv
desc_size = sizeof(struct dma_desc);
}
- stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
+ stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
rx_q->dma_rx_phy, desc_size);
}
while (count < limit) {
@@ -5180,7 +5180,7 @@ read_again:
break;
rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
- priv->dma_rx_size);
+ priv->dma_conf.dma_rx_size);
next_entry = rx_q->cur_rx;
if (priv->extend_desc)
@@ -5314,7 +5314,7 @@ read_again:
buf1_len, dma_dir);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
buf->page, buf->page_offset, buf1_len,
- priv->dma_buf_sz);
+ priv->dma_conf.dma_buf_sz);
/* Data payload appended into SKB */
page_pool_release_page(rx_q->page_pool, buf->page);
@@ -5326,7 +5326,7 @@ read_again:
buf2_len, dma_dir);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
buf->sec_page, 0, buf2_len,
- priv->dma_buf_sz);
+ priv->dma_conf.dma_buf_sz);
/* Data payload appended into SKB */
page_pool_release_page(rx_q->page_pool, buf->sec_page);
@@ -5768,11 +5768,13 @@ static irqreturn_t stmmac_safety_interru
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
{
struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
+ struct stmmac_dma_conf *dma_conf;
int chan = tx_q->queue_index;
struct stmmac_priv *priv;
int status;
- priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]);
+ dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
+ priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
if (unlikely(!data)) {
netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
@@ -5812,10 +5814,12 @@ static irqreturn_t stmmac_msi_intr_tx(in
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
{
struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
+ struct stmmac_dma_conf *dma_conf;
int chan = rx_q->queue_index;
struct stmmac_priv *priv;
- priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]);
+ dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
+ priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
if (unlikely(!data)) {
netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
@@ -5846,10 +5850,10 @@ static void stmmac_poll_controller(struc
if (priv->plat->multi_msi_en) {
for (i = 0; i < priv->plat->rx_queues_to_use; i++)
- stmmac_msi_intr_rx(0, &priv->rx_queue[i]);
+ stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]);
for (i = 0; i < priv->plat->tx_queues_to_use; i++)
- stmmac_msi_intr_tx(0, &priv->tx_queue[i]);
+ stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]);
} else {
disable_irq(dev->irq);
stmmac_interrupt(dev->irq, dev);
@@ -6030,34 +6034,34 @@ static int stmmac_rings_status_show(stru
return 0;
for (queue = 0; queue < rx_count; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
seq_printf(seq, "RX Queue %d:\n", queue);
if (priv->extend_desc) {
seq_printf(seq, "Extended descriptor ring:\n");
sysfs_display_ring((void *)rx_q->dma_erx,
- priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
+ priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
} else {
seq_printf(seq, "Descriptor ring:\n");
sysfs_display_ring((void *)rx_q->dma_rx,
- priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
+ priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
}
}
for (queue = 0; queue < tx_count; queue++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
seq_printf(seq, "TX Queue %d:\n", queue);
if (priv->extend_desc) {
seq_printf(seq, "Extended descriptor ring:\n");
sysfs_display_ring((void *)tx_q->dma_etx,
- priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
+ priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
seq_printf(seq, "Descriptor ring:\n");
sysfs_display_ring((void *)tx_q->dma_tx,
- priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
+ priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
}
}
@@ -6404,7 +6408,7 @@ void stmmac_disable_rx_queue(struct stmm
void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
unsigned long flags;
u32 buf_size;
@@ -6441,7 +6445,7 @@ void stmmac_enable_rx_queue(struct stmma
rx_q->queue_index);
} else {
stmmac_set_dma_bfsize(priv, priv->ioaddr,
- priv->dma_buf_sz,
+ priv->dma_conf.dma_buf_sz,
rx_q->queue_index);
}
@@ -6467,7 +6471,7 @@ void stmmac_disable_tx_queue(struct stmm
void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
unsigned long flags;
int ret;
@@ -6517,7 +6521,7 @@ void stmmac_xdp_release(struct net_devic
stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
- hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
/* Free the IRQ lines */
stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
@@ -6576,7 +6580,7 @@ int stmmac_xdp_open(struct net_device *d
/* DMA RX Channel Configuration */
for (chan = 0; chan < rx_cnt; chan++) {
- rx_q = &priv->rx_queue[chan];
+ rx_q = &priv->dma_conf.rx_queue[chan];
stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
rx_q->dma_rx_phy, chan);
@@ -6594,7 +6598,7 @@ int stmmac_xdp_open(struct net_device *d
rx_q->queue_index);
} else {
stmmac_set_dma_bfsize(priv, priv->ioaddr,
- priv->dma_buf_sz,
+ priv->dma_conf.dma_buf_sz,
rx_q->queue_index);
}
@@ -6603,7 +6607,7 @@ int stmmac_xdp_open(struct net_device *d
/* DMA TX Channel Configuration */
for (chan = 0; chan < tx_cnt; chan++) {
- tx_q = &priv->tx_queue[chan];
+ tx_q = &priv->dma_conf.tx_queue[chan];
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, chan);
@@ -6636,7 +6640,7 @@ int stmmac_xdp_open(struct net_device *d
irq_error:
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
- hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
stmmac_hw_teardown(dev);
init_error:
@@ -6663,8 +6667,8 @@ int stmmac_xsk_wakeup(struct net_device
queue >= priv->plat->tx_queues_to_use)
return -EINVAL;
- rx_q = &priv->rx_queue[queue];
- tx_q = &priv->tx_queue[queue];
+ rx_q = &priv->dma_conf.rx_queue[queue];
+ tx_q = &priv->dma_conf.tx_queue[queue];
ch = &priv->channel[queue];
if (!rx_q->xsk_pool && !tx_q->xsk_pool)
@@ -6924,8 +6928,8 @@ int stmmac_reinit_ringparam(struct net_d
if (netif_running(dev))
stmmac_release(dev);
- priv->dma_rx_size = rx_size;
- priv->dma_tx_size = tx_size;
+ priv->dma_conf.dma_rx_size = rx_size;
+ priv->dma_conf.dma_tx_size = tx_size;
if (netif_running(dev))
ret = stmmac_open(dev);
@@ -7357,7 +7361,7 @@ int stmmac_suspend(struct device *dev)
stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
- hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
if (priv->eee_enabled) {
priv->tx_path_in_lpi_mode = false;
@@ -7408,7 +7412,7 @@ EXPORT_SYMBOL_GPL(stmmac_suspend);
static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
rx_q->cur_rx = 0;
rx_q->dirty_rx = 0;
@@ -7416,7 +7420,7 @@ static void stmmac_reset_rx_queue(struct
static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
tx_q->cur_tx = 0;
tx_q->dirty_tx = 0;
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -795,8 +795,8 @@ static int stmmac_test_flowctrl(struct s
struct stmmac_channel *ch = &priv->channel[i];
u32 tail;
- tail = priv->rx_queue[i].dma_rx_phy +
- (priv->dma_rx_size * sizeof(struct dma_desc));
+ tail = priv->dma_conf.rx_queue[i].dma_rx_phy +
+ (priv->dma_conf.dma_rx_size * sizeof(struct dma_desc));
stmmac_set_rx_tail_ptr(priv, priv->ioaddr, tail, i);
stmmac_start_rx(priv, priv->ioaddr, i);
@@ -1684,7 +1684,7 @@ cleanup:
static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue)
{
struct stmmac_packet_attrs attr = { };
- int size = priv->dma_buf_sz;
+ int size = priv->dma_conf.dma_buf_sz;
attr.dst = priv->dev->dev_addr;
attr.max_size = size - ETH_FCS_LEN;
@@ -1767,7 +1767,7 @@ static int stmmac_test_tbs(struct stmmac
/* Find first TBS enabled Queue, if any */
for (i = 0; i < priv->plat->tx_queues_to_use; i++)
- if (priv->tx_queue[i].tbs & STMMAC_TBS_AVAIL)
+ if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_AVAIL)
break;
if (i >= priv->plat->tx_queues_to_use)
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -970,13 +970,13 @@ static int tc_setup_etf(struct stmmac_pr
return -EOPNOTSUPP;
if (qopt->queue >= priv->plat->tx_queues_to_use)
return -EINVAL;
- if (!(priv->tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL))
+ if (!(priv->dma_conf.tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL))
return -EINVAL;
if (qopt->enable)
- priv->tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN;
+ priv->dma_conf.tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN;
else
- priv->tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN;
+ priv->dma_conf.tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN;
netdev_info(priv->dev, "%s ETF for Queue %d\n",
qopt->enable ? "enabled" : "disabled", qopt->queue);
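
[Editorial note, not part of the patch] The subtlest change is in the per-queue
MSI handlers: because the queue arrays now live inside dma_conf rather than
directly in struct stmmac_priv, recovering priv from the queue pointer takes
two container_of() steps (queue -> dma_conf -> priv) instead of one. The sketch
below reproduces that back-reference with simplified, hypothetical user-space
types and a local container_of macro; the driver itself passes the runtime
channel index to container_of, which the kernel's __builtin_offsetof accepts.

	/* Illustrative sketch only -- not the kernel definitions. */
	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct tx_queue { int queue_index; };

	struct dma_conf {
		struct tx_queue tx_queue[8];
	};

	struct priv {
		int id;
		struct dma_conf dma_conf;
	};

	enum { CHAN = 3 };	/* constant here; the driver uses the runtime chan */

	int main(void)
	{
		struct priv p = { .id = 42 };
		struct tx_queue *tx_q = &p.dma_conf.tx_queue[CHAN];

		/* before: priv = container_of(tx_q, struct priv, tx_queue[CHAN]); */
		struct dma_conf *conf = container_of(tx_q, struct dma_conf, tx_queue[CHAN]);
		struct priv *priv = container_of(conf, struct priv, dma_conf);

		printf("recovered priv->id = %d\n", priv->id);
		return 0;
	}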