0344-net-ethernet-qualcomm-Add-Rx-Ethernet-DMA-support.patch

  1. From b5c8c5d3888328321e8be1db50b75dff8f514e51 Mon Sep 17 00:00:00 2001
  2. From: Suruchi Agarwal <[email protected]>
  3. Date: Thu, 21 Mar 2024 16:21:19 -0700
  4. Subject: [PATCH] net: ethernet: qualcomm: Add Rx Ethernet DMA support
  5. Add Rx queue, ring and descriptor configuration, and
  6. DMA support for the EDMA.
  7. Change-Id: I612bcd661e74d5bf3ecb33de10fd5298d18ff7e9
  8. Co-developed-by: Pavithra R <[email protected]>
  9. Signed-off-by: Pavithra R <[email protected]>
  10. Signed-off-by: Suruchi Agarwal <[email protected]>
  11. Alex G: add missing functions that were previously in ppe_api.c:
  12. - ppe_edma_queue_resource_get()
  13. - ppe_edma_ring_to_queues_config()
  14. Signed-off-by: Alexandru Gagniuc <[email protected]>
  15. ---
  16. drivers/net/ethernet/qualcomm/ppe/Makefile | 2 +-
  17. drivers/net/ethernet/qualcomm/ppe/edma.c | 214 +++-
  18. drivers/net/ethernet/qualcomm/ppe/edma.h | 22 +-
  19. .../net/ethernet/qualcomm/ppe/edma_cfg_rx.c | 964 ++++++++++++++++++
  20. .../net/ethernet/qualcomm/ppe/edma_cfg_rx.h | 48 +
  21. drivers/net/ethernet/qualcomm/ppe/edma_port.c | 39 +-
  22. drivers/net/ethernet/qualcomm/ppe/edma_port.h | 31 +
  23. drivers/net/ethernet/qualcomm/ppe/edma_rx.c | 622 +++++++++++
  24. drivers/net/ethernet/qualcomm/ppe/edma_rx.h | 287 ++++++
  25. 9 files changed, 2224 insertions(+), 5 deletions(-)
  26. create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.c
  27. create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.h
  28. create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_rx.c
  29. create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_rx.h
  30. --- a/drivers/net/ethernet/qualcomm/ppe/Makefile
  31. +++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
  32. @@ -7,4 +7,4 @@ obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
  33. qcom-ppe-objs := ppe.o ppe_config.o ppe_debugfs.o ppe_port.o
  34. #EDMA
  35. -qcom-ppe-objs += edma.o edma_port.o
  36. +qcom-ppe-objs += edma.o edma_cfg_rx.o edma_port.o edma_rx.o
  37. --- a/drivers/net/ethernet/qualcomm/ppe/edma.c
  38. +++ b/drivers/net/ethernet/qualcomm/ppe/edma.c
  39. @@ -18,12 +18,23 @@
  40. #include <linux/reset.h>
  41. #include "edma.h"
  42. +#include "edma_cfg_rx.h"
  43. #include "ppe_regs.h"
  44. #define EDMA_IRQ_NAME_SIZE 32
  45. /* Global EDMA context. */
  46. struct edma_context *edma_ctx;
  47. +static char **edma_rxdesc_irq_name;
  48. +
  49. +/* Module params. */
  50. +static int page_mode;
  51. +module_param(page_mode, int, 0);
  52. +MODULE_PARM_DESC(page_mode, "Enable page mode (default:0)");
  53. +
  54. +static int rx_buff_size;
  55. +module_param(rx_buff_size, int, 0640);
  56. +MODULE_PARM_DESC(rx_buff_size, "Rx Buffer size for Jumbo MRU value (default:0)");
  57. /* Priority to multi-queue mapping. */
  58. static u8 edma_pri_map[PPE_QUEUE_INTER_PRI_NUM] = {
  59. @@ -178,6 +189,59 @@ static int edma_configure_ucast_prio_map
  60. return ret;
  61. }
  62. +static int edma_irq_register(void)
  63. +{
  64. + struct edma_hw_info *hw_info = edma_ctx->hw_info;
  65. + struct edma_ring_info *rx = hw_info->rx;
  66. + int ret;
  67. + u32 i;
  68. +
  69. + /* Request IRQ for RXDESC rings. */
  70. + edma_rxdesc_irq_name = kzalloc((sizeof(char *) * rx->num_rings),
  71. + GFP_KERNEL);
  72. + if (!edma_rxdesc_irq_name)
  73. + return -ENOMEM;
  74. +
  75. + for (i = 0; i < rx->num_rings; i++) {
  76. + edma_rxdesc_irq_name[i] = kzalloc((sizeof(char *) * EDMA_IRQ_NAME_SIZE),
  77. + GFP_KERNEL);
  78. + if (!edma_rxdesc_irq_name[i]) {
  79. + ret = -ENOMEM;
  80. + goto rxdesc_irq_name_alloc_fail;
  81. + }
  82. +
  83. + snprintf(edma_rxdesc_irq_name[i], EDMA_IRQ_NAME_SIZE, "edma_rxdesc_%d",
  84. + rx->ring_start + i);
  85. +
  86. + irq_set_status_flags(edma_ctx->intr_info.intr_rx[i], IRQ_DISABLE_UNLAZY);
  87. +
  88. + ret = request_irq(edma_ctx->intr_info.intr_rx[i],
  89. + edma_rx_handle_irq, IRQF_SHARED,
  90. + edma_rxdesc_irq_name[i],
  91. + (void *)&edma_ctx->rx_rings[i]);
  92. + if (ret) {
  93. + pr_err("RXDESC ring IRQ:%d request failed\n",
  94. + edma_ctx->intr_info.intr_rx[i]);
  95. + goto rx_desc_ring_intr_req_fail;
  96. + }
  97. +
  98. + pr_debug("RXDESC ring: %d IRQ:%d request success: %s\n",
  99. + rx->ring_start + i,
  100. + edma_ctx->intr_info.intr_rx[i],
  101. + edma_rxdesc_irq_name[i]);
  102. + }
  103. +
  104. + return 0;
  105. +
  106. +rx_desc_ring_intr_req_fail:
  107. + for (i = 0; i < rx->num_rings; i++)
  108. + kfree(edma_rxdesc_irq_name[i]);
  109. +rxdesc_irq_name_alloc_fail:
  110. + kfree(edma_rxdesc_irq_name);
  111. +
  112. + return ret;
  113. +}
  114. +
  115. static int edma_irq_init(void)
  116. {
  117. struct edma_hw_info *hw_info = edma_ctx->hw_info;
  118. @@ -260,6 +324,16 @@ static int edma_irq_init(void)
  119. return 0;
  120. }
  121. +static int edma_alloc_rings(void)
  122. +{
  123. + if (edma_cfg_rx_rings_alloc()) {
  124. + pr_err("Error in allocating Rx rings\n");
  125. + return -ENOMEM;
  126. + }
  127. +
  128. + return 0;
  129. +}
  130. +
  131. static int edma_hw_reset(void)
  132. {
  133. struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
  134. @@ -343,6 +417,40 @@ static int edma_hw_configure(void)
  135. if (!edma_ctx->netdev_arr)
  136. return -ENOMEM;
  137. + edma_ctx->dummy_dev = alloc_netdev_dummy(0);
  138. + if (!edma_ctx->dummy_dev) {
  139. + ret = -ENOMEM;
  140. + pr_err("Failed to allocate dummy device. ret: %d\n", ret);
  141. + goto dummy_dev_alloc_failed;
  142. + }
  143. +
  144. + /* Set EDMA jumbo MRU if enabled or set page mode. */
  145. + if (edma_ctx->rx_buf_size) {
  146. + edma_ctx->rx_page_mode = false;
  147. + pr_debug("Rx Jumbo mru is enabled: %d\n", edma_ctx->rx_buf_size);
  148. + } else {
  149. + edma_ctx->rx_page_mode = page_mode;
  150. + }
  151. +
  152. + ret = edma_alloc_rings();
  153. + if (ret) {
  154. + pr_err("Error in initializaing the rings. ret: %d\n", ret);
  155. + goto edma_alloc_rings_failed;
  156. + }
  157. +
  158. + /* Disable interrupts. */
  159. + edma_cfg_rx_disable_interrupts();
  160. +
  161. + edma_cfg_rx_rings_disable();
  162. +
  163. + edma_cfg_rx_ring_mappings();
  164. +
  165. + ret = edma_cfg_rx_rings();
  166. + if (ret) {
  167. + pr_err("Error in configuring Rx rings. ret: %d\n", ret);
  168. + goto edma_cfg_rx_rings_failed;
  169. + }
  170. +
  171. /* Configure DMA request priority, DMA read burst length,
  172. * and AXI write size.
  173. */
  174. @@ -376,6 +484,10 @@ static int edma_hw_configure(void)
  175. data |= EDMA_MISC_TX_TIMEOUT_MASK;
  176. edma_ctx->intr_info.intr_mask_misc = data;
  177. + edma_cfg_rx_rings_enable();
  178. + edma_cfg_rx_napi_add();
  179. + edma_cfg_rx_napi_enable();
  180. +
  181. /* Global EDMA enable and padding enable. */
  182. data = EDMA_PORT_PAD_EN | EDMA_PORT_EDMA_EN;
  183. @@ -389,11 +501,32 @@ static int edma_hw_configure(void)
  184. if (ret) {
  185. pr_err("Failed to initialize unicast priority map table: %d\n",
  186. ret);
  187. - kfree(edma_ctx->netdev_arr);
  188. - return ret;
  189. + goto configure_ucast_prio_map_tbl_failed;
  190. + }
  191. +
  192. + /* Initialize RPS hash map table. */
  193. + ret = edma_cfg_rx_rps_hash_map();
  194. + if (ret) {
  195. + pr_err("Failed to configure rps hash table: %d\n",
  196. + ret);
  197. + goto edma_cfg_rx_rps_hash_map_failed;
  198. }
  199. return 0;
  200. +
  201. +edma_cfg_rx_rps_hash_map_failed:
  202. +configure_ucast_prio_map_tbl_failed:
  203. + edma_cfg_rx_napi_disable();
  204. + edma_cfg_rx_napi_delete();
  205. + edma_cfg_rx_rings_disable();
  206. +edma_cfg_rx_rings_failed:
  207. + edma_cfg_rx_rings_cleanup();
  208. +edma_alloc_rings_failed:
  209. + free_netdev(edma_ctx->dummy_dev);
  210. +dummy_dev_alloc_failed:
  211. + kfree(edma_ctx->netdev_arr);
  212. +
  213. + return ret;
  214. }
  215. /**
  216. @@ -404,8 +537,31 @@ static int edma_hw_configure(void)
  217. */
  218. void edma_destroy(struct ppe_device *ppe_dev)
  219. {
  220. + struct edma_hw_info *hw_info = edma_ctx->hw_info;
  221. + struct edma_ring_info *rx = hw_info->rx;
  222. + u32 i;
  223. +
  224. + /* Disable interrupts. */
  225. + edma_cfg_rx_disable_interrupts();
  226. +
  227. + /* Free IRQ for RXDESC rings. */
  228. + for (i = 0; i < rx->num_rings; i++) {
  229. + synchronize_irq(edma_ctx->intr_info.intr_rx[i]);
  230. + free_irq(edma_ctx->intr_info.intr_rx[i],
  231. + (void *)&edma_ctx->rx_rings[i]);
  232. + kfree(edma_rxdesc_irq_name[i]);
  233. + }
  234. + kfree(edma_rxdesc_irq_name);
  235. +
  236. kfree(edma_ctx->intr_info.intr_rx);
  237. kfree(edma_ctx->intr_info.intr_txcmpl);
  238. +
  239. + edma_cfg_rx_napi_disable();
  240. + edma_cfg_rx_napi_delete();
  241. + edma_cfg_rx_rings_disable();
  242. + edma_cfg_rx_rings_cleanup();
  243. +
  244. + free_netdev(edma_ctx->dummy_dev);
  245. kfree(edma_ctx->netdev_arr);
  246. }
  247. @@ -428,6 +584,7 @@ int edma_setup(struct ppe_device *ppe_de
  248. edma_ctx->hw_info = &ipq9574_hw_info;
  249. edma_ctx->ppe_dev = ppe_dev;
  250. + edma_ctx->rx_buf_size = rx_buff_size;
  251. /* Configure the EDMA common clocks. */
  252. ret = edma_clock_init();
  253. @@ -450,6 +607,16 @@ int edma_setup(struct ppe_device *ppe_de
  254. return ret;
  255. }
  256. + ret = edma_irq_register();
  257. + if (ret) {
  258. + dev_err(dev, "Error in irq registration\n");
  259. + kfree(edma_ctx->intr_info.intr_rx);
  260. + kfree(edma_ctx->intr_info.intr_txcmpl);
  261. + return ret;
  262. + }
  263. +
  264. + edma_cfg_rx_enable_interrupts();
  265. +
  266. dev_info(dev, "EDMA configuration successful\n");
  267. return 0;
  268. @@ -478,3 +645,46 @@ int ppe_edma_queue_offset_config(struct
  269. return ppe_queue_ucast_offset_hash_set(ppe_dev, 0,
  270. index, queue_offset);
  271. }
  272. +
  273. +/**
  274. + * ppe_edma_queue_resource_get - Get EDMA queue resource
  275. + * @ppe_dev: PPE device
  276. + * @type: Resource type
  277. + * @res_start: Resource start ID returned
  278. + * @res_end: Resource end ID returned
  279. + *
  280. + * PPE EDMA queue resource includes unicast queue and multicast queue.
  281. + *
  282. + * Return 0 on success, negative error code on failure.
  283. + */
  284. +int ppe_edma_queue_resource_get(struct ppe_device *ppe_dev, int type,
  285. + int *res_start, int *res_end)
  286. +{
  287. + if (type != PPE_RES_UCAST && type != PPE_RES_MCAST)
  288. + return -EINVAL;
  289. +
  290. + return ppe_port_resource_get(ppe_dev, 0, type, res_start, res_end);
  291. +}
  292. +
  293. +/**
  294. + * ppe_edma_ring_to_queues_config - Map EDMA ring to PPE queues
  295. + * @ppe_dev: PPE device
  296. + * @ring_id: EDMA ring ID
  297. + * @num: Number of queues mapped to EDMA ring
  298. + * @queues: PPE queue IDs
  299. + *
  300. + * PPE queues are configured to map with the specified EDMA ring ID.
  301. + *
  302. + * Return 0 on success, negative error code on failure.
  303. + */
  304. +int ppe_edma_ring_to_queues_config(struct ppe_device *ppe_dev, int ring_id,
  305. + int num, int queues[] __counted_by(num))
  306. +{
  307. + u32 queue_bmap[PPE_RING_TO_QUEUE_BITMAP_WORD_CNT] = {};
  308. + int index;
  309. +
  310. + for (index = 0; index < num; index++)
  311. + queue_bmap[queues[index] / 32] |= BIT_MASK(queues[index] % 32);
  312. +
  313. + return ppe_ring_queue_map_set(ppe_dev, ring_id, queue_bmap);
  314. +}
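
The bitmap packing in ppe_edma_ring_to_queues_config() above folds each queue ID into one 32-bit word of queue_bmap: word queues[i] / 32, bit queues[i] % 32. Below is a minimal stand-alone sketch of that packing, fed with the same {0, 8, 16, 24} queue set that edma_cfg_rx.c passes in later in this patch; the PPE_RING_TO_QUEUE_BITMAP_WORD_CNT value used here is an assumption for illustration only, not taken from the patch.

#include <stdio.h>

/* Assumed word count for illustration; the real constant lives in the PPE headers. */
#define PPE_RING_TO_QUEUE_BITMAP_WORD_CNT 10

int main(void)
{
	int queues[] = { 0, 8, 16, 24 };	/* mirrors rx_queues[] below */
	unsigned int bmap[PPE_RING_TO_QUEUE_BITMAP_WORD_CNT] = { 0 };
	int i;

	for (i = 0; i < 4; i++)
		bmap[queues[i] / 32] |= 1U << (queues[i] % 32);

	/* Queues 0, 8, 16 and 24 all land in word 0 as 0x01010101. */
	printf("word 0 = 0x%08x\n", bmap[0]);
	return 0;
}
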
  315. --- a/drivers/net/ethernet/qualcomm/ppe/edma.h
  316. +++ b/drivers/net/ethernet/qualcomm/ppe/edma.h
  317. @@ -6,6 +6,7 @@
  318. #define __EDMA_MAIN__
  319. #include "ppe_config.h"
  320. +#include "edma_rx.h"
  321. /* One clock cycle = 1/(EDMA clock frequency in Mhz) micro seconds.
  322. *
  323. @@ -29,6 +30,11 @@
  324. /* Interface ID start. */
  325. #define EDMA_START_IFNUM 1
  326. +#define EDMA_DESC_AVAIL_COUNT(head, tail, _max) ({ \
  327. + typeof(_max) (max) = (_max); \
  328. + ((((head) - (tail)) + \
  329. + (max)) & ((max) - 1)); })
  330. +
  331. /**
  332. * enum ppe_queue_class_type - PPE queue class type
  333. * @PPE_QUEUE_CLASS_PRIORITY: Queue offset configured from internal priority
  334. @@ -92,18 +98,28 @@ struct edma_intr_info {
  335. /**
  336. * struct edma_context - EDMA context.
  337. * @netdev_arr: Net device for each EDMA port
  338. + * @dummy_dev: Dummy netdevice for RX DMA
  339. * @ppe_dev: PPE device
  340. * @hw_info: EDMA Hardware info
  341. * @intr_info: EDMA Interrupt info
  342. + * @rxfill_rings: Rx fill Rings, SW is producer
  343. + * @rx_rings: Rx Desc Rings, SW is consumer
  344. + * @rx_page_mode: Page mode enabled or disabled
  345. + * @rx_buf_size: Rx buffer size for Jumbo MRU
  346. */
  347. struct edma_context {
  348. struct net_device **netdev_arr;
  349. + struct net_device *dummy_dev;
  350. struct ppe_device *ppe_dev;
  351. struct edma_hw_info *hw_info;
  352. struct edma_intr_info intr_info;
  353. + struct edma_rxfill_ring *rxfill_rings;
  354. + struct edma_rxdesc_ring *rx_rings;
  355. + u32 rx_page_mode;
  356. + u32 rx_buf_size;
  357. };
  358. -/* Global EDMA context. */
  359. +/* Global EDMA context */
  360. extern struct edma_context *edma_ctx;
  361. void edma_destroy(struct ppe_device *ppe_dev);
  362. @@ -111,6 +127,10 @@ int edma_setup(struct ppe_device *ppe_de
  363. int ppe_edma_queue_offset_config(struct ppe_device *ppe_dev,
  364. enum ppe_queue_class_type class,
  365. int index, int queue_offset);
  366. +int ppe_edma_queue_resource_get(struct ppe_device *ppe_dev, int type,
  367. + int *res_start, int *res_end);
  368. +int ppe_edma_ring_to_queues_config(struct ppe_device *ppe_dev, int ring_id,
  369. + int num, int queues[] __counted_by(num));
  370. #endif
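
The EDMA_DESC_AVAIL_COUNT() macro added to edma.h above assumes the ring size is a power of two, so the masked subtraction wraps correctly when the head index has rolled over past the tail. A small stand-alone illustration of that arithmetic follows; the ring size of 16 is an arbitrary example value, not one taken from the driver.

#include <assert.h>

#define EDMA_DESC_AVAIL_COUNT(head, tail, _max) ({ \
	typeof(_max) (max) = (_max); \
	((((head) - (tail)) + (max)) & ((max) - 1)); })

int main(void)
{
	unsigned int ring_size = 16;	/* must be a power of two */

	/* Head ahead of tail: 13 - 3 = 10 descriptors available. */
	assert(EDMA_DESC_AVAIL_COUNT(13U, 3U, ring_size) == 10);

	/* Head wrapped past the end: (3 - 13 + 16) & 15 = 6. */
	assert(EDMA_DESC_AVAIL_COUNT(3U, 13U, ring_size) == 6);

	return 0;
}
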
  371. --- /dev/null
  372. +++ b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.c
  373. @@ -0,0 +1,964 @@
  374. +// SPDX-License-Identifier: GPL-2.0-only
  375. +/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
  376. + */
  377. +
  378. +/* Configure rings, buffers and NAPI for the receive path, along with
  379. + * providing APIs to enable, disable, clean and map the Rx rings.
  380. + */
  381. +
  382. +#include <linux/cpumask.h>
  383. +#include <linux/dma-mapping.h>
  384. +#include <linux/kernel.h>
  385. +#include <linux/netdevice.h>
  386. +#include <linux/printk.h>
  387. +#include <linux/regmap.h>
  388. +#include <linux/skbuff.h>
  389. +
  390. +#include "edma.h"
  391. +#include "edma_cfg_rx.h"
  392. +#include "ppe.h"
  393. +#include "ppe_regs.h"
  394. +
  395. +/* EDMA Queue ID to Ring ID Table. */
  396. +#define EDMA_QID2RID_TABLE_MEM(q) (0xb9000 + (0x4 * (q)))
  397. +
  398. +/* Rx ring queue offset. */
  399. +#define EDMA_QUEUE_OFFSET(q_id) ((q_id) / EDMA_MAX_PRI_PER_CORE)
  400. +
  401. +/* Rx EDMA maximum queue supported. */
  402. +#define EDMA_CPU_PORT_QUEUE_MAX(queue_start) \
  403. + ((queue_start) + (EDMA_MAX_PRI_PER_CORE * num_possible_cpus()) - 1)
  404. +
  405. +/* EDMA Queue ID to Ring ID configuration. */
  406. +#define EDMA_QID2RID_NUM_PER_REG 4
  407. +
  408. +static int rx_queues[] = {0, 8, 16, 24};
  409. +
  410. +static u32 edma_rx_ring_queue_map[][EDMA_MAX_CORE] = {{ 0, 8, 16, 24 },
  411. + { 1, 9, 17, 25 },
  412. + { 2, 10, 18, 26 },
  413. + { 3, 11, 19, 27 },
  414. + { 4, 12, 20, 28 },
  415. + { 5, 13, 21, 29 },
  416. + { 6, 14, 22, 30 },
  417. + { 7, 15, 23, 31 }};
  418. +
  419. +static int edma_cfg_rx_desc_rings_reset_queue_mapping(void)
  420. +{
  421. + struct edma_hw_info *hw_info = edma_ctx->hw_info;
  422. + struct edma_ring_info *rx = hw_info->rx;
  423. + u32 i, ret;
  424. +
  425. + for (i = 0; i < rx->num_rings; i++) {
  426. + struct edma_rxdesc_ring *rxdesc_ring;
  427. +
  428. + rxdesc_ring = &edma_ctx->rx_rings[i];
  429. +
  430. + ret = ppe_edma_ring_to_queues_config(edma_ctx->ppe_dev, rxdesc_ring->ring_id,
  431. + ARRAY_SIZE(rx_queues), rx_queues);
  432. + if (ret) {
  433. + pr_err("Error in unmapping rxdesc ring %d to PPE queue mapping to disable its backpressure configuration\n",
  434. + i);
  435. + return ret;
  436. + }
  437. + }
  438. +
  439. + return 0;
  440. +}
  441. +
  442. +static int edma_cfg_rx_desc_ring_reset_queue_priority(u32 rxdesc_ring_idx)
  443. +{
  444. + u32 i, queue_id, ret;
  445. +
  446. + for (i = 0; i < EDMA_MAX_PRI_PER_CORE; i++) {
  447. + queue_id = edma_rx_ring_queue_map[i][rxdesc_ring_idx];
  448. +
  449. + ret = ppe_queue_priority_set(edma_ctx->ppe_dev, queue_id, i);
  450. + if (ret) {
  451. + pr_err("Error in resetting %u queue's priority\n",
  452. + queue_id);
  453. + return ret;
  454. + }
  455. + }
  456. +
  457. + return 0;
  458. +}
  459. +
  460. +static int edma_cfg_rx_desc_ring_reset_queue_config(void)
  461. +{
  462. + struct edma_hw_info *hw_info = edma_ctx->hw_info;
  463. + struct edma_ring_info *rx = hw_info->rx;
  464. + u32 i, ret;
  465. +
  466. + if (unlikely(rx->num_rings > num_possible_cpus())) {
  467. + pr_err("Invalid count of rxdesc rings: %d\n",
  468. + rx->num_rings);
  469. + return -EINVAL;
  470. + }
  471. +
  472. + /* Unmap Rxdesc ring to PPE queue mapping */
  473. + ret = edma_cfg_rx_desc_rings_reset_queue_mapping();
  474. + if (ret) {
  475. + pr_err("Error in resetting Rx desc ring backpressure config\n");
  476. + return ret;
  477. + }
  478. +
  479. + /* Reset the priority for PPE queues mapped to Rx rings */
  480. + for (i = 0; i < rx->num_rings; i++) {
  481. + ret = edma_cfg_rx_desc_ring_reset_queue_priority(i);
  482. + if (ret) {
  483. + pr_err("Error in resetting ring:%d queue's priority\n",
  484. + i + rx->ring_start);
  485. + return ret;
  486. + }
  487. + }
  488. +
  489. + return 0;
  490. +}
  491. +
  492. +static int edma_cfg_rx_desc_ring_to_queue_mapping(void)
  493. +{
  494. + struct edma_hw_info *hw_info = edma_ctx->hw_info;
  495. + struct edma_ring_info *rx = hw_info->rx;
  496. + u32 i;
  497. + int ret;
  498. +
  499. + /* Rxdesc ring to PPE queue mapping */
  500. + for (i = 0; i < rx->num_rings; i++) {
  501. + struct edma_rxdesc_ring *rxdesc_ring;
  502. +
  503. + rxdesc_ring = &edma_ctx->rx_rings[i];
  504. +
  505. + ret = ppe_edma_ring_to_queues_config(edma_ctx->ppe_dev,
  506. + rxdesc_ring->ring_id,
  507. + ARRAY_SIZE(rx_queues), rx_queues);
  508. + if (ret) {
  509. + pr_err("Error in configuring Rx ring to PPE queue mapping, ret: %d, id: %d\n",
  510. + ret, rxdesc_ring->ring_id);
  511. + if (edma_cfg_rx_desc_rings_reset_queue_mapping())
  512. + pr_err("Error in resetting Rx desc ring backpressure configurations\n");
  513. +
  514. + return ret;
  515. + }
  516. +
  517. + pr_debug("Rx desc ring %d to PPE queue mapping for backpressure:\n",
  518. + rxdesc_ring->ring_id);
  519. + }
  520. +
  521. + return 0;
  522. +}
  523. +
  524. +static void edma_cfg_rx_desc_ring_configure(struct edma_rxdesc_ring *rxdesc_ring)
  525. +{
  526. + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
  527. + struct regmap *regmap = ppe_dev->regmap;
  528. + u32 data, reg;
  529. +
  530. + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_BA(rxdesc_ring->ring_id);
  531. + regmap_write(regmap, reg, (u32)(rxdesc_ring->pdma & EDMA_RXDESC_BA_MASK));
  532. +
  533. + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_PREHEADER_BA(rxdesc_ring->ring_id);
  534. + regmap_write(regmap, reg, (u32)(rxdesc_ring->sdma & EDMA_RXDESC_PREHEADER_BA_MASK));
  535. +
  536. + data = rxdesc_ring->count & EDMA_RXDESC_RING_SIZE_MASK;
  537. + data |= (EDMA_RXDESC_PL_DEFAULT_VALUE & EDMA_RXDESC_PL_OFFSET_MASK)
  538. + << EDMA_RXDESC_PL_OFFSET_SHIFT;
  539. + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_RING_SIZE(rxdesc_ring->ring_id);
  540. + regmap_write(regmap, reg, data);
  541. +
  542. + /* Configure the Mitigation timer */
  543. + data = EDMA_MICROSEC_TO_TIMER_UNIT(EDMA_RX_MITIGATION_TIMER_DEF,
  544. + ppe_dev->clk_rate / MHZ);
  545. + data = ((data & EDMA_RX_MOD_TIMER_INIT_MASK)
  546. + << EDMA_RX_MOD_TIMER_INIT_SHIFT);
  547. + pr_debug("EDMA Rx mitigation timer value: %d\n", data);
  548. + reg = EDMA_BASE_OFFSET + EDMA_REG_RX_MOD_TIMER(rxdesc_ring->ring_id);
  549. + regmap_write(regmap, reg, data);
  550. +
  551. + /* Configure the Mitigation packet count */
  552. + data = (EDMA_RX_MITIGATION_PKT_CNT_DEF & EDMA_RXDESC_LOW_THRE_MASK)
  553. + << EDMA_RXDESC_LOW_THRE_SHIFT;
  554. + pr_debug("EDMA Rx mitigation packet count value: %d\n", data);
  555. + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_UGT_THRE(rxdesc_ring->ring_id);
  556. + regmap_write(regmap, reg, data);
  557. +
  558. + /* Enable ring. Set ret mode to 'opaque'. */
  559. + reg = EDMA_BASE_OFFSET + EDMA_REG_RX_INT_CTRL(rxdesc_ring->ring_id);
  560. + regmap_write(regmap, reg, EDMA_RX_NE_INT_EN);
  561. +}
  562. +
  563. +static void edma_cfg_rx_qid_to_rx_desc_ring_mapping(void)
  564. +{
  565. + u32 desc_index, ring_index, reg_index, data, q_id;
  566. + struct edma_hw_info *hw_info = edma_ctx->hw_info;
  567. + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
  568. + struct regmap *regmap = ppe_dev->regmap;
  569. + struct edma_ring_info *rx = hw_info->rx;
  570. + u32 mcast_start, mcast_end, reg;
  571. + int ret;
  572. +
  573. + desc_index = (rx->ring_start & EDMA_RX_RING_ID_MASK);
  574. +
  575. + /* Here map all the queues to ring. */
  576. + for (q_id = EDMA_RX_QUEUE_START;
  577. + q_id <= EDMA_CPU_PORT_QUEUE_MAX(EDMA_RX_QUEUE_START);
  578. + q_id += EDMA_QID2RID_NUM_PER_REG) {
  579. + reg_index = q_id / EDMA_QID2RID_NUM_PER_REG;
  580. + ring_index = desc_index + EDMA_QUEUE_OFFSET(q_id);
  581. +
  582. + data = FIELD_PREP(EDMA_RX_RING_ID_QUEUE0_MASK, ring_index);
  583. + data |= FIELD_PREP(EDMA_RX_RING_ID_QUEUE1_MASK, ring_index);
  584. + data |= FIELD_PREP(EDMA_RX_RING_ID_QUEUE2_MASK, ring_index);
  585. + data |= FIELD_PREP(EDMA_RX_RING_ID_QUEUE3_MASK, ring_index);
  586. +
  587. + reg = EDMA_BASE_OFFSET + EDMA_QID2RID_TABLE_MEM(reg_index);
  588. + regmap_write(regmap, reg, data);
  589. + pr_debug("Configure QID2RID: %d reg:0x%x to 0x%x, desc_index: %d, reg_index: %d\n",
  590. + q_id, EDMA_QID2RID_TABLE_MEM(reg_index), data, desc_index, reg_index);
  591. + }
  592. +
  593. + ret = ppe_edma_queue_resource_get(edma_ctx->ppe_dev, PPE_RES_MCAST,
  594. + &mcast_start, &mcast_end);
  595. + if (ret < 0) {
  596. + pr_err("Error in extracting multicast queue values\n");
  597. + return;
  598. + }
  599. +
  600. + /* Map multicast queues to the first Rx ring. */
  601. + desc_index = (rx->ring_start & EDMA_RX_RING_ID_MASK);
  602. + for (q_id = mcast_start; q_id <= mcast_end;
  603. + q_id += EDMA_QID2RID_NUM_PER_REG) {
  604. + reg_index = q_id / EDMA_QID2RID_NUM_PER_REG;
  605. +
  606. + data = FIELD_PREP(EDMA_RX_RING_ID_QUEUE0_MASK, desc_index);
  607. + data |= FIELD_PREP(EDMA_RX_RING_ID_QUEUE1_MASK, desc_index);
  608. + data |= FIELD_PREP(EDMA_RX_RING_ID_QUEUE2_MASK, desc_index);
  609. + data |= FIELD_PREP(EDMA_RX_RING_ID_QUEUE3_MASK, desc_index);
  610. +
  611. + reg = EDMA_BASE_OFFSET + EDMA_QID2RID_TABLE_MEM(reg_index);
  612. + regmap_write(regmap, reg, data);
  613. +
  614. + pr_debug("Configure QID2RID: %d reg:0x%x to 0x%x\n",
  615. + q_id, EDMA_QID2RID_TABLE_MEM(reg_index), data);
  616. + }
  617. +}
  618. +
  619. +static void edma_cfg_rx_rings_to_rx_fill_mapping(void)
  620. +{
  621. + struct edma_hw_info *hw_info = edma_ctx->hw_info;
  622. + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
  623. + struct regmap *regmap = ppe_dev->regmap;
  624. + struct edma_ring_info *rx = hw_info->rx;
  625. + u32 i, data, reg;
  626. +
  627. + regmap_write(regmap, EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_0_ADDR, 0);
  628. + regmap_write(regmap, EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_1_ADDR, 0);
  629. + regmap_write(regmap, EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_2_ADDR, 0);
  630. +
  631. + for (i = 0; i < rx->num_rings; i++) {
  632. + struct edma_rxdesc_ring *rxdesc_ring = &edma_ctx->rx_rings[i];
  633. + u32 data, reg, ring_id;
  634. +
  635. + ring_id = rxdesc_ring->ring_id;
  636. + if (ring_id <= 9)
  637. + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_0_ADDR;
  638. + else if (ring_id >= 10 && ring_id <= 19)
  639. + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_1_ADDR;
  640. + else
  641. + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_2_ADDR;
  642. +
  643. + pr_debug("Configure RXDESC:%u to use RXFILL:%u\n",
  644. + ring_id,
  645. + rxdesc_ring->rxfill->ring_id);
  646. +
  647. + /* Set the Rx fill ring number in the mapping register. */
  648. + regmap_read(regmap, reg, &data);
  649. + data |= (rxdesc_ring->rxfill->ring_id &
  650. + EDMA_RXDESC2FILL_MAP_RXDESC_MASK) <<
  651. + ((ring_id % 10) * 3);
  652. + regmap_write(regmap, reg, data);
  653. + }
  654. +
  655. + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_0_ADDR;
  656. + regmap_read(regmap, reg, &data);
  657. + pr_debug("EDMA_REG_RXDESC2FILL_MAP_0_ADDR: 0x%x\n", data);
  658. +
  659. + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_1_ADDR;
  660. + regmap_read(regmap, reg, &data);
  661. + pr_debug("EDMA_REG_RXDESC2FILL_MAP_1_ADDR: 0x%x\n", data);
  662. +
  663. + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC2FILL_MAP_2_ADDR;
  664. + regmap_read(regmap, reg, &data);
  665. + pr_debug("EDMA_REG_RXDESC2FILL_MAP_2_ADDR: 0x%x\n", data);
  666. +}
  667. +
  668. +/**
  669. + * edma_cfg_rx_rings_enable - Enable Rx and Rxfill rings
  670. + *
  671. + * Enable Rx and Rxfill rings.
  672. + */
  673. +void edma_cfg_rx_rings_enable(void)
  674. +{
  675. + struct edma_hw_info *hw_info = edma_ctx->hw_info;
  676. + struct edma_ring_info *rxfill = hw_info->rxfill;
  677. + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
  678. + struct regmap *regmap = ppe_dev->regmap;
  679. + struct edma_ring_info *rx = hw_info->rx;
  680. + u32 i, reg;
  681. +
  682. + /* Enable Rx rings */
  683. + for (i = rx->ring_start; i < rx->ring_start + rx->num_rings; i++) {
  684. + u32 data;
  685. +
  686. + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_CTRL(i);
  687. + regmap_read(regmap, reg, &data);
  688. + data |= EDMA_RXDESC_RX_EN;
  689. + regmap_write(regmap, reg, data);
  690. + }
  691. +
  692. + for (i = rxfill->ring_start; i < rxfill->ring_start + rxfill->num_rings; i++) {
  693. + u32 data;
  694. +
  695. + reg = EDMA_BASE_OFFSET + EDMA_REG_RXFILL_RING_EN(i);
  696. + regmap_read(regmap, reg, &data);
  697. + data |= EDMA_RXFILL_RING_EN;
  698. + regmap_write(regmap, reg, data);
  699. + }
  700. +}
  701. +
  702. +/**
  703. + * edma_cfg_rx_rings_disable - Disable Rx and Rxfill rings
  704. + *
  705. + * Disable Rx and Rxfill rings.
  706. + */
  707. +void edma_cfg_rx_rings_disable(void)
  708. +{
  709. + struct edma_hw_info *hw_info = edma_ctx->hw_info;
  710. + struct edma_ring_info *rxfill = hw_info->rxfill;
  711. + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
  712. + struct regmap *regmap = ppe_dev->regmap;
  713. + struct edma_ring_info *rx = hw_info->rx;
  714. + u32 i, reg;
  715. +
  716. + /* Disable Rx rings */
  717. + for (i = 0; i < rx->num_rings; i++) {
  718. + struct edma_rxdesc_ring *rxdesc_ring = NULL;
  719. + u32 data;
  720. +
  721. + rxdesc_ring = &edma_ctx->rx_rings[i];
  722. + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_CTRL(rxdesc_ring->ring_id);
  723. + regmap_read(regmap, reg, &data);
  724. + data &= ~EDMA_RXDESC_RX_EN;
  725. + regmap_write(regmap, reg, data);
  726. + }
  727. +
  728. + /* Disable RxFill Rings */
  729. + for (i = 0; i < rxfill->num_rings; i++) {
  730. + struct edma_rxfill_ring *rxfill_ring = NULL;
  731. + u32 data;
  732. +
  733. + rxfill_ring = &edma_ctx->rxfill_rings[i];
  734. + reg = EDMA_BASE_OFFSET + EDMA_REG_RXFILL_RING_EN(rxfill_ring->ring_id);
  735. + regmap_read(regmap, reg, &data);
  736. + data &= ~EDMA_RXFILL_RING_EN;
  737. + regmap_write(regmap, reg, data);
  738. + }
  739. +}
  740. +
  741. +/**
  742. + * edma_cfg_rx_ring_mappings - Set up Rx ring mappings
  743. + *
  744. + * Set up the queue ID to Rx descriptor ring and the Rx descriptor to Rx fill ring mappings.
  745. + */
  746. +void edma_cfg_rx_ring_mappings(void)
  747. +{
  748. + edma_cfg_rx_qid_to_rx_desc_ring_mapping();
  749. + edma_cfg_rx_rings_to_rx_fill_mapping();
  750. +}
  751. +
  752. +static void edma_cfg_rx_fill_ring_cleanup(struct edma_rxfill_ring *rxfill_ring)
  753. +{
  754. + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
  755. + struct regmap *regmap = ppe_dev->regmap;
  756. + struct device *dev = ppe_dev->dev;
  757. + u16 cons_idx, curr_idx;
  758. + u32 data, reg;
  759. +
  760. + /* Get RxFill ring producer index */
  761. + curr_idx = rxfill_ring->prod_idx & EDMA_RXFILL_PROD_IDX_MASK;
  762. +
  763. + /* Get RxFill ring consumer index */
  764. + reg = EDMA_BASE_OFFSET + EDMA_REG_RXFILL_CONS_IDX(rxfill_ring->ring_id);
  765. + regmap_read(regmap, reg, &data);
  766. + cons_idx = data & EDMA_RXFILL_CONS_IDX_MASK;
  767. +
  768. + while (curr_idx != cons_idx) {
  769. + struct edma_rxfill_desc *rxfill_desc;
  770. + struct sk_buff *skb;
  771. +
  772. + /* Get RxFill descriptor */
  773. + rxfill_desc = EDMA_RXFILL_DESC(rxfill_ring, cons_idx);
  774. +
  775. + cons_idx = (cons_idx + 1) & EDMA_RX_RING_SIZE_MASK;
  776. +
  777. + /* Get skb from opaque */
  778. + skb = (struct sk_buff *)EDMA_RXFILL_OPAQUE_GET(rxfill_desc);
  779. + if (unlikely(!skb)) {
  780. + pr_err("Empty skb reference at index:%d\n",
  781. + cons_idx);
  782. + continue;
  783. + }
  784. +
  785. + dev_kfree_skb_any(skb);
  786. + }
  787. +
  788. + /* Free RxFill ring descriptors */
  789. + dma_free_coherent(dev, (sizeof(struct edma_rxfill_desc)
  790. + * rxfill_ring->count),
  791. + rxfill_ring->desc, rxfill_ring->dma);
  792. + rxfill_ring->desc = NULL;
  793. + rxfill_ring->dma = (dma_addr_t)0;
  794. +}
  795. +
  796. +static int edma_cfg_rx_fill_ring_dma_alloc(struct edma_rxfill_ring *rxfill_ring)
  797. +{
  798. + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
  799. + struct device *dev = ppe_dev->dev;
  800. +
  801. + /* Allocate RxFill ring descriptors */
  802. + rxfill_ring->desc = dma_alloc_coherent(dev, (sizeof(struct edma_rxfill_desc)
  803. + * rxfill_ring->count),
  804. + &rxfill_ring->dma,
  805. + GFP_KERNEL | __GFP_ZERO);
  806. + if (unlikely(!rxfill_ring->desc))
  807. + return -ENOMEM;
  808. +
  809. + return 0;
  810. +}
  811. +
  812. +static int edma_cfg_rx_desc_ring_dma_alloc(struct edma_rxdesc_ring *rxdesc_ring)
  813. +{
  814. + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
  815. + struct device *dev = ppe_dev->dev;
  816. +
  817. + rxdesc_ring->pdesc = dma_alloc_coherent(dev, (sizeof(struct edma_rxdesc_pri)
  818. + * rxdesc_ring->count),
  819. + &rxdesc_ring->pdma, GFP_KERNEL | __GFP_ZERO);
  820. + if (unlikely(!rxdesc_ring->pdesc))
  821. + return -ENOMEM;
  822. +
  823. + rxdesc_ring->sdesc = dma_alloc_coherent(dev, (sizeof(struct edma_rxdesc_sec)
  824. + * rxdesc_ring->count),
  825. + &rxdesc_ring->sdma, GFP_KERNEL | __GFP_ZERO);
  826. + if (unlikely(!rxdesc_ring->sdesc)) {
  827. + dma_free_coherent(dev, (sizeof(struct edma_rxdesc_pri)
  828. + * rxdesc_ring->count),
  829. + rxdesc_ring->pdesc,
  830. + rxdesc_ring->pdma);
  831. + rxdesc_ring->pdesc = NULL;
  832. + rxdesc_ring->pdma = (dma_addr_t)0;
  833. + return -ENOMEM;
  834. + }
  835. +
  836. + return 0;
  837. +}
  838. +
  839. +static void edma_cfg_rx_desc_ring_cleanup(struct edma_rxdesc_ring *rxdesc_ring)
  840. +{
  841. + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
  842. + struct regmap *regmap = ppe_dev->regmap;
  843. + struct device *dev = ppe_dev->dev;
  844. + u32 prod_idx, cons_idx, reg;
  845. +
  846. + /* Get Rxdesc consumer & producer indices */
  847. + cons_idx = rxdesc_ring->cons_idx & EDMA_RXDESC_CONS_IDX_MASK;
  848. +
  849. + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_PROD_IDX(rxdesc_ring->ring_id);
  850. + regmap_read(regmap, reg, &prod_idx);
  851. + prod_idx = prod_idx & EDMA_RXDESC_PROD_IDX_MASK;
  852. +
  853. + /* Free any buffers assigned to any descriptors */
  854. + while (cons_idx != prod_idx) {
  855. + struct edma_rxdesc_pri *rxdesc_pri =
  856. + EDMA_RXDESC_PRI_DESC(rxdesc_ring, cons_idx);
  857. + struct sk_buff *skb;
  858. +
  859. + /* Update consumer index */
  860. + cons_idx = (cons_idx + 1) & EDMA_RX_RING_SIZE_MASK;
  861. +
  862. + /* Get opaque from Rxdesc */
  863. + skb = (struct sk_buff *)EDMA_RXDESC_OPAQUE_GET(rxdesc_pri);
  864. + if (unlikely(!skb)) {
  865. + pr_warn("Empty skb reference at index:%d\n",
  866. + cons_idx);
  867. + continue;
  868. + }
  869. +
  870. + dev_kfree_skb_any(skb);
  871. + }
  872. +
  873. + /* Update the consumer index */
  874. + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_CONS_IDX(rxdesc_ring->ring_id);
  875. + regmap_write(regmap, reg, cons_idx);
  876. +
  877. + /* Free Rxdesc ring descriptor */
  878. + dma_free_coherent(dev, (sizeof(struct edma_rxdesc_pri)
  879. + * rxdesc_ring->count), rxdesc_ring->pdesc,
  880. + rxdesc_ring->pdma);
  881. + rxdesc_ring->pdesc = NULL;
  882. + rxdesc_ring->pdma = (dma_addr_t)0;
  883. +
  884. + /* Free secondary Rx descriptor ring */
  885. + dma_free_coherent(dev, (sizeof(struct edma_rxdesc_sec)
  886. + * rxdesc_ring->count), rxdesc_ring->sdesc,
  887. + rxdesc_ring->sdma);
  888. + rxdesc_ring->sdesc = NULL;
  889. + rxdesc_ring->sdma = (dma_addr_t)0;
  890. +}
  891. +
  892. +static int edma_cfg_rx_rings_setup(void)
  893. +{
  894. + struct edma_hw_info *hw_info = edma_ctx->hw_info;
  895. + struct edma_ring_info *rxfill = hw_info->rxfill;
  896. + struct edma_ring_info *rx = hw_info->rx;
  897. + u32 ring_idx, alloc_size, buf_len;
  898. +
  899. + /* Set buffer allocation size */
  900. + if (edma_ctx->rx_buf_size) {
  901. + alloc_size = edma_ctx->rx_buf_size +
  902. + EDMA_RX_SKB_HEADROOM + NET_IP_ALIGN;
  903. + buf_len = alloc_size - EDMA_RX_SKB_HEADROOM - NET_IP_ALIGN;
  904. + } else if (edma_ctx->rx_page_mode) {
  905. + alloc_size = EDMA_RX_PAGE_MODE_SKB_SIZE +
  906. + EDMA_RX_SKB_HEADROOM + NET_IP_ALIGN;
  907. + buf_len = PAGE_SIZE;
  908. + } else {
  909. + alloc_size = EDMA_RX_BUFFER_SIZE;
  910. + buf_len = alloc_size - EDMA_RX_SKB_HEADROOM - NET_IP_ALIGN;
  911. + }
  912. +
  913. + pr_debug("EDMA ctx:%p rx_ring alloc_size=%d, buf_len=%d\n",
  914. + edma_ctx, alloc_size, buf_len);
  915. +
  916. + /* Allocate Rx fill ring descriptors */
  917. + for (ring_idx = 0; ring_idx < rxfill->num_rings; ring_idx++) {
  918. + u32 ret;
  919. + struct edma_rxfill_ring *rxfill_ring = NULL;
  920. +
  921. + rxfill_ring = &edma_ctx->rxfill_rings[ring_idx];
  922. + rxfill_ring->count = EDMA_RX_RING_SIZE;
  923. + rxfill_ring->ring_id = rxfill->ring_start + ring_idx;
  924. + rxfill_ring->alloc_size = alloc_size;
  925. + rxfill_ring->buf_len = buf_len;
  926. + rxfill_ring->page_mode = edma_ctx->rx_page_mode;
  927. +
  928. + ret = edma_cfg_rx_fill_ring_dma_alloc(rxfill_ring);
  929. + if (ret) {
  930. + pr_err("Error in setting up %d rxfill ring. ret: %d",
  931. + rxfill_ring->ring_id, ret);
  932. + while (ring_idx--)
  933. + edma_cfg_rx_fill_ring_cleanup(&edma_ctx->rxfill_rings[ring_idx]);
  934. +
  935. + return -ENOMEM;
  936. + }
  937. + }
  938. +
  939. + /* Allocate RxDesc ring descriptors */
  940. + for (ring_idx = 0; ring_idx < rx->num_rings; ring_idx++) {
  941. + u32 index, queue_id = EDMA_RX_QUEUE_START;
  942. + struct edma_rxdesc_ring *rxdesc_ring = NULL;
  943. + u32 ret;
  944. +
  945. + rxdesc_ring = &edma_ctx->rx_rings[ring_idx];
  946. + rxdesc_ring->count = EDMA_RX_RING_SIZE;
  947. + rxdesc_ring->ring_id = rx->ring_start + ring_idx;
  948. +
  949. + if (queue_id > EDMA_CPU_PORT_QUEUE_MAX(EDMA_RX_QUEUE_START)) {
  950. + pr_err("Invalid queue_id: %d\n", queue_id);
  951. + while (ring_idx--)
  952. + edma_cfg_rx_desc_ring_cleanup(&edma_ctx->rx_rings[ring_idx]);
  953. +
  954. + goto rxdesc_mem_alloc_fail;
  955. + }
  956. +
  957. + /* Create a mapping between Rx descriptor ring and Rx fill ring.
  958. + * The number of fill rings is less than the number of descriptor
  959. + * rings, so the fill rings are shared across descriptor rings.
  960. + */
  961. + index = rxfill->ring_start +
  962. + (ring_idx % rxfill->num_rings);
  963. + rxdesc_ring->rxfill = &edma_ctx->rxfill_rings[index
  964. + - rxfill->ring_start];
  965. +
  966. + ret = edma_cfg_rx_desc_ring_dma_alloc(rxdesc_ring);
  967. + if (ret) {
  968. + pr_err("Error in setting up %d rxdesc ring. ret: %d",
  969. + rxdesc_ring->ring_id, ret);
  970. + while (ring_idx--)
  971. + edma_cfg_rx_desc_ring_cleanup(&edma_ctx->rx_rings[ring_idx]);
  972. +
  973. + goto rxdesc_mem_alloc_fail;
  974. + }
  975. + }
  976. +
  977. + pr_debug("Rx descriptor count for Rx desc and Rx fill rings : %d\n",
  978. + EDMA_RX_RING_SIZE);
  979. +
  980. + return 0;
  981. +
  982. +rxdesc_mem_alloc_fail:
  983. + for (ring_idx = 0; ring_idx < rxfill->num_rings; ring_idx++)
  984. + edma_cfg_rx_fill_ring_cleanup(&edma_ctx->rxfill_rings[ring_idx]);
  985. +
  986. + return -ENOMEM;
  987. +}
  988. +
  989. +/**
  990. + * edma_cfg_rx_buff_size_setup - Configure EDMA Rx jumbo buffer
  991. + *
  992. + * Configure EDMA Rx jumbo buffer
  993. + */
  994. +void edma_cfg_rx_buff_size_setup(void)
  995. +{
  996. + if (edma_ctx->rx_buf_size) {
  997. + edma_ctx->rx_page_mode = false;
  998. + pr_debug("Rx Jumbo mru is enabled: %d\n", edma_ctx->rx_buf_size);
  999. + }
  1000. +}
  1001. +
  1002. +/**
  1003. + * edma_cfg_rx_rings_alloc - Allocate EDMA Rx rings
  1004. + *
  1005. + * Allocate EDMA Rx rings.
  1006. + *
  1007. + * Return 0 on success, negative error code on failure.
  1008. + */
  1009. +int edma_cfg_rx_rings_alloc(void)
  1010. +{
  1011. + struct edma_hw_info *hw_info = edma_ctx->hw_info;
  1012. + struct edma_ring_info *rxfill = hw_info->rxfill;
  1013. + struct edma_ring_info *rx = hw_info->rx;
  1014. + int ret;
  1015. +
  1016. + edma_ctx->rxfill_rings = kzalloc((sizeof(*edma_ctx->rxfill_rings) *
  1017. + rxfill->num_rings),
  1018. + GFP_KERNEL);
  1019. + if (!edma_ctx->rxfill_rings)
  1020. + return -ENOMEM;
  1021. +
  1022. + edma_ctx->rx_rings = kzalloc((sizeof(*edma_ctx->rx_rings) *
  1023. + rx->num_rings),
  1024. + GFP_KERNEL);
  1025. + if (!edma_ctx->rx_rings)
  1026. + goto rxdesc_ring_alloc_fail;
  1027. +
  1028. + pr_debug("RxDesc:%u rx (%u-%u) RxFill:%u (%u-%u)\n",
  1029. + rx->num_rings, rx->ring_start,
  1030. + (rx->ring_start + rx->num_rings - 1),
  1031. + rxfill->num_rings, rxfill->ring_start,
  1032. + (rxfill->ring_start + rxfill->num_rings - 1));
  1033. +
  1034. + if (edma_cfg_rx_rings_setup()) {
  1035. + pr_err("Error in setting up Rx rings\n");
  1036. + goto rx_rings_setup_fail;
  1037. + }
  1038. +
  1039. + /* Reset Rx descriptor ring mapped queue's configurations */
  1040. + ret = edma_cfg_rx_desc_ring_reset_queue_config();
  1041. + if (ret) {
  1042. + pr_err("Error in resetting the Rx descriptor rings configurations\n");
  1043. + edma_cfg_rx_rings_cleanup();
  1044. + return ret;
  1045. + }
  1046. +
  1047. + return 0;
  1048. +
  1049. +rx_rings_setup_fail:
  1050. + kfree(edma_ctx->rx_rings);
  1051. + edma_ctx->rx_rings = NULL;
  1052. +rxdesc_ring_alloc_fail:
  1053. + kfree(edma_ctx->rxfill_rings);
  1054. + edma_ctx->rxfill_rings = NULL;
  1055. +
  1056. + return -ENOMEM;
  1057. +}
  1058. +
  1059. +/**
  1060. + * edma_cfg_rx_rings_cleanup - Cleanup EDMA Rx rings
  1061. + *
  1062. + * Cleanup EDMA Rx rings
  1063. + */
  1064. +void edma_cfg_rx_rings_cleanup(void)
  1065. +{
  1066. + struct edma_hw_info *hw_info = edma_ctx->hw_info;
  1067. + struct edma_ring_info *rxfill = hw_info->rxfill;
  1068. + struct edma_ring_info *rx = hw_info->rx;
  1069. + u32 i;
  1070. +
  1071. + /* Free RxFill ring descriptors */
  1072. + for (i = 0; i < rxfill->num_rings; i++)
  1073. + edma_cfg_rx_fill_ring_cleanup(&edma_ctx->rxfill_rings[i]);
  1074. +
  1075. + /* Free Rx completion ring descriptors */
  1076. + for (i = 0; i < rx->num_rings; i++)
  1077. + edma_cfg_rx_desc_ring_cleanup(&edma_ctx->rx_rings[i]);
  1078. +
  1079. + kfree(edma_ctx->rxfill_rings);
  1080. + kfree(edma_ctx->rx_rings);
  1081. + edma_ctx->rxfill_rings = NULL;
  1082. + edma_ctx->rx_rings = NULL;
  1083. +}
  1084. +
  1085. +static void edma_cfg_rx_fill_ring_configure(struct edma_rxfill_ring *rxfill_ring)
  1086. +{
  1087. + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
  1088. + struct regmap *regmap = ppe_dev->regmap;
  1089. + u32 ring_sz, reg;
  1090. +
  1091. + reg = EDMA_BASE_OFFSET + EDMA_REG_RXFILL_BA(rxfill_ring->ring_id);
  1092. + regmap_write(regmap, reg, (u32)(rxfill_ring->dma & EDMA_RING_DMA_MASK));
  1093. +
  1094. + ring_sz = rxfill_ring->count & EDMA_RXFILL_RING_SIZE_MASK;
  1095. + reg = EDMA_BASE_OFFSET + EDMA_REG_RXFILL_RING_SIZE(rxfill_ring->ring_id);
  1096. + regmap_write(regmap, reg, ring_sz);
  1097. +
  1098. + edma_rx_alloc_buffer(rxfill_ring, rxfill_ring->count - 1);
  1099. +}
  1100. +
  1101. +static void edma_cfg_rx_desc_ring_flow_control(u32 threshold_xoff, u32 threshold_xon)
  1102. +{
  1103. + struct edma_hw_info *hw_info = edma_ctx->hw_info;
  1104. + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
  1105. + struct regmap *regmap = ppe_dev->regmap;
  1106. + struct edma_ring_info *rx = hw_info->rx;
  1107. + u32 data, i, reg;
  1108. +
  1109. + data = (threshold_xoff & EDMA_RXDESC_FC_XOFF_THRE_MASK) << EDMA_RXDESC_FC_XOFF_THRE_SHIFT;
  1110. + data |= ((threshold_xon & EDMA_RXDESC_FC_XON_THRE_MASK) << EDMA_RXDESC_FC_XON_THRE_SHIFT);
  1111. +
  1112. + for (i = 0; i < rx->num_rings; i++) {
  1113. + struct edma_rxdesc_ring *rxdesc_ring;
  1114. +
  1115. + rxdesc_ring = &edma_ctx->rx_rings[i];
  1116. + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_FC_THRE(rxdesc_ring->ring_id);
  1117. + regmap_write(regmap, reg, data);
  1118. + }
  1119. +}
  1120. +
  1121. +static void edma_cfg_rx_fill_ring_flow_control(int threshold_xoff, int threshold_xon)
  1122. +{
  1123. + struct edma_hw_info *hw_info = edma_ctx->hw_info;
  1124. + struct edma_ring_info *rxfill = hw_info->rxfill;
  1125. + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
  1126. + struct regmap *regmap = ppe_dev->regmap;
  1127. + u32 data, i, reg;
  1128. +
  1129. + data = (threshold_xoff & EDMA_RXFILL_FC_XOFF_THRE_MASK) << EDMA_RXFILL_FC_XOFF_THRE_SHIFT;
  1130. + data |= ((threshold_xon & EDMA_RXFILL_FC_XON_THRE_MASK) << EDMA_RXFILL_FC_XON_THRE_SHIFT);
  1131. +
  1132. + for (i = 0; i < rxfill->num_rings; i++) {
  1133. + struct edma_rxfill_ring *rxfill_ring;
  1134. +
  1135. + rxfill_ring = &edma_ctx->rxfill_rings[i];
  1136. + reg = EDMA_BASE_OFFSET + EDMA_REG_RXFILL_FC_THRE(rxfill_ring->ring_id);
  1137. + regmap_write(regmap, reg, data);
  1138. + }
  1139. +}
  1140. +
  1141. +/**
  1142. + * edma_cfg_rx_rings - Configure EDMA Rx rings.
  1143. + *
  1144. + * Configure EDMA Rx rings.
  1145. + */
  1146. +int edma_cfg_rx_rings(void)
  1147. +{
  1148. + struct edma_hw_info *hw_info = edma_ctx->hw_info;
  1149. + struct edma_ring_info *rxfill = hw_info->rxfill;
  1150. + struct edma_ring_info *rx = hw_info->rx;
  1151. + u32 i;
  1152. +
  1153. + for (i = 0; i < rxfill->num_rings; i++)
  1154. + edma_cfg_rx_fill_ring_configure(&edma_ctx->rxfill_rings[i]);
  1155. +
  1156. + for (i = 0; i < rx->num_rings; i++)
  1157. + edma_cfg_rx_desc_ring_configure(&edma_ctx->rx_rings[i]);
  1158. +
  1159. + /* Configure Rx flow control configurations */
  1160. + edma_cfg_rx_desc_ring_flow_control(EDMA_RX_FC_XOFF_DEF, EDMA_RX_FC_XON_DEF);
  1161. + edma_cfg_rx_fill_ring_flow_control(EDMA_RX_FC_XOFF_DEF, EDMA_RX_FC_XON_DEF);
  1162. +
  1163. + return edma_cfg_rx_desc_ring_to_queue_mapping();
  1164. +}
  1165. +
  1166. +/**
  1167. + * edma_cfg_rx_disable_interrupts - Disable EDMA Rx interrupts
  1168. + *
  1169. + * Disable RX interrupt masks
  1170. + */
  1171. +void edma_cfg_rx_disable_interrupts(void)
  1172. +{
  1173. + struct edma_hw_info *hw_info = edma_ctx->hw_info;
  1174. + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
  1175. + struct regmap *regmap = ppe_dev->regmap;
  1176. + struct edma_ring_info *rx = hw_info->rx;
  1177. + u32 i, reg;
  1178. +
  1179. + for (i = 0; i < rx->num_rings; i++) {
  1180. + struct edma_rxdesc_ring *rxdesc_ring =
  1181. + &edma_ctx->rx_rings[i];
  1182. + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->ring_id);
  1183. + regmap_write(regmap, reg, EDMA_MASK_INT_CLEAR);
  1184. + }
  1185. +}
  1186. +
  1187. +/**
  1188. + * edma_cfg_rx_enable_interrupts - Enable EDMA Rx interrupts
  1189. + *
  1190. + * Enable RX interrupt masks
  1191. + */
  1192. +void edma_cfg_rx_enable_interrupts(void)
  1193. +{
  1194. + struct edma_hw_info *hw_info = edma_ctx->hw_info;
  1195. + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
  1196. + struct regmap *regmap = ppe_dev->regmap;
  1197. + struct edma_ring_info *rx = hw_info->rx;
  1198. + u32 i, reg;
  1199. +
  1200. + for (i = 0; i < rx->num_rings; i++) {
  1201. + struct edma_rxdesc_ring *rxdesc_ring =
  1202. + &edma_ctx->rx_rings[i];
  1203. + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->ring_id);
  1204. + regmap_write(regmap, reg, edma_ctx->intr_info.intr_mask_rx);
  1205. + }
  1206. +}
  1207. +
  1208. +/**
  1209. + * edma_cfg_rx_napi_disable - Disable NAPI for Rx
  1210. + *
  1211. + * Disable NAPI for Rx
  1212. + */
  1213. +void edma_cfg_rx_napi_disable(void)
  1214. +{
  1215. + struct edma_hw_info *hw_info = edma_ctx->hw_info;
  1216. + struct edma_ring_info *rx = hw_info->rx;
  1217. + u32 i;
  1218. +
  1219. + for (i = 0; i < rx->num_rings; i++) {
  1220. + struct edma_rxdesc_ring *rxdesc_ring;
  1221. +
  1222. + rxdesc_ring = &edma_ctx->rx_rings[i];
  1223. +
  1224. + if (!rxdesc_ring->napi_added)
  1225. + continue;
  1226. +
  1227. + napi_disable(&rxdesc_ring->napi);
  1228. + }
  1229. +}
  1230. +
  1231. +/**
  1232. + * edma_cfg_rx_napi_enable - Enable NAPI for Rx
  1233. + *
  1234. + * Enable NAPI for Rx
  1235. + */
  1236. +void edma_cfg_rx_napi_enable(void)
  1237. +{
  1238. + struct edma_hw_info *hw_info = edma_ctx->hw_info;
  1239. + struct edma_ring_info *rx = hw_info->rx;
  1240. + u32 i;
  1241. +
  1242. + for (i = 0; i < rx->num_rings; i++) {
  1243. + struct edma_rxdesc_ring *rxdesc_ring;
  1244. +
  1245. + rxdesc_ring = &edma_ctx->rx_rings[i];
  1246. +
  1247. + if (!rxdesc_ring->napi_added)
  1248. + continue;
  1249. +
  1250. + napi_enable(&rxdesc_ring->napi);
  1251. + }
  1252. +}
  1253. +
  1254. +/**
  1255. + * edma_cfg_rx_napi_delete - Delete Rx NAPI
  1256. + *
  1257. + * Delete RX NAPI
  1258. + */
  1259. +void edma_cfg_rx_napi_delete(void)
  1260. +{
  1261. + struct edma_hw_info *hw_info = edma_ctx->hw_info;
  1262. + struct edma_ring_info *rx = hw_info->rx;
  1263. + u32 i;
  1264. +
  1265. + for (i = 0; i < rx->num_rings; i++) {
  1266. + struct edma_rxdesc_ring *rxdesc_ring;
  1267. +
  1268. + rxdesc_ring = &edma_ctx->rx_rings[i];
  1269. +
  1270. + if (!rxdesc_ring->napi_added)
  1271. + continue;
  1272. +
  1273. + netif_napi_del(&rxdesc_ring->napi);
  1274. + rxdesc_ring->napi_added = false;
  1275. + }
  1276. +}
  1277. +
  1278. +/**
  1279. + * edma_cfg_rx_napi_add - Add Rx NAPI
  1280. + *
  1281. + * Add NAPI instances for all Rx descriptor rings
  1282. + * and attach them to the EDMA dummy net device
  1283. + * for polling.
  1284. + */
  1285. +void edma_cfg_rx_napi_add(void)
  1286. +{
  1287. + struct edma_hw_info *hw_info = edma_ctx->hw_info;
  1288. + struct edma_ring_info *rx = hw_info->rx;
  1289. + u32 i;
  1290. +
  1291. + for (i = 0; i < rx->num_rings; i++) {
  1292. + struct edma_rxdesc_ring *rxdesc_ring = &edma_ctx->rx_rings[i];
  1293. +
  1294. + netif_napi_add_weight(edma_ctx->dummy_dev, &rxdesc_ring->napi,
  1295. + edma_rx_napi_poll, hw_info->napi_budget_rx);
  1296. + rxdesc_ring->napi_added = true;
  1297. + }
  1298. +
  1299. + netdev_dbg(edma_ctx->dummy_dev, "Rx NAPI budget: %d\n", hw_info->napi_budget_rx);
  1300. +}
  1301. +
  1302. +/**
  1303. + * edma_cfg_rx_rps_hash_map - Configure Rx RPS hash map
  1304. + *
  1305. + * Initialize and configure RPS hash map for queues
  1306. + */
  1307. +int edma_cfg_rx_rps_hash_map(void)
  1308. +{
  1309. + cpumask_t edma_rps_cpumask = {{EDMA_RX_DEFAULT_BITMAP}};
  1310. + int map_len = 0, idx = 0, ret = 0;
  1311. + u32 q_off = EDMA_RX_QUEUE_START;
  1312. + u32 q_map[EDMA_MAX_CORE] = {0};
  1313. + u32 hash, cpu;
  1314. +
  1315. + /* Map all possible hash values to queues used by the EDMA Rx
  1316. + * rings based on a bitmask, which represents the cores to be mapped.
  1317. + * These queues are expected to be mapped to different Rx rings
  1318. + * which are assigned to different cores using IRQ affinity configuration.
  1319. + */
  1320. + for_each_cpu(cpu, &edma_rps_cpumask) {
  1321. + q_map[map_len] = q_off + (cpu * EDMA_MAX_PRI_PER_CORE);
  1322. + map_len++;
  1323. + }
  1324. +
  1325. + for (hash = 0; hash < PPE_QUEUE_HASH_NUM; hash++) {
  1326. + ret = ppe_edma_queue_offset_config(edma_ctx->ppe_dev,
  1327. + PPE_QUEUE_CLASS_HASH, hash, q_map[idx]);
  1328. + if (ret)
  1329. + return ret;
  1330. +
  1331. + pr_debug("profile_id: %u, hash: %u, q_off: %u\n",
  1332. + EDMA_CPU_PORT_PROFILE_ID, hash, q_map[idx]);
  1333. + idx = (idx + 1) % map_len;
  1334. + }
  1335. +
  1336. + return 0;
  1337. +}
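
The loop in edma_cfg_rx_rps_hash_map() above spreads the PPE hash values round-robin over one Rx queue per enabled core (queue = EDMA_RX_QUEUE_START + core * EDMA_MAX_PRI_PER_CORE). A minimal standalone sketch of that mapping, with hypothetical constants standing in for EDMA_MAX_CORE, EDMA_MAX_PRI_PER_CORE, EDMA_RX_QUEUE_START and PPE_QUEUE_HASH_NUM (an illustration, not part of the patch):

/* Sketch of the hash-to-queue round-robin used by the RPS hash map. */
#include <stdio.h>

#define MAX_CORE	4			/* mirrors EDMA_MAX_CORE */
#define PRI_PER_CORE	8			/* mirrors EDMA_MAX_PRI_PER_CORE */
#define RX_QUEUE_START	0			/* mirrors EDMA_RX_QUEUE_START */
#define QUEUE_HASH_NUM	256			/* mirrors PPE_QUEUE_HASH_NUM */
#define CPU_BITMAP	((1 << MAX_CORE) - 1)	/* all cores enabled */

int main(void)
{
	unsigned int q_map[MAX_CORE] = { 0 };
	unsigned int map_len = 0, idx = 0, cpu, hash;

	/* One queue per enabled core: base + core * priorities-per-core. */
	for (cpu = 0; cpu < MAX_CORE; cpu++)
		if (CPU_BITMAP & (1 << cpu))
			q_map[map_len++] = RX_QUEUE_START + cpu * PRI_PER_CORE;

	/* Spread the hash values round-robin across those queues. */
	for (hash = 0; hash < QUEUE_HASH_NUM; hash++) {
		if (hash < 8)	/* print only the first few mappings */
			printf("hash %u -> queue %u\n", hash, q_map[idx]);
		idx = (idx + 1) % map_len;
	}

	return 0;
}
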
  1338. --- /dev/null
  1339. +++ b/drivers/net/ethernet/qualcomm/ppe/edma_cfg_rx.h
  1340. @@ -0,0 +1,48 @@
  1341. +/* SPDX-License-Identifier: GPL-2.0-only
  1342. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
  1343. + */
  1344. +
  1345. +#ifndef __EDMA_CFG_RX__
  1346. +#define __EDMA_CFG_RX__
  1347. +
  1348. +/* SKB payload size used in page mode */
  1349. +#define EDMA_RX_PAGE_MODE_SKB_SIZE 256
  1350. +
  1351. +/* Rx flow control X-OFF default value */
  1352. +#define EDMA_RX_FC_XOFF_DEF 32
  1353. +
  1354. +/* Rx flow control X-ON default value */
  1355. +#define EDMA_RX_FC_XON_DEF 64
  1356. +
  1357. +/* Rx AC flow control original threshold */
  1358. +#define EDMA_RX_AC_FC_THRE_ORIG 0x190
  1359. +
  1360. +/* Rx AC flow control default threshold */
  1361. +#define EDMA_RX_AC_FC_THRES_DEF 0x104
  1362. +/* Rx mitigation timer's default value in microseconds */
  1363. +#define EDMA_RX_MITIGATION_TIMER_DEF 25
  1364. +
  1365. +/* Rx mitigation packet count's default value */
  1366. +#define EDMA_RX_MITIGATION_PKT_CNT_DEF 16
  1367. +
  1368. +/* Default bitmap of ARM cores used for RPS */
  1369. +#define EDMA_RX_DEFAULT_BITMAP ((1 << EDMA_MAX_CORE) - 1)
  1370. +
  1371. +int edma_cfg_rx_rings(void);
  1372. +int edma_cfg_rx_rings_alloc(void);
  1373. +void edma_cfg_rx_ring_mappings(void);
  1374. +void edma_cfg_rx_rings_cleanup(void);
  1375. +void edma_cfg_rx_disable_interrupts(void);
  1376. +void edma_cfg_rx_enable_interrupts(void);
  1377. +void edma_cfg_rx_napi_disable(void);
  1378. +void edma_cfg_rx_napi_enable(void);
  1379. +void edma_cfg_rx_napi_delete(void);
  1380. +void edma_cfg_rx_napi_add(void);
  1381. +void edma_cfg_rx_mapping(void);
  1382. +void edma_cfg_rx_rings_enable(void);
  1383. +void edma_cfg_rx_rings_disable(void);
  1384. +void edma_cfg_rx_buff_size_setup(void);
  1385. +int edma_cfg_rx_rps_hash_map(void);
  1386. +int edma_cfg_rx_rps(struct ctl_table *table, int write,
  1387. + void *buffer, size_t *lenp, loff_t *ppos);
  1388. +#endif
  1389. --- a/drivers/net/ethernet/qualcomm/ppe/edma_port.c
  1390. +++ b/drivers/net/ethernet/qualcomm/ppe/edma_port.c
  1391. @@ -12,12 +12,39 @@
  1392. #include <linux/printk.h>
  1393. #include "edma.h"
  1394. +#include "edma_cfg_rx.h"
  1395. #include "edma_port.h"
  1396. #include "ppe_regs.h"
  1397. /* Number of netdev queues. */
  1398. #define EDMA_NETDEV_QUEUE_NUM 4
  1399. +static int edma_port_stats_alloc(struct net_device *netdev)
  1400. +{
  1401. + struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
  1402. +
  1403. + if (!port_priv)
  1404. + return -EINVAL;
  1405. +
  1406. + /* Allocate per-cpu stats memory. */
  1407. + port_priv->pcpu_stats.rx_stats =
  1408. + netdev_alloc_pcpu_stats(struct edma_port_rx_stats);
  1409. + if (!port_priv->pcpu_stats.rx_stats) {
  1410. + netdev_err(netdev, "Per-cpu EDMA Rx stats alloc failed for %s\n",
  1411. + netdev->name);
  1412. + return -ENOMEM;
  1413. + }
  1414. +
  1415. + return 0;
  1416. +}
  1417. +
  1418. +static void edma_port_stats_free(struct net_device *netdev)
  1419. +{
  1420. + struct edma_port_priv *port_priv = (struct edma_port_priv *)netdev_priv(netdev);
  1421. +
  1422. + free_percpu(port_priv->pcpu_stats.rx_stats);
  1423. +}
  1424. +
  1425. static u16 __maybe_unused edma_port_select_queue(__maybe_unused struct net_device *netdev,
  1426. __maybe_unused struct sk_buff *skb,
  1427. __maybe_unused struct net_device *sb_dev)
  1428. @@ -172,6 +199,7 @@ void edma_port_destroy(struct ppe_port *
  1429. int port_id = port->port_id;
  1430. struct net_device *netdev = edma_ctx->netdev_arr[port_id - 1];
  1431. + edma_port_stats_free(netdev);
  1432. unregister_netdev(netdev);
  1433. free_netdev(netdev);
  1434. ppe_port_phylink_destroy(port);
  1435. @@ -232,6 +260,13 @@ int edma_port_setup(struct ppe_port *por
  1436. port_id, netdev->dev_addr);
  1437. }
  1438. + /* Allocate memory for EDMA port statistics. */
  1439. + ret = edma_port_stats_alloc(netdev);
  1440. + if (ret) {
  1441. + netdev_dbg(netdev, "EDMA port stats alloc failed\n");
  1442. + goto stats_alloc_fail;
  1443. + }
  1444. +
  1445. netdev_dbg(netdev, "Configuring the port %s(qcom-id:%d)\n",
  1446. netdev->name, port_id);
  1447. @@ -263,8 +298,10 @@ int edma_port_setup(struct ppe_port *por
  1448. register_netdev_fail:
  1449. ppe_port_phylink_destroy(port);
  1450. port_phylink_setup_fail:
  1451. - free_netdev(netdev);
  1452. edma_ctx->netdev_arr[port_id - 1] = NULL;
  1453. + edma_port_stats_free(netdev);
  1454. +stats_alloc_fail:
  1455. + free_netdev(netdev);
  1456. return ret;
  1457. }
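
The reworked error path in edma_port_setup() follows the usual reverse-order goto unwind: the per-CPU stats allocated after the netdev are freed before free_netdev(), which releases the private area holding the percpu pointer last. A minimal standalone sketch of that label ordering, with hypothetical resources standing in for the netdev, stats and phylink setup (not the driver's code):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the netdev, per-CPU stats and phylink setup. */
static int port_setup_sketch(void)
{
	void *netdev, *stats, *phylink;

	netdev = malloc(64);		/* alloc_netdev() analogue */
	if (!netdev)
		return -1;

	stats = malloc(64);		/* edma_port_stats_alloc() analogue */
	if (!stats)
		goto stats_alloc_fail;

	phylink = malloc(64);		/* phylink + register_netdev analogue */
	if (!phylink)
		goto phylink_setup_fail;

	printf("setup ok\n");
	free(phylink);
	free(stats);
	free(netdev);
	return 0;

phylink_setup_fail:
	free(stats);			/* undo in reverse order of setup */
stats_alloc_fail:
	free(netdev);			/* the netdev (and its priv) goes last */
	return -1;
}

int main(void)
{
	return port_setup_sketch() ? EXIT_FAILURE : EXIT_SUCCESS;
}
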
  1458. --- a/drivers/net/ethernet/qualcomm/ppe/edma_port.h
  1459. +++ b/drivers/net/ethernet/qualcomm/ppe/edma_port.h
  1460. @@ -15,14 +15,45 @@
  1461. | NETIF_F_TSO6)
  1462. /**
  1463. + * struct edma_port_rx_stats - EDMA RX per CPU stats for the port.
  1464. + * @rx_pkts: Number of Rx packets
  1465. + * @rx_bytes: Number of Rx bytes
  1466. + * @rx_drops: Number of Rx drops
  1467. + * @rx_nr_frag_pkts: Number of Rx nr_frags packets
  1468. + * @rx_fraglist_pkts: Number of Rx fraglist packets
  1469. + * @rx_nr_frag_headroom_err: nr_frags headroom error packets
  1470. + * @syncp: Synchronization point for 64-bit stats
  1471. + */
  1472. +struct edma_port_rx_stats {
  1473. + u64 rx_pkts;
  1474. + u64 rx_bytes;
  1475. + u64 rx_drops;
  1476. + u64 rx_nr_frag_pkts;
  1477. + u64 rx_fraglist_pkts;
  1478. + u64 rx_nr_frag_headroom_err;
  1479. + struct u64_stats_sync syncp;
  1480. +};
  1481. +
  1482. +/**
  1483. + * struct edma_port_pcpu_stats - EDMA per cpu stats data structure for the port.
  1484. + * @rx_stats: Per CPU Rx statistics
  1485. + */
  1486. +struct edma_port_pcpu_stats {
  1487. + struct edma_port_rx_stats __percpu *rx_stats;
  1488. +};
  1489. +
  1490. +/**
  1491. * struct edma_port_priv - EDMA port priv structure.
  1492. * @ppe_port: Pointer to PPE port
  1493. * @netdev: Corresponding netdevice
  1494. + * @pcpu_stats: Per CPU netdev statistics
  1496. * @flags: Feature flags
  1497. */
  1498. struct edma_port_priv {
  1499. struct ppe_port *ppe_port;
  1500. struct net_device *netdev;
  1501. + struct edma_port_pcpu_stats pcpu_stats;
  1502. unsigned long flags;
  1503. };
  1504. --- /dev/null
  1505. +++ b/drivers/net/ethernet/qualcomm/ppe/edma_rx.c
  1506. @@ -0,0 +1,622 @@
  1507. +// SPDX-License-Identifier: GPL-2.0-only
  1508. +/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
  1509. + */
  1510. +
  1511. +/* Provides APIs to allocate Rx buffers, reap completed buffers, and
  1512. + * receive and process linear and scatter-gather packets.
  1513. + */
  1514. +
  1515. +#include <linux/dma-mapping.h>
  1516. +#include <linux/etherdevice.h>
  1517. +#include <linux/irqreturn.h>
  1518. +#include <linux/kernel.h>
  1519. +#include <linux/netdevice.h>
  1520. +#include <linux/platform_device.h>
  1521. +#include <linux/printk.h>
  1522. +#include <linux/regmap.h>
  1523. +
  1524. +#include "edma.h"
  1525. +#include "edma_cfg_rx.h"
  1526. +#include "edma_port.h"
  1527. +#include "ppe.h"
  1528. +#include "ppe_regs.h"
  1529. +
  1530. +static int edma_rx_alloc_buffer_list(struct edma_rxfill_ring *rxfill_ring, int alloc_count)
  1531. +{
  1532. + struct edma_rxfill_stats *rxfill_stats = &rxfill_ring->rxfill_stats;
  1533. + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
  1534. + u32 rx_alloc_size = rxfill_ring->alloc_size;
  1535. + struct regmap *regmap = ppe_dev->regmap;
  1536. + bool page_mode = rxfill_ring->page_mode;
  1537. + struct edma_rxfill_desc *rxfill_desc;
  1538. + u32 buf_len = rxfill_ring->buf_len;
  1539. + struct device *dev = ppe_dev->dev;
  1540. + u16 prod_idx, start_idx;
  1541. + u16 num_alloc = 0;
  1542. + u32 reg;
  1543. +
  1544. + prod_idx = rxfill_ring->prod_idx;
  1545. + start_idx = prod_idx;
  1546. +
  1547. + while (likely(alloc_count--)) {
  1548. + dma_addr_t buff_addr;
  1549. + struct sk_buff *skb;
  1550. + struct page *pg;
  1551. +
  1552. + rxfill_desc = EDMA_RXFILL_DESC(rxfill_ring, prod_idx);
  1553. +
  1554. + skb = dev_alloc_skb(rx_alloc_size);
  1555. + if (unlikely(!skb)) {
  1556. + u64_stats_update_begin(&rxfill_stats->syncp);
  1557. + ++rxfill_stats->alloc_failed;
  1558. + u64_stats_update_end(&rxfill_stats->syncp);
  1559. + break;
  1560. + }
  1561. +
  1562. + skb_reserve(skb, EDMA_RX_SKB_HEADROOM + NET_IP_ALIGN);
  1563. +
  1564. + if (likely(!page_mode)) {
  1565. + buff_addr = dma_map_single(dev, skb->data, rx_alloc_size, DMA_FROM_DEVICE);
  1566. + if (dma_mapping_error(dev, buff_addr)) {
  1567. + dev_dbg(dev, "edma_context:%p Unable to dma for non page mode",
  1568. + edma_ctx);
  1569. + dev_kfree_skb_any(skb);
  1570. + break;
  1571. + }
  1572. + } else {
  1573. + pg = alloc_page(GFP_ATOMIC);
  1574. + if (unlikely(!pg)) {
  1575. + u64_stats_update_begin(&rxfill_stats->syncp);
  1576. + ++rxfill_stats->page_alloc_failed;
  1577. + u64_stats_update_end(&rxfill_stats->syncp);
  1578. + dev_kfree_skb_any(skb);
  1579. + dev_dbg(dev, "edma_context:%p Unable to allocate page",
  1580. + edma_ctx);
  1581. + break;
  1582. + }
  1583. +
  1584. + buff_addr = dma_map_page(dev, pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
  1585. + if (dma_mapping_error(dev, buff_addr)) {
  1586. + dev_dbg(dev, "edma_context:%p Mapping error for page mode",
  1587. + edma_ctx);
  1588. + __free_page(pg);
  1589. + dev_kfree_skb_any(skb);
  1590. + break;
  1591. + }
  1592. +
  1593. + skb_fill_page_desc(skb, 0, pg, 0, PAGE_SIZE);
  1594. + }
  1595. +
  1596. + EDMA_RXFILL_BUFFER_ADDR_SET(rxfill_desc, buff_addr);
  1597. +
  1598. + EDMA_RXFILL_OPAQUE_LO_SET(rxfill_desc, skb);
  1599. +#ifdef __LP64__
  1600. + EDMA_RXFILL_OPAQUE_HI_SET(rxfill_desc, skb);
  1601. +#endif
  1602. + EDMA_RXFILL_PACKET_LEN_SET(rxfill_desc,
  1603. + (u32)(buf_len) & EDMA_RXFILL_BUF_SIZE_MASK);
  1604. + prod_idx = (prod_idx + 1) & EDMA_RX_RING_SIZE_MASK;
  1605. + num_alloc++;
  1606. + }
  1607. +
  1608. + if (likely(num_alloc)) {
  1609. + reg = EDMA_BASE_OFFSET + EDMA_REG_RXFILL_PROD_IDX(rxfill_ring->ring_id);
  1610. + regmap_write(regmap, reg, prod_idx);
  1611. + rxfill_ring->prod_idx = prod_idx;
  1612. + }
  1613. +
  1614. + return num_alloc;
  1615. +}
  1616. +
  1617. +/**
  1618. + * edma_rx_alloc_buffer - EDMA Rx alloc buffer.
  1619. + * @rxfill_ring: EDMA Rxfill ring
  1620. + * @alloc_count: Number of buffers to allocate
  1621. + *
  1622. + * Alloc Rx buffers for RxFill ring.
  1623. + *
  1624. + * Return the number of buffers allocated.
  1625. + */
  1626. +int edma_rx_alloc_buffer(struct edma_rxfill_ring *rxfill_ring, int alloc_count)
  1627. +{
  1628. + return edma_rx_alloc_buffer_list(rxfill_ring, alloc_count);
  1629. +}
  1630. +
  1631. +/* Mark ip_summed appropriately in the skb as per the L3/L4 checksum
  1632. + * status in descriptor.
  1633. + */
  1634. +static void edma_rx_checksum_verify(struct edma_rxdesc_pri *rxdesc_pri,
  1635. + struct sk_buff *skb)
  1636. +{
  1637. + u8 pid = EDMA_RXDESC_PID_GET(rxdesc_pri);
  1638. +
  1639. + skb_checksum_none_assert(skb);
  1640. +
  1641. + if (likely(EDMA_RX_PID_IS_IPV4(pid))) {
  1642. + if (likely(EDMA_RXDESC_L3CSUM_STATUS_GET(rxdesc_pri)) &&
  1643. + likely(EDMA_RXDESC_L4CSUM_STATUS_GET(rxdesc_pri)))
  1644. + skb->ip_summed = CHECKSUM_UNNECESSARY;
  1645. + } else if (likely(EDMA_RX_PID_IS_IPV6(pid))) {
  1646. + if (likely(EDMA_RXDESC_L4CSUM_STATUS_GET(rxdesc_pri)))
  1647. + skb->ip_summed = CHECKSUM_UNNECESSARY;
  1648. + }
  1649. +}
  1650. +
  1651. +static void edma_rx_process_last_segment(struct edma_rxdesc_ring *rxdesc_ring,
  1652. + struct edma_rxdesc_pri *rxdesc_pri,
  1653. + struct sk_buff *skb)
  1654. +{
  1655. + bool page_mode = rxdesc_ring->rxfill->page_mode;
  1656. + struct edma_port_pcpu_stats *pcpu_stats;
  1657. + struct edma_port_rx_stats *rx_stats;
  1658. + struct edma_port_priv *port_dev;
  1659. + struct sk_buff *skb_head;
  1660. + struct net_device *dev;
  1661. + u32 pkt_length;
  1662. +
  1663. + /* Get packet length. */
  1664. + pkt_length = EDMA_RXDESC_PACKET_LEN_GET(rxdesc_pri);
  1665. +
  1666. + skb_head = rxdesc_ring->head;
  1667. + dev = skb_head->dev;
  1668. +
  1669. + /* Check Rx checksum offload status. */
  1670. + if (likely(dev->features & NETIF_F_RXCSUM))
  1671. + edma_rx_checksum_verify(rxdesc_pri, skb_head);
  1672. +
  1673. + /* Get stats for the netdevice. */
  1674. + port_dev = netdev_priv(dev);
  1675. + pcpu_stats = &port_dev->pcpu_stats;
  1676. + rx_stats = this_cpu_ptr(pcpu_stats->rx_stats);
  1677. +
  1678. + if (unlikely(page_mode)) {
  1679. + if (unlikely(!pskb_may_pull(skb_head, ETH_HLEN))) {
  1680. + /* Discard the SKB that we have been building,
  1681. + * in addition to the SKB linked to current descriptor.
  1682. + */
  1683. + dev_kfree_skb_any(skb_head);
  1684. + rxdesc_ring->head = NULL;
  1685. + rxdesc_ring->last = NULL;
  1686. + rxdesc_ring->pdesc_head = NULL;
  1687. +
  1688. + u64_stats_update_begin(&rx_stats->syncp);
  1689. + rx_stats->rx_nr_frag_headroom_err++;
  1690. + u64_stats_update_end(&rx_stats->syncp);
  1691. +
  1692. + return;
  1693. + }
  1694. + }
  1695. +
  1696. + if (unlikely(!pskb_pull(skb_head, EDMA_RXDESC_DATA_OFFSET_GET(rxdesc_ring->pdesc_head)))) {
  1697. + dev_kfree_skb_any(skb_head);
  1698. + rxdesc_ring->head = NULL;
  1699. + rxdesc_ring->last = NULL;
  1700. + rxdesc_ring->pdesc_head = NULL;
  1701. +
  1702. + u64_stats_update_begin(&rx_stats->syncp);
  1703. + rx_stats->rx_nr_frag_headroom_err++;
  1704. + u64_stats_update_end(&rx_stats->syncp);
  1705. +
  1706. + return;
  1707. + }
  1708. +
  1709. + u64_stats_update_begin(&rx_stats->syncp);
  1710. + rx_stats->rx_pkts++;
  1711. + rx_stats->rx_bytes += skb_head->len;
  1712. + rx_stats->rx_nr_frag_pkts += (u64)page_mode;
  1713. + rx_stats->rx_fraglist_pkts += (u64)(!page_mode);
  1714. + u64_stats_update_end(&rx_stats->syncp);
  1715. +
  1716. + pr_debug("edma_context:%p skb:%p Jumbo pkt_length:%u\n",
  1717. + edma_ctx, skb_head, skb_head->len);
  1718. +
  1719. + skb_head->protocol = eth_type_trans(skb_head, dev);
  1720. +
  1721. + /* Send packet up the stack. */
  1722. + if (dev->features & NETIF_F_GRO)
  1723. + napi_gro_receive(&rxdesc_ring->napi, skb_head);
  1724. + else
  1725. + netif_receive_skb(skb_head);
  1726. +
  1727. + rxdesc_ring->head = NULL;
  1728. + rxdesc_ring->last = NULL;
  1729. + rxdesc_ring->pdesc_head = NULL;
  1730. +}
  1731. +
  1732. +static void edma_rx_handle_frag_list(struct edma_rxdesc_ring *rxdesc_ring,
  1733. + struct edma_rxdesc_pri *rxdesc_pri,
  1734. + struct sk_buff *skb)
  1735. +{
  1736. + u32 pkt_length;
  1737. +
  1738. + /* Get packet length. */
  1739. + pkt_length = EDMA_RXDESC_PACKET_LEN_GET(rxdesc_pri);
  1740. + pr_debug("edma_context:%p skb:%p fragment pkt_length:%u\n",
  1741. + edma_ctx, skb, pkt_length);
  1742. +
  1743. + if (!(rxdesc_ring->head)) {
  1744. + skb_put(skb, pkt_length);
  1745. + rxdesc_ring->head = skb;
  1746. + rxdesc_ring->last = NULL;
  1747. + rxdesc_ring->pdesc_head = rxdesc_pri;
  1748. +
  1749. + return;
  1750. + }
  1751. +
  1752. + /* Append to the fraglist of the head skb if this is the second
  1753. + * frame; otherwise append to the tail of the fraglist.
  1754. + */
  1755. + skb_put(skb, pkt_length);
  1756. + if (!skb_has_frag_list(rxdesc_ring->head))
  1757. + skb_shinfo(rxdesc_ring->head)->frag_list = skb;
  1758. + else
  1759. + rxdesc_ring->last->next = skb;
  1760. +
  1761. + rxdesc_ring->last = skb;
  1762. + rxdesc_ring->last->next = NULL;
  1763. + rxdesc_ring->head->len += pkt_length;
  1764. + rxdesc_ring->head->data_len += pkt_length;
  1765. + rxdesc_ring->head->truesize += skb->truesize;
  1766. +
  1767. + /* If there are more segments for this packet,
  1768. + * then we have nothing to do. Otherwise process
  1769. + * last segment and send packet to stack.
  1770. + */
  1771. + if (EDMA_RXDESC_MORE_BIT_GET(rxdesc_pri))
  1772. + return;
  1773. +
  1774. + edma_rx_process_last_segment(rxdesc_ring, rxdesc_pri, skb);
  1775. +}
  1776. +
  1777. +static void edma_rx_handle_nr_frags(struct edma_rxdesc_ring *rxdesc_ring,
  1778. + struct edma_rxdesc_pri *rxdesc_pri,
  1779. + struct sk_buff *skb)
  1780. +{
  1781. + skb_frag_t *frag = NULL;
  1782. + u32 pkt_length;
  1783. +
  1784. + /* Get packet length. */
  1785. + pkt_length = EDMA_RXDESC_PACKET_LEN_GET(rxdesc_pri);
  1786. + pr_debug("edma_context:%p skb:%p fragment pkt_length:%u\n",
  1787. + edma_ctx, skb, pkt_length);
  1788. +
  1789. + if (!(rxdesc_ring->head)) {
  1790. + skb->len = pkt_length;
  1791. + skb->data_len = pkt_length;
  1792. + skb->truesize = SKB_TRUESIZE(PAGE_SIZE);
  1793. + rxdesc_ring->head = skb;
  1794. + rxdesc_ring->last = NULL;
  1795. + rxdesc_ring->pdesc_head = rxdesc_pri;
  1796. +
  1797. + return;
  1798. + }
  1799. +
  1800. + frag = &skb_shinfo(skb)->frags[0];
  1801. +
  1802. + /* Append current frag at correct index as nr_frag of parent. */
  1803. + skb_add_rx_frag(rxdesc_ring->head, skb_shinfo(rxdesc_ring->head)->nr_frags,
  1804. + skb_frag_page(frag), 0, pkt_length, PAGE_SIZE);
  1805. + skb_shinfo(skb)->nr_frags = 0;
  1806. +
  1807. + /* Free the SKB after we have appended its frag page to the head skb. */
  1808. + dev_kfree_skb_any(skb);
  1809. +
  1810. + /* If there are more segments for this packet,
  1811. + * then we have nothing to do. Otherwise process
  1812. + * last segment and send packet to stack.
  1813. + */
  1814. + if (EDMA_RXDESC_MORE_BIT_GET(rxdesc_pri))
  1815. + return;
  1816. +
  1817. + edma_rx_process_last_segment(rxdesc_ring, rxdesc_pri, skb);
  1818. +}
  1819. +
  1820. +static bool edma_rx_handle_linear_packets(struct edma_rxdesc_ring *rxdesc_ring,
  1821. + struct edma_rxdesc_pri *rxdesc_pri,
  1822. + struct sk_buff *skb)
  1823. +{
  1824. + bool page_mode = rxdesc_ring->rxfill->page_mode;
  1825. + struct edma_port_pcpu_stats *pcpu_stats;
  1826. + struct edma_port_rx_stats *rx_stats;
  1827. + struct edma_port_priv *port_dev;
  1828. + skb_frag_t *frag = NULL;
  1829. + u32 pkt_length;
  1830. +
  1831. + /* Get stats for the netdevice. */
  1832. + port_dev = netdev_priv(skb->dev);
  1833. + pcpu_stats = &port_dev->pcpu_stats;
  1834. + rx_stats = this_cpu_ptr(pcpu_stats->rx_stats);
  1835. +
  1836. + /* Get packet length. */
  1837. + pkt_length = EDMA_RXDESC_PACKET_LEN_GET(rxdesc_pri);
  1838. +
  1839. + if (likely(!page_mode)) {
  1840. + skb_put(skb, pkt_length);
  1841. + goto send_to_stack;
  1842. + }
  1843. +
  1844. + /* Handle linear packet in page mode. */
  1845. + frag = &skb_shinfo(skb)->frags[0];
  1846. + skb_add_rx_frag(skb, 0, skb_frag_page(frag), 0, pkt_length, PAGE_SIZE);
  1847. +
  1848. + /* Pull ethernet header into SKB data area for header processing. */
  1849. + if (unlikely(!pskb_may_pull(skb, ETH_HLEN))) {
  1850. + u64_stats_update_begin(&rx_stats->syncp);
  1851. + rx_stats->rx_nr_frag_headroom_err++;
  1852. + u64_stats_update_end(&rx_stats->syncp);
  1853. + dev_kfree_skb_any(skb);
  1854. +
  1855. + return false;
  1856. + }
  1857. +
  1858. +send_to_stack:
  1859. +
  1860. + __skb_pull(skb, EDMA_RXDESC_DATA_OFFSET_GET(rxdesc_pri));
  1861. +
  1862. + /* Check Rx checksum offload status. */
  1863. + if (likely(skb->dev->features & NETIF_F_RXCSUM))
  1864. + edma_rx_checksum_verify(rxdesc_pri, skb);
  1865. +
  1866. + u64_stats_update_begin(&rx_stats->syncp);
  1867. + rx_stats->rx_pkts++;
  1868. + rx_stats->rx_bytes += pkt_length;
  1869. + rx_stats->rx_nr_frag_pkts += (u64)page_mode;
  1870. + u64_stats_update_end(&rx_stats->syncp);
  1871. +
  1872. + skb->protocol = eth_type_trans(skb, skb->dev);
  1873. + if (skb->dev->features & NETIF_F_GRO)
  1874. + napi_gro_receive(&rxdesc_ring->napi, skb);
  1875. + else
  1876. + netif_receive_skb(skb);
  1877. +
  1878. + netdev_dbg(skb->dev, "edma_context:%p, skb:%p pkt_length:%u\n",
  1879. + edma_ctx, skb, skb->len);
  1880. +
  1881. + return true;
  1882. +}
  1883. +
  1884. +static struct net_device *edma_rx_get_src_dev(struct edma_rxdesc_stats *rxdesc_stats,
  1885. + struct edma_rxdesc_pri *rxdesc_pri,
  1886. + struct sk_buff *skb)
  1887. +{
  1888. + u32 src_info = EDMA_RXDESC_SRC_INFO_GET(rxdesc_pri);
  1889. + struct edma_hw_info *hw_info = edma_ctx->hw_info;
  1890. + struct net_device *ndev = NULL;
  1891. + u8 src_port_num;
  1892. +
  1893. + /* Check src_info. */
  1894. + if (likely((src_info & EDMA_RXDESC_SRCINFO_TYPE_MASK)
  1895. + == EDMA_RXDESC_SRCINFO_TYPE_PORTID)) {
  1896. + src_port_num = src_info & EDMA_RXDESC_PORTNUM_BITS;
  1897. + } else {
  1898. + if (net_ratelimit()) {
  1899. + pr_warn("Invalid src info_type:0x%x. Drop skb:%p\n",
  1900. + (src_info & EDMA_RXDESC_SRCINFO_TYPE_MASK), skb);
  1901. + }
  1902. +
  1903. + u64_stats_update_begin(&rxdesc_stats->syncp);
  1904. + ++rxdesc_stats->src_port_inval_type;
  1905. + u64_stats_update_end(&rxdesc_stats->syncp);
  1906. +
  1907. + return NULL;
  1908. + }
  1909. +
  1910. + /* Packet with PP source. */
  1911. + if (likely(src_port_num <= hw_info->max_ports)) {
  1912. + if (unlikely(src_port_num < EDMA_START_IFNUM)) {
  1913. + if (net_ratelimit())
  1914. + pr_warn("Port number error :%d. Drop skb:%p\n",
  1915. + src_port_num, skb);
  1916. +
  1917. + u64_stats_update_begin(&rxdesc_stats->syncp);
  1918. + ++rxdesc_stats->src_port_inval;
  1919. + u64_stats_update_end(&rxdesc_stats->syncp);
  1920. +
  1921. + return NULL;
  1922. + }
  1923. +
  1924. + /* Get netdev for this port using the source port
  1925. + * number as index into the netdev array. We need to
  1926. + * subtract one since the indices start from '0' and
  1927. + * port numbers start from '1'.
  1928. + */
  1929. + ndev = edma_ctx->netdev_arr[src_port_num - 1];
  1930. + }
  1931. +
  1932. + if (likely(ndev))
  1933. + return ndev;
  1934. +
  1935. + if (net_ratelimit())
  1936. + pr_warn("Netdev Null src_info_type:0x%x src port num:%d Drop skb:%p\n",
  1937. + (src_info & EDMA_RXDESC_SRCINFO_TYPE_MASK),
  1938. + src_port_num, skb);
  1939. +
  1940. + u64_stats_update_begin(&rxdesc_stats->syncp);
  1941. + ++rxdesc_stats->src_port_inval_netdev;
  1942. + u64_stats_update_end(&rxdesc_stats->syncp);
  1943. +
  1944. + return NULL;
  1945. +}
  1946. +
  1947. +static int edma_rx_reap(struct edma_rxdesc_ring *rxdesc_ring, int budget)
  1948. +{
  1949. + struct edma_rxdesc_stats *rxdesc_stats = &rxdesc_ring->rxdesc_stats;
  1950. + u32 alloc_size = rxdesc_ring->rxfill->alloc_size;
  1951. + bool page_mode = rxdesc_ring->rxfill->page_mode;
  1952. + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
  1953. + struct edma_rxdesc_pri *next_rxdesc_pri;
  1954. + struct regmap *regmap = ppe_dev->regmap;
  1955. + struct device *dev = ppe_dev->dev;
  1956. + u32 prod_idx, cons_idx, end_idx;
  1957. + u32 work_to_do, work_done = 0;
  1958. + struct sk_buff *next_skb;
  1959. + u32 work_leftover, reg;
  1960. +
  1961. + /* Get Rx ring producer and consumer indices. */
  1962. + cons_idx = rxdesc_ring->cons_idx;
  1963. +
  1964. + if (likely(rxdesc_ring->work_leftover > EDMA_RX_MAX_PROCESS)) {
  1965. + work_to_do = rxdesc_ring->work_leftover;
  1966. + } else {
  1967. + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_PROD_IDX(rxdesc_ring->ring_id);
  1968. + regmap_read(regmap, reg, &prod_idx);
  1969. + prod_idx = prod_idx & EDMA_RXDESC_PROD_IDX_MASK;
  1970. + work_to_do = EDMA_DESC_AVAIL_COUNT(prod_idx,
  1971. + cons_idx, EDMA_RX_RING_SIZE);
  1972. + rxdesc_ring->work_leftover = work_to_do;
  1973. + }
  1974. +
  1975. + if (work_to_do > budget)
  1976. + work_to_do = budget;
  1977. +
  1978. + rxdesc_ring->work_leftover -= work_to_do;
  1979. + end_idx = (cons_idx + work_to_do) & EDMA_RX_RING_SIZE_MASK;
  1980. + next_rxdesc_pri = EDMA_RXDESC_PRI_DESC(rxdesc_ring, cons_idx);
  1981. +
  1982. + /* Get opaque from RXDESC. */
  1983. + next_skb = (struct sk_buff *)EDMA_RXDESC_OPAQUE_GET(next_rxdesc_pri);
  1984. +
  1985. + work_leftover = work_to_do & (EDMA_RX_MAX_PROCESS - 1);
  1986. + while (likely(work_to_do--)) {
  1987. + struct edma_rxdesc_pri *rxdesc_pri;
  1988. + struct net_device *ndev;
  1989. + struct sk_buff *skb;
  1990. + dma_addr_t dma_addr;
  1991. +
  1992. + skb = next_skb;
  1993. + rxdesc_pri = next_rxdesc_pri;
  1994. + dma_addr = EDMA_RXDESC_BUFFER_ADDR_GET(rxdesc_pri);
  1995. +
  1996. + if (!page_mode)
  1997. + dma_unmap_single(dev, dma_addr, alloc_size,
  1998. + DMA_FROM_DEVICE);
  1999. + else
  2000. + dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
  2001. +
  2002. + /* Update consumer index. */
  2003. + cons_idx = (cons_idx + 1) & EDMA_RX_RING_SIZE_MASK;
  2004. +
  2005. + /* Get the next Rx descriptor. */
  2006. + next_rxdesc_pri = EDMA_RXDESC_PRI_DESC(rxdesc_ring, cons_idx);
  2007. +
  2008. + /* Handle linear packets or initial segments first. */
  2009. + if (likely(!(rxdesc_ring->head))) {
  2010. + ndev = edma_rx_get_src_dev(rxdesc_stats, rxdesc_pri, skb);
  2011. + if (unlikely(!ndev)) {
  2012. + dev_kfree_skb_any(skb);
  2013. + goto next_rx_desc;
  2014. + }
  2015. +
  2016. + /* Update skb fields for head skb. */
  2017. + skb->dev = ndev;
  2018. + skb->skb_iif = ndev->ifindex;
  2019. +
  2020. + /* Handle linear packets. */
  2021. + if (likely(!EDMA_RXDESC_MORE_BIT_GET(rxdesc_pri))) {
  2022. + next_skb =
  2023. + (struct sk_buff *)EDMA_RXDESC_OPAQUE_GET(next_rxdesc_pri);
  2024. +
  2025. + if (unlikely(!edma_rx_handle_linear_packets(rxdesc_ring,
  2026. + rxdesc_pri,
  2027. + skb)))
  2028. + dev_kfree_skb_any(skb);
  2029. +
  2030. + goto next_rx_desc;
  2031. + }
  2032. + }
  2033. +
  2034. + next_skb = (struct sk_buff *)EDMA_RXDESC_OPAQUE_GET(next_rxdesc_pri);
  2035. +
  2036. + /* Handle scatter frame processing for first/middle/last segments. */
  2037. + page_mode ? edma_rx_handle_nr_frags(rxdesc_ring, rxdesc_pri, skb) :
  2038. + edma_rx_handle_frag_list(rxdesc_ring, rxdesc_pri, skb);
  2039. +
  2040. +next_rx_desc:
  2041. + /* Update work done. */
  2042. + work_done++;
  2043. +
  2044. + /* Check if we can refill EDMA_RX_MAX_PROCESS worth buffers,
  2045. + * if yes, refill and update index before continuing.
  2046. + */
  2047. + if (unlikely(!(work_done & (EDMA_RX_MAX_PROCESS - 1)))) {
  2048. + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_CONS_IDX(rxdesc_ring->ring_id);
  2049. + regmap_write(regmap, reg, cons_idx);
  2050. + rxdesc_ring->cons_idx = cons_idx;
  2051. + edma_rx_alloc_buffer_list(rxdesc_ring->rxfill, EDMA_RX_MAX_PROCESS);
  2052. + }
  2053. + }
  2054. +
  2055. + /* Check if we need to refill and update
  2056. + * index for any buffers before exit.
  2057. + */
  2058. + if (unlikely(work_leftover)) {
  2059. + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_CONS_IDX(rxdesc_ring->ring_id);
  2060. + regmap_write(regmap, reg, cons_idx);
  2061. + rxdesc_ring->cons_idx = cons_idx;
  2062. + edma_rx_alloc_buffer_list(rxdesc_ring->rxfill, work_leftover);
  2063. + }
  2064. +
  2065. + return work_done;
  2066. +}
  2067. +
  2068. +/**
  2069. + * edma_rx_napi_poll - EDMA Rx napi poll.
  2070. + * @napi: NAPI structure
  2071. + * @budget: Rx NAPI budget
  2072. + *
  2073. + * EDMA Rx NAPI poll handler.
  2074. + *
  2075. + * Return the number of packets processed.
  2076. + */
  2077. +int edma_rx_napi_poll(struct napi_struct *napi, int budget)
  2078. +{
  2079. + struct edma_rxdesc_ring *rxdesc_ring = container_of(napi, struct edma_rxdesc_ring, napi);
  2080. + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
  2081. + struct regmap *regmap = ppe_dev->regmap;
  2082. + int work_done = 0;
  2083. + u32 status, reg;
  2084. +
  2085. + do {
  2086. + work_done += edma_rx_reap(rxdesc_ring, budget - work_done);
  2087. + if (likely(work_done >= budget))
  2088. + return work_done;
  2089. +
  2090. + /* Check if there are more packets to process. */
  2091. + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_INT_STAT(rxdesc_ring->ring_id);
  2092. + regmap_read(regmap, reg, &status);
  2093. + status = status & EDMA_RXDESC_RING_INT_STATUS_MASK;
  2094. + } while (likely(status));
  2095. +
  2096. + napi_complete(napi);
  2097. +
  2098. + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->ring_id);
  2099. + regmap_write(regmap, reg, edma_ctx->intr_info.intr_mask_rx);
  2100. +
  2101. + return work_done;
  2102. +}
  2103. +
  2104. +/**
  2105. + * edma_rx_handle_irq - EDMA Rx handle irq.
  2106. + * @irq: Interrupt to handle
  2107. + * @ctx: Context
  2108. + *
  2109. + * Process RX IRQ and schedule NAPI.
  2110. + *
  2111. + * Return IRQ_HANDLED after scheduling NAPI.
  2112. + */
  2113. +irqreturn_t edma_rx_handle_irq(int irq, void *ctx)
  2114. +{
  2115. + struct edma_rxdesc_ring *rxdesc_ring = (struct edma_rxdesc_ring *)ctx;
  2116. + struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
  2117. + struct regmap *regmap = ppe_dev->regmap;
  2118. + u32 reg;
  2119. +
  2120. + if (likely(napi_schedule_prep(&rxdesc_ring->napi))) {
  2121. + /* Disable RxDesc interrupt. */
  2122. + reg = EDMA_BASE_OFFSET + EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->ring_id);
  2123. + regmap_write(regmap, reg, EDMA_MASK_INT_DISABLE);
  2124. + __napi_schedule(&rxdesc_ring->napi);
  2125. + }
  2126. +
  2127. + return IRQ_HANDLED;
  2128. +}
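
edma_rx_reap() bounds its work by how far the hardware producer index is ahead of the software consumer index on a power-of-two ring, then refills in EDMA_RX_MAX_PROCESS-sized batches. EDMA_DESC_AVAIL_COUNT itself is defined elsewhere in the driver; the sketch below assumes the usual wrap-safe (prod - cons) & (size - 1) form and is only an illustration:

#include <assert.h>
#include <stdio.h>

#define RING_SIZE	2048			/* must be a power of two */
#define RING_MASK	(RING_SIZE - 1)

/* Wrap-safe count of descriptors the producer is ahead of the consumer. */
static unsigned int desc_avail(unsigned int prod, unsigned int cons)
{
	return (prod - cons) & RING_MASK;
}

int main(void)
{
	/* No wrap: producer is ahead by 16. */
	assert(desc_avail(48, 32) == 16);
	/* Wrap-around: producer restarted at 8 while the consumer is at 2040. */
	assert(desc_avail(8, 2040) == 16);

	printf("ring accounting checks passed\n");
	return 0;
}
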
  2129. --- /dev/null
  2130. +++ b/drivers/net/ethernet/qualcomm/ppe/edma_rx.h
  2131. @@ -0,0 +1,287 @@
  2132. +/* SPDX-License-Identifier: GPL-2.0-only
  2133. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
  2134. + */
  2135. +
  2136. +#ifndef __EDMA_RX__
  2137. +#define __EDMA_RX__
  2138. +
  2139. +#include <linux/netdevice.h>
  2140. +
  2141. +#define EDMA_RXFILL_RING_PER_CORE_MAX 1
  2142. +#define EDMA_RXDESC_RING_PER_CORE_MAX 1
  2143. +
  2144. +/* Max Rx processing without replenishing RxFill ring. */
  2145. +#define EDMA_RX_MAX_PROCESS 32
  2146. +
  2147. +#define EDMA_RX_SKB_HEADROOM 128
  2148. +#define EDMA_RX_QUEUE_START 0
  2149. +#define EDMA_RX_BUFFER_SIZE 1984
  2150. +#define EDMA_MAX_CORE 4
  2151. +
  2152. +#define EDMA_GET_DESC(R, i, type) (&(((type *)((R)->desc))[(i)]))
  2153. +#define EDMA_GET_PDESC(R, i, type) (&(((type *)((R)->pdesc))[(i)]))
  2154. +#define EDMA_GET_SDESC(R, i, type) (&(((type *)((R)->sdesc))[(i)]))
  2155. +#define EDMA_RXFILL_DESC(R, i) EDMA_GET_DESC(R, i, \
  2156. + struct edma_rxfill_desc)
  2157. +#define EDMA_RXDESC_PRI_DESC(R, i) EDMA_GET_PDESC(R, i, \
  2158. + struct edma_rxdesc_pri)
  2159. +#define EDMA_RXDESC_SEC_DESC(R, i) EDMA_GET_SDESC(R, i, \
  2160. + struct edma_rxdesc_sec)
  2161. +
  2162. +#define EDMA_RX_RING_SIZE 2048
  2163. +
  2164. +#define EDMA_RX_RING_SIZE_MASK (EDMA_RX_RING_SIZE - 1)
  2165. +#define EDMA_RX_RING_ID_MASK 0x1F
  2166. +
  2167. +#define EDMA_MAX_PRI_PER_CORE 8
  2168. +#define EDMA_RX_PID_IPV4_MAX 0x3
  2169. +#define EDMA_RX_PID_IPV6 0x4
  2170. +#define EDMA_RX_PID_IS_IPV4(pid) (!((pid) & (~EDMA_RX_PID_IPV4_MAX)))
  2171. +#define EDMA_RX_PID_IS_IPV6(pid) (!!((pid) & EDMA_RX_PID_IPV6))
  2172. +
  2173. +#define EDMA_RXDESC_BUFFER_ADDR_GET(desc) \
  2174. + ((u32)(le32_to_cpu((__force __le32)((desc)->word0))))
  2175. +#define EDMA_RXDESC_OPAQUE_GET(_desc) ({ \
  2176. + typeof(_desc) (desc) = (_desc); \
  2177. + ((uintptr_t)((u64)((desc)->word2) | \
  2178. + ((u64)((desc)->word3) << 0x20))); })
  2179. +
  2180. +#define EDMA_RXDESC_SRCINFO_TYPE_PORTID 0x2000
  2181. +#define EDMA_RXDESC_SRCINFO_TYPE_MASK 0xF000
  2182. +#define EDMA_RXDESC_L3CSUM_STATUS_MASK BIT(13)
  2183. +#define EDMA_RXDESC_L4CSUM_STATUS_MASK BIT(12)
  2184. +#define EDMA_RXDESC_PORTNUM_BITS 0x0FFF
  2185. +
  2186. +#define EDMA_RXDESC_PACKET_LEN_MASK 0x3FFFF
  2187. +#define EDMA_RXDESC_PACKET_LEN_GET(_desc) ({ \
  2188. + typeof(_desc) (desc) = (_desc); \
  2189. + ((le32_to_cpu((__force __le32)((desc)->word5))) & \
  2190. + EDMA_RXDESC_PACKET_LEN_MASK); })
  2191. +
  2192. +#define EDMA_RXDESC_MORE_BIT_MASK 0x40000000
  2193. +#define EDMA_RXDESC_MORE_BIT_GET(desc) ((le32_to_cpu((__force __le32)((desc)->word1))) & \
  2194. + EDMA_RXDESC_MORE_BIT_MASK)
  2195. +#define EDMA_RXDESC_SRC_DST_INFO_GET(desc) \
  2196. + ((u32)((le32_to_cpu((__force __le32)((desc)->word4)))))
  2197. +
  2198. +#define EDMA_RXDESC_L3_OFFSET_MASK GENMASK(23, 16)
  2199. +#define EDMA_RXDESC_L3_OFFSET_GET(desc) FIELD_GET(EDMA_RXDESC_L3_OFFSET_MASK, \
  2200. + le32_to_cpu((__force __le32)((desc)->word7)))
  2201. +
  2202. +#define EDMA_RXDESC_PID_MASK GENMASK(15, 12)
  2203. +#define EDMA_RXDESC_PID_GET(desc) FIELD_GET(EDMA_RXDESC_PID_MASK, \
  2204. + le32_to_cpu((__force __le32)((desc)->word7)))
  2205. +
  2206. +#define EDMA_RXDESC_DST_INFO_MASK GENMASK(31, 16)
  2207. +#define EDMA_RXDESC_DST_INFO_GET(desc) FIELD_GET(EDMA_RXDESC_DST_INFO_MASK, \
  2208. + le32_to_cpu((__force __le32)((desc)->word4)))
  2209. +
  2210. +#define EDMA_RXDESC_SRC_INFO_MASK GENMASK(15, 0)
  2211. +#define EDMA_RXDESC_SRC_INFO_GET(desc) FIELD_GET(EDMA_RXDESC_SRC_INFO_MASK, \
  2212. + le32_to_cpu((__force __le32)((desc)->word4)))
  2213. +
  2214. +#define EDMA_RXDESC_PORT_ID_MASK GENMASK(11, 0)
  2215. +#define EDMA_RXDESC_PORT_ID_GET(x) FIELD_GET(EDMA_RXDESC_PORT_ID_MASK, x)
  2216. +
  2217. +#define EDMA_RXDESC_SRC_PORT_ID_GET(desc) (EDMA_RXDESC_PORT_ID_GET \
  2218. + (EDMA_RXDESC_SRC_INFO_GET(desc)))
  2219. +#define EDMA_RXDESC_DST_PORT_ID_GET(desc) (EDMA_RXDESC_PORT_ID_GET \
  2220. + (EDMA_RXDESC_DST_INFO_GET(desc)))
  2221. +
  2222. +#define EDMA_RXDESC_DST_PORT (0x2 << EDMA_RXDESC_PID_SHIFT)
  2223. +
  2224. +#define EDMA_RXDESC_L3CSUM_STATUS_GET(desc) FIELD_GET(EDMA_RXDESC_L3CSUM_STATUS_MASK, \
  2225. + le32_to_cpu((__force __le32)(desc)->word6))
  2226. +#define EDMA_RXDESC_L4CSUM_STATUS_GET(desc) FIELD_GET(EDMA_RXDESC_L4CSUM_STATUS_MASK, \
  2227. + le32_to_cpu((__force __le32)(desc)->word6))
  2228. +
  2229. +#define EDMA_RXDESC_DATA_OFFSET_MASK GENMASK(11, 0)
  2230. +#define EDMA_RXDESC_DATA_OFFSET_GET(desc) FIELD_GET(EDMA_RXDESC_DATA_OFFSET_MASK, \
  2231. + le32_to_cpu((__force __le32)(desc)->word6))
  2232. +
  2233. +#define EDMA_RXFILL_BUF_SIZE_MASK 0xFFFF
  2234. +#define EDMA_RXFILL_BUF_SIZE_SHIFT 16
  2235. +
  2236. +/* Opaque values are not accessed by the EDMA HW,
  2237. + * so endianness conversion is not needed.
  2238. + */
  2239. +
  2240. +#define EDMA_RXFILL_OPAQUE_LO_SET(desc, ptr) (((desc)->word2) = \
  2241. + (u32)(uintptr_t)(ptr))
  2242. +#ifdef __LP64__
  2243. +#define EDMA_RXFILL_OPAQUE_HI_SET(desc, ptr) (((desc)->word3) = \
  2244. + (u32)((u64)(ptr) >> 0x20))
  2245. +#endif
  2246. +
  2247. +#define EDMA_RXFILL_OPAQUE_GET(_desc) ({ \
  2248. + typeof(_desc) (desc) = (_desc); \
  2249. + ((uintptr_t)((u64)((desc)->word2) | \
  2250. + ((u64)((desc)->word3) << 0x20))); })
  2251. +
  2252. +#define EDMA_RXFILL_PACKET_LEN_SET(desc, len) do { \
  2253. + (((desc)->word1) = (u32)((((u32)(len)) << EDMA_RXFILL_BUF_SIZE_SHIFT) & \
  2254. + 0xFFFF0000)); \
  2255. +} while (0)
  2256. +
  2257. +#define EDMA_RXFILL_BUFFER_ADDR_SET(desc, addr) (((desc)->word0) = (u32)(addr))
  2258. +
  2259. +/* Opaque values are set in word2 and word3; they are not accessed by the EDMA HW,
  2260. + * so endianness conversion is not needed.
  2261. + */
  2262. +#define EDMA_RXFILL_ENDIAN_SET(_desc) ({ \
  2263. + typeof(_desc) (desc) = (_desc); \
  2264. + cpu_to_le32s(&((desc)->word0)); \
  2265. + cpu_to_le32s(&((desc)->word1)); \
  2266. +})
  2267. +
  2268. +/* RX DESC size shift to obtain index from descriptor pointer. */
  2269. +#define EDMA_RXDESC_SIZE_SHIFT 5
  2270. +
  2271. +/**
  2272. + * struct edma_rxdesc_stats - RX descriptor ring stats.
  2273. + * @src_port_inval: Invalid source port number
  2274. + * @src_port_inval_type: Source type is not PORT ID
  2275. + * @src_port_inval_netdev: Invalid net device for the source port
  2276. + * @syncp: Synchronization point for 64-bit stats
  2277. + */
  2278. +struct edma_rxdesc_stats {
  2279. + u64 src_port_inval;
  2280. + u64 src_port_inval_type;
  2281. + u64 src_port_inval_netdev;
  2282. + struct u64_stats_sync syncp;
  2283. +};
  2284. +
  2285. +/**
  2286. + * struct edma_rxfill_stats - Rx fill descriptor ring stats.
  2287. + * @alloc_failed: Buffer allocation failure count
  2288. + * @page_alloc_failed: Page allocation failure count for page mode
  2289. + * @syncp: Synchronization point for 64-bit stats
  2290. + */
  2291. +struct edma_rxfill_stats {
  2292. + u64 alloc_failed;
  2293. + u64 page_alloc_failed;
  2294. + struct u64_stats_sync syncp;
  2295. +};
  2296. +
  2297. +/**
  2298. + * struct edma_rxdesc_pri - Rx descriptor.
  2299. + * @word0: Buffer address
  2300. + * @word1: More bit, priority bit, service code
  2301. + * @word2: Opaque low bits
  2302. + * @word3: Opaque high bits
  2303. + * @word4: Destination and source information
  2304. + * @word5: WiFi QoS, data length
  2305. + * @word6: Hash value, check sum status
  2306. + * @word7: DSCP, packet offsets
  2307. + */
  2308. +struct edma_rxdesc_pri {
  2309. + u32 word0;
  2310. + u32 word1;
  2311. + u32 word2;
  2312. + u32 word3;
  2313. + u32 word4;
  2314. + u32 word5;
  2315. + u32 word6;
  2316. + u32 word7;
  2317. +};
  2318. +
  2319. +/**
  2320. + * struct edma_rxdesc_sec - Rx secondary descriptor.
  2321. + * @word0: Timestamp
  2322. + * @word1: Secondary checksum status
  2323. + * @word2: QoS tag
  2324. + * @word3: Flow index details
  2325. + * @word4: Secondary packet offsets
  2326. + * @word5: Multicast bit, checksum
  2327. + * @word6: SVLAN, CVLAN
  2328. + * @word7: Secondary SVLAN, CVLAN
  2329. + */
  2330. +struct edma_rxdesc_sec {
  2331. + u32 word0;
  2332. + u32 word1;
  2333. + u32 word2;
  2334. + u32 word3;
  2335. + u32 word4;
  2336. + u32 word5;
  2337. + u32 word6;
  2338. + u32 word7;
  2339. +};
  2340. +
  2341. +/**
  2342. + * struct edma_rxfill_desc - RxFill descriptor.
  2343. + * @word0: Buffer address
  2344. + * @word1: Buffer size
  2345. + * @word2: Opaque low bits
  2346. + * @word3: Opaque high bits
  2347. + */
  2348. +struct edma_rxfill_desc {
  2349. + u32 word0;
  2350. + u32 word1;
  2351. + u32 word2;
  2352. + u32 word3;
  2353. +};
  2354. +
  2355. +/**
  2356. + * struct edma_rxfill_ring - RxFill ring
  2357. + * @ring_id: RxFill ring number
  2358. + * @count: Number of descriptors in the ring
  2359. + * @prod_idx: Ring producer index
  2360. + * @alloc_size: Buffer size to allocate
  2361. + * @desc: Descriptor ring virtual address
  2362. + * @dma: Descriptor ring physical address
  2363. + * @buf_len: Buffer length for rxfill descriptor
  2364. + * @page_mode: Page mode for Rx processing
  2365. + * @rxfill_stats: Rx fill ring statistics
  2366. + */
  2367. +struct edma_rxfill_ring {
  2368. + u32 ring_id;
  2369. + u32 count;
  2370. + u32 prod_idx;
  2371. + u32 alloc_size;
  2372. + struct edma_rxfill_desc *desc;
  2373. + dma_addr_t dma;
  2374. + u32 buf_len;
  2375. + bool page_mode;
  2376. + struct edma_rxfill_stats rxfill_stats;
  2377. +};
  2378. +
  2379. +/**
  2380. + * struct edma_rxdesc_ring - RxDesc ring
  2381. + * @napi: NAPI structure for the ring
  2382. + * @ring_id: Rxdesc ring number
  2383. + * @count: Number of descriptors in the ring
  2384. + * @work_leftover: Leftover descriptors to be processed
  2385. + * @cons_idx: Ring consumer index
  2386. + * @pdesc: Primary descriptor ring virtual address
  2387. + * @pdesc_head: Primary descriptor head in case of scatter-gather frame
  2388. + * @sdesc: Secondary descriptor ring virtual address
  2389. + * @rxdesc_stats: Rx descriptor ring statistics
  2390. + * @rxfill: RxFill ring used
  2391. + * @napi_added: Flag to indicate NAPI add status
  2392. + * @pdma: Primary descriptor ring physical address
  2393. + * @sdma: Secondary descriptor ring physical address
  2394. + * @head: Head of the skb list in case of scatter-gather frame
  2395. + * @last: Last skb of the skb list in case of scatter-gather frame
  2396. + */
  2397. +struct edma_rxdesc_ring {
  2398. + struct napi_struct napi;
  2399. + u32 ring_id;
  2400. + u32 count;
  2401. + u32 work_leftover;
  2402. + u32 cons_idx;
  2403. + struct edma_rxdesc_pri *pdesc;
  2404. + struct edma_rxdesc_pri *pdesc_head;
  2405. + struct edma_rxdesc_sec *sdesc;
  2406. + struct edma_rxdesc_stats rxdesc_stats;
  2407. + struct edma_rxfill_ring *rxfill;
  2408. + bool napi_added;
  2409. + dma_addr_t pdma;
  2410. + dma_addr_t sdma;
  2411. + struct sk_buff *head;
  2412. + struct sk_buff *last;
  2413. +};
  2414. +
  2415. +irqreturn_t edma_rx_handle_irq(int irq, void *ctx);
  2416. +int edma_rx_alloc_buffer(struct edma_rxfill_ring *rxfill_ring, int alloc_count);
  2417. +int edma_rx_napi_poll(struct napi_struct *napi, int budget);
  2418. +#endif
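
The RxFill descriptor stores the skb pointer in the two opaque words word2/word3, which the hardware never interprets, and the Rx path recovers it with EDMA_RXDESC_OPAQUE_GET. A minimal standalone sketch of that 64-bit pointer split and recovery (fill_desc, opaque_set and opaque_get are hypothetical stand-ins, not the driver's macros):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct fill_desc {
	uint32_t word0;	/* buffer address */
	uint32_t word1;	/* buffer size */
	uint32_t word2;	/* opaque low 32 bits */
	uint32_t word3;	/* opaque high 32 bits */
};

/* Split a host pointer across the two opaque descriptor words. */
static void opaque_set(struct fill_desc *desc, void *ptr)
{
	desc->word2 = (uint32_t)(uintptr_t)ptr;
	desc->word3 = (uint32_t)((uint64_t)(uintptr_t)ptr >> 32);
}

/* Reassemble the pointer from the two opaque words. */
static void *opaque_get(const struct fill_desc *desc)
{
	return (void *)(uintptr_t)((uint64_t)desc->word2 |
				   ((uint64_t)desc->word3 << 32));
}

int main(void)
{
	struct fill_desc desc = { 0 };
	int buffer;	/* stands in for the skb the driver stores */

	opaque_set(&desc, &buffer);
	assert(opaque_get(&desc) == &buffer);

	printf("opaque pointer round-trip ok\n");
	return 0;
}
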