0346-net-ethernet-qualcomm-Add-miscellaneous-error-interr.patch

From 8a924457c0b71acee96c8f78ef386e2a354a2aca Mon Sep 17 00:00:00 2001
From: Suruchi Agarwal <[email protected]>
Date: Thu, 21 Mar 2024 16:31:04 -0700
Subject: [PATCH] net: ethernet: qualcomm: Add miscellaneous error interrupts
 and counters

Handle the EDMA miscellaneous error interrupts and expose the EDMA
Tx/Rx ring counters and error counters through the debugfs framework.
(A standalone sketch of the per-CPU counter pattern these counters use
follows the diffstat.)

Change-Id: I7da8b978a7e93947b03a45269a81b401f35da31c
Co-developed-by: Pavithra R <[email protected]>
Signed-off-by: Pavithra R <[email protected]>
Signed-off-by: Suruchi Agarwal <[email protected]>
---
 drivers/net/ethernet/qualcomm/ppe/Makefile    |   2 +-
 drivers/net/ethernet/qualcomm/ppe/edma.c      | 162 ++++++++
 drivers/net/ethernet/qualcomm/ppe/edma.h      |  30 ++
 .../net/ethernet/qualcomm/ppe/edma_debugfs.c  | 370 ++++++++++++++++++
 .../net/ethernet/qualcomm/ppe/ppe_debugfs.c   |  17 +
 5 files changed, 580 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_debugfs.c
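
Before the diffs, a note on the counter scheme: the error counters added here follow the kernel's standard per-CPU u64_stats pattern (allocate with alloc_percpu(), initialize each CPU's syncp, bump the counter between u64_stats_update_begin()/end() on the local CPU). A minimal self-contained sketch of that pattern, using hypothetical demo_* names rather than the driver's own:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical stand-in for struct edma_err_stats. */
struct demo_stats {
	u64 events;
	struct u64_stats_sync syncp;
};

static struct demo_stats __percpu *demo_stats;

static int demo_stats_alloc(void)
{
	int cpu;

	demo_stats = alloc_percpu(struct demo_stats);
	if (!demo_stats)
		return -ENOMEM;

	/* Writer-side seqcount init, once per possible CPU. */
	for_each_possible_cpu(cpu)
		u64_stats_init(&per_cpu_ptr(demo_stats, cpu)->syncp);

	return 0;
}

/* Writer side: runs on the local CPU, e.g. from a hardirq handler. */
static void demo_stats_bump(void)
{
	struct demo_stats *s = this_cpu_ptr(demo_stats);

	u64_stats_update_begin(&s->syncp);
	s->events++;
	u64_stats_update_end(&s->syncp);
}

edma_err_stats_alloc() and edma_misc_handle_irq() in the patch below instantiate exactly this shape for struct edma_err_stats.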
--- a/drivers/net/ethernet/qualcomm/ppe/Makefile
+++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
@@ -7,4 +7,4 @@ obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
 qcom-ppe-objs := ppe.o ppe_config.o ppe_debugfs.o ppe_port.o
 
 #EDMA
-qcom-ppe-objs += edma.o edma_cfg_rx.o edma_cfg_tx.o edma_port.o edma_rx.o edma_tx.o
+qcom-ppe-objs += edma.o edma_cfg_rx.o edma_cfg_tx.o edma_debugfs.o edma_port.o edma_rx.o edma_tx.o
--- a/drivers/net/ethernet/qualcomm/ppe/edma.c
+++ b/drivers/net/ethernet/qualcomm/ppe/edma.c
@@ -152,6 +152,42 @@ static int edma_clock_init(void)
 }
 
 /**
+ * edma_err_stats_alloc - Allocate stats memory
+ *
+ * Allocate memory for per-CPU error stats.
+ */
+int edma_err_stats_alloc(void)
+{
+	u32 i;
+
+	edma_ctx->err_stats = alloc_percpu(*edma_ctx->err_stats);
+	if (!edma_ctx->err_stats)
+		return -ENOMEM;
+
+	for_each_possible_cpu(i) {
+		struct edma_err_stats *stats;
+
+		stats = per_cpu_ptr(edma_ctx->err_stats, i);
+		u64_stats_init(&stats->syncp);
+	}
+
+	return 0;
+}
+
+/**
+ * edma_err_stats_free - Free stats memory
+ *
+ * Free memory of per-CPU error stats.
+ */
+void edma_err_stats_free(void)
+{
+	if (edma_ctx->err_stats) {
+		free_percpu(edma_ctx->err_stats);
+		edma_ctx->err_stats = NULL;
+	}
+}
+
+/**
  * edma_configure_ucast_prio_map_tbl - Configure unicast priority map table.
  *
  * Map int_priority values to priority class and initialize
@@ -191,11 +227,113 @@ static int edma_configure_ucast_prio_map
 	return ret;
 }
 
+static void edma_disable_misc_interrupt(void)
+{
+	struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
+	struct regmap *regmap = ppe_dev->regmap;
+	u32 reg;
+
+	reg = EDMA_BASE_OFFSET + EDMA_REG_MISC_INT_MASK_ADDR;
+	regmap_write(regmap, reg, EDMA_MASK_INT_CLEAR);
+}
+
+static void edma_enable_misc_interrupt(void)
+{
+	struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
+	struct regmap *regmap = ppe_dev->regmap;
+	u32 reg;
+
+	reg = EDMA_BASE_OFFSET + EDMA_REG_MISC_INT_MASK_ADDR;
+	regmap_write(regmap, reg, edma_ctx->intr_info.intr_mask_misc);
+}
+
+static irqreturn_t edma_misc_handle_irq(int irq,
+					__maybe_unused void *ctx)
+{
+	struct edma_err_stats *stats = this_cpu_ptr(edma_ctx->err_stats);
+	struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
+	struct regmap *regmap = ppe_dev->regmap;
+	u32 misc_intr_status, data, reg;
+
+	/* Read Misc intr status */
+	reg = EDMA_BASE_OFFSET + EDMA_REG_MISC_INT_STAT_ADDR;
+	regmap_read(regmap, reg, &data);
+	misc_intr_status = data & edma_ctx->intr_info.intr_mask_misc;
+
+	pr_debug("Received misc irq %d, status: %d\n", irq, misc_intr_status);
+
+	if (FIELD_GET(EDMA_MISC_AXI_RD_ERR_MASK, misc_intr_status)) {
+		pr_err("MISC AXI read error received\n");
+		u64_stats_update_begin(&stats->syncp);
+		++stats->edma_axi_read_err;
+		u64_stats_update_end(&stats->syncp);
+	}
+
+	if (FIELD_GET(EDMA_MISC_AXI_WR_ERR_MASK, misc_intr_status)) {
+		pr_err("MISC AXI write error received\n");
+		u64_stats_update_begin(&stats->syncp);
+		++stats->edma_axi_write_err;
+		u64_stats_update_end(&stats->syncp);
+	}
+
+	if (FIELD_GET(EDMA_MISC_RX_DESC_FIFO_FULL_MASK, misc_intr_status)) {
+		if (net_ratelimit())
+			pr_err("MISC Rx descriptor fifo full error received\n");
+		u64_stats_update_begin(&stats->syncp);
+		++stats->edma_rxdesc_fifo_full;
+		u64_stats_update_end(&stats->syncp);
+	}
+
+	if (FIELD_GET(EDMA_MISC_RX_ERR_BUF_SIZE_MASK, misc_intr_status)) {
+		if (net_ratelimit())
+			pr_err("MISC Rx buffer size error received\n");
+		u64_stats_update_begin(&stats->syncp);
+		++stats->edma_rx_buf_size_err;
+		u64_stats_update_end(&stats->syncp);
+	}
+
+	if (FIELD_GET(EDMA_MISC_TX_SRAM_FULL_MASK, misc_intr_status)) {
+		if (net_ratelimit())
+			pr_err("MISC Tx SRAM full error received\n");
+		u64_stats_update_begin(&stats->syncp);
+		++stats->edma_tx_sram_full;
+		u64_stats_update_end(&stats->syncp);
+	}
+
+	if (FIELD_GET(EDMA_MISC_TX_CMPL_BUF_FULL_MASK, misc_intr_status)) {
+		if (net_ratelimit())
+			pr_err("MISC Tx complete buffer full error received\n");
+		u64_stats_update_begin(&stats->syncp);
+		++stats->edma_txcmpl_buf_full;
+		u64_stats_update_end(&stats->syncp);
+	}
+
+	if (FIELD_GET(EDMA_MISC_DATA_LEN_ERR_MASK, misc_intr_status)) {
+		if (net_ratelimit())
+			pr_err("MISC data length error received\n");
+		u64_stats_update_begin(&stats->syncp);
+		++stats->edma_tx_data_len_err;
+		u64_stats_update_end(&stats->syncp);
+	}
+
+	if (FIELD_GET(EDMA_MISC_TX_TIMEOUT_MASK, misc_intr_status)) {
+		if (net_ratelimit())
+			pr_err("MISC Tx timeout error received\n");
+		u64_stats_update_begin(&stats->syncp);
+		++stats->edma_tx_timeout;
+		u64_stats_update_end(&stats->syncp);
+	}
+
+	return IRQ_HANDLED;
+}
+
 static int edma_irq_register(void)
 {
 	struct edma_hw_info *hw_info = edma_ctx->hw_info;
 	struct edma_ring_info *txcmpl = hw_info->txcmpl;
+	struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
 	struct edma_ring_info *rx = hw_info->rx;
+	struct device *dev = ppe_dev->dev;
 	int ret;
 	u32 i;
 
@@ -270,8 +408,25 @@ static int edma_irq_register(void)
 			 edma_rxdesc_irq_name[i]);
 	}
 
+	/* Request Misc IRQ */
+	ret = request_irq(edma_ctx->intr_info.intr_misc, edma_misc_handle_irq,
+			  IRQF_SHARED, "edma_misc",
+			  (void *)dev);
+	if (ret) {
+		pr_err("MISC IRQ:%d request failed\n",
+		       edma_ctx->intr_info.intr_misc);
+		goto misc_intr_req_fail;
+	}
+
 	return 0;
 
+misc_intr_req_fail:
+	/* Free IRQ for RXDESC rings */
+	for (i = 0; i < rx->num_rings; i++) {
+		synchronize_irq(edma_ctx->intr_info.intr_rx[i]);
+		free_irq(edma_ctx->intr_info.intr_rx[i],
+			 (void *)&edma_ctx->rx_rings[i]);
+	}
+
 rx_desc_ring_intr_req_fail:
 	for (i = 0; i < rx->num_rings; i++)
 		kfree(edma_rxdesc_irq_name[i]);
@@ -503,6 +658,7 @@ static int edma_hw_configure(void)
 		edma_cfg_tx_disable_interrupts(i);
 
 	edma_cfg_rx_disable_interrupts();
+	edma_disable_misc_interrupt();
 
 	edma_cfg_rx_rings_disable();
@@ -614,6 +770,7 @@ void edma_destroy(struct ppe_device *ppe
 		edma_cfg_tx_disable_interrupts(i);
 
 	edma_cfg_rx_disable_interrupts();
+	edma_disable_misc_interrupt();
 
 	/* Free IRQ for TXCMPL rings. */
 	for (i = 0; i < txcmpl->num_rings; i++) {
@@ -634,6 +791,10 @@ void edma_destroy(struct ppe_device *ppe
 	}
 	kfree(edma_rxdesc_irq_name);
 
+	/* Free Misc IRQ */
+	synchronize_irq(edma_ctx->intr_info.intr_misc);
+	free_irq(edma_ctx->intr_info.intr_misc, (void *)(ppe_dev->dev));
+
 	kfree(edma_ctx->intr_info.intr_rx);
 	kfree(edma_ctx->intr_info.intr_txcmpl);
@@ -699,6 +860,7 @@ int edma_setup(struct ppe_device *ppe_de
 	}
 
 	edma_cfg_rx_enable_interrupts();
+	edma_enable_misc_interrupt();
 
 	dev_info(dev, "EDMA configuration successful\n");
--- a/drivers/net/ethernet/qualcomm/ppe/edma.h
+++ b/drivers/net/ethernet/qualcomm/ppe/edma.h
@@ -47,6 +47,30 @@ enum ppe_queue_class_type {
 };
 
 /**
+ * struct edma_err_stats - EDMA error stats
+ * @edma_axi_read_err: AXI read error
+ * @edma_axi_write_err: AXI write error
+ * @edma_rxdesc_fifo_full: Rx desc FIFO full error
+ * @edma_rx_buf_size_err: Rx buffer size too small error
+ * @edma_tx_sram_full: Tx packet SRAM buffer full error
+ * @edma_tx_data_len_err: Tx data length error
+ * @edma_tx_timeout: Tx timeout error
+ * @edma_txcmpl_buf_full: Tx completion buffer full error
+ * @syncp: Synchronization for the stats above
+ */
+struct edma_err_stats {
+	u64 edma_axi_read_err;
+	u64 edma_axi_write_err;
+	u64 edma_rxdesc_fifo_full;
+	u64 edma_rx_buf_size_err;
+	u64 edma_tx_sram_full;
+	u64 edma_tx_data_len_err;
+	u64 edma_tx_timeout;
+	u64 edma_txcmpl_buf_full;
+	struct u64_stats_sync syncp;
+};
+
+/**
  * struct edma_ring_info - EDMA ring data structure.
  * @max_rings: Maximum number of rings
  * @ring_start: Ring start ID
@@ -107,6 +131,7 @@ struct edma_intr_info {
  * @rx_rings: Rx Desc Rings, SW is consumer
  * @tx_rings: Tx Descriptor Ring, SW is producer
  * @txcmpl_rings: Tx complete Ring, SW is consumer
+ * @err_stats: Per CPU error statistics
 * @rx_page_mode: Page mode enabled or disabled
 * @rx_buf_size: Rx buffer size for Jumbo MRU
 * @tx_requeue_stop: Tx requeue stop enabled or disabled
@@ -121,6 +146,7 @@ struct edma_context {
 	struct edma_rxdesc_ring *rx_rings;
 	struct edma_txdesc_ring *tx_rings;
 	struct edma_txcmpl_ring *txcmpl_rings;
+	struct edma_err_stats __percpu *err_stats;
 	u32 rx_page_mode;
 	u32 rx_buf_size;
 	bool tx_requeue_stop;
@@ -129,8 +155,12 @@ struct edma_context {
 /* Global EDMA context */
 extern struct edma_context *edma_ctx;
 
+int edma_err_stats_alloc(void);
+void edma_err_stats_free(void);
 void edma_destroy(struct ppe_device *ppe_dev);
 int edma_setup(struct ppe_device *ppe_dev);
+void edma_debugfs_teardown(void);
+int edma_debugfs_setup(struct ppe_device *ppe_dev);
 int ppe_edma_queue_offset_config(struct ppe_device *ppe_dev,
 				 enum ppe_queue_class_type class,
 				 int index, int queue_offset);
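
The header additions end here. Before the new debugfs source, the matching reader side of the same u64_stats pattern: the show handlers below snapshot each counter inside a fetch_begin/fetch_retry loop so a concurrent writer on a 32-bit kernel is never observed half-updated. A sketch, again with hypothetical demo_* names:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical stand-in mirroring one driver counter. */
struct demo_stats {
	u64 events;
	struct u64_stats_sync syncp;
};

/* Reader side: retry the snapshot until no writer raced with it. */
static u64 demo_stats_read(struct demo_stats *s)
{
	unsigned int start;
	u64 events;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		events = s->events;
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return events;
}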
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/edma_debugfs.c
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/* EDMA debugfs routines for display of Tx/Rx counters. */
+
+#include <linux/cpumask.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/printk.h>
+
+#include "edma.h"
+
+#define EDMA_STATS_BANNER_MAX_LEN	80
+#define EDMA_RX_RING_STATS_NODE_NAME	"EDMA_RX"
+#define EDMA_TX_RING_STATS_NODE_NAME	"EDMA_TX"
+#define EDMA_ERR_STATS_NODE_NAME	"EDMA_ERR"
+
+static struct dentry *edma_dentry;
+static struct dentry *stats_dentry;
+
+static void edma_debugfs_print_banner(struct seq_file *m, char *node)
+{
+	u32 banner_char_len, i;
+
+	for (i = 0; i < EDMA_STATS_BANNER_MAX_LEN; i++)
+		seq_puts(m, "_");
+	banner_char_len = (EDMA_STATS_BANNER_MAX_LEN - (strlen(node) + 2)) / 2;
+	seq_puts(m, "\n\n");
+
+	for (i = 0; i < banner_char_len; i++)
+		seq_puts(m, "<");
+	seq_printf(m, " %s ", node);
+
+	for (i = 0; i < banner_char_len; i++)
+		seq_puts(m, ">");
+	seq_puts(m, "\n");
+
+	for (i = 0; i < EDMA_STATS_BANNER_MAX_LEN; i++)
+		seq_puts(m, "_");
+	seq_puts(m, "\n\n");
+}
+
+static int edma_debugfs_rx_rings_stats_show(struct seq_file *m,
+					    void __maybe_unused *p)
+{
+	struct edma_hw_info *hw_info = edma_ctx->hw_info;
+	struct edma_ring_info *rxfill = hw_info->rxfill;
+	struct edma_rxfill_stats *rxfill_stats;
+	struct edma_rxdesc_stats *rxdesc_stats;
+	struct edma_ring_info *rx = hw_info->rx;
+	unsigned int start;
+	u32 i;
+
+	rxfill_stats = kcalloc(rxfill->num_rings, sizeof(*rxfill_stats), GFP_KERNEL);
+	if (!rxfill_stats)
+		return -ENOMEM;
+
+	rxdesc_stats = kcalloc(rx->num_rings, sizeof(*rxdesc_stats), GFP_KERNEL);
+	if (!rxdesc_stats) {
+		kfree(rxfill_stats);
+		return -ENOMEM;
+	}
+
+	/* Get stats for Rx fill rings. */
+	for (i = 0; i < rxfill->num_rings; i++) {
+		struct edma_rxfill_ring *rxfill_ring;
+		struct edma_rxfill_stats *stats;
+
+		rxfill_ring = &edma_ctx->rxfill_rings[i];
+		stats = &rxfill_ring->rxfill_stats;
+		do {
+			start = u64_stats_fetch_begin(&stats->syncp);
+			rxfill_stats[i].alloc_failed = stats->alloc_failed;
+			rxfill_stats[i].page_alloc_failed = stats->page_alloc_failed;
+		} while (u64_stats_fetch_retry(&stats->syncp, start));
+	}
+
+	/* Get stats for Rx Desc rings. */
+	for (i = 0; i < rx->num_rings; i++) {
+		struct edma_rxdesc_ring *rxdesc_ring;
+		struct edma_rxdesc_stats *stats;
+
+		rxdesc_ring = &edma_ctx->rx_rings[i];
+		stats = &rxdesc_ring->rxdesc_stats;
+		do {
+			start = u64_stats_fetch_begin(&stats->syncp);
+			rxdesc_stats[i].src_port_inval = stats->src_port_inval;
+			rxdesc_stats[i].src_port_inval_type = stats->src_port_inval_type;
+			rxdesc_stats[i].src_port_inval_netdev = stats->src_port_inval_netdev;
+		} while (u64_stats_fetch_retry(&stats->syncp, start));
+	}
+
+	edma_debugfs_print_banner(m, EDMA_RX_RING_STATS_NODE_NAME);
+
+	seq_puts(m, "\n#EDMA RX descriptor rings stats:\n\n");
+	for (i = 0; i < rx->num_rings; i++) {
+		seq_printf(m, "\t\tEDMA RX descriptor %d ring stats:\n", i + rx->ring_start);
+		seq_printf(m, "\t\t rxdesc[%d]:src_port_inval = %llu\n",
+			   i + rx->ring_start, rxdesc_stats[i].src_port_inval);
+		seq_printf(m, "\t\t rxdesc[%d]:src_port_inval_type = %llu\n",
+			   i + rx->ring_start, rxdesc_stats[i].src_port_inval_type);
+		seq_printf(m, "\t\t rxdesc[%d]:src_port_inval_netdev = %llu\n",
+			   i + rx->ring_start,
+			   rxdesc_stats[i].src_port_inval_netdev);
+		seq_puts(m, "\n");
+	}
+
+	seq_puts(m, "\n#EDMA RX fill rings stats:\n\n");
+	for (i = 0; i < rxfill->num_rings; i++) {
+		seq_printf(m, "\t\tEDMA RX fill %d ring stats:\n", i + rxfill->ring_start);
+		seq_printf(m, "\t\t rxfill[%d]:alloc_failed = %llu\n",
+			   i + rxfill->ring_start, rxfill_stats[i].alloc_failed);
+		seq_printf(m, "\t\t rxfill[%d]:page_alloc_failed = %llu\n",
+			   i + rxfill->ring_start, rxfill_stats[i].page_alloc_failed);
+		seq_puts(m, "\n");
+	}
+
+	kfree(rxfill_stats);
+	kfree(rxdesc_stats);
+	return 0;
+}
+
+static int edma_debugfs_tx_rings_stats_show(struct seq_file *m,
+					    void __maybe_unused *p)
+{
+	struct edma_hw_info *hw_info = edma_ctx->hw_info;
+	struct edma_ring_info *txcmpl = hw_info->txcmpl;
+	struct edma_ring_info *tx = hw_info->tx;
+	struct edma_txcmpl_stats *txcmpl_stats;
+	struct edma_txdesc_stats *txdesc_stats;
+	unsigned int start;
+	u32 i;
+
+	txcmpl_stats = kcalloc(txcmpl->num_rings, sizeof(*txcmpl_stats), GFP_KERNEL);
+	if (!txcmpl_stats)
+		return -ENOMEM;
+
+	txdesc_stats = kcalloc(tx->num_rings, sizeof(*txdesc_stats), GFP_KERNEL);
+	if (!txdesc_stats) {
+		kfree(txcmpl_stats);
+		return -ENOMEM;
+	}
+
+	/* Get stats for Tx desc rings. */
+	for (i = 0; i < tx->num_rings; i++) {
+		struct edma_txdesc_ring *txdesc_ring;
+		struct edma_txdesc_stats *stats;
+
+		txdesc_ring = &edma_ctx->tx_rings[i];
+		stats = &txdesc_ring->txdesc_stats;
+		do {
+			start = u64_stats_fetch_begin(&stats->syncp);
+			txdesc_stats[i].no_desc_avail = stats->no_desc_avail;
+			txdesc_stats[i].tso_max_seg_exceed = stats->tso_max_seg_exceed;
+		} while (u64_stats_fetch_retry(&stats->syncp, start));
+	}
+
+	/* Get stats for Tx Complete rings. */
+	for (i = 0; i < txcmpl->num_rings; i++) {
+		struct edma_txcmpl_ring *txcmpl_ring;
+		struct edma_txcmpl_stats *stats;
+
+		txcmpl_ring = &edma_ctx->txcmpl_rings[i];
+		stats = &txcmpl_ring->txcmpl_stats;
+		do {
+			start = u64_stats_fetch_begin(&stats->syncp);
+			txcmpl_stats[i].invalid_buffer = stats->invalid_buffer;
+			txcmpl_stats[i].errors = stats->errors;
+			txcmpl_stats[i].desc_with_more_bit = stats->desc_with_more_bit;
+			txcmpl_stats[i].no_pending_desc = stats->no_pending_desc;
+		} while (u64_stats_fetch_retry(&stats->syncp, start));
+	}
+
+	edma_debugfs_print_banner(m, EDMA_TX_RING_STATS_NODE_NAME);
+
+	seq_puts(m, "\n#EDMA TX complete rings stats:\n\n");
+	for (i = 0; i < txcmpl->num_rings; i++) {
+		seq_printf(m, "\t\tEDMA TX complete %d ring stats:\n", i + txcmpl->ring_start);
+		seq_printf(m, "\t\t txcmpl[%d]:invalid_buffer = %llu\n",
+			   i + txcmpl->ring_start, txcmpl_stats[i].invalid_buffer);
+		seq_printf(m, "\t\t txcmpl[%d]:errors = %llu\n",
+			   i + txcmpl->ring_start, txcmpl_stats[i].errors);
+		seq_printf(m, "\t\t txcmpl[%d]:desc_with_more_bit = %llu\n",
+			   i + txcmpl->ring_start, txcmpl_stats[i].desc_with_more_bit);
+		seq_printf(m, "\t\t txcmpl[%d]:no_pending_desc = %llu\n",
+			   i + txcmpl->ring_start, txcmpl_stats[i].no_pending_desc);
+		seq_puts(m, "\n");
+	}
+
+	seq_puts(m, "\n#EDMA TX descriptor rings stats:\n\n");
+	for (i = 0; i < tx->num_rings; i++) {
+		seq_printf(m, "\t\tEDMA TX descriptor %d ring stats:\n", i + tx->ring_start);
+		seq_printf(m, "\t\t txdesc[%d]:no_desc_avail = %llu\n",
+			   i + tx->ring_start, txdesc_stats[i].no_desc_avail);
+		seq_printf(m, "\t\t txdesc[%d]:tso_max_seg_exceed = %llu\n",
+			   i + tx->ring_start, txdesc_stats[i].tso_max_seg_exceed);
+		seq_puts(m, "\n");
+	}
+
+	kfree(txcmpl_stats);
+	kfree(txdesc_stats);
+	return 0;
+}
+
+static int edma_debugfs_err_stats_show(struct seq_file *m,
+				       void __maybe_unused *p)
+{
+	struct edma_err_stats *err_stats, *pcpu_err_stats;
+	unsigned int start;
+	u32 cpu;
+
+	err_stats = kzalloc(sizeof(*err_stats), GFP_KERNEL);
+	if (!err_stats)
+		return -ENOMEM;
+
+	/* Get percpu EDMA miscellaneous stats. */
+	for_each_possible_cpu(cpu) {
+		struct edma_err_stats snap;
+
+		pcpu_err_stats = per_cpu_ptr(edma_ctx->err_stats, cpu);
+		do {
+			start = u64_stats_fetch_begin(&pcpu_err_stats->syncp);
+			snap.edma_axi_read_err = pcpu_err_stats->edma_axi_read_err;
+			snap.edma_axi_write_err = pcpu_err_stats->edma_axi_write_err;
+			snap.edma_rxdesc_fifo_full = pcpu_err_stats->edma_rxdesc_fifo_full;
+			snap.edma_rx_buf_size_err = pcpu_err_stats->edma_rx_buf_size_err;
+			snap.edma_tx_sram_full = pcpu_err_stats->edma_tx_sram_full;
+			snap.edma_tx_data_len_err = pcpu_err_stats->edma_tx_data_len_err;
+			snap.edma_tx_timeout = pcpu_err_stats->edma_tx_timeout;
+			snap.edma_txcmpl_buf_full = pcpu_err_stats->edma_txcmpl_buf_full;
+		} while (u64_stats_fetch_retry(&pcpu_err_stats->syncp, start));
+
+		/* Accumulate outside the retry loop so a forced retry
+		 * cannot double-count a CPU's contribution.
+		 */
+		err_stats->edma_axi_read_err += snap.edma_axi_read_err;
+		err_stats->edma_axi_write_err += snap.edma_axi_write_err;
+		err_stats->edma_rxdesc_fifo_full += snap.edma_rxdesc_fifo_full;
+		err_stats->edma_rx_buf_size_err += snap.edma_rx_buf_size_err;
+		err_stats->edma_tx_sram_full += snap.edma_tx_sram_full;
+		err_stats->edma_tx_data_len_err += snap.edma_tx_data_len_err;
+		err_stats->edma_tx_timeout += snap.edma_tx_timeout;
+		err_stats->edma_txcmpl_buf_full += snap.edma_txcmpl_buf_full;
+	}
+
+	edma_debugfs_print_banner(m, EDMA_ERR_STATS_NODE_NAME);
+
+	seq_puts(m, "\n#EDMA error stats:\n\n");
+	seq_printf(m, "\t\t axi read error = %llu\n",
+		   err_stats->edma_axi_read_err);
+	seq_printf(m, "\t\t axi write error = %llu\n",
+		   err_stats->edma_axi_write_err);
+	seq_printf(m, "\t\t Rx descriptor fifo full = %llu\n",
+		   err_stats->edma_rxdesc_fifo_full);
+	seq_printf(m, "\t\t Rx buffer size error = %llu\n",
+		   err_stats->edma_rx_buf_size_err);
+	seq_printf(m, "\t\t Tx SRAM full = %llu\n",
+		   err_stats->edma_tx_sram_full);
+	seq_printf(m, "\t\t Tx data length error = %llu\n",
+		   err_stats->edma_tx_data_len_err);
+	seq_printf(m, "\t\t Tx timeout = %llu\n",
+		   err_stats->edma_tx_timeout);
+	seq_printf(m, "\t\t Tx completion buffer full = %llu\n",
+		   err_stats->edma_txcmpl_buf_full);
+
+	kfree(err_stats);
+	return 0;
+}
+
+static int edma_debugfs_rx_rings_stats_open(struct inode *inode,
+					    struct file *file)
+{
+	return single_open(file, edma_debugfs_rx_rings_stats_show,
+			   inode->i_private);
+}
+
+static const struct file_operations edma_debugfs_rx_rings_file_ops = {
+	.open = edma_debugfs_rx_rings_stats_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int edma_debugfs_tx_rings_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, edma_debugfs_tx_rings_stats_show, inode->i_private);
+}
+
+static const struct file_operations edma_debugfs_tx_rings_file_ops = {
+	.open = edma_debugfs_tx_rings_stats_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int edma_debugfs_err_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, edma_debugfs_err_stats_show, inode->i_private);
+}
+
+static const struct file_operations edma_debugfs_misc_file_ops = {
+	.open = edma_debugfs_err_stats_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/**
+ * edma_debugfs_teardown - EDMA debugfs teardown.
+ *
+ * EDMA debugfs teardown and free stats memory.
+ */
+void edma_debugfs_teardown(void)
+{
+	/* Free EDMA miscellaneous stats memory */
+	edma_err_stats_free();
+
+	debugfs_remove_recursive(edma_dentry);
+	edma_dentry = NULL;
+	stats_dentry = NULL;
+}
+
+/**
+ * edma_debugfs_setup - EDMA debugfs setup.
+ * @ppe_dev: PPE device
+ *
+ * EDMA debugfs setup.
+ */
+int edma_debugfs_setup(struct ppe_device *ppe_dev)
+{
+	edma_dentry = debugfs_create_dir("edma", ppe_dev->debugfs_root);
+	if (IS_ERR(edma_dentry)) {
+		pr_err("Unable to create edma directory in debugfs\n");
+		goto debugfs_dir_failed;
+	}
+
+	stats_dentry = debugfs_create_dir("stats", edma_dentry);
+	if (IS_ERR(stats_dentry)) {
+		pr_err("Unable to create stats directory in debugfs\n");
+		goto debugfs_dir_failed;
+	}
+
+	if (IS_ERR(debugfs_create_file("rx_ring_stats", 0444, stats_dentry,
+				       NULL, &edma_debugfs_rx_rings_file_ops))) {
+		pr_err("Unable to create Rx rings statistics file entry in debugfs\n");
+		goto debugfs_dir_failed;
+	}
+
+	if (IS_ERR(debugfs_create_file("tx_ring_stats", 0444, stats_dentry,
+				       NULL, &edma_debugfs_tx_rings_file_ops))) {
+		pr_err("Unable to create Tx rings statistics file entry in debugfs\n");
+		goto debugfs_dir_failed;
+	}
+
+	/* Allocate memory for EDMA miscellaneous stats */
+	if (edma_err_stats_alloc() < 0) {
+		pr_err("Unable to allocate miscellaneous percpu stats\n");
+		goto debugfs_dir_failed;
+	}
+
+	if (IS_ERR(debugfs_create_file("err_stats", 0444, stats_dentry,
+				       NULL, &edma_debugfs_misc_file_ops))) {
+		pr_err("Unable to create EDMA miscellaneous statistics file entry in debugfs\n");
+		goto debugfs_dir_failed;
+	}
+
+	return 0;
+
+debugfs_dir_failed:
+	edma_err_stats_free();
+	debugfs_remove_recursive(edma_dentry);
+	edma_dentry = NULL;
+	stats_dentry = NULL;
+	return -ENOMEM;
+}
--- a/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c
@@ -7,9 +7,11 @@
 
 #include <linux/bitfield.h>
 #include <linux/debugfs.h>
+#include <linux/netdevice.h>
 #include <linux/regmap.h>
 #include <linux/seq_file.h>
 
+#include "edma.h"
 #include "ppe.h"
 #include "ppe_config.h"
 #include "ppe_debugfs.h"
@@ -678,15 +680,30 @@ static const struct file_operations ppe_
 
 void ppe_debugfs_setup(struct ppe_device *ppe_dev)
 {
+	int ret;
+
 	ppe_dev->debugfs_root = debugfs_create_dir("ppe", NULL);
+	if (IS_ERR(ppe_dev->debugfs_root)) {
+		dev_err(ppe_dev->dev, "Error in PPE debugfs setup\n");
+		return;
+	}
+
 	debugfs_create_file("packet_counters", 0444,
 			    ppe_dev->debugfs_root,
 			    ppe_dev,
 			    &ppe_debugfs_packet_counter_fops);
+
+	ret = edma_debugfs_setup(ppe_dev);
+	if (ret) {
+		dev_err(ppe_dev->dev, "Error in EDMA debugfs setup, error: %d\n", ret);
+		debugfs_remove_recursive(ppe_dev->debugfs_root);
+		ppe_dev->debugfs_root = NULL;
+	}
 }
 
 void ppe_debugfs_teardown(struct ppe_device *ppe_dev)
 {
+	edma_debugfs_teardown();
 	debugfs_remove_recursive(ppe_dev->debugfs_root);
 	ppe_dev->debugfs_root = NULL;
 }
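
With the patch applied, the new counters appear under the "ppe" debugfs root created above. A userspace sketch that dumps one of the files, assuming debugfs is mounted at the conventional /sys/kernel/debug:

#include <stdio.h>

int main(void)
{
	char buf[256];
	FILE *f;

	/* Path follows the ppe/edma/stats hierarchy created by this patch. */
	f = fopen("/sys/kernel/debug/ppe/edma/stats/err_stats", "r");
	if (!f) {
		perror("fopen");
		return 1;
	}

	while (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);

	fclose(f);
	return 0;
}

The rx_ring_stats and tx_ring_stats files in the same directory can be read the same way.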