751-04-v6.4-net-ethernet-mediatek-fix-ppe-flow-accounting-for-L2.patch 9.9 KB

  1. From: Felix Fietkau <[email protected]>
  2. Date: Thu, 23 Mar 2023 11:05:22 +0100
  3. Subject: [PATCH] net: ethernet: mediatek: fix ppe flow accounting for L2
  4. flows
  5. For L2 flows, the packet/byte counters should report the sum of the
  6. counters of their subflows, both current and expired.
  7. In order to make this work, change the way that accounting data is tracked.
  8. Reset counters when a flow enters bind. Once it expires (or enters unbind),
  9. store the last counter value in struct mtk_flow_entry.
  10. Signed-off-by: Felix Fietkau <[email protected]>
  11. ---
  12. --- a/drivers/net/ethernet/mediatek/mtk_ppe.c
  13. +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
  14. @@ -80,9 +80,9 @@ static int mtk_ppe_mib_wait_busy(struct
  15. int ret;
  16. u32 val;
  17. - ret = readl_poll_timeout(ppe->base + MTK_PPE_MIB_SER_CR, val,
  18. - !(val & MTK_PPE_MIB_SER_CR_ST),
  19. - 20, MTK_PPE_WAIT_TIMEOUT_US);
  20. + ret = readl_poll_timeout_atomic(ppe->base + MTK_PPE_MIB_SER_CR, val,
  21. + !(val & MTK_PPE_MIB_SER_CR_ST),
  22. + 20, MTK_PPE_WAIT_TIMEOUT_US);
  23. if (ret)
  24. dev_err(ppe->dev, "MIB table busy");
  25. @@ -90,17 +90,31 @@ static int mtk_ppe_mib_wait_busy(struct
  26. return ret;
  27. }
  28. -static int mtk_mib_entry_read(struct mtk_ppe *ppe, u16 index, u64 *bytes, u64 *packets)
  29. +static inline struct mtk_foe_accounting *
  30. +mtk_ppe_acct_data(struct mtk_ppe *ppe, u16 index)
  31. +{
  32. + if (!ppe->acct_table)
  33. + return NULL;
  34. +
  35. + return ppe->acct_table + index * sizeof(struct mtk_foe_accounting);
  36. +}
  37. +
  38. +struct mtk_foe_accounting *mtk_ppe_mib_entry_read(struct mtk_ppe *ppe, u16 index)
  39. {
  40. u32 val, cnt_r0, cnt_r1, cnt_r2;
  41. + struct mtk_foe_accounting *acct;
  42. int ret;
  43. val = FIELD_PREP(MTK_PPE_MIB_SER_CR_ADDR, index) | MTK_PPE_MIB_SER_CR_ST;
  44. ppe_w32(ppe, MTK_PPE_MIB_SER_CR, val);
  45. + acct = mtk_ppe_acct_data(ppe, index);
  46. + if (!acct)
  47. + return NULL;
  48. +
  49. ret = mtk_ppe_mib_wait_busy(ppe);
  50. if (ret)
  51. - return ret;
  52. + return acct;
  53. cnt_r0 = readl(ppe->base + MTK_PPE_MIB_SER_R0);
  54. cnt_r1 = readl(ppe->base + MTK_PPE_MIB_SER_R1);
  55. @@ -109,19 +123,19 @@ static int mtk_mib_entry_read(struct mtk
  56. if (mtk_is_netsys_v3_or_greater(ppe->eth)) {
  57. /* 64 bit for each counter */
  58. u32 cnt_r3 = readl(ppe->base + MTK_PPE_MIB_SER_R3);
  59. - *bytes = ((u64)cnt_r1 << 32) | cnt_r0;
  60. - *packets = ((u64)cnt_r3 << 32) | cnt_r2;
  61. + acct->bytes += ((u64)cnt_r1 << 32) | cnt_r0;
  62. + acct->packets += ((u64)cnt_r3 << 32) | cnt_r2;
  63. } else {
  64. /* 48 bit byte counter, 40 bit packet counter */
  65. u32 byte_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R0_BYTE_CNT_LOW, cnt_r0);
  66. u32 byte_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R1_BYTE_CNT_HIGH, cnt_r1);
  67. u32 pkt_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R1_PKT_CNT_LOW, cnt_r1);
  68. u32 pkt_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH, cnt_r2);
  69. - *bytes = ((u64)byte_cnt_high << 32) | byte_cnt_low;
  70. - *packets = ((u64)pkt_cnt_high << 16) | pkt_cnt_low;
  71. + acct->bytes += ((u64)byte_cnt_high << 32) | byte_cnt_low;
  72. + acct->packets += ((u64)pkt_cnt_high << 16) | pkt_cnt_low;
  73. }
  74. - return 0;
  75. + return acct;
  76. }
  77. static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
  78. @@ -520,14 +534,6 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp
  79. hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
  80. dma_wmb();
  81. mtk_ppe_cache_clear(ppe);
  82. -
  83. - if (ppe->accounting) {
  84. - struct mtk_foe_accounting *acct;
  85. -
  86. - acct = ppe->acct_table + entry->hash * sizeof(*acct);
  87. - acct->packets = 0;
  88. - acct->bytes = 0;
  89. - }
  90. }
  91. entry->hash = 0xffff;
  92. @@ -552,11 +558,14 @@ static int __mtk_foe_entry_idle_time(str
  93. }
  94. static bool
  95. -mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
  96. +mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
  97. + u64 *packets, u64 *bytes)
  98. {
  99. + struct mtk_foe_accounting *acct;
  100. struct mtk_foe_entry foe = {};
  101. struct mtk_foe_entry *hwe;
  102. u16 hash = entry->hash;
  103. + bool ret = false;
  104. int len;
  105. if (hash == 0xffff)
  106. @@ -567,18 +576,35 @@ mtk_flow_entry_update(struct mtk_ppe *pp
  107. memcpy(&foe, hwe, len);
  108. if (!mtk_flow_entry_match(ppe->eth, entry, &foe, len) ||
  109. - FIELD_GET(MTK_FOE_IB1_STATE, foe.ib1) != MTK_FOE_STATE_BIND)
  110. - return false;
  111. + FIELD_GET(MTK_FOE_IB1_STATE, foe.ib1) != MTK_FOE_STATE_BIND) {
  112. + acct = mtk_ppe_acct_data(ppe, hash);
  113. + if (acct) {
  114. + entry->prev_packets += acct->packets;
  115. + entry->prev_bytes += acct->bytes;
  116. + }
  117. +
  118. + goto out;
  119. + }
  120. entry->data.ib1 = foe.ib1;
  121. + acct = mtk_ppe_mib_entry_read(ppe, hash);
  122. + ret = true;
  123. +
  124. +out:
  125. + if (acct) {
  126. + *packets += acct->packets;
  127. + *bytes += acct->bytes;
  128. + }
  129. - return true;
  130. + return ret;
  131. }
  132. static void
  133. mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
  134. {
  135. u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
  136. + u64 *packets = &entry->packets;
  137. + u64 *bytes = &entry->bytes;
  138. struct mtk_flow_entry *cur;
  139. struct hlist_node *tmp;
  140. int idle;
  141. @@ -587,7 +613,9 @@ mtk_flow_entry_update_l2(struct mtk_ppe
  142. hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_list) {
  143. int cur_idle;
  144. - if (!mtk_flow_entry_update(ppe, cur)) {
  145. + if (!mtk_flow_entry_update(ppe, cur, packets, bytes)) {
  146. + entry->prev_packets += cur->prev_packets;
  147. + entry->prev_bytes += cur->prev_bytes;
  148. __mtk_foe_entry_clear(ppe, entry, false);
  149. continue;
  150. }
  151. @@ -602,10 +630,29 @@ mtk_flow_entry_update_l2(struct mtk_ppe
  152. }
  153. }
  154. +void mtk_foe_entry_get_stats(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
  155. + int *idle)
  156. +{
  157. + entry->packets = entry->prev_packets;
  158. + entry->bytes = entry->prev_bytes;
  159. +
  160. + spin_lock_bh(&ppe_lock);
  161. +
  162. + if (entry->type == MTK_FLOW_TYPE_L2)
  163. + mtk_flow_entry_update_l2(ppe, entry);
  164. + else
  165. + mtk_flow_entry_update(ppe, entry, &entry->packets, &entry->bytes);
  166. +
  167. + *idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
  168. +
  169. + spin_unlock_bh(&ppe_lock);
  170. +}
  171. +
  172. static void
  173. __mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
  174. u16 hash)
  175. {
  176. + struct mtk_foe_accounting *acct;
  177. struct mtk_eth *eth = ppe->eth;
  178. u16 timestamp = mtk_eth_timestamp(eth);
  179. struct mtk_foe_entry *hwe;
  180. @@ -636,6 +683,12 @@ __mtk_foe_entry_commit(struct mtk_ppe *p
  181. dma_wmb();
  182. + acct = mtk_ppe_mib_entry_read(ppe, hash);
  183. + if (acct) {
  184. + acct->packets = 0;
  185. + acct->bytes = 0;
  186. + }
  187. +
  188. mtk_ppe_cache_clear(ppe);
  189. }
  190. @@ -800,21 +853,6 @@ out:
  191. spin_unlock_bh(&ppe_lock);
  192. }
  193. -int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
  194. -{
  195. - int idle;
  196. -
  197. - spin_lock_bh(&ppe_lock);
  198. - if (entry->type == MTK_FLOW_TYPE_L2)
  199. - mtk_flow_entry_update_l2(ppe, entry);
  200. - else
  201. - mtk_flow_entry_update(ppe, entry);
  202. - idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
  203. - spin_unlock_bh(&ppe_lock);
  204. -
  205. - return idle;
  206. -}
  207. -
  208. int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
  209. {
  210. if (!ppe)
  211. @@ -842,32 +880,6 @@ int mtk_ppe_prepare_reset(struct mtk_ppe
  212. return mtk_ppe_wait_busy(ppe);
  213. }
  214. -struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index,
  215. - struct mtk_foe_accounting *diff)
  216. -{
  217. - struct mtk_foe_accounting *acct;
  218. - int size = sizeof(struct mtk_foe_accounting);
  219. - u64 bytes, packets;
  220. -
  221. - if (!ppe->accounting)
  222. - return NULL;
  223. -
  224. - if (mtk_mib_entry_read(ppe, index, &bytes, &packets))
  225. - return NULL;
  226. -
  227. - acct = ppe->acct_table + index * size;
  228. -
  229. - acct->bytes += bytes;
  230. - acct->packets += packets;
  231. -
  232. - if (diff) {
  233. - diff->bytes = bytes;
  234. - diff->packets = packets;
  235. - }
  236. -
  237. - return acct;
  238. -}
  239. -
  240. struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int index)
  241. {
  242. bool accounting = eth->soc->has_accounting;
  243. --- a/drivers/net/ethernet/mediatek/mtk_ppe.h
  244. +++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
  245. @@ -304,6 +304,8 @@ struct mtk_flow_entry {
  246. struct mtk_foe_entry data;
  247. struct rhash_head node;
  248. unsigned long cookie;
  249. + u64 prev_packets, prev_bytes;
  250. + u64 packets, bytes;
  251. };
  252. struct mtk_mib_entry {
  253. @@ -348,6 +350,7 @@ void mtk_ppe_deinit(struct mtk_eth *eth)
  254. void mtk_ppe_start(struct mtk_ppe *ppe);
  255. int mtk_ppe_stop(struct mtk_ppe *ppe);
  256. int mtk_ppe_prepare_reset(struct mtk_ppe *ppe);
  257. +struct mtk_foe_accounting *mtk_ppe_mib_entry_read(struct mtk_ppe *ppe, u16 index);
  258. void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
  259. @@ -396,9 +399,8 @@ int mtk_foe_entry_set_queue(struct mtk_e
  260. unsigned int queue);
  261. int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
  262. void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
  263. -int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
  264. int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index);
  265. -struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index,
  266. - struct mtk_foe_accounting *diff);
  267. +void mtk_foe_entry_get_stats(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
  268. + int *idle);
  269. #endif
  270. --- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
  271. +++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
  272. @@ -96,7 +96,7 @@ mtk_ppe_debugfs_foe_show(struct seq_file
  273. if (bind && state != MTK_FOE_STATE_BIND)
  274. continue;
  275. - acct = mtk_foe_entry_get_mib(ppe, i, NULL);
  276. + acct = mtk_ppe_mib_entry_read(ppe, i);
  277. type = mtk_get_ib1_pkt_type(ppe->eth, entry->ib1);
  278. seq_printf(m, "%05x %s %7s", i,
  279. --- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
  280. +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
  281. @@ -501,24 +501,21 @@ static int
  282. mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
  283. {
  284. struct mtk_flow_entry *entry;
  285. - struct mtk_foe_accounting diff;
  286. - u32 idle;
  287. + u64 packets, bytes;
  288. + int idle;
  289. entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
  290. mtk_flow_ht_params);
  291. if (!entry)
  292. return -ENOENT;
  293. - idle = mtk_foe_entry_idle_time(eth->ppe[entry->ppe_index], entry);
  294. + packets = entry->packets;
  295. + bytes = entry->bytes;
  296. + mtk_foe_entry_get_stats(eth->ppe[entry->ppe_index], entry, &idle);
  297. + f->stats.pkts += entry->packets - packets;
  298. + f->stats.bytes += entry->bytes - bytes;
  299. f->stats.lastused = jiffies - idle * HZ;
  300. - if (entry->hash != 0xFFFF &&
  301. - mtk_foe_entry_get_mib(eth->ppe[entry->ppe_index], entry->hash,
  302. - &diff)) {
  303. - f->stats.pkts += diff.packets;
  304. - f->stats.bytes += diff.bytes;
  305. - }
  306. -
  307. return 0;
  308. }