751-03-v6.4-net-ethernet-mtk_eth_soc-improve-keeping-track-of-of.patch

From: Felix Fietkau <nbd@nbd.name>
Date: Thu, 23 Mar 2023 10:24:11 +0100
Subject: [PATCH] net: ethernet: mtk_eth_soc: improve keeping track of
 offloaded flows

Unify tracking of L2 and L3 flows. Use the generic list field in struct
mtk_flow_entry for tracking L2 subflows. Preparation for improving
flow accounting support.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
 drivers/net/ethernet/mediatek/mtk_ppe.c | 162 ++++++++++++------------
 drivers/net/ethernet/mediatek/mtk_ppe.h |  15 +--
 2 files changed, 86 insertions(+), 91 deletions(-)
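
Note: the point of the rework is easiest to see from the mtk_ppe.h hunk at
the end. A minimal sketch of how the unified lists might be walked after
this patch (illustration only, not part of the patch; the helper name
mtk_ppe_debug_dump_l2 and the pr_debug output are assumptions, while the
l2_flows, l2_list, hash and data fields come from the diff below):

	/* Walk the L2 subflows hanging off an L2 flow entry. After this
	 * patch, subflows are linked through the generic l2_list node and
	 * carry their own struct mtk_foe_entry in ->data, so accounting
	 * code can treat L2 subflows like regular flows.
	 */
	static void mtk_ppe_debug_dump_l2(struct mtk_flow_entry *entry)
	{
		struct mtk_flow_entry *cur;

		hlist_for_each_entry(cur, &entry->l2_flows, l2_list)
			pr_debug("subflow hash=%04x ib1=%08x\n",
				 cur->hash, cur->data.ib1);
	}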
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -477,42 +477,43 @@ int mtk_foe_entry_set_queue(struct mtk_e
 	return 0;
 }
 
+static int
+mtk_flow_entry_match_len(struct mtk_eth *eth, struct mtk_foe_entry *entry)
+{
+	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
+
+	if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
+		return offsetof(struct mtk_foe_entry, ipv6._rsv);
+	else
+		return offsetof(struct mtk_foe_entry, ipv4.ib2);
+}
+
 static bool
 mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
-		     struct mtk_foe_entry *data)
+		     struct mtk_foe_entry *data, int len)
 {
-	int type, len;
-
 	if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
 		return false;
 
-	type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
-	if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
-		len = offsetof(struct mtk_foe_entry, ipv6._rsv);
-	else
-		len = offsetof(struct mtk_foe_entry, ipv4.ib2);
-
 	return !memcmp(&entry->data.data, &data->data, len - 4);
 }
 
 static void
-__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
+		      bool set_state)
 {
-	struct hlist_head *head;
 	struct hlist_node *tmp;
 
 	if (entry->type == MTK_FLOW_TYPE_L2) {
 		rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
 				       mtk_flow_l2_ht_params);
 
-		head = &entry->l2_flows;
-		hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
-			__mtk_foe_entry_clear(ppe, entry);
+		hlist_for_each_entry_safe(entry, tmp, &entry->l2_flows, l2_list)
+			__mtk_foe_entry_clear(ppe, entry, set_state);
 		return;
 	}
 
-	hlist_del_init(&entry->list);
-	if (entry->hash != 0xffff) {
+	if (entry->hash != 0xffff && set_state) {
 		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);
 
 		hwe->ib1 &= ~MTK_FOE_IB1_STATE;
@@ -533,7 +534,8 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp
 	if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
 		return;
 
-	hlist_del_init(&entry->l2_data.list);
+	hlist_del_init(&entry->l2_list);
+	hlist_del_init(&entry->list);
 	kfree(entry);
 }
 
@@ -549,66 +551,55 @@ static int __mtk_foe_entry_idle_time(str
 	return now - timestamp;
 }
 
+static bool
+mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+	struct mtk_foe_entry foe = {};
+	struct mtk_foe_entry *hwe;
+	u16 hash = entry->hash;
+	int len;
+
+	if (hash == 0xffff)
+		return false;
+
+	hwe = mtk_foe_get_entry(ppe, hash);
+	len = mtk_flow_entry_match_len(ppe->eth, &entry->data);
+	memcpy(&foe, hwe, len);
+
+	if (!mtk_flow_entry_match(ppe->eth, entry, &foe, len) ||
+	    FIELD_GET(MTK_FOE_IB1_STATE, foe.ib1) != MTK_FOE_STATE_BIND)
+		return false;
+
+	entry->data.ib1 = foe.ib1;
+
+	return true;
+}
+
 static void
 mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 {
 	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
 	struct mtk_flow_entry *cur;
-	struct mtk_foe_entry *hwe;
 	struct hlist_node *tmp;
 	int idle;
 
 	idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
-	hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
+	hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_list) {
 		int cur_idle;
-		u32 ib1;
-
-		hwe = mtk_foe_get_entry(ppe, cur->hash);
-		ib1 = READ_ONCE(hwe->ib1);
-		if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
-			cur->hash = 0xffff;
-			__mtk_foe_entry_clear(ppe, cur);
+
+		if (!mtk_flow_entry_update(ppe, cur)) {
+			__mtk_foe_entry_clear(ppe, entry, false);
 			continue;
 		}
 
-		cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
+		cur_idle = __mtk_foe_entry_idle_time(ppe, cur->data.ib1);
 		if (cur_idle >= idle)
 			continue;
 
 		idle = cur_idle;
 		entry->data.ib1 &= ~ib1_ts_mask;
-		entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
-	}
-}
-
-static void
-mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
-{
-	struct mtk_foe_entry foe = {};
-	struct mtk_foe_entry *hwe;
-
-	spin_lock_bh(&ppe_lock);
-
-	if (entry->type == MTK_FLOW_TYPE_L2) {
-		mtk_flow_entry_update_l2(ppe, entry);
-		goto out;
+		entry->data.ib1 |= cur->data.ib1 & ib1_ts_mask;
 	}
-
-	if (entry->hash == 0xffff)
-		goto out;
-
-	hwe = mtk_foe_get_entry(ppe, entry->hash);
-	memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
-	if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) {
-		entry->hash = 0xffff;
-		goto out;
-	}
-
-	entry->data.ib1 = foe.ib1;
-
-out:
-	spin_unlock_bh(&ppe_lock);
 }
 
 static void
@@ -651,7 +642,8 @@ __mtk_foe_entry_commit(struct mtk_ppe *p
 void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 {
 	spin_lock_bh(&ppe_lock);
-	__mtk_foe_entry_clear(ppe, entry);
+	__mtk_foe_entry_clear(ppe, entry, true);
+	hlist_del_init(&entry->list);
 	spin_unlock_bh(&ppe_lock);
 }
 
@@ -698,8 +690,8 @@ mtk_foe_entry_commit_subflow(struct mtk_
 {
 	const struct mtk_soc_data *soc = ppe->eth->soc;
 	struct mtk_flow_entry *flow_info;
-	struct mtk_foe_entry foe = {}, *hwe;
 	struct mtk_foe_mac_info *l2;
+	struct mtk_foe_entry *hwe;
 	u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
 	int type;
 
@@ -707,30 +699,30 @@ mtk_foe_entry_commit_subflow(struct mtk_
 	if (!flow_info)
 		return;
 
-	flow_info->l2_data.base_flow = entry;
 	flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
 	flow_info->hash = hash;
 	hlist_add_head(&flow_info->list,
 		       &ppe->foe_flow[hash / soc->hash_offset]);
-	hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
+	hlist_add_head(&flow_info->l2_list, &entry->l2_flows);
 
 	hwe = mtk_foe_get_entry(ppe, hash);
-	memcpy(&foe, hwe, soc->foe_entry_size);
-	foe.ib1 &= ib1_mask;
-	foe.ib1 |= entry->data.ib1 & ~ib1_mask;
+	memcpy(&flow_info->data, hwe, soc->foe_entry_size);
+	flow_info->data.ib1 &= ib1_mask;
+	flow_info->data.ib1 |= entry->data.ib1 & ~ib1_mask;
 
-	l2 = mtk_foe_entry_l2(ppe->eth, &foe);
+	l2 = mtk_foe_entry_l2(ppe->eth, &flow_info->data);
 	memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));
 
-	type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1);
+	type = mtk_get_ib1_pkt_type(ppe->eth, flow_info->data.ib1);
 	if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
-		memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
+		memcpy(&flow_info->data.ipv4.new, &flow_info->data.ipv4.orig,
+		       sizeof(flow_info->data.ipv4.new));
 	else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
 		l2->etype = ETH_P_IPV6;
 
-	*mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2;
+	*mtk_foe_entry_ib2(ppe->eth, &flow_info->data) = entry->data.bridge.ib2;
 
-	__mtk_foe_entry_commit(ppe, &foe, hash);
+	__mtk_foe_entry_commit(ppe, &flow_info->data, hash);
 }
 
 void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
@@ -740,9 +732,11 @@ void __mtk_ppe_check_skb(struct mtk_ppe
 	struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
 	struct mtk_flow_entry *entry;
 	struct mtk_foe_bridge key = {};
+	struct mtk_foe_entry foe = {};
 	struct hlist_node *n;
 	struct ethhdr *eh;
 	bool found = false;
+	int entry_len;
 	u8 *tag;
 
 	spin_lock_bh(&ppe_lock);
@@ -750,20 +744,14 @@ void __mtk_ppe_check_skb(struct mtk_ppe
 	if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
 		goto out;
 
-	hlist_for_each_entry_safe(entry, n, head, list) {
-		if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
-			if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
-				     MTK_FOE_STATE_BIND))
-				continue;
-
-			entry->hash = 0xffff;
-			__mtk_foe_entry_clear(ppe, entry);
-			continue;
-		}
+	entry_len = mtk_flow_entry_match_len(ppe->eth, hwe);
+	memcpy(&foe, hwe, entry_len);
 
-		if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) {
+	hlist_for_each_entry_safe(entry, n, head, list) {
+		if (found ||
+		    !mtk_flow_entry_match(ppe->eth, entry, &foe, entry_len)) {
 			if (entry->hash != 0xffff)
-				entry->hash = 0xffff;
+				__mtk_foe_entry_clear(ppe, entry, false);
 			continue;
 		}
 
@@ -814,9 +802,17 @@ out:
 
 int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 {
-	mtk_flow_entry_update(ppe, entry);
+	int idle;
+
+	spin_lock_bh(&ppe_lock);
+	if (entry->type == MTK_FLOW_TYPE_L2)
+		mtk_flow_entry_update_l2(ppe, entry);
+	else
+		mtk_flow_entry_update(ppe, entry);
+	idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+	spin_unlock_bh(&ppe_lock);
 
-	return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+	return idle;
 }
 
 int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -286,7 +286,12 @@ enum {
 
 struct mtk_flow_entry {
 	union {
-		struct hlist_node list;
+		/* regular flows + L2 subflows */
+		struct {
+			struct hlist_node list;
+			struct hlist_node l2_list;
+		};
+		/* L2 flows */
 		struct {
 			struct rhash_head l2_node;
 			struct hlist_head l2_flows;
@@ -296,13 +301,7 @@ struct mtk_flow_entry {
 	s8 wed_index;
 	u8 ppe_index;
 	u16 hash;
-	union {
-		struct mtk_foe_entry data;
-		struct {
-			struct mtk_flow_entry *base_flow;
-			struct hlist_node list;
-		} l2_data;
-	};
+	struct mtk_foe_entry data;
 	struct rhash_head node;
 	unsigned long cookie;
 };