736-01-net-ethernet-mtk_eth_soc-add-code-for-offloading-flo.patch
From: Felix Fietkau <[email protected]>
Date: Mon, 20 Mar 2023 11:44:30 +0100
Subject: [PATCH] net: ethernet: mtk_eth_soc: add code for offloading flows
 from wlan devices

WED version 2 (on MT7986 and later) can offload flows originating from wireless
devices. In order to make that work, ndo_setup_tc needs to be implemented on
the netdevs. This adds the required code to offload flows coming in from WED,
while keeping track of the incoming wed index used for selecting the correct
PPE device.

Signed-off-by: Felix Fietkau <[email protected]>
---
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -1448,6 +1448,9 @@ int mtk_gmac_rgmii_path_setup(struct mtk
 int mtk_eth_offload_init(struct mtk_eth *eth);
 int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
		     void *type_data);
+int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
+			 int ppe_index);
+void mtk_flow_offload_cleanup(struct mtk_eth *eth, struct list_head *list);
 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -235,7 +235,8 @@ out:
 }
 
 static int
-mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
+mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
+			 int ppe_index)
 {
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_action_entry *act;
@@ -452,6 +453,7 @@ mtk_flow_offload_replace(struct mtk_eth
	entry->cookie = f->cookie;
	memcpy(&entry->data, &foe, sizeof(entry->data));
	entry->wed_index = wed_index;
+	entry->ppe_index = ppe_index;
 
	err = mtk_foe_entry_commit(eth->ppe[entry->ppe_index], entry);
	if (err < 0)
@@ -520,25 +522,15 @@ mtk_flow_offload_stats(struct mtk_eth *e
 
 static DEFINE_MUTEX(mtk_flow_offload_mutex);
 
-static int
-mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
+			 int ppe_index)
 {
-	struct flow_cls_offload *cls = type_data;
-	struct net_device *dev = cb_priv;
-	struct mtk_mac *mac = netdev_priv(dev);
-	struct mtk_eth *eth = mac->hw;
	int err;
 
-	if (!tc_can_offload(dev))
-		return -EOPNOTSUPP;
-
-	if (type != TC_SETUP_CLSFLOWER)
-		return -EOPNOTSUPP;
-
	mutex_lock(&mtk_flow_offload_mutex);
	switch (cls->command) {
	case FLOW_CLS_REPLACE:
-		err = mtk_flow_offload_replace(eth, cls);
+		err = mtk_flow_offload_replace(eth, cls, ppe_index);
		break;
	case FLOW_CLS_DESTROY:
		err = mtk_flow_offload_destroy(eth, cls);
@@ -556,6 +548,23 @@ mtk_eth_setup_tc_block_cb(enum tc_setup_
 }
 
 static int
+mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+	struct flow_cls_offload *cls = type_data;
+	struct net_device *dev = cb_priv;
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct mtk_eth *eth = mac->hw;
+
+	if (!tc_can_offload(dev))
+		return -EOPNOTSUPP;
+
+	if (type != TC_SETUP_CLSFLOWER)
+		return -EOPNOTSUPP;
+
+	return mtk_flow_offload_cmd(eth, cls, 0);
+}
+
+static int
 mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
 {
	struct mtk_mac *mac = netdev_priv(dev);
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -13,6 +13,8 @@
 #include <linux/mfd/syscon.h>
 #include <linux/debugfs.h>
 #include <linux/soc/mediatek/mtk_wed.h>
+#include <net/flow_offload.h>
+#include <net/pkt_cls.h>
 #include "mtk_eth_soc.h"
 #include "mtk_wed_regs.h"
 #include "mtk_wed.h"
@@ -41,6 +43,11 @@
 static struct mtk_wed_hw *hw_list[2];
 static DEFINE_MUTEX(hw_lock);
 
+struct mtk_wed_flow_block_priv {
+	struct mtk_wed_hw *hw;
+	struct net_device *dev;
+};
+
 static void
 wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
 {
@@ -1760,6 +1767,99 @@ out:
	mutex_unlock(&hw_lock);
 }
 
+static int
+mtk_wed_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+	struct mtk_wed_flow_block_priv *priv = cb_priv;
+	struct flow_cls_offload *cls = type_data;
+	struct mtk_wed_hw *hw = priv->hw;
+
+	if (!tc_can_offload(priv->dev))
+		return -EOPNOTSUPP;
+
+	if (type != TC_SETUP_CLSFLOWER)
+		return -EOPNOTSUPP;
+
+	return mtk_flow_offload_cmd(hw->eth, cls, hw->index);
+}
+
+static int
+mtk_wed_setup_tc_block(struct mtk_wed_hw *hw, struct net_device *dev,
+		       struct flow_block_offload *f)
+{
+	struct mtk_wed_flow_block_priv *priv;
+	static LIST_HEAD(block_cb_list);
+	struct flow_block_cb *block_cb;
+	struct mtk_eth *eth = hw->eth;
+	flow_setup_cb_t *cb;
+
+	if (!eth->soc->offload_version)
+		return -EOPNOTSUPP;
+
+	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+		return -EOPNOTSUPP;
+
+	cb = mtk_wed_setup_tc_block_cb;
+	f->driver_block_list = &block_cb_list;
+
+	switch (f->command) {
+	case FLOW_BLOCK_BIND:
+		block_cb = flow_block_cb_lookup(f->block, cb, dev);
+		if (block_cb) {
+			flow_block_cb_incref(block_cb);
+			return 0;
+		}
+
+		priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+		if (!priv)
+			return -ENOMEM;
+
+		priv->hw = hw;
+		priv->dev = dev;
+		block_cb = flow_block_cb_alloc(cb, dev, priv, NULL);
+		if (IS_ERR(block_cb)) {
+			kfree(priv);
+			return PTR_ERR(block_cb);
+		}
+
+		flow_block_cb_incref(block_cb);
+		flow_block_cb_add(block_cb, f);
+		list_add_tail(&block_cb->driver_list, &block_cb_list);
+		return 0;
+	case FLOW_BLOCK_UNBIND:
+		block_cb = flow_block_cb_lookup(f->block, cb, dev);
+		if (!block_cb)
+			return -ENOENT;
+
+		if (!flow_block_cb_decref(block_cb)) {
+			flow_block_cb_remove(block_cb, f);
+			list_del(&block_cb->driver_list);
+			kfree(block_cb->cb_priv);
+		}
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int
+mtk_wed_setup_tc(struct mtk_wed_device *wed, struct net_device *dev,
+		 enum tc_setup_type type, void *type_data)
+{
+	struct mtk_wed_hw *hw = wed->hw;
+
+	if (hw->version < 2)
+		return -EOPNOTSUPP;
+
+	switch (type) {
+	case TC_SETUP_BLOCK:
+	case TC_SETUP_FT:
+		return mtk_wed_setup_tc_block(hw, dev, type_data);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
 void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
		    void __iomem *wdma, phys_addr_t wdma_phy,
		    int index)
@@ -1779,6 +1879,7 @@ void mtk_wed_add_hw(struct device_node *
		.irq_set_mask = mtk_wed_irq_set_mask,
		.detach = mtk_wed_detach,
		.ppe_check = mtk_wed_ppe_check,
+		.setup_tc = mtk_wed_setup_tc,
	};
	struct device_node *eth_np = eth->dev->of_node;
	struct platform_device *pdev;
--- a/include/linux/soc/mediatek/mtk_wed.h
+++ b/include/linux/soc/mediatek/mtk_wed.h
@@ -6,6 +6,7 @@
 #include <linux/regmap.h>
 #include <linux/pci.h>
 #include <linux/skbuff.h>
+#include <linux/netdevice.h>
 
 #define MTK_WED_TX_QUEUES		2
 #define MTK_WED_RX_QUEUES		2
@@ -180,6 +181,8 @@ struct mtk_wed_ops {
 
	u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask);
	void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
+	int (*setup_tc)(struct mtk_wed_device *wed, struct net_device *dev,
+			enum tc_setup_type type, void *type_data);
 };
 
 extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
@@ -238,6 +241,8 @@ mtk_wed_get_rx_capa(struct mtk_wed_devic
	(_dev)->ops->msg_update(_dev, _id, _msg, _len)
 #define mtk_wed_device_stop(_dev) (_dev)->ops->stop(_dev)
 #define mtk_wed_device_dma_reset(_dev) (_dev)->ops->reset_dma(_dev)
+#define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) \
+	(_dev)->ops->setup_tc(_dev, _netdev, _type, _type_data)
 #else
 static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
 {
@@ -256,6 +261,7 @@ static inline bool mtk_wed_device_active
 #define mtk_wed_device_update_msg(_dev, _id, _msg, _len) -ENODEV
 #define mtk_wed_device_stop(_dev) do {} while (0)
 #define mtk_wed_device_dma_reset(_dev) do {} while (0)
+#define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) -EOPNOTSUPP
 #endif
 
 #endif