790-net-ethernet-mtk_eth_soc-add-the-capability-to-run-m.patch 9.7 KB

From patchwork Thu Sep 8 19:33:39 2022
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Lorenzo Bianconi <[email protected]>
X-Patchwork-Id: 12970559
X-Patchwork-Delegate: [email protected]
Return-Path: <[email protected]>
From: Lorenzo Bianconi <[email protected]>
To: [email protected]
Cc: [email protected], [email protected], [email protected],
 [email protected], [email protected], [email protected],
 [email protected], [email protected], [email protected],
 [email protected], [email protected],
 [email protected], [email protected],
 [email protected], [email protected],
 [email protected], [email protected]
Subject: [PATCH net-next 05/12] net: ethernet: mtk_eth_soc: add the capability
 to run multiple ppe
Date: Thu, 8 Sep 2022 21:33:39 +0200
Message-Id:
 <dd0254775390eb031c67c448df8b19e87df58558.1662661555.git.lorenzo@kernel.org>
X-Mailer: git-send-email 2.37.3
In-Reply-To: <[email protected]>
References: <[email protected]>
MIME-Version: 1.0
Precedence: bulk
List-ID: <netdev.vger.kernel.org>
X-Mailing-List: [email protected]
X-Patchwork-Delegate: [email protected]

The mt7986 chipset supports multiple packet engines for wlan <-> eth
packet forwarding.

Co-developed-by: Bo Jiao <[email protected]>
Signed-off-by: Bo Jiao <[email protected]>
Co-developed-by: Sujuan Chen <[email protected]>
Signed-off-by: Sujuan Chen <[email protected]>
Signed-off-by: Lorenzo Bianconi <[email protected]>
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 35 ++++++++++++-------
 drivers/net/ethernet/mediatek/mtk_eth_soc.h |  2 +-
 drivers/net/ethernet/mediatek/mtk_ppe.c     | 14 +++++---
 drivers/net/ethernet/mediatek/mtk_ppe.h     |  9 +++--
 .../net/ethernet/mediatek/mtk_ppe_debugfs.c |  8 ++---
 .../net/ethernet/mediatek/mtk_ppe_offload.c | 13 +++----
 6 files changed, 48 insertions(+), 33 deletions(-)

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1871,7 +1871,7 @@ static int mtk_poll_rx(struct napi_struc
reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
- mtk_ppe_check_skb(eth->ppe, skb, hash);
+ mtk_ppe_check_skb(eth->ppe[0], skb, hash);
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
@@ -2929,15 +2929,19 @@ static int mtk_open(struct net_device *d
/* we run 2 netdevs on the same dma ring so we only bring it up once */
if (!refcount_read(&eth->dma_refcnt)) {
const struct mtk_soc_data *soc = eth->soc;
- u32 gdm_config = MTK_GDMA_TO_PDMA;
+ u32 gdm_config;
+ int i;
int err;
err = mtk_start_dma(eth);
if (err)
return err;
- if (soc->offload_version && mtk_ppe_start(eth->ppe) == 0)
- gdm_config = soc->reg_map->gdma_to_ppe0;
+ for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
+ mtk_ppe_start(eth->ppe[i]);
+
+ gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe0
+ : MTK_GDMA_TO_PDMA;
mtk_gdm_config(eth, gdm_config);
@@ -2982,6 +2986,7 @@ static int mtk_stop(struct net_device *d
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
+ int i;
phylink_stop(mac->phylink);
@@ -3009,8 +3014,8 @@ static int mtk_stop(struct net_device *d
mtk_dma_free(eth);
- if (eth->soc->offload_version)
- mtk_ppe_stop(eth->ppe);
+ for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
+ mtk_ppe_stop(eth->ppe[i]);
return 0;
}
@@ -4050,12 +4055,19 @@ static int mtk_probe(struct platform_dev
}
if (eth->soc->offload_version) {
- u32 ppe_addr = eth->soc->reg_map->ppe_base;
+ u32 num_ppe;
- eth->ppe = mtk_ppe_init(eth, eth->base + ppe_addr, 2);
- if (!eth->ppe) {
- err = -ENOMEM;
- goto err_free_dev;
+ num_ppe = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
+ num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
+ for (i = 0; i < num_ppe; i++) {
+ u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
+
+ eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr,
+ eth->soc->offload_version, i);
+ if (!eth->ppe[i]) {
+ err = -ENOMEM;
+ goto err_free_dev;
+ }
}
err = mtk_eth_offload_init(eth);
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -1111,7 +1111,7 @@ struct mtk_eth {
int ip_align;
- struct mtk_ppe *ppe;
+ struct mtk_ppe *ppe[2];
struct rhashtable flow_table;
struct bpf_prog __rcu *prog;
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -682,7 +682,7 @@ int mtk_foe_entry_idle_time(struct mtk_p
}
struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
- int version)
+ int version, int index)
{
const struct mtk_soc_data *soc = eth->soc;
struct device *dev = eth->dev;
@@ -717,7 +717,7 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_
if (!ppe->foe_flow)
return NULL;
- mtk_ppe_debugfs_init(ppe);
+ mtk_ppe_debugfs_init(ppe, index);
return ppe;
}
@@ -738,10 +738,13 @@ static void mtk_ppe_init_foe_table(struc
ppe->foe_table[i + skip[k]].ib1 |= MTK_FOE_IB1_STATIC;
}
-int mtk_ppe_start(struct mtk_ppe *ppe)
+void mtk_ppe_start(struct mtk_ppe *ppe)
{
u32 val;
+ if (!ppe)
+ return;
+
mtk_ppe_init_foe_table(ppe);
ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);
@@ -809,8 +812,6 @@ int mtk_ppe_start(struct mtk_ppe *ppe)
ppe_w32(ppe, MTK_PPE_GLO_CFG, val);
ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);
-
- return 0;
}
int mtk_ppe_stop(struct mtk_ppe *ppe)
@@ -818,6 +819,9 @@ int mtk_ppe_stop(struct mtk_ppe *ppe)
u32 val;
int i;
+ if (!ppe)
+ return 0;
+
for (i = 0; i < MTK_PPE_ENTRIES; i++)
ppe->foe_table[i].ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
MTK_FOE_STATE_INVALID);
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -247,6 +247,7 @@ struct mtk_flow_entry {
};
u8 type;
s8 wed_index;
+ u8 ppe_index;
u16 hash;
union {
struct mtk_foe_entry data;
@@ -265,6 +266,7 @@ struct mtk_ppe {
struct device *dev;
void __iomem *base;
int version;
+ char dirname[5];
struct mtk_foe_entry *foe_table;
dma_addr_t foe_phys;
@@ -277,8 +279,9 @@ struct mtk_ppe {
void *acct_table;
};
-struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version);
-int mtk_ppe_start(struct mtk_ppe *ppe);
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
+ int version, int index);
+void mtk_ppe_start(struct mtk_ppe *ppe);
int mtk_ppe_stop(struct mtk_ppe *ppe);
void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
@@ -317,6 +320,6 @@ int mtk_foe_entry_set_wdma(struct mtk_fo
int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
-int mtk_ppe_debugfs_init(struct mtk_ppe *ppe);
+int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index);
#endif
--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
@@ -187,7 +187,7 @@ mtk_ppe_debugfs_foe_open_bind(struct ino
inode->i_private);
}
-int mtk_ppe_debugfs_init(struct mtk_ppe *ppe)
+int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index)
{
static const struct file_operations fops_all = {
.open = mtk_ppe_debugfs_foe_open_all,
@@ -195,17 +195,17 @@ int mtk_ppe_debugfs_init(struct mtk_ppe
.llseek = seq_lseek,
.release = single_release,
};
-
static const struct file_operations fops_bind = {
.open = mtk_ppe_debugfs_foe_open_bind,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
-
struct dentry *root;
- root = debugfs_create_dir("mtk_ppe", NULL);
+ snprintf(ppe->dirname, sizeof(ppe->dirname), "ppe%d", index);
+
+ root = debugfs_create_dir(ppe->dirname, NULL);
if (!root)
return -ENOMEM;
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -434,7 +434,7 @@ mtk_flow_offload_replace(struct mtk_eth
memcpy(&entry->data, &foe, sizeof(entry->data));
entry->wed_index = wed_index;
- err = mtk_foe_entry_commit(eth->ppe, entry);
+ err = mtk_foe_entry_commit(eth->ppe[entry->ppe_index], entry);
if (err < 0)
goto free;
@@ -446,7 +446,7 @@ mtk_flow_offload_replace(struct mtk_eth
return 0;
clear:
- mtk_foe_entry_clear(eth->ppe, entry);
+ mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
free:
kfree(entry);
if (wed_index >= 0)
@@ -464,7 +464,7 @@ mtk_flow_offload_destroy(struct mtk_eth
if (!entry)
return -ENOENT;
- mtk_foe_entry_clear(eth->ppe, entry);
+ mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
rhashtable_remove_fast(&eth->flow_table, &entry->node,
mtk_flow_ht_params);
if (entry->wed_index >= 0)
@@ -485,7 +485,7 @@ mtk_flow_offload_stats(struct mtk_eth *e
if (!entry)
return -ENOENT;
- idle = mtk_foe_entry_idle_time(eth->ppe, entry);
+ idle = mtk_foe_entry_idle_time(eth->ppe[entry->ppe_index], entry);
f->stats.lastused = jiffies - idle * HZ;
return 0;
@@ -537,7 +537,7 @@ mtk_eth_setup_tc_block(struct net_device
struct flow_block_cb *block_cb;
flow_setup_cb_t *cb;
- if (!eth->ppe || !eth->ppe->foe_table)
+ if (!eth->soc->offload_version)
return -EOPNOTSUPP;
if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
@@ -589,8 +589,5 @@ int mtk_eth_setup_tc(struct net_device *
int mtk_eth_offload_init(struct mtk_eth *eth)
{
- if (!eth->ppe || !eth->ppe->foe_table)
- return 0;
-
return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
}
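
Editor's note: the central pattern of this patch is that eth->ppe becomes a fixed-size array and mtk_ppe_start()/mtk_ppe_stop() become no-ops on a NULL slot, so mtk_open()/mtk_stop() can loop over ARRAY_SIZE(eth->ppe) without per-slot offload_version checks, while mtk_probe() only populates as many slots as the SoC provides (2 on NETSYS_V2, 1 otherwise). The following is a minimal, self-contained C sketch of that pattern only, not the driver code itself; the demo_eth/demo_ppe names and the userspace scaffolding are hypothetical and exist purely for illustration.

/*
 * Standalone sketch (hypothetical names, userspace only): models the
 * "array of optional engines" pattern from the patch, where the start/stop
 * helpers tolerate NULL slots so callers can loop unconditionally.
 */
#include <stdio.h>
#include <stdlib.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct demo_ppe {
	int index;
};

struct demo_eth {
	struct demo_ppe *ppe[2];	/* mirrors mtk_eth::ppe[2] */
};

static void demo_ppe_start(struct demo_ppe *ppe)
{
	if (!ppe)			/* unused slot, e.g. single-PPE SoC */
		return;
	printf("start ppe%d\n", ppe->index);
}

static void demo_ppe_stop(struct demo_ppe *ppe)
{
	if (!ppe)
		return;
	printf("stop ppe%d\n", ppe->index);
}

int main(void)
{
	struct demo_eth eth = { 0 };
	/* 2 engines on NETSYS_V2-style hardware, 1 otherwise */
	size_t num_ppe = 2, i;

	if (num_ppe > ARRAY_SIZE(eth.ppe))
		num_ppe = ARRAY_SIZE(eth.ppe);

	for (i = 0; i < num_ppe; i++) {
		eth.ppe[i] = calloc(1, sizeof(*eth.ppe[i]));
		if (!eth.ppe[i])
			return 1;
		eth.ppe[i]->index = i;
	}

	/* open path: iterate every slot, NULL slots are skipped */
	for (i = 0; i < ARRAY_SIZE(eth.ppe); i++)
		demo_ppe_start(eth.ppe[i]);

	/* stop path mirrors the same unconditional loop */
	for (i = 0; i < ARRAY_SIZE(eth.ppe); i++)
		demo_ppe_stop(eth.ppe[i]);

	for (i = 0; i < ARRAY_SIZE(eth.ppe); i++)
		free(eth.ppe[i]);
	return 0;
}

Making the helpers NULL-tolerant is what allows the patch to drop the offload_version guards around mtk_ppe_start()/mtk_ppe_stop() in the open and stop paths.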