723-v6.0-net-ethernet-mtk_eth_soc-introduce-flow-offloading-s.patch

From 93408c858e5dc01d97c55efa721268f63fde2ae5 Mon Sep 17 00:00:00 2001
Message-Id: <93408c858e5dc01d97c55efa721268f63fde2ae5.1662886034.git.lorenzo@kernel.org>
In-Reply-To: <e5ecb4f619197b93fa682d722452dc8412864cdb.1662886033.git.lorenzo@kernel.org>
References: <e5ecb4f619197b93fa682d722452dc8412864cdb.1662886033.git.lorenzo@kernel.org>
From: Lorenzo Bianconi <[email protected]>
Date: Sat, 3 Sep 2022 18:34:09 +0200
Subject: [PATCH net-next 4/5] net: ethernet: mtk_eth_soc: introduce flow
 offloading support for mt7986

Introduce hw flow offload support for the mt7986 chipset. PPE is not
enabled on mt7986 yet, since the required mt76 support is still missing.

Co-developed-by: Bo Jiao <[email protected]>
Signed-off-by: Bo Jiao <[email protected]>
Co-developed-by: Sujuan Chen <[email protected]>
Signed-off-by: Sujuan Chen <[email protected]>
Signed-off-by: Lorenzo Bianconi <[email protected]>
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c  |  11 +-
 drivers/net/ethernet/mediatek/mtk_eth_soc.h  |  72 ++++++
 drivers/net/ethernet/mediatek/mtk_ppe.c      | 213 +++++++++++-------
 drivers/net/ethernet/mediatek/mtk_ppe.h      |  52 ++++-
 .../net/ethernet/mediatek/mtk_ppe_offload.c  |  49 ++--
 drivers/net/ethernet/mediatek/mtk_ppe_regs.h |   8 +
 6 files changed, 289 insertions(+), 116 deletions(-)
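
Editor's note (not part of the upstream patch): every helper this series adds
follows one capability-gated pattern - pick the NETSYS v2 bit layout when the
SoC advertises MTK_NETSYS_V2, otherwise fall back to the v1 layout. A minimal
standalone sketch of the pattern; MY_FIELD_V1/MY_FIELD_V2 and the my_* names
are hypothetical, the real masks live in mtk_ppe.h:

	/* hypothetical masks, for illustration only */
	#include <linux/bitfield.h>

	#define MY_FIELD_V1	GENMASK(28, 24)	/* v1 ib1 layout */
	#define MY_FIELD_V2	GENMASK(27, 23)	/* v2 ib1 layout */

	static inline u32 my_get_field_mask(struct mtk_eth *eth)
	{
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
			return MY_FIELD_V2;

		return MY_FIELD_V1;
	}

	/* extract the field from ib1, honouring the active layout */
	static inline u32 my_get_field(struct mtk_eth *eth, u32 ib1)
	{
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
			return FIELD_GET(MY_FIELD_V2, ib1);

		return FIELD_GET(MY_FIELD_V1, ib1);
	}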
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1858,12 +1858,14 @@ static int mtk_poll_rx(struct napi_struc
bytes += skb->len;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
if (hash != MTK_RXD5_FOE_ENTRY)
skb_set_hash(skb, jhash_1word(hash, 0),
PKT_HASH_TYPE_L4);
rxdcsum = &trxd.rxd3;
} else {
+ reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
if (hash != MTK_RXD4_FOE_ENTRY)
skb_set_hash(skb, jhash_1word(hash, 0),
@@ -1877,7 +1879,6 @@ static int mtk_poll_rx(struct napi_struc
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, netdev);
- reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
mtk_ppe_check_skb(eth->ppe[0], skb, hash);
@@ -4179,7 +4180,7 @@ static const struct mtk_soc_data mt7621_
.required_pctl = false,
.offload_version = 2,
.hash_offset = 2,
- .foe_entry_size = sizeof(struct mtk_foe_entry),
+ .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
@@ -4199,7 +4200,7 @@ static const struct mtk_soc_data mt7622_
.required_pctl = false,
.offload_version = 2,
.hash_offset = 2,
- .foe_entry_size = sizeof(struct mtk_foe_entry),
+ .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
@@ -4218,7 +4219,7 @@ static const struct mtk_soc_data mt7623_
.required_pctl = true,
.offload_version = 2,
.hash_offset = 2,
- .foe_entry_size = sizeof(struct mtk_foe_entry),
+ .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
@@ -4250,9 +4251,11 @@ static const struct mtk_soc_data mt7986_
.reg_map = &mt7986_reg_map,
.ana_rgc3 = 0x128,
.caps = MT7986_CAPS,
+ .hw_features = MTK_HW_FEATURES,
.required_clks = MT7986_CLKS_BITMAP,
.required_pctl = false,
.hash_offset = 4,
+ .foe_entry_size = sizeof(struct mtk_foe_entry),
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma_v2),
.rxd_size = sizeof(struct mtk_rx_dma_v2),
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -1151,6 +1151,78 @@ mtk_foe_get_entry(struct mtk_ppe *ppe, u
return ppe->foe_table + hash * soc->foe_entry_size;
}
+static inline u32 mtk_get_ib1_ts_mask(struct mtk_eth *eth)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return MTK_FOE_IB1_BIND_TIMESTAMP_V2;
+
+ return MTK_FOE_IB1_BIND_TIMESTAMP;
+}
+
+static inline u32 mtk_get_ib1_ppoe_mask(struct mtk_eth *eth)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return MTK_FOE_IB1_BIND_PPPOE_V2;
+
+ return MTK_FOE_IB1_BIND_PPPOE;
+}
+
+static inline u32 mtk_get_ib1_vlan_tag_mask(struct mtk_eth *eth)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return MTK_FOE_IB1_BIND_VLAN_TAG_V2;
+
+ return MTK_FOE_IB1_BIND_VLAN_TAG;
+}
+
+static inline u32 mtk_get_ib1_vlan_layer_mask(struct mtk_eth *eth)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return MTK_FOE_IB1_BIND_VLAN_LAYER_V2;
+
+ return MTK_FOE_IB1_BIND_VLAN_LAYER;
+}
+
+static inline u32 mtk_prep_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);
+
+ return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
+}
+
+static inline u32 mtk_get_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);
+
+ return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
+}
+
+static inline u32 mtk_get_ib1_pkt_type_mask(struct mtk_eth *eth)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return MTK_FOE_IB1_PACKET_TYPE_V2;
+
+ return MTK_FOE_IB1_PACKET_TYPE;
+}
+
+static inline u32 mtk_get_ib1_pkt_type(struct mtk_eth *eth, u32 val)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE_V2, val);
+
+ return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, val);
+}
+
+static inline u32 mtk_get_ib2_multicast_mask(struct mtk_eth *eth)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return MTK_FOE_IB2_MULTICAST_V2;
+
+ return MTK_FOE_IB2_MULTICAST;
+}
+
/* read the hardware status register */
void mtk_stats_update_mac(struct mtk_mac *mac);
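
Editor's note: mind the difference between the _prep and _get helpers above.
mtk_prep_ib1_vlan_layer() shifts a value into the VLAN-layer field
(FIELD_PREP), while mtk_get_ib1_vlan_layer() extracts it (FIELD_GET). The
switch in mtk_foe_entry_set_vlan() below dispatches on the extracted layer
count, so it needs the _get variant. A sketch of the distinction (fragment
only, assuming the v1 layout where the field is GENMASK(18, 16)):

	u32 ib1 = 0;
	u32 layers;

	ib1 |= mtk_prep_ib1_vlan_layer(eth, 1);    /* sets bit 16 */
	layers = mtk_get_ib1_vlan_layer(eth, ib1); /* reads back 1 */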
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -56,7 +56,7 @@ static u32 ppe_clear(struct mtk_ppe *ppe
static u32 mtk_eth_timestamp(struct mtk_eth *eth)
{
- return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
+ return mtk_r32(eth, 0x0010) & mtk_get_ib1_ts_mask(eth);
}
static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
@@ -93,7 +93,7 @@ static u32 mtk_ppe_hash_entry(struct mtk
u32 hv1, hv2, hv3;
u32 hash;
- switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
+ switch (mtk_get_ib1_pkt_type(eth, e->ib1)) {
case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
hv1 = e->ipv4.orig.ports;
@@ -129,9 +129,9 @@ static u32 mtk_ppe_hash_entry(struct mtk
}
static inline struct mtk_foe_mac_info *
-mtk_foe_entry_l2(struct mtk_foe_entry *entry)
+mtk_foe_entry_l2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
if (type == MTK_PPE_PKT_TYPE_BRIDGE)
return &entry->bridge.l2;
@@ -143,9 +143,9 @@ mtk_foe_entry_l2(struct mtk_foe_entry *e
}
static inline u32 *
-mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
+mtk_foe_entry_ib2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
if (type == MTK_PPE_PKT_TYPE_BRIDGE)
return &entry->bridge.ib2;
@@ -156,27 +156,38 @@ mtk_foe_entry_ib2(struct mtk_foe_entry *
return &entry->ipv4.ib2;
}
-int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
- u8 pse_port, u8 *src_mac, u8 *dest_mac)
+int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int type, int l4proto, u8 pse_port, u8 *src_mac,
+ u8 *dest_mac)
{
struct mtk_foe_mac_info *l2;
u32 ports_pad, val;
memset(entry, 0, sizeof(*entry));
- val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
- FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
- FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
- MTK_FOE_IB1_BIND_TTL |
- MTK_FOE_IB1_BIND_CACHE;
- entry->ib1 = val;
-
- val = FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
- FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f) |
- FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
+ FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) |
+ FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
+ MTK_FOE_IB1_BIND_CACHE_V2 | MTK_FOE_IB1_BIND_TTL_V2;
+ entry->ib1 = val;
+
+ val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, pse_port) |
+ FIELD_PREP(MTK_FOE_IB2_PORT_AG_V2, 0xf);
+ } else {
+ val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
+ FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
+ FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
+ MTK_FOE_IB1_BIND_CACHE | MTK_FOE_IB1_BIND_TTL;
+ entry->ib1 = val;
+
+ val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port) |
+ FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
+ FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f);
+ }
if (is_multicast_ether_addr(dest_mac))
- val |= MTK_FOE_IB2_MULTICAST;
+ val |= mtk_get_ib2_multicast_mask(eth);
ports_pad = 0xa5a5a500 | (l4proto & 0xff);
if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
@@ -210,24 +221,30 @@ int mtk_foe_entry_prepare(struct mtk_foe
return 0;
}
-int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port)
+int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry, u8 port)
{
- u32 *ib2 = mtk_foe_entry_ib2(entry);
- u32 val;
+ u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
+ u32 val = *ib2;
- val = *ib2;
- val &= ~MTK_FOE_IB2_DEST_PORT;
- val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ val &= ~MTK_FOE_IB2_DEST_PORT_V2;
+ val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port);
+ } else {
+ val &= ~MTK_FOE_IB2_DEST_PORT;
+ val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
+ }
*ib2 = val;
return 0;
}
-int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool egress,
+int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry, bool egress,
__be32 src_addr, __be16 src_port,
__be32 dest_addr, __be16 dest_port)
{
- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
struct mtk_ipv4_tuple *t;
switch (type) {
@@ -262,11 +279,12 @@ int mtk_foe_entry_set_ipv4_tuple(struct
return 0;
}
-int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
+int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry,
__be32 *src_addr, __be16 src_port,
__be32 *dest_addr, __be16 dest_port)
{
- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
u32 *src, *dest;
int i;
@@ -297,39 +315,41 @@ int mtk_foe_entry_set_ipv6_tuple(struct
return 0;
}
-int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port)
+int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int port)
{
- struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
l2->etype = BIT(port);
- if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER))
- entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
+ if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)))
+ entry->ib1 |= mtk_prep_ib1_vlan_layer(eth, 1);
else
l2->etype |= BIT(8);
- entry->ib1 &= ~MTK_FOE_IB1_BIND_VLAN_TAG;
+ entry->ib1 &= ~mtk_get_ib1_vlan_tag_mask(eth);
return 0;
}
-int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid)
+int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int vid)
{
- struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
- switch (FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, entry->ib1)) {
+ switch (mtk_get_ib1_vlan_layer(eth, entry->ib1)) {
case 0:
- entry->ib1 |= MTK_FOE_IB1_BIND_VLAN_TAG |
- FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
+ entry->ib1 |= mtk_get_ib1_vlan_tag_mask(eth) |
+ mtk_prep_ib1_vlan_layer(eth, 1);
l2->vlan1 = vid;
return 0;
case 1:
- if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG)) {
+ if (!(entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth))) {
l2->vlan1 = vid;
l2->etype |= BIT(8);
} else {
l2->vlan2 = vid;
- entry->ib1 += FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
+ entry->ib1 += mtk_prep_ib1_vlan_layer(eth, 1);
}
return 0;
default:
@@ -337,34 +357,42 @@ int mtk_foe_entry_set_vlan(struct mtk_fo
}
}
-int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
+int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int sid)
{
- struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
- if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER) ||
- (entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG))
+ if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)) ||
+ (entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth)))
l2->etype = ETH_P_PPP_SES;
- entry->ib1 |= MTK_FOE_IB1_BIND_PPPOE;
+ entry->ib1 |= mtk_get_ib1_ppoe_mask(eth);
l2->pppoe_id = sid;
return 0;
}
-int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
- int bss, int wcid)
+int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int wdma_idx, int txq, int bss, int wcid)
{
- struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
- u32 *ib2 = mtk_foe_entry_ib2(entry);
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
+ u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
- *ib2 &= ~MTK_FOE_IB2_PORT_MG;
- *ib2 |= MTK_FOE_IB2_WDMA_WINFO;
- if (wdma_idx)
- *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
-
- l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
- FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
- FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ *ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
+ *ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
+ MTK_FOE_IB2_WDMA_WINFO_V2;
+ l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
+ FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
+ } else {
+ *ib2 &= ~MTK_FOE_IB2_PORT_MG;
+ *ib2 |= MTK_FOE_IB2_WDMA_WINFO;
+ if (wdma_idx)
+ *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
+ l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
+ }
return 0;
}
@@ -376,14 +404,15 @@ static inline bool mtk_foe_entry_usable(
}
static bool
-mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data)
+mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
+ struct mtk_foe_entry *data)
{
int type, len;
if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
return false;
- type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
+ type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
len = offsetof(struct mtk_foe_entry, ipv6._rsv);
else
@@ -427,14 +456,12 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp
static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
{
- u16 timestamp;
- u16 now;
-
- now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP;
- timestamp = ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
+ u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
+ u16 now = mtk_eth_timestamp(ppe->eth);
+ u16 timestamp = ib1 & ib1_ts_mask;
if (timestamp > now)
- return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now;
+ return ib1_ts_mask + 1 - timestamp + now;
else
return now - timestamp;
}
@@ -442,6 +469,7 @@ static int __mtk_foe_entry_idle_time(str
static void
mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
+ u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
struct mtk_flow_entry *cur;
struct mtk_foe_entry *hwe;
struct hlist_node *tmp;
@@ -466,8 +494,8 @@ mtk_flow_entry_update_l2(struct mtk_ppe
continue;
idle = cur_idle;
- entry->data.ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
- entry->data.ib1 |= hwe->ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
+ entry->data.ib1 &= ~ib1_ts_mask;
+ entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
}
}
@@ -489,7 +517,7 @@ mtk_flow_entry_update(struct mtk_ppe *pp
hwe = mtk_foe_get_entry(ppe, entry->hash);
memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
- if (!mtk_flow_entry_match(entry, &foe)) {
+ if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) {
entry->hash = 0xffff;
goto out;
}
@@ -504,16 +532,22 @@ static void
__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
u16 hash)
{
+ struct mtk_eth *eth = ppe->eth;
+ u16 timestamp = mtk_eth_timestamp(eth);
struct mtk_foe_entry *hwe;
- u16 timestamp;
- timestamp = mtk_eth_timestamp(ppe->eth);
- timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
- entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
- entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2;
+ entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2,
+ timestamp);
+ } else {
+ entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
+ entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP,
+ timestamp);
+ }
hwe = mtk_foe_get_entry(ppe, hash);
- memcpy(&hwe->data, &entry->data, ppe->eth->soc->foe_entry_size);
+ memcpy(&hwe->data, &entry->data, eth->soc->foe_entry_size);
wmb();
hwe->ib1 = entry->ib1;
@@ -540,8 +574,8 @@ mtk_foe_entry_commit_l2(struct mtk_ppe *
int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
const struct mtk_soc_data *soc = ppe->eth->soc;
+ int type = mtk_get_ib1_pkt_type(ppe->eth, entry->data.ib1);
u32 hash;
if (type == MTK_PPE_PKT_TYPE_BRIDGE)
@@ -564,7 +598,7 @@ mtk_foe_entry_commit_subflow(struct mtk_
struct mtk_flow_entry *flow_info;
struct mtk_foe_entry foe = {}, *hwe;
struct mtk_foe_mac_info *l2;
- u32 ib1_mask = MTK_FOE_IB1_PACKET_TYPE | MTK_FOE_IB1_UDP;
+ u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
int type;
flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
@@ -584,16 +618,16 @@ mtk_foe_entry_commit_subflow(struct mtk_
foe.ib1 &= ib1_mask;
foe.ib1 |= entry->data.ib1 & ~ib1_mask;
- l2 = mtk_foe_entry_l2(&foe);
+ l2 = mtk_foe_entry_l2(ppe->eth, &foe);
memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));
- type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, foe.ib1);
+ type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1);
if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
l2->etype = ETH_P_IPV6;
- *mtk_foe_entry_ib2(&foe) = entry->data.bridge.ib2;
+ *mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2;
__mtk_foe_entry_commit(ppe, &foe, hash);
}
@@ -626,7 +660,7 @@ void __mtk_ppe_check_skb(struct mtk_ppe
continue;
}
- if (found || !mtk_flow_entry_match(entry, hwe)) {
+ if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) {
if (entry->hash != 0xffff)
entry->hash = 0xffff;
continue;
@@ -771,6 +805,8 @@ void mtk_ppe_start(struct mtk_ppe *ppe)
MTK_PPE_SCAN_MODE_CHECK_AGE) |
FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
MTK_PPE_ENTRIES_SHIFT);
+ if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
+ val |= MTK_PPE_TB_CFG_INFO_SEL;
ppe_w32(ppe, MTK_PPE_TB_CFG, val);
ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
@@ -778,15 +814,21 @@ void mtk_ppe_start(struct mtk_ppe *ppe)
mtk_ppe_cache_enable(ppe, true);
- val = MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
- MTK_PPE_FLOW_CFG_IP4_UDP_FRAG |
- MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
+ val = MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
MTK_PPE_FLOW_CFG_IP6_6RD |
MTK_PPE_FLOW_CFG_IP4_NAT |
MTK_PPE_FLOW_CFG_IP4_NAPT |
MTK_PPE_FLOW_CFG_IP4_DSLITE |
MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
+ if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
+ val |= MTK_PPE_MD_TOAP_BYP_CRSN0 |
+ MTK_PPE_MD_TOAP_BYP_CRSN1 |
+ MTK_PPE_MD_TOAP_BYP_CRSN2 |
+ MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY;
+ else
+ val |= MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
+ MTK_PPE_FLOW_CFG_IP4_UDP_FRAG;
ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);
val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
@@ -820,6 +862,11 @@ void mtk_ppe_start(struct mtk_ppe *ppe)
ppe_w32(ppe, MTK_PPE_GLO_CFG, val);
ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);
+
+ if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) {
+ ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777);
+ ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f);
+ }
}
int mtk_ppe_stop(struct mtk_ppe *ppe)
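
Editor's note: __mtk_foe_entry_idle_time() above computes the idle time
modulo the width of the timestamp field, which now differs between v1
(15-bit MTK_FOE_IB1_BIND_TIMESTAMP) and v2 (8-bit
MTK_FOE_IB1_BIND_TIMESTAMP_V2). When the hardware counter has wrapped,
timestamp is larger than now and the result is ib1_ts_mask + 1 - timestamp
+ now. A worked example for the 8-bit v2 field (values hypothetical):

	/* ib1_ts_mask == 0xff, so time wraps modulo 256 */
	u16 now = 0x05, timestamp = 0xfa;
	/* timestamp > now, counter wrapped:
	 * 0xff + 1 - 0xfa + 0x05 = 11 ticks idle
	 */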
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -32,6 +32,15 @@
#define MTK_FOE_IB1_UDP BIT(30)
#define MTK_FOE_IB1_STATIC BIT(31)
+/* CONFIG_MEDIATEK_NETSYS_V2 */
+#define MTK_FOE_IB1_BIND_TIMESTAMP_V2 GENMASK(7, 0)
+#define MTK_FOE_IB1_BIND_VLAN_LAYER_V2 GENMASK(16, 14)
+#define MTK_FOE_IB1_BIND_PPPOE_V2 BIT(17)
+#define MTK_FOE_IB1_BIND_VLAN_TAG_V2 BIT(18)
+#define MTK_FOE_IB1_BIND_CACHE_V2 BIT(20)
+#define MTK_FOE_IB1_BIND_TTL_V2 BIT(22)
+#define MTK_FOE_IB1_PACKET_TYPE_V2 GENMASK(27, 23)
+
enum {
MTK_PPE_PKT_TYPE_IPV4_HNAPT = 0,
MTK_PPE_PKT_TYPE_IPV4_ROUTE = 1,
@@ -53,14 +62,25 @@ enum {
#define MTK_FOE_IB2_PORT_MG GENMASK(17, 12)
+#define MTK_FOE_IB2_RX_IDX GENMASK(18, 17)
#define MTK_FOE_IB2_PORT_AG GENMASK(23, 18)
#define MTK_FOE_IB2_DSCP GENMASK(31, 24)
+/* CONFIG_MEDIATEK_NETSYS_V2 */
+#define MTK_FOE_IB2_PORT_MG_V2 BIT(7)
+#define MTK_FOE_IB2_DEST_PORT_V2 GENMASK(12, 9)
+#define MTK_FOE_IB2_MULTICAST_V2 BIT(13)
+#define MTK_FOE_IB2_WDMA_WINFO_V2 BIT(19)
+#define MTK_FOE_IB2_PORT_AG_V2 GENMASK(23, 20)
+
#define MTK_FOE_VLAN2_WINFO_BSS GENMASK(5, 0)
#define MTK_FOE_VLAN2_WINFO_WCID GENMASK(13, 6)
#define MTK_FOE_VLAN2_WINFO_RING GENMASK(15, 14)
+#define MTK_FOE_WINFO_BSS GENMASK(5, 0)
+#define MTK_FOE_WINFO_WCID GENMASK(15, 6)
+
enum {
MTK_FOE_STATE_INVALID,
MTK_FOE_STATE_UNBIND,
@@ -81,6 +101,9 @@ struct mtk_foe_mac_info {
u16 pppoe_id;
u16 src_mac_lo;
+
+ u16 minfo;
+ u16 winfo;
};
/* software-only entry type */
@@ -198,7 +221,7 @@ struct mtk_foe_entry {
struct mtk_foe_ipv4_dslite dslite;
struct mtk_foe_ipv6 ipv6;
struct mtk_foe_ipv6_6rd ipv6_6rd;
- u32 data[19];
+ u32 data[23];
};
};
@@ -306,20 +329,27 @@ mtk_ppe_check_skb(struct mtk_ppe *ppe, s
__mtk_ppe_check_skb(ppe, skb, hash);
}
-int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
- u8 pse_port, u8 *src_mac, u8 *dest_mac);
-int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port);
-int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool orig,
+int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int type, int l4proto, u8 pse_port, u8 *src_mac,
+ u8 *dest_mac);
+int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry, u8 port);
+int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry, bool orig,
__be32 src_addr, __be16 src_port,
__be32 dest_addr, __be16 dest_port);
-int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
+int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry,
__be32 *src_addr, __be16 src_port,
__be32 *dest_addr, __be16 dest_port);
-int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port);
-int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
-int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
-int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
- int bss, int wcid);
+int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int port);
+int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int vid);
+int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int sid);
+int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int wdma_idx, int txq, int bss, int wcid);
int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
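
Editor's note: all the entry builders now take the mtk_eth pointer as their
first argument so they can pick the right field layout at run time. A
condensed sketch of how a caller chains them, modelled on
mtk_flow_offload_replace() in the next file (flow parsing and most error
handling elided; src_mac, dest_mac and the address/port variables are
hypothetical locals):

	struct mtk_foe_entry foe;
	int err;

	err = mtk_foe_entry_prepare(eth, &foe, MTK_PPE_PKT_TYPE_IPV4_HNAPT,
				    IPPROTO_TCP, 0, src_mac, dest_mac);
	if (!err)
		err = mtk_foe_entry_set_ipv4_tuple(eth, &foe, false,
						   src_addr, src_port,
						   dst_addr, dst_port);
	if (!err)
		err = mtk_foe_entry_set_pse_port(eth, &foe, 1); /* GMAC1 */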
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -52,18 +52,19 @@ static const struct rhashtable_params mt
};
static int
-mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
- bool egress)
+mtk_flow_set_ipv4_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe,
+ struct mtk_flow_data *data, bool egress)
{
- return mtk_foe_entry_set_ipv4_tuple(foe, egress,
+ return mtk_foe_entry_set_ipv4_tuple(eth, foe, egress,
data->v4.src_addr, data->src_port,
data->v4.dst_addr, data->dst_port);
}
static int
-mtk_flow_set_ipv6_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data)
+mtk_flow_set_ipv6_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe,
+ struct mtk_flow_data *data)
{
- return mtk_foe_entry_set_ipv6_tuple(foe,
+ return mtk_foe_entry_set_ipv6_tuple(eth, foe,
data->v6.src_addr.s6_addr32, data->src_port,
data->v6.dst_addr.s6_addr32, data->dst_port);
}
@@ -190,16 +191,29 @@ mtk_flow_set_output_device(struct mtk_et
int pse_port, dsa_port;
if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
- mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss,
- info.wcid);
- pse_port = 3;
+ mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue,
+ info.bss, info.wcid);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ switch (info.wdma_idx) {
+ case 0:
+ pse_port = 8;
+ break;
+ case 1:
+ pse_port = 9;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ pse_port = 3;
+ }
*wed_index = info.wdma_idx;
goto out;
}
dsa_port = mtk_flow_get_dsa_port(&dev);
if (dsa_port >= 0)
- mtk_foe_entry_set_dsa(foe, dsa_port);
+ mtk_foe_entry_set_dsa(eth, foe, dsa_port);
if (dev == eth->netdev[0])
pse_port = 1;
@@ -209,7 +223,7 @@ mtk_flow_set_output_device(struct mtk_et
return -EOPNOTSUPP;
out:
- mtk_foe_entry_set_pse_port(foe, pse_port);
+ mtk_foe_entry_set_pse_port(eth, foe, pse_port);
return 0;
}
@@ -333,9 +347,8 @@ mtk_flow_offload_replace(struct mtk_eth
!is_valid_ether_addr(data.eth.h_dest))
return -EINVAL;
- err = mtk_foe_entry_prepare(&foe, offload_type, l4proto, 0,
- data.eth.h_source,
- data.eth.h_dest);
+ err = mtk_foe_entry_prepare(eth, &foe, offload_type, l4proto, 0,
+ data.eth.h_source, data.eth.h_dest);
if (err)
return err;
@@ -360,7 +373,7 @@ mtk_flow_offload_replace(struct mtk_eth
data.v4.src_addr = addrs.key->src;
data.v4.dst_addr = addrs.key->dst;
- mtk_flow_set_ipv4_addr(&foe, &data, false);
+ mtk_flow_set_ipv4_addr(eth, &foe, &data, false);
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
@@ -371,7 +384,7 @@ mtk_flow_offload_replace(struct mtk_eth
data.v6.src_addr = addrs.key->src;
data.v6.dst_addr = addrs.key->dst;
- mtk_flow_set_ipv6_addr(&foe, &data);
+ mtk_flow_set_ipv6_addr(eth, &foe, &data);
}
flow_action_for_each(i, act, &rule->action) {
@@ -401,7 +414,7 @@ mtk_flow_offload_replace(struct mtk_eth
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
- err = mtk_flow_set_ipv4_addr(&foe, &data, true);
+ err = mtk_flow_set_ipv4_addr(eth, &foe, &data, true);
if (err)
return err;
}
@@ -413,10 +426,10 @@ mtk_flow_offload_replace(struct mtk_eth
if (data.vlan.proto != htons(ETH_P_8021Q))
return -EOPNOTSUPP;
- mtk_foe_entry_set_vlan(&foe, data.vlan.id);
+ mtk_foe_entry_set_vlan(eth, &foe, data.vlan.id);
}
if (data.pppoe.num == 1)
- mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);
+ mtk_foe_entry_set_pppoe(eth, &foe, data.pppoe.sid);
err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
&wed_index);
--- a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
@@ -21,6 +21,9 @@
#define MTK_PPE_GLO_CFG_BUSY BIT(31)
#define MTK_PPE_FLOW_CFG 0x204
+#define MTK_PPE_MD_TOAP_BYP_CRSN0 BIT(1)
+#define MTK_PPE_MD_TOAP_BYP_CRSN1 BIT(2)
+#define MTK_PPE_MD_TOAP_BYP_CRSN2 BIT(3)
#define MTK_PPE_FLOW_CFG_IP4_TCP_FRAG BIT(6)
#define MTK_PPE_FLOW_CFG_IP4_UDP_FRAG BIT(7)
#define MTK_PPE_FLOW_CFG_IP6_3T_ROUTE BIT(8)
@@ -54,6 +57,7 @@
#define MTK_PPE_TB_CFG_HASH_MODE GENMASK(15, 14)
#define MTK_PPE_TB_CFG_SCAN_MODE GENMASK(17, 16)
#define MTK_PPE_TB_CFG_HASH_DEBUG GENMASK(19, 18)
+#define MTK_PPE_TB_CFG_INFO_SEL BIT(20)
enum {
MTK_PPE_SCAN_MODE_DISABLED,
@@ -112,6 +116,8 @@ enum {
#define MTK_PPE_DEFAULT_CPU_PORT 0x248
#define MTK_PPE_DEFAULT_CPU_PORT_MASK(_n) (GENMASK(2, 0) << ((_n) * 4))
+#define MTK_PPE_DEFAULT_CPU_PORT1 0x24c
+
#define MTK_PPE_MTU_DROP 0x308
#define MTK_PPE_VLAN_MTU0 0x30c
@@ -141,4 +147,6 @@ enum {
#define MTK_PPE_MIB_CACHE_CTL_EN BIT(0)
#define MTK_PPE_MIB_CACHE_CTL_FLUSH BIT(2)
+#define MTK_PPE_SBW_CTRL 0x374
+
#endif