701-net-0337-enetc-add-support-tsn-capabilities-qbv-qci-qbu-cbs.patch 79 KB

  1. From bf3f81f3773cc9f6b273d769aca96512780c6189 Mon Sep 17 00:00:00 2001
  2. From: Po Liu <[email protected]>
  3. Date: Tue, 3 Dec 2019 16:52:57 +0800
  4. Subject: [PATCH] enetc: add support tsn capabilities qbv/qci/qbu/cbs
  5. Support Qbv/Qci/Qbu/Credit Based Shaper etc.
  6. This patch uses the generic netlink adaptation layer driver net/tsn/*
  7. and the include/net/tsn.h interface, which is driven from user space.
  8. User space refers to include/uapi/linux/tsn.h.
  9. Signed-off-by: Po Liu <[email protected]>
  10. ---
  11. drivers/net/ethernet/freescale/enetc/Kconfig | 10 +
  12. drivers/net/ethernet/freescale/enetc/Makefile | 1 +
  13. drivers/net/ethernet/freescale/enetc/enetc.c | 13 +-
  14. drivers/net/ethernet/freescale/enetc/enetc.h | 38 +
  15. .../net/ethernet/freescale/enetc/enetc_ethtool.c | 59 +
  16. drivers/net/ethernet/freescale/enetc/enetc_hw.h | 438 ++++-
  17. drivers/net/ethernet/freescale/enetc/enetc_pf.c | 15 +-
  18. drivers/net/ethernet/freescale/enetc/enetc_tsn.c | 2049 ++++++++++++++++++++
  19. 8 files changed, 2614 insertions(+), 9 deletions(-)
  20. create mode 100644 drivers/net/ethernet/freescale/enetc/enetc_tsn.c
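
For reviewers unfamiliar with the layering: the driver registers its ops with the net/tsn/* adaptation layer, and user space talks to that layer over generic netlink. As a rough, self-contained sketch of the user-space side, resolving a generic netlink family by name looks like the following; the family name "tsn" is an assumption here, and the authoritative names and attributes live in include/uapi/linux/tsn.h:

	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>
	#include <linux/genetlink.h>

	/* Sketch only: resolve a generic netlink family ID by name,
	 * e.g. genl_family_id("tsn") (family name assumed).
	 */
	static int genl_family_id(const char *name)
	{
		struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
		struct {
			struct nlmsghdr nh;
			struct genlmsghdr gh;
			char attr[64];
		} req;
		char buf[4096];
		struct nlattr *na = (struct nlattr *)req.attr;
		struct nlmsghdr *nh = (struct nlmsghdr *)buf;
		int fd, len = -1;

		fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC);
		if (fd < 0)
			return -1;

		memset(&req, 0, sizeof(req));
		req.nh.nlmsg_type = GENL_ID_CTRL;
		req.nh.nlmsg_flags = NLM_F_REQUEST;
		req.gh.cmd = CTRL_CMD_GETFAMILY;
		req.gh.version = 1;
		na->nla_type = CTRL_ATTR_FAMILY_NAME;
		na->nla_len = NLA_HDRLEN + strlen(name) + 1;
		strcpy((char *)na + NLA_HDRLEN, name);
		req.nh.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN + NLA_ALIGN(na->nla_len));

		if (sendto(fd, &req, req.nh.nlmsg_len, 0,
			   (struct sockaddr *)&sa, sizeof(sa)) >= 0)
			len = recv(fd, buf, sizeof(buf), 0);
		close(fd);
		if (len < (int)NLMSG_HDRLEN || nh->nlmsg_type == NLMSG_ERROR)
			return -1;

		/* walk the reply attributes for CTRL_ATTR_FAMILY_ID */
		na = (struct nlattr *)((char *)NLMSG_DATA(nh) + GENL_HDRLEN);
		while ((char *)na < (char *)nh + nh->nlmsg_len) {
			if (na->nla_type == CTRL_ATTR_FAMILY_ID)
				return *(uint16_t *)((char *)na + NLA_HDRLEN);
			na = (struct nlattr *)((char *)na + NLA_ALIGN(na->nla_len));
		}
		return -1;
	}

A real client would then issue the TSN commands defined in include/uapi/linux/tsn.h against the resolved family id.
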
  21. --- a/drivers/net/ethernet/freescale/enetc/Kconfig
  22. +++ b/drivers/net/ethernet/freescale/enetc/Kconfig
  23. @@ -60,3 +60,13 @@ config FSL_ENETC_QOS
  24. enable/disable from user space via QoS commands (tc). On the kernel
  25. side, it can be loaded by the QoS driver. Currently, it only supports
  26. taprio (802.1Qbv) and the Credit Based Shaper (802.1Qav).
  27. +
  28. +config ENETC_TSN
  29. + bool "TSN Support for NXP ENETC driver"
  30. + default n
  31. + depends on TSN && FSL_ENETC
  32. + help
  33. + This enables TSN support in the Freescale ENETC driver. It provides
  34. + an interface to configure the TSN capabilities of the ENETC. The
  35. + interface links to net/tsn/* and include/net/tsn.h. User space
  36. + refers to include/uapi/linux/tsn.h.
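
Per the depends line above, the option only becomes selectable once both the TSN core and the base driver are enabled; a minimal config fragment for trying this patch would be (sketch, option names taken from this Kconfig and its stated dependencies):

	CONFIG_TSN=y
	CONFIG_FSL_ENETC=y
	CONFIG_ENETC_TSN=y
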
  37. --- a/drivers/net/ethernet/freescale/enetc/Makefile
  38. +++ b/drivers/net/ethernet/freescale/enetc/Makefile
  39. @@ -6,6 +6,7 @@ obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o
  40. fsl-enetc-y := enetc_pf.o enetc_mdio.o $(common-objs)
  41. fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o
  42. fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
  43. +fsl-enetc-$(CONFIG_ENETC_TSN) += enetc_tsn.o
  44. obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
  45. fsl-enetc-vf-y := enetc_vf.o $(common-objs)
  46. --- a/drivers/net/ethernet/freescale/enetc/enetc.c
  47. +++ b/drivers/net/ethernet/freescale/enetc/enetc.c
  48. @@ -145,7 +145,8 @@ static int enetc_map_tx_buffs(struct ene
  49. do_tstamp = (active_offloads & ENETC_F_TX_TSTAMP) &&
  50. (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP);
  51. tx_swbd->do_tstamp = do_tstamp;
  52. - tx_swbd->check_wb = tx_swbd->do_tstamp;
  53. + tx_swbd->qbv_en = !!(active_offloads & ENETC_F_QBV);
  54. + tx_swbd->check_wb = tx_swbd->do_tstamp || tx_swbd->qbv_en;
  55. if (do_vlan || do_tstamp)
  56. flags |= ENETC_TXBD_FLAGS_EX;
  57. @@ -342,7 +343,7 @@ static void enetc_tstamp_tx(struct sk_bu
  58. static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
  59. {
  60. struct net_device *ndev = tx_ring->ndev;
  61. - int tx_frm_cnt = 0, tx_byte_cnt = 0;
  62. + int tx_frm_cnt = 0, tx_byte_cnt = 0, tx_win_drop = 0;
  63. struct enetc_tx_swbd *tx_swbd;
  64. int i, bds_to_clean;
  65. bool do_tstamp;
  66. @@ -372,6 +373,10 @@ static bool enetc_clean_tx_ring(struct e
  67. &tstamp);
  68. do_tstamp = true;
  69. }
  70. +
  71. + if (tx_swbd->qbv_en &&
  72. + txbd->wb.status & ENETC_TXBD_STATS_WIN)
  73. + tx_win_drop++;
  74. }
  75. if (likely(tx_swbd->dma))
  76. @@ -415,6 +420,7 @@ static bool enetc_clean_tx_ring(struct e
  77. tx_ring->next_to_clean = i;
  78. tx_ring->stats.packets += tx_frm_cnt;
  79. tx_ring->stats.bytes += tx_byte_cnt;
  80. + tx_ring->stats.win_drop += tx_win_drop;
  81. if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
  82. __netif_subqueue_stopped(ndev, tx_ring->index) &&
  83. @@ -778,6 +784,9 @@ void enetc_get_si_caps(struct enetc_si *
  84. if (val & ENETC_SIPCAPR0_QBV)
  85. si->hw_features |= ENETC_SI_F_QBV;
  86. +
  87. + if (val & ENETC_SIPCAPR0_QBU)
  88. + si->hw_features |= ENETC_SI_F_QBU;
  89. }
  90. static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
  91. --- a/drivers/net/ethernet/freescale/enetc/enetc.h
  92. +++ b/drivers/net/ethernet/freescale/enetc/enetc.h
  93. @@ -10,6 +10,7 @@
  94. #include <linux/ethtool.h>
  95. #include <linux/if_vlan.h>
  96. #include <linux/phy.h>
  97. +#include <net/tsn.h>
  98. #include "enetc_hw.h"
  99. @@ -24,6 +25,7 @@ struct enetc_tx_swbd {
  100. u8 is_dma_page:1;
  101. u8 check_wb:1;
  102. u8 do_tstamp:1;
  103. + u8 qbv_en:1;
  104. };
  105. #define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE
  106. @@ -42,6 +44,7 @@ struct enetc_ring_stats {
  107. unsigned int packets;
  108. unsigned int bytes;
  109. unsigned int rx_alloc_errs;
  110. + unsigned int win_drop;
  111. };
  112. #define ENETC_BDR_DEFAULT_SIZE 1024
  113. @@ -111,6 +114,28 @@ struct enetc_msg_swbd {
  114. int size;
  115. };
  116. +#ifdef CONFIG_ENETC_TSN
  117. +/* Credit-Based Shaper parameters */
  118. +struct cbs {
  119. + u8 tc;
  120. + bool enable;
  121. + u8 bw;
  122. + u32 hi_credit;
  123. + u32 lo_credit;
  124. + u32 idle_slope;
  125. + u32 send_slope;
  126. + u32 tc_max_sized_frame;
  127. + u32 max_interfrence_size;
  128. +};
  129. +
  130. +struct enetc_cbs {
  131. + u32 port_transmit_rate;
  132. + u32 port_max_size_frame;
  133. + u8 tc_nums;
  134. + struct cbs cbs[0];
  135. +};
  136. +#endif
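
The cbs fields above follow the 802.1Qav credit-based shaper model. As a sketch of how the slope and credit fields are conventionally derived from the bandwidth fraction and the frame sizes (illustrative helper, not part of the patch; it reuses the struct fields above, including the patch's spelling of max_interfrence_size):

	#include <linux/math64.h>

	/* Sketch: conventional 802.1Qav parameter derivation. bw is the
	 * class bandwidth in percent, port_transmit_rate in bits per second.
	 */
	static void cbs_derive(struct cbs *c, u32 port_transmit_rate)
	{
		c->idle_slope = div_u64((u64)port_transmit_rate * c->bw, 100);
		/* sendSlope is negative by definition; stored as a magnitude */
		c->send_slope = port_transmit_rate - c->idle_slope;
		/* hiCredit = maxInterferenceSize * idleSlope / portRate */
		c->hi_credit = div_u64((u64)c->max_interfrence_size * c->idle_slope,
				       port_transmit_rate);
		/* loCredit = maxFrameSize * sendSlope / portRate */
		c->lo_credit = div_u64((u64)c->tc_max_sized_frame * c->send_slope,
				       port_transmit_rate);
	}
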
  137. +
  138. #define ENETC_REV1 0x1
  139. enum enetc_errata {
  140. ENETC_ERR_TXCSUM = BIT(0),
  141. @@ -119,6 +144,7 @@ enum enetc_errata {
  142. };
  143. #define ENETC_SI_F_QBV BIT(0)
  144. +#define ENETC_SI_F_QBU BIT(1)
  145. /* PCI IEP device data */
  146. struct enetc_si {
  147. @@ -136,6 +162,10 @@ struct enetc_si {
  148. int num_rss; /* number of RSS buckets */
  149. unsigned short pad;
  150. int hw_features;
  151. +#ifdef CONFIG_ENETC_TSN
  152. + struct enetc_cbs *ecbs;
  153. +#endif
  154. +
  155. };
  156. #define ENETC_SI_ALIGN 32
  157. @@ -177,6 +207,7 @@ enum enetc_active_offloads {
  158. ENETC_F_RX_TSTAMP = BIT(0),
  159. ENETC_F_TX_TSTAMP = BIT(1),
  160. ENETC_F_QBV = BIT(2),
  161. + ENETC_F_QBU = BIT(3),
  162. };
  163. struct enetc_ndev_priv {
  164. @@ -261,3 +292,10 @@ int enetc_setup_tc_cbs(struct net_device
  165. #define enetc_sched_speed_set(ndev) (void)0
  166. #define enetc_setup_tc_cbs(ndev, type_data) -EOPNOTSUPP
  167. #endif
  168. +#ifdef CONFIG_ENETC_TSN
  169. +void enetc_tsn_pf_init(struct net_device *netdev, struct pci_dev *pdev);
  170. +void enetc_tsn_pf_deinit(struct net_device *netdev);
  171. +#else
  172. +#define enetc_tsn_pf_init(netdev, pdev) (void)0
  173. +#define enetc_tsn_pf_deinit(netdev) (void)0
  174. +#endif
  175. --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
  176. +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
  177. @@ -187,6 +187,21 @@ static const struct {
  178. { ENETC_PICDR(3), "ICM DR3 discarded frames" },
  179. };
  180. +static const struct {
  181. + int reg;
  182. + char name[ETH_GSTRING_LEN];
  183. +} enetc_pmac_counters[] = {
  184. + { ENETC_PM1_RFRM, "PMAC rx frames" },
  185. + { ENETC_PM1_RPKT, "PMAC rx packets" },
  186. + { ENETC_PM1_RDRP, "PMAC rx dropped packets" },
  187. + { ENETC_PM1_RFRG, "PMAC rx fragment packets" },
  188. + { ENETC_PM1_TFRM, "PMAC tx frames" },
  189. + { ENETC_PM1_TERR, "PMAC tx error frames" },
  190. + { ENETC_PM1_TPKT, "PMAC tx packets" },
  191. + { ENETC_MAC_MERGE_MMFCRXR, "MAC merge fragment rx counter" },
  192. + { ENETC_MAC_MERGE_MMFCTXR, "MAC merge fragment tx counter"},
  193. +};
  194. +
  195. static const char rx_ring_stats[][ETH_GSTRING_LEN] = {
  196. "Rx ring %2d frames",
  197. "Rx ring %2d alloc errors",
  198. @@ -196,6 +211,10 @@ static const char tx_ring_stats[][ETH_GS
  199. "Tx ring %2d frames",
  200. };
  201. +static const char tx_windrop_stats[][ETH_GSTRING_LEN] = {
  202. + "Tx window drop %2d frames",
  203. +};
  204. +
  205. static int enetc_get_sset_count(struct net_device *ndev, int sset)
  206. {
  207. struct enetc_ndev_priv *priv = netdev_priv(ndev);
  208. @@ -213,6 +232,12 @@ static int enetc_get_sset_count(struct n
  209. len += ARRAY_SIZE(enetc_port_counters);
  210. + if (priv->active_offloads & ENETC_F_QBU)
  211. + len += ARRAY_SIZE(enetc_pmac_counters);
  212. +
  213. + if (priv->active_offloads & ENETC_F_QBV)
  214. + len += ARRAY_SIZE(tx_windrop_stats) * priv->num_tx_rings;
  215. +
  216. return len;
  217. }
  218. @@ -251,6 +276,28 @@ static void enetc_get_strings(struct net
  219. ETH_GSTRING_LEN);
  220. p += ETH_GSTRING_LEN;
  221. }
  222. +
  223. + if (!(priv->active_offloads & ENETC_F_QBU))
  224. + break;
  225. +
  226. + for (i = 0; i < ARRAY_SIZE(enetc_pmac_counters); i++) {
  227. + strlcpy(p, enetc_pmac_counters[i].name,
  228. + ETH_GSTRING_LEN);
  229. + p += ETH_GSTRING_LEN;
  230. + }
  231. +
  232. + if (!(priv->active_offloads & ENETC_F_QBV))
  233. + break;
  234. +
  235. + for (i = 0; i < priv->num_tx_rings; i++) {
  236. + for (j = 0; j < ARRAY_SIZE(tx_windrop_stats); j++) {
  237. + snprintf(p, ETH_GSTRING_LEN,
  238. + tx_windrop_stats[j],
  239. + i);
  240. + p += ETH_GSTRING_LEN;
  241. + }
  242. + }
  243. +
  244. break;
  245. }
  246. }
  247. @@ -278,6 +325,18 @@ static void enetc_get_ethtool_stats(stru
  248. for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++)
  249. data[o++] = enetc_port_rd(hw, enetc_port_counters[i].reg);
  250. +
  251. + if (!(priv->active_offloads & ENETC_F_QBU))
  252. + return;
  253. +
  254. + for (i = 0; i < ARRAY_SIZE(enetc_pmac_counters); i++)
  255. + data[o++] = enetc_port_rd(hw, enetc_pmac_counters[i].reg);
  256. +
  257. + if (!(priv->active_offloads & ENETC_F_QBV))
  258. + return;
  259. +
  260. + for (i = 0; i < priv->num_tx_rings; i++)
  261. + data[o++] = priv->tx_ring[i]->stats.win_drop;
  262. }
  263. #define ENETC_RSSHASH_L3 (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO | RXH_IP_SRC | \
  264. --- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
  265. +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
  266. @@ -19,6 +19,7 @@
  267. #define ENETC_SICTR1 0x1c
  268. #define ENETC_SIPCAPR0 0x20
  269. #define ENETC_SIPCAPR0_QBV BIT(4)
  270. +#define ENETC_SIPCAPR0_QBU BIT(3)
  271. #define ENETC_SIPCAPR0_RSS BIT(8)
  272. #define ENETC_SIPCAPR1 0x24
  273. #define ENETC_SITGTGR 0x30
  274. @@ -241,10 +242,20 @@ enum enetc_bdr_type {TX, RX};
  275. #define ENETC_PCS_IF_MODE_SGMII_AN 0x0003
  276. #define ENETC_PM0_IF_MODE 0x8300
  277. +#define ENETC_PM1_IF_MODE 0x9300
  278. #define ENETC_PMO_IFM_RG BIT(2)
  279. #define ENETC_PM0_IFM_RLP (BIT(5) | BIT(11))
  280. #define ENETC_PM0_IFM_RGAUTO (BIT(15) | ENETC_PMO_IFM_RG | BIT(1))
  281. #define ENETC_PM0_IFM_XGMII BIT(12)
  282. +#define ENETC_PSIDCAPR 0x1b08
  283. +#define ENETC_PSIDCAPR_MSK GENMASK(15, 0)
  284. +#define ENETC_PSFCAPR 0x1b18
  285. +#define ENETC_PSFCAPR_MSK GENMASK(15, 0)
  286. +#define ENETC_PSGCAPR 0x1b28
  287. +#define ENETC_PSGCAPR_GCL_MSK GENMASK(18, 16)
  288. +#define ENETC_PSGCAPR_SGIT_MSK GENMASK(15, 0)
  289. +#define ENETC_PFMCAPR 0x1b38
  290. +#define ENETC_PFMCAPR_MSK GENMASK(15, 0)
  291. /* MAC counters */
  292. #define ENETC_PM0_REOCT 0x8100
  293. @@ -298,6 +309,15 @@ enum enetc_bdr_type {TX, RX};
  294. #define ENETC_PM0_TSCOL 0x82E0
  295. #define ENETC_PM0_TLCOL 0x82E8
  296. #define ENETC_PM0_TECOL 0x82F0
  297. +#define ENETC_PM1_RFRM 0x9120
  298. +#define ENETC_PM1_RDRP 0x9158
  299. +#define ENETC_PM1_RPKT 0x9160
  300. +#define ENETC_PM1_RFRG 0x91B8
  301. +#define ENETC_PM1_TFRM 0x9220
  302. +#define ENETC_PM1_TERR 0x9238
  303. +#define ENETC_PM1_TPKT 0x9260
  304. +#define ENETC_MAC_MERGE_MMFCRXR 0x1f14
  305. +#define ENETC_MAC_MERGE_MMFCTXR 0x1f18
  306. /* Port counters */
  307. #define ENETC_PICDR(n) (0x0700 + (n) * 8) /* n = [0..3] */
  308. @@ -456,6 +476,7 @@ union enetc_tx_bd {
  309. #define ENETC_TXBD_FLAGS_CSUM BIT(3)
  310. #define ENETC_TXBD_FLAGS_EX BIT(6)
  311. #define ENETC_TXBD_FLAGS_F BIT(7)
  312. +#define ENETC_TXBD_STATS_WIN BIT(7)
  313. static inline void enetc_clear_tx_bd(union enetc_tx_bd *txbd)
  314. {
  315. @@ -483,6 +504,8 @@ static inline __le16 enetc_txbd_l3_csoff
  316. #define ENETC_TXBD_L4_UDP BIT(5)
  317. #define ENETC_TXBD_L4_TCP BIT(6)
  318. +#define enetc_tsn_is_enabled() IS_ENABLED(CONFIG_ENETC_TSN)
  319. +
  320. union enetc_rx_bd {
  321. struct {
  322. __le64 addr;
  323. @@ -629,21 +652,307 @@ enum bdcr_cmd_class {
  324. BDCR_CMD_RFS,
  325. BDCR_CMD_PORT_GCL,
  326. BDCR_CMD_RECV_CLASSIFIER,
  327. + BDCR_CMD_STREAM_IDENTIFY,
  328. + BDCR_CMD_STREAM_FILTER,
  329. + BDCR_CMD_STREAM_GCL,
  330. + BDCR_CMD_FLOW_METER,
  331. __BDCR_CMD_MAX_LEN,
  332. BDCR_CMD_MAX_LEN = __BDCR_CMD_MAX_LEN - 1,
  333. };
  334. +/* class 7, command 0, Stream Identity Entry Configuration */
  335. +struct streamid_conf {
  336. + __le32 stream_handle;
  337. + __le32 iports;
  338. + u8 id_type;
  339. + u8 oui[3];
  340. + u8 res[3];
  341. + u8 en;
  342. +};
  343. +
  344. +#define ENETC_CBDR_SID_VID_MASK 0xfff
  345. +#define ENETC_CBDR_SID_VIDM BIT(12)
  346. +#define ENETC_CBDR_SID_TG_MASK 0xc000
  344. +/* streamid_conf address points to this data space */
  348. +struct null_streamid_data {
  349. + u8 dmac[6];
  350. + u16 vid_vidm_tg;
  351. +};
  352. +
  353. +struct smac_streamid_data {
  354. + u8 smac[6];
  355. + u16 vid_vidm_tg;
  356. +};
  357. +
  358. +/* class 7, command 1, query config, long format */
  359. +/* no structure definition needed */
  360. +
  361. +#define ENETC_CDBR_SID_ENABLE BIT(7)
  362. +/* Stream ID Query Response Data Buffer */
  363. +struct streamid_query_resp {
  364. + u32 stream_handle;
  365. + u32 input_ports;
  366. + u8 id_type;
  367. + u8 oui[3];
  368. + u8 mac[6];
  369. + u16 vid_vidm_tg;
  370. + u8 res[3];
  371. + u8 en;
  372. +};
  373. +
  374. +/* class 7, command 2, query status count, Stream ID query long format */
  375. +struct streamid_stat_query {
  376. + u8 res[12];
  377. + __le32 input_ports;
  378. +};
  379. +
  380. +/* Stream Identity Statistics Query */
  381. +struct streamid_stat_query_resp {
  382. + u32 psinl;
  383. + u32 psinh;
  384. + u64 pspi[32];
  385. +};
  386. +
  387. +#define ENETC_CBDR_SFI_PRI_MASK 0x7
  388. +#define ENETC_CBDR_SFI_PRIM BIT(3)
  389. +#define ENETC_CBDR_SFI_BLOV BIT(4)
  390. +#define ENETC_CBDR_SFI_BLEN BIT(5)
  391. +#define ENETC_CBDR_SFI_MSDUEN BIT(6)
  392. +#define ENETC_CBDR_SFI_FMITEN BIT(7)
  393. +#define ENETC_CBDR_SFI_ENABLE BIT(7)
  394. +/* class 8, command 0, Stream Filter Instance, Short Format */
  395. +struct sfi_conf {
  396. + __le32 stream_handle;
  397. + u8 multi;
  398. + u8 res[2];
  399. + u8 sthm;
  400. + /* Max Service Data Unit or Flow Meter Instance Table index.
  401. + * Depending on the value of FLT this represents either Max
  402. + * Service Data Unit (max frame size) allowed by the filter
  403. + * entry, or an index into the Flow Meter Instance table
  404. + * identifying the policer that will be used to police
  405. + * it.
  406. + */
  407. + __le16 fm_inst_table_index;
  408. + __le16 msdu;
  409. + __le16 sg_inst_table_index;
  410. + u8 res1[2];
  411. + __le32 input_ports;
  412. + u8 res2[3];
  413. + u8 en;
  414. +};
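
The comment above is the key subtlety of this table: fm_inst_table_index and msdu are interpreted according to the flag bits. A sketch of filling an entry for plain MSDU checking, under the assumption (from the bit definitions above) that the filter flags live in the multi byte and the enable bit in en:

	/* Sketch: stream filter entry that only enforces a max SDU. */
	static void sfi_fill_msdu(struct sfi_conf *sfi, u32 handle, u16 max_sdu)
	{
		memset(sfi, 0, sizeof(*sfi));
		sfi->stream_handle = cpu_to_le32(handle);
		sfi->msdu = cpu_to_le16(max_sdu);
		/* MSDUEN: msdu holds a frame-size bound; with FMITEN instead,
		 * fm_inst_table_index would select a flow-meter (policer) entry.
		 */
		sfi->multi |= ENETC_CBDR_SFI_MSDUEN;
		sfi->en = ENETC_CBDR_SFI_ENABLE;
	}
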
  415. +
  416. +/* class 8, command 1, Stream Filter Instance, write back, short Format */
  417. +struct sfi_query {
  418. + u32 stream_handle;
  419. + u8 multi;
  420. + u8 res[2];
  421. + u8 sthm;
  422. + u16 fm_inst_table_index;
  423. + u16 msdu;
  424. + u16 sg_inst_table_index;
  425. + u8 res1[2];
  426. + u32 input_ports;
  427. + u8 res2[3];
  428. + u8 en;
  429. +};
  430. +
  431. +/* class 8, command 2, Stream Filter Instance status query, short format;
  432. + * the command needs no structure definition.
  433. + * Stream Filter Instance Query Statistics Response data
  434. + */
  435. +struct sfi_counter_data {
  436. + u32 matchl;
  437. + u32 matchh;
  438. + u32 msdu_dropl;
  439. + u32 msdu_droph;
  440. + u32 stream_gate_dropl;
  441. + u32 stream_gate_droph;
  442. + u32 flow_meter_dropl;
  443. + u32 flow_meter_droph;
  444. +};
  445. +
  446. +#define ENETC_CBDR_SGI_OIPV_MASK 0x7
  447. +#define ENETC_CBDR_SGI_OIPV_EN BIT(3)
  448. +#define ENETC_CBDR_SGI_CGTST BIT(6)
  449. +#define ENETC_CBDR_SGI_OGTST BIT(7)
  450. +#define ENETC_CBDR_SGI_CFG_CHG BIT(1)
  451. +#define ENETC_CBDR_SGI_CFG_PND BIT(2)
  452. +#define ENETC_CBDR_SGI_OEX BIT(4)
  453. +#define ENETC_CBDR_SGI_OEXEN BIT(5)
  454. +#define ENETC_CBDR_SGI_IRX BIT(6)
  455. +#define ENETC_CBDR_SGI_IRXEN BIT(7)
  456. +#define ENETC_CBDR_SGI_ACLLEN_MASK 0x3
  457. +#define ENETC_CBDR_SGI_OCLLEN_MASK 0xc
  458. +#define ENETC_CBDR_SGI_EN BIT(7)
  459. +/* class 9, command 0, Stream Gate Instance Table, Short Format
  460. + * class 9, command 2, Stream Gate Instance Table entry query write back
  461. + * Short Format
  462. + */
  463. +struct sgi_table {
  464. + u8 res[8];
  465. + u8 oipv;
  466. + u8 res0[2];
  467. + u8 ocgtst;
  468. + u8 res1[7];
  469. + u8 gset;
  470. + u8 oacl_len;
  471. + u8 res2[2];
  472. + u8 en;
  473. +};
  474. +
  475. +#define ENETC_CBDR_SGI_AIPV_MASK 0x7
  476. +#define ENETC_CBDR_SGI_AIPV_EN BIT(3)
  477. +#define ENETC_CBDR_SGI_AGTST BIT(7)
  478. +
  479. +/* class 9, command 1, Stream Gate Control List, Long Format */
  480. +struct sgcl_conf {
  481. + u8 aipv;
  482. + u8 res[2];
  483. + u8 agtst;
  484. + u8 res1[4];
  485. + union {
  486. + struct {
  487. + u8 res2[4];
  488. + u8 acl_len;
  489. + u8 res3[3];
  490. + };
  491. + u8 cct[8]; /* Config change time */
  492. + };
  493. +};
  494. +
  495. +/* stream control list, class 9, cmd 1 data buffer */
  496. +struct sgcl_data {
  497. + u32 btl;
  498. + u32 bth;
  499. + u32 ct;
  500. + u32 cte;
  501. + /* followed by struct sgce entries */
  502. +};
  503. +
  504. +/* class 9, command 2, stream gate instance table entry query, short format;
  505. + * the write-back is struct sgi_table, so no definition is needed.
  506. + * class 9, command 3, Stream Gate Control List Query Descriptor - Long Format:
  507. + * if ocl_len or acl_len is 0, the oper or admin list is not returned in the
  508. + * data space; the true length is written back into the space.
  509. + */
  510. +struct sgcl_query {
  511. + u8 res[12];
  512. + u8 oacl_len;
  513. + u8 res1[3];
  514. +};
  515. +
  516. +/* field definitions for 'stat' below */
  517. +#define ENETC_CBDR_SGIQ_AIPV_MASK 0x7
  518. +#define ENETC_CBDR_SGIQ_AIPV_EN BIT(3)
  519. +#define ENETC_CBDR_SGIQ_AGTST BIT(4)
  520. +#define ENETC_CBDR_SGIQ_ACL_LEN_MASK 0x60
  521. +#define ENETC_CBDR_SGIQ_OIPV_MASK 0x380
  522. +#define ENETC_CBDR_SGIQ_OIPV_EN BIT(10)
  523. +#define ENETC_CBDR_SGIQ_OGTST BIT(11)
  524. +#define ENETC_CBDR_SGIQ_OCL_LEN_MASK 0x3000
  525. +/* class 9, command 3 data space */
  526. +struct sgcl_query_resp {
  527. + u16 stat;
  528. + u16 res;
  529. + u32 abtl;
  530. + u32 abth;
  531. + u32 act;
  532. + u32 acte;
  533. + u32 cctl;
  534. + u32 ccth;
  535. + u32 obtl;
  536. + u32 obth;
  537. + u32 oct;
  538. + u32 octe;
  539. +};
  540. +
  541. +/* class 9, command 4, Stream Gate Instance Table Query Statistics Response;
  542. + * short command, write back, no command structure needed
  543. + */
  544. +struct sgi_query_stat_resp {
  545. + u32 pgcl;
  546. + u32 pgch;
  547. + u32 dgcl;
  548. + u32 dgch;
  549. + u16 msdu_avail;
  550. + u8 res[6];
  551. +};
  552. +
  553. +#define ENETC_CBDR_FMI_MR BIT(0)
  554. +#define ENETC_CBDR_FMI_MREN BIT(1)
  555. +#define ENETC_CBDR_FMI_DOY BIT(2)
  556. +#define ENETC_CBDR_FMI_CM BIT(3)
  557. +#define ENETC_CBDR_FMI_CF BIT(4)
  558. +#define ENETC_CBDR_FMI_NDOR BIT(5)
  559. +#define ENETC_CBDR_FMI_OALEN BIT(6)
  560. +#define ENETC_CBDR_FMI_IRFPP_MASK 0x1f
  561. +/* class 10: command 0/1, Flow Meter Instance Set, short Format */
  562. +struct fmi_conf {
  563. + __le32 cir;
  564. + __le32 cbs;
  565. + __le32 eir;
  566. + __le32 ebs;
  567. + u8 conf;
  568. + u8 res1;
  569. + u8 ir_fpp;
  570. + u8 res2[4];
  571. + u8 en;
  572. +};
  573. +
  574. +/* class:10, command:2, Flow Meter Instance Statistics Query Response */
  575. +struct fmi_query_stat_resp {
  576. + u32 bcl;
  577. + u32 bch;
  578. + u32 dfl;
  579. + u32 dfh;
  580. + u32 d0gfl;
  581. + u32 d0gfh;
  582. + u32 d1gfl;
  583. + u32 d1gfh;
  584. + u32 dyfl;
  585. + u32 dyfh;
  586. + u32 ryfl;
  587. + u32 ryfh;
  588. + u32 drfl;
  589. + u32 drfh;
  590. + u32 rrfl;
  591. + u32 rrfh;
  592. + u32 lts;
  593. + u32 bci;
  594. + u32 bcf;
  595. + u32 bei;
  596. + u32 bef;
  597. +};
  598. +
  599. /* class 5, command 0 */
  600. struct tgs_gcl_conf {
  601. u8 atc; /* init gate value */
  602. u8 res[7];
  603. - struct {
  604. - u8 res1[4];
  605. - __le16 acl_len;
  606. - u8 res2[2];
  607. + union {
  608. + struct {
  609. + u8 res1[4];
  610. + __le16 acl_len;
  611. + u8 res2[2];
  612. + };
  613. + struct {
  614. + u32 cctl;
  615. + u32 ccth;
  616. + };
  617. };
  618. };
  619. +#define ENETC_CBDR_SGL_IOMEN BIT(0)
  620. +#define ENETC_CBDR_SGL_IPVEN BIT(3)
  621. +#define ENETC_CBDR_SGL_GTST BIT(4)
  622. +#define ENETC_CBDR_SGL_IPV_MASK 0xe
  623. +/* Stream Gate Control List Entry */
  624. +struct sgce {
  625. + u32 interval;
  626. + u8 msdu[3];
  627. + u8 multi;
  628. +};
  629. +
  630. /* gate control list entry */
  631. struct gce {
  632. __le32 period;
  633. @@ -660,13 +969,55 @@ struct tgs_gcl_data {
  634. struct gce entry[0];
  635. };
  636. +/* class 5, command 1 */
  637. +struct tgs_gcl_query {
  638. + u8 res[12];
  639. + union {
  640. + struct {
  641. + __le16 acl_len; /* admin list length */
  642. + __le16 ocl_len; /* operation list length */
  643. + };
  644. + struct {
  645. + u16 admin_list_len;
  646. + u16 oper_list_len;
  647. + };
  648. + };
  649. +
  650. +};
  651. +
  652. +/* tgs_gcl_query command response data format */
  653. +struct tgs_gcl_resp {
  654. + u32 abtl; /* base time */
  655. + u32 abth;
  656. + u32 act; /* cycle time */
  657. + u32 acte; /* cycle time extend */
  658. + u32 cctl; /* config change time */
  659. + u32 ccth;
  660. + u32 obtl; /* operation base time */
  661. + u32 obth;
  662. + u32 oct; /* operation cycle time */
  663. + u32 octe; /* operation cycle time extend */
  664. + u32 ccel; /* config change error */
  665. + u32 cceh;
  666. + /* followed by struct gce entries */
  667. +};
  668. +
  669. struct enetc_cbd {
  670. union{
  671. + struct sfi_conf sfi_conf;
  672. + struct sgi_table sgi_table;
  673. + struct sgi_query_stat_resp sgi_query_stat_resp;
  674. + struct fmi_conf fmi_conf;
  675. struct {
  676. __le32 addr[2];
  677. union {
  678. __le32 opt[4];
  679. - struct tgs_gcl_conf gcl_conf;
  680. + struct tgs_gcl_conf gcl_conf;
  681. + struct tgs_gcl_query gcl_query;
  682. + struct streamid_conf sid_set;
  683. + struct streamid_stat_query sid_stat;
  684. + struct sgcl_conf sgcl_conf;
  685. + struct sgcl_query sgcl_query;
  686. };
  687. }; /* Long format */
  688. __le32 data[6];
  689. @@ -681,11 +1032,88 @@ struct enetc_cbd {
  690. #define ENETC_CLK 400000000ULL
  691. +#define ENETC_PTCFPR(n) (0x1910 + (n) * 4) /* n = [0 ..7] */
  692. +#define ENETC_FPE BIT(31)
  693. +
  694. +/* Port capability register 0 */
  695. +#define ENETC_PCAPR0_PSFPM BIT(10)
  696. +#define ENETC_PCAPR0_PSFP BIT(9)
  697. +#define ENETC_PCAPR0_TSN BIT(4)
  698. +#define ENETC_PCAPR0_QBU BIT(3)
  699. +
  700. /* port time gating control register */
  701. #define ENETC_QBV_PTGCR_OFFSET 0x11a00
  702. #define ENETC_QBV_TGE BIT(31)
  703. #define ENETC_QBV_TGPE BIT(30)
  704. +#define ENETC_QBV_TGDROP_DISABLE BIT(29)
  705. /* Port time gating capability register */
  706. #define ENETC_QBV_PTGCAPR_OFFSET 0x11a08
  707. #define ENETC_QBV_MAX_GCL_LEN_MASK GENMASK(15, 0)
  708. +
  709. +/* Port time gating tick granularity register */
  710. +#define ENETC_QBV_PTGTGR_OFFSET 0x11a0c
  711. +#define ENETC_QBV_TICK_GRAN_MASK 0xffffffff
  712. +
  713. +/* Port time gating admin gate list status register */
  714. +#define ENETC_QBV_PTGAGLSR_OFFSET 0x11a10
  715. +
  716. +#define ENETC_QBV_CFG_PEND_MASK 0x00000002
  717. +
  718. +/* Port time gating admin gate list length register */
  719. +#define ENETC_QBV_PTGAGLLR_OFFSET 0x11a14
  720. +#define ENETC_QBV_ADMIN_GATE_LIST_LENGTH_MASK 0xffff
  721. +
  722. +/* Port time gating operational gate list status register */
  723. +#define ENETC_QBV_PTGOGLSR_OFFSET 0x11a18
  724. +#define ENETC_QBV_HTA_POS_MASK 0xffff0000
  725. +
  726. +#define ENETC_QBV_CURR_POS_MASK 0x0000ffff
  727. +
  728. +/* Port time gating operational gate list length register */
  729. +#define ENETC_QBV_PTGOGLLR_OFFSET 0x11a1c
  730. +#define ENETC_QBV_OPER_GATE_LIST_LENGTH_MASK 0xffff
  731. +
  732. +/* Port time gating current time register */
  733. +#define ENETC_QBV_PTGCTR_OFFSET 0x11a20
  734. +#define ENETC_QBV_CURR_TIME_MASK 0xffffffffffffffff
  735. +
  736. +/* Port traffic class a time gating control register */
  737. +#define ENETC_QBV_PTC0TGCR_OFFSET 0x11a40
  738. +#define ENETC_QBV_PTC1TGCR_OFFSET 0x11a50
  739. +#define ENETC_QBV_PTC2TGCR_OFFSET 0x11a60
  740. +#define ENETC_QBV_PTC3TGCR_OFFSET 0x11a70
  741. +#define ENETC_QBV_PTC4TGCR_OFFSET 0x11a80
  742. +#define ENETC_QBV_PTC5TGCR_OFFSET 0x11a90
  743. +#define ENETC_QBV_PTC6TGCR_OFFSET 0x11aa0
  744. +#define ENETC_QBV_PTC7TGCR_OFFSET 0x11ab0
  745. +
  746. +/* Maximum Service Data Unit. */
  747. +#define ENETC_PTC0MSDUR 0x12020
  748. +#define ENETC_PTC1MSDUR 0x12024
  749. +#define ENETC_PTC2MSDUR 0x12028
  750. +#define ENETC_PTC3MSDUR 0x1202c
  751. +#define ENETC_PTC4MSDUR 0x12030
  752. +#define ENETC_PTC5MSDUR 0x12034
  753. +#define ENETC_PTC6MSDUR 0x12038
  754. +#define ENETC_PTC7MSDUR 0x1203c
  755. +
  756. +#define ENETC_QBV_MAXSDU_MASK 0xffff
  757. +
  758. +/* Port traffic class a time gating status register */
  759. +#define ENETC_QBV_PTC0TGSR_OFFSET 0x11a44
  760. +#define ENETC_QBV_HTA_STATE_MASK 0x10000
  761. +#define ENETC_QBV_CURR_STATE_MASK 0x1
  762. +
  763. +/* Port traffic class a time gating transmission overrun counter register*/
  764. +#define ENETC_QBV_PTC0TGTOCR_OFFSET 0x11a48
  765. +#define ENETC_QBV_TX_OVERRUN_MASK 0xffffffffffffffff
  766. +#define ENETC_TGLSTR 0xa200
  767. +#define ENETC_TGS_MIN_DIS_MASK 0x80000000
  768. +#define ENETC_MIN_LOOKAHEAD_MASK 0xffff
  769. +
  770. +#define ENETC_PPSFPMR 0x11b00
  771. +#define ENETC_PPSFPMR_PSFPEN BIT(0)
  772. +#define ENETC_PPSFPMR_VS BIT(1)
  773. +#define ENETC_PPSFPMR_PVC BIT(2)
  774. +#define ENETC_PPSFPMR_PVZC BIT(3)
  775. --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
  776. +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
  777. @@ -525,12 +525,16 @@ static void enetc_configure_port_mac(str
  778. ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
  779. /* set auto-speed for RGMII */
  780. if (enetc_port_rd(hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG ||
  781. - phy_mode == PHY_INTERFACE_MODE_RGMII)
  782. + phy_mode == PHY_INTERFACE_MODE_RGMII) {
  783. enetc_port_wr(hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_RGAUTO);
  784. + enetc_port_wr(hw, ENETC_PM1_IF_MODE, ENETC_PM0_IFM_RGAUTO);
  785. + }
  786. if (phy_mode == PHY_INTERFACE_MODE_XGMII ||
  787. - phy_mode == PHY_INTERFACE_MODE_USXGMII)
  788. + phy_mode == PHY_INTERFACE_MODE_USXGMII) {
  789. enetc_port_wr(hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_XGMII);
  790. + enetc_port_wr(hw, ENETC_PM1_IF_MODE, ENETC_PM0_IFM_XGMII);
  791. + }
  792. }
  793. static void enetc_configure_port_pmac(struct enetc_hw *hw)
  794. @@ -749,6 +753,9 @@ static void enetc_pf_netdev_setup(struct
  795. if (si->hw_features & ENETC_SI_F_QBV)
  796. priv->active_offloads |= ENETC_F_QBV;
  797. + if (enetc_tsn_is_enabled() && (si->hw_features & ENETC_SI_F_QBU))
  798. + priv->active_offloads |= ENETC_F_QBU;
  799. +
  800. /* pick up primary MAC address from SI */
  801. enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
  802. }
  803. @@ -942,6 +949,8 @@ static int enetc_pf_probe(struct pci_dev
  804. netif_info(priv, probe, ndev, "%s v%s\n",
  805. enetc_drv_name, enetc_drv_ver);
  806. + enetc_tsn_pf_init(ndev, pdev);
  807. +
  808. return 0;
  809. err_reg_netdev:
  810. @@ -974,6 +983,8 @@ static void enetc_pf_remove(struct pci_d
  811. netif_info(priv, drv, si->ndev, "%s v%s remove\n",
  812. enetc_drv_name, enetc_drv_ver);
  813. + enetc_tsn_pf_deinit(si->ndev);
  814. +
  815. unregister_netdev(si->ndev);
  816. enetc_mdio_remove(pf);
  817. --- /dev/null
  818. +++ b/drivers/net/ethernet/freescale/enetc/enetc_tsn.c
  819. @@ -0,0 +1,2049 @@
  820. +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
  821. +/* Copyright 2017-2019 NXP */
  822. +
  823. +#ifdef CONFIG_ENETC_TSN
  824. +#include "enetc.h"
  825. +
  826. +#include <net/tsn.h>
  827. +#include <linux/module.h>
  828. +#include <linux/irqflags.h>
  829. +#include <linux/preempt.h>
  830. +
  831. +static u32 get_ndev_speed(struct net_device *netdev);
  832. +
  833. +static int alloc_cbdr(struct enetc_si *si, struct enetc_cbd **curr_cbd)
  834. +{
  835. + struct enetc_cbdr *ring = &si->cbd_ring;
  836. + int i;
  837. +
  838. + i = ring->next_to_use;
  839. + *curr_cbd = ENETC_CBD(*ring, i);
  840. +
  841. + memset(*curr_cbd, 0, sizeof(struct enetc_cbd));
  842. + return i;
  843. +}
  844. +
  845. +/* Kick the control BD ring by writing the producer index (pir) register,
  846. + * then reclaim completed descriptors and update the software counters.
  847. + */
  848. +static int xmit_cbdr(struct enetc_si *si, int i)
  849. +{
  850. + struct enetc_cbdr *ring = &si->cbd_ring;
  851. + struct enetc_cbd *dest_cbd;
  852. + int nc, timeout;
  853. +
  854. + i = (i + 1) % ring->bd_count;
  855. +
  856. + ring->next_to_use = i;
  857. + /* let H/W know BD ring has been updated */
  858. + enetc_wr_reg(ring->pir, i);
  859. +
  860. + timeout = ENETC_CBDR_TIMEOUT;
  861. +
  862. + do {
  863. + if (enetc_rd_reg(ring->cir) == i)
  864. + break;
  865. + usleep_range(10, 20);
  866. + timeout -= 10;
  867. + } while (timeout);
  868. +
  869. + if (!timeout)
  870. + return -EBUSY;
  871. +
  872. + nc = ring->next_to_clean;
  873. +
  874. + while (enetc_rd_reg(ring->cir) != nc) {
  875. + dest_cbd = ENETC_CBD(*ring, nc);
  876. + if (dest_cbd->status_flags & ENETC_CBD_STATUS_MASK)
  877. + WARN_ON(1);
  878. +
  879. + nc = (nc + 1) % ring->bd_count;
  880. + }
  881. +
  882. + ring->next_to_clean = nc;
  883. +
  884. + return 0;
  885. +}
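
Every command in this file follows the same pattern around these two helpers: claim a slot, fill the descriptor, kick the producer index and poll the consumer index. A condensed sketch of that lifecycle (hypothetical caller, error handling elided):

	/* Sketch: the command BD lifecycle used throughout this file. */
	static int issue_cbd(struct enetc_ndev_priv *priv, dma_addr_t dma)
	{
		struct enetc_cbd *cbdr;
		int i;

		i = alloc_cbdr(priv->si, &cbdr);    /* claim next_to_use slot, zeroed */
		cbdr->cmd = 0;                      /* command number within the class */
		cbdr->cls = BDCR_CMD_PORT_GCL;      /* command class */
		cbdr->addr[0] = lower_32_bits(dma); /* long-format data buffer */
		cbdr->addr[1] = upper_32_bits(dma);

		return xmit_cbdr(priv->si, i);      /* bump pir, poll cir */
	}
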
  886. +
  887. +static inline u64 get_current_time(struct enetc_si *si)
  888. +{
  889. + u64 tmp = 0;
  890. +
  891. + tmp = (u64)enetc_rd(&si->hw, ENETC_SICTR0);
  892. + return ((u64)enetc_rd(&si->hw, ENETC_SICTR1) << 32) + tmp;
  893. +}
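
One note on this helper: it reads the low word (SICTR0) before the high word (SICTR1), which is safe if the hardware latches the high half on the low-word access. If it does not, the usual rollover-safe pattern would be (sketch only; whether ENETC latches SICTR1 is not stated in this patch):

	/* Sketch: split 64-bit read that tolerates a low-word rollover. */
	static u64 get_current_time_safe(struct enetc_si *si)
	{
		u32 hi, lo, hi2;

		do {
			hi = enetc_rd(&si->hw, ENETC_SICTR1);
			lo = enetc_rd(&si->hw, ENETC_SICTR0);
			hi2 = enetc_rd(&si->hw, ENETC_SICTR1);
		} while (hi != hi2);	/* retry if the low word wrapped */

		return ((u64)hi << 32) | lo;
	}
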
  894. +
  895. +/* Class 10: Flow Meter Instance Statistics Query Descriptor - Long Format */
  896. +int enetc_qci_fmi_counters_get(struct net_device *ndev, u32 index,
  897. + struct fmi_query_stat_resp *counters)
  898. +{
  899. + struct enetc_cbd *cbdr;
  900. + struct fmi_query_stat_resp *fmi_data;
  901. + dma_addr_t dma;
  902. + u16 data_size, dma_size;
  903. + int curr_cbd;
  904. + struct enetc_ndev_priv *priv = netdev_priv(ndev);
  905. +
  906. + curr_cbd = alloc_cbdr(priv->si, &cbdr);
  907. +
  908. + cbdr->index = cpu_to_le16((u16)index);
  909. + cbdr->cmd = 2;
  910. + cbdr->cls = BDCR_CMD_FLOW_METER;
  911. + cbdr->status_flags = 0;
  912. +
  913. + data_size = sizeof(struct fmi_query_stat_resp);
  914. +
  915. + fmi_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
  916. + if (!fmi_data)
  917. + return -ENOMEM;
  918. +
  919. + dma_size = cpu_to_le16(data_size);
  920. + cbdr->length = dma_size;
  921. +
  922. + dma = dma_map_single(&priv->si->pdev->dev, fmi_data,
  923. + data_size, DMA_FROM_DEVICE);
  924. + if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
  925. + netdev_err(priv->si->ndev, "DMA mapping failed!\n");
  926. + kfree(fmi_data);
  927. + return -ENOMEM;
  928. + }
  929. + cbdr->addr[0] = lower_32_bits(dma);
  930. + cbdr->addr[1] = upper_32_bits(dma);
  931. +
  932. + xmit_cbdr(priv->si, curr_cbd);
  933. +
  934. + memcpy(counters, fmi_data, sizeof(struct fmi_query_stat_resp));
  935. +
  936. + memset(cbdr, 0, sizeof(*cbdr));
  937. + kfree(fmi_data);
  938. + return 0;
  939. +}
  940. +
  941. +u16 enetc_get_max_gcl_len(struct enetc_hw *hw)
  942. +{
  943. + return (enetc_rd(hw, ENETC_QBV_PTGCAPR_OFFSET)
  944. + & ENETC_QBV_MAX_GCL_LEN_MASK);
  945. +}
  946. +
  947. +void enetc_pspeed_set(struct net_device *ndev)
  948. +{
  949. + u32 speed, pspeed;
  950. + u32 difflag = 0;
  951. + struct enetc_ndev_priv *priv = netdev_priv(ndev);
  952. +
  953. + speed = get_ndev_speed(ndev);
  954. + pspeed = enetc_port_rd(&priv->si->hw, ENETC_PMR)
  955. + & ENETC_PMR_PSPEED_MASK;
  956. + switch (speed) {
  957. + case SPEED_1000:
  958. + if (pspeed != ENETC_PMR_PSPEED_1000M) {
  959. + difflag = 1;
  960. + pspeed = ENETC_PMR_PSPEED_1000M;
  961. + }
  962. + break;
  963. + case SPEED_2500:
  964. + if (pspeed != ENETC_PMR_PSPEED_2500M) {
  965. + difflag = 1;
  966. + pspeed = ENETC_PMR_PSPEED_2500M;
  967. + }
  968. +
  969. + break;
  970. + case SPEED_100:
  971. + if (pspeed != ENETC_PMR_PSPEED_100M) {
  972. + difflag = 1;
  973. + pspeed = ENETC_PMR_PSPEED_100M;
  974. + }
  975. + break;
  976. + case SPEED_10:
  977. + if (pspeed != ENETC_PMR_PSPEED_10M) {
  978. + difflag = 1;
  979. + pspeed = ENETC_PMR_PSPEED_10M;
  980. + }
  981. + break;
  982. + default:
  983. + netdev_err(ndev, "unsupported link speed\n");
  984. + }
  985. +
  986. + if (difflag) {
  987. + enetc_port_wr(&priv->si->hw, ENETC_PMR,
  988. + (enetc_port_rd(&priv->si->hw, ENETC_PMR)
  989. + & (~ENETC_PMR_PSPEED_MASK))
  990. + | pspeed);
  991. + }
  992. +}
  993. +
  994. +/* CBD Class 5: Time Gated Scheduling Gate Control List configuration
  995. + * Descriptor - Long Format
  996. + */
  997. +int enetc_qbv_set(struct net_device *ndev, struct tsn_qbv_conf *admin_conf)
  998. +{
  999. + struct enetc_cbd *cbdr;
  1000. + struct tgs_gcl_data *gcl_data;
  1001. + struct tgs_gcl_conf *gcl_config;
  1002. + struct gce *gce;
  1003. + u16 gcl_len;
  1004. + u16 data_size;
  1005. + int i;
  1006. + dma_addr_t dma;
  1007. + int curr_cbd;
  1008. + struct tsn_qbv_basic *admin_basic = &admin_conf->admin;
  1009. + struct enetc_ndev_priv *priv = netdev_priv(ndev);
  1010. + u32 temp;
  1011. + u64 tempclock;
  1012. + struct tsn_port *port;
  1013. +
  1014. + port = tsn_get_port(ndev);
  1015. + if (!port) {
  1016. + netdev_err(priv->si->ndev, "TSN device not registered!\n");
  1017. + return -ENODEV;
  1018. + }
  1019. +
  1020. + enetc_pspeed_set(ndev);
  1021. +
  1022. + gcl_len = admin_basic->control_list_length;
  1023. + if (gcl_len > enetc_get_max_gcl_len(&priv->si->hw))
  1024. + return -EINVAL;
  1025. +
  1026. + temp = enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET);
  1027. + if (admin_conf->gate_enabled && !(temp & ENETC_QBV_TGE)) {
  1028. + enetc_wr(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET,
  1029. + temp & (~ENETC_QBV_TGE));
  1030. + usleep_range(10, 20);
  1031. + enetc_wr(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET,
  1032. + temp | ENETC_QBV_TGE);
  1033. + } else if (!admin_conf->gate_enabled) {
  1034. + enetc_wr(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET,
  1035. + temp & (~ENETC_QBV_TGE));
  1036. + memcpy(&port->nd.ntdata, admin_conf, sizeof(*admin_conf));
  1037. + call_tsn_notifiers(TSN_QBV_CONFIGCHANGETIME_ARRIVE,
  1038. + ndev, &port->nd);
  1039. + return 0;
  1040. + }
  1041. +
  1042. + /* Set the maximum frame size for each traffic class index
  1043. + * PTCaMSDUR[MAXSDU]. The maximum frame size cannot exceed
  1044. + * 9,600 bytes (0x2580). Frames that exceed the limit are
  1045. + * discarded.
  1046. + */
  1047. + if (admin_conf->maxsdu) {
  1048. + enetc_wr(&priv->si->hw, ENETC_PTC0MSDUR, admin_conf->maxsdu);
  1049. + enetc_wr(&priv->si->hw, ENETC_PTC1MSDUR, admin_conf->maxsdu);
  1050. + enetc_wr(&priv->si->hw, ENETC_PTC2MSDUR, admin_conf->maxsdu);
  1051. + enetc_wr(&priv->si->hw, ENETC_PTC3MSDUR, admin_conf->maxsdu);
  1052. + enetc_wr(&priv->si->hw, ENETC_PTC4MSDUR, admin_conf->maxsdu);
  1053. + enetc_wr(&priv->si->hw, ENETC_PTC5MSDUR, admin_conf->maxsdu);
  1054. + enetc_wr(&priv->si->hw, ENETC_PTC6MSDUR, admin_conf->maxsdu);
  1055. + enetc_wr(&priv->si->hw, ENETC_PTC7MSDUR, admin_conf->maxsdu);
  1056. + }
  1057. +
  1058. + /* Configure the (administrative) gate control list using the
  1059. + * control BD descriptor.
  1060. + */
  1061. + curr_cbd = alloc_cbdr(priv->si, &cbdr);
  1062. +
  1063. + gcl_config = &cbdr->gcl_conf;
  1064. +
  1065. + data_size = struct_size(gcl_data, entry, gcl_len);
  1066. +
  1067. + gcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
  1068. + if (!gcl_data)
  1069. + return -ENOMEM;
  1070. +
  1071. + gce = &gcl_data->entry[0];
  1072. +
  1073. + gcl_config->atc = admin_basic->gate_states;
  1074. + gcl_config->acl_len = cpu_to_le16(gcl_len);
  1075. +
  1076. + if (!admin_basic->base_time) {
  1077. + gcl_data->btl =
  1078. + cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR0));
  1079. + gcl_data->bth =
  1080. + cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR1));
  1081. + } else {
  1082. + gcl_data->btl =
  1083. + cpu_to_le32(lower_32_bits(admin_basic->base_time));
  1084. + gcl_data->bth =
  1085. + cpu_to_le32(upper_32_bits(admin_basic->base_time));
  1086. + }
  1087. +
  1088. + gcl_data->ct = cpu_to_le32(admin_basic->cycle_time);
  1089. + gcl_data->cte = cpu_to_le32(admin_basic->cycle_time_extension);
  1090. +
  1091. + for (i = 0; i < gcl_len; i++) {
  1092. + struct gce *temp_gce = gce + i;
  1093. + struct tsn_qbv_entry *temp_entry;
  1094. +
  1095. + temp_entry = admin_basic->control_list + i;
  1096. +
  1097. + temp_gce->gate = temp_entry->gate_state;
  1098. + temp_gce->period = cpu_to_le32(temp_entry->time_interval);
  1099. + }
  1100. +
  1101. + cbdr->length = cpu_to_le16(data_size);
  1102. + cbdr->status_flags = 0;
  1103. +
  1104. + dma = dma_map_single(&priv->si->pdev->dev, gcl_data,
  1105. + data_size, DMA_TO_DEVICE);
  1106. + if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
  1107. + netdev_err(priv->si->ndev, "DMA mapping failed!\n");
  1108. + kfree(gcl_data);
  1109. + return -ENOMEM;
  1110. + }
  1111. +
  1112. + cbdr->addr[0] = lower_32_bits(dma);
  1113. + cbdr->addr[1] = upper_32_bits(dma);
  1114. + cbdr->cmd = 0;
  1115. + cbdr->cls = BDCR_CMD_PORT_GCL;
  1116. +
  1117. + /* Updated by ENETC on completion of the configuration
  1118. + * command. A zero value indicates success.
  1119. + */
  1120. + cbdr->status_flags = 0;
  1121. +
  1122. + xmit_cbdr(priv->si, curr_cbd);
  1123. +
  1124. + memcpy(&port->nd.ntdata, admin_conf, sizeof(*admin_conf));
  1125. +
  1126. + tempclock = ((u64)le32_to_cpu(gcl_config->ccth)) << 32;
  1127. + port->nd.ntdata.qbv_notify.admin.base_time =
  1128. + le32_to_cpu(gcl_config->cctl) + tempclock;
  1129. +
  1130. + memset(cbdr, 0, sizeof(struct enetc_cbd));
  1131. + dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_TO_DEVICE);
  1132. + kfree(gcl_data);
  1133. +
  1134. + call_tsn_notifiers(TSN_QBV_CONFIGCHANGETIME_ARRIVE,
  1135. + ndev, &port->nd);
  1136. +
  1137. + return 0;
  1138. +}
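
For reference, the caller-side contract of enetc_qbv_set() as exercised through the tsn layer; a sketch programming a two-entry gate cycle (field names taken from the usage above, values illustrative, time units assumed to be nanoseconds):

	/* Sketch: program a two-entry gate control list with a 1 ms cycle. */
	static int example_qbv_set(struct net_device *ndev)
	{
		struct tsn_qbv_entry gcl[2] = {
			{ .gate_state = 0x01, .time_interval = 500000 }, /* TC0 open */
			{ .gate_state = 0xfe, .time_interval = 500000 }, /* TC1-7 open */
		};
		struct tsn_qbv_conf conf = {
			.gate_enabled = true,
			.admin = {
				.gate_states = 0xff,		/* initial gate value */
				.control_list_length = 2,
				.cycle_time = 1000000,
				.base_time = 0,	/* 0: start from current time, per the code above */
				.control_list = gcl,
			},
		};

		return enetc_qbv_set(ndev, &conf);
	}
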
  1139. +
  1140. +/* CBD Class 5: Time Gated Scheduling Gate Control List query
  1141. + * Descriptor - Long Format
  1142. + */
  1143. +int enetc_qbv_get(struct net_device *ndev, struct tsn_qbv_conf *admin_conf)
  1144. +{
  1145. + struct enetc_cbd *cbdr;
  1146. + struct tgs_gcl_resp *gcl_data;
  1147. + struct tgs_gcl_query *gcl_query;
  1148. + struct gce *gce;
  1149. + struct tsn_qbv_basic *admin_basic = &admin_conf->admin;
  1150. + struct enetc_ndev_priv *priv = netdev_priv(ndev);
  1151. + dma_addr_t dma;
  1152. + int curr_cbd;
  1153. + u16 maxlen;
  1154. + u16 data_size, dma_size;
  1155. + u16 admin_len;
  1156. + u16 oper_len;
  1157. + u64 temp;
  1158. + int i;
  1159. +
  1160. + if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE) {
  1161. + admin_conf->gate_enabled = true;
  1162. + } else {
  1163. + admin_conf->gate_enabled = false;
  1164. + return 0;
  1165. + }
  1166. +
  1167. + curr_cbd = alloc_cbdr(priv->si, &cbdr);
  1168. +
  1169. + gcl_query = &cbdr->gcl_query;
  1170. +
  1171. + maxlen = enetc_get_max_gcl_len(&priv->si->hw);
  1172. +
  1173. + data_size = sizeof(struct tgs_gcl_resp)
  1174. + + sizeof(struct gce) * 2 * maxlen;
  1175. +
  1176. + gcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
  1177. + if (!gcl_data)
  1178. + return -ENOMEM;
  1179. +
  1180. + gce = (struct gce *)(gcl_data + 1);
  1181. +
  1182. + gcl_query->acl_len = cpu_to_le16(maxlen);
  1183. +
  1184. + dma_size = cpu_to_le16(data_size);
  1185. + cbdr->length = dma_size;
  1186. + cbdr->status_flags = 0;
  1187. +
  1188. + dma = dma_map_single(&priv->si->pdev->dev, gcl_data,
  1189. + data_size, DMA_FROM_DEVICE);
  1190. + if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
  1191. + netdev_err(priv->si->ndev, "DMA mapping failed!\n");
  1192. + kfree(gcl_data);
  1193. + return -ENOMEM;
  1194. + }
  1195. +
  1196. + cbdr->addr[0] = lower_32_bits(dma);
  1197. + cbdr->addr[1] = upper_32_bits(dma);
  1198. + cbdr->cmd = 1;
  1199. + cbdr->cls = BDCR_CMD_PORT_GCL;
  1200. + xmit_cbdr(priv->si, curr_cbd);
  1201. + dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_FROM_DEVICE);
  1202. +
  1203. + /* the cbdr slot has already been released back to the ring, so the values read below could be stale */
  1204. + admin_len = le16_to_cpu(gcl_query->admin_list_len);
  1205. + oper_len = le16_to_cpu(gcl_query->oper_list_len);
  1206. +
  1207. + admin_basic->control_list_length = admin_len;
  1208. +
  1209. + temp = ((u64)le32_to_cpu(gcl_data->abth)) << 32;
  1210. + admin_basic->base_time = le32_to_cpu(gcl_data->abtl) + temp;
  1211. +
  1212. + admin_basic->cycle_time = le32_to_cpu(gcl_data->act);
  1213. + admin_basic->cycle_time_extension = le32_to_cpu(gcl_data->acte);
  1214. +
  1215. + admin_basic->control_list = kcalloc(admin_len,
  1216. + sizeof(*admin_basic->control_list),
  1217. + GFP_KERNEL);
  1218. + if (!admin_basic->control_list) {
  1219. + memset(cbdr, 0, sizeof(*cbdr));
  1220. + kfree(gcl_data);
  1221. + return -ENOMEM;
  1222. + }
  1223. +
  1224. + for (i = 0; i < admin_len; i++) {
  1225. + struct gce *temp_gce = gce + i;
  1226. + struct tsn_qbv_entry *temp_entry;
  1227. +
  1228. + temp_entry = admin_basic->control_list + i;
  1229. +
  1230. + temp_entry->gate_state = temp_gce->gate;
  1231. + temp_entry->time_interval = le32_to_cpu(temp_gce->period);
  1232. + }
  1233. +
  1234. + /* The command has completed; report the admin configuration
  1235. + * as changed.
  1236. + */
  1237. + admin_conf->config_change = true;
  1238. +
  1239. + memset(cbdr, 0, sizeof(*cbdr));
  1240. + kfree(gcl_data);
  1241. +
  1242. + return 0;
  1243. +}
  1244. +
  1245. +int enetc_qbv_get_status(struct net_device *ndev,
  1246. + struct tsn_qbv_status *status)
  1247. +{
  1248. + struct enetc_cbd *cbdr;
  1249. + struct tgs_gcl_resp *gcl_data;
  1250. + struct tgs_gcl_query *gcl_query;
  1251. + struct gce *gce;
  1252. + struct tsn_qbv_basic *oper_basic;
  1253. + struct enetc_ndev_priv *priv;
  1254. + dma_addr_t dma;
  1255. + int curr_cbd;
  1256. + u16 maxlen;
  1257. + u16 data_size, dma_size;
  1258. + u16 admin_len;
  1259. + u16 oper_len;
  1260. + u64 temp;
  1261. + int i;
  1262. +
  1263. + if (!ndev)
  1264. + return -EINVAL;
  1265. +
  1266. + if (!status)
  1267. + return -EINVAL;
  1268. +
  1269. + oper_basic = &status->oper;
  1270. + priv = netdev_priv(ndev);
  1271. +
  1272. + if (!(enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE))
  1273. + return -EINVAL;
  1274. +
  1275. + curr_cbd = alloc_cbdr(priv->si, &cbdr);
  1276. +
  1277. + gcl_query = &cbdr->gcl_query;
  1278. +
  1279. + maxlen = enetc_get_max_gcl_len(&priv->si->hw);
  1280. +
  1281. + data_size = sizeof(struct tgs_gcl_resp) +
  1282. + sizeof(struct gce) * 2 * maxlen;
  1283. +
  1284. + gcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
  1285. + if (!gcl_data)
  1286. + return -ENOMEM;
  1287. +
  1288. + gce = (struct gce *)(gcl_data + 1);
  1289. +
  1290. + gcl_query->acl_len = cpu_to_le16(maxlen);
  1291. + gcl_query->ocl_len = cpu_to_le16(maxlen);
  1292. +
  1293. + dma_size = cpu_to_le16(data_size);
  1294. + cbdr->length = dma_size;
  1295. + cbdr->status_flags = 0; /* long format command no ie */
  1296. +
  1297. + dma = dma_map_single(&priv->si->pdev->dev, gcl_data,
  1298. + data_size, DMA_FROM_DEVICE);
  1299. + if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
  1300. + netdev_err(priv->si->ndev, "DMA mapping failed!\n");
  1301. + kfree(gcl_data);
  1302. + return -ENOMEM;
  1303. + }
  1304. +
  1305. + cbdr->addr[0] = lower_32_bits(dma);
  1306. + cbdr->addr[1] = upper_32_bits(dma);
  1307. + cbdr->cmd = 1;
  1308. + cbdr->cls = BDCR_CMD_PORT_GCL;
  1309. + xmit_cbdr(priv->si, curr_cbd);
  1310. + dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_FROM_DEVICE);
  1311. +
  1312. + /* the cbdr slot has already been released back to the ring, so the values read below could be stale */
  1313. + admin_len = le16_to_cpu(gcl_query->admin_list_len);
  1314. + oper_len = le16_to_cpu(gcl_query->oper_list_len);
  1315. +
  1316. + if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGAGLSR_OFFSET) &
  1317. + ENETC_QBV_CFG_PEND_MASK) {
  1318. + status->config_pending = true;
  1319. + goto exit;
  1320. + }
  1321. +
  1322. + /* The Oper and Admin timing fields exist in the response buffer even
  1323. + * if no valid corresponding list exists. These fields are considered
  1324. + * invalid if the corresponding list does not exist.
  1325. + */
  1326. + status->config_pending = false;
  1327. + temp = ((u64)le32_to_cpu(gcl_data->ccth)) << 32;
  1328. + status->config_change_time = le32_to_cpu(gcl_data->cctl) + temp;
  1329. +
  1330. + temp = ((u64)le32_to_cpu(gcl_data->cceh)) << 32;
  1331. + status->config_change_error = le32_to_cpu(gcl_data->ccel) + temp;
  1332. +
+ /* Tick granularity is reported by the SITGTGR register */
  1334. + status->tick_granularity = enetc_rd(&priv->si->hw, ENETC_SITGTGR);
  1335. +
  1336. + /* current time */
  1337. + status->current_time = get_current_time(priv->si);
  1338. +
  1339. + status->supported_list_max = maxlen;
  1340. +
+ /* The hardware does not report the oper/admin gate states here,
+ * so status->oper.gate_states is left as-is.
+ */
  1342. + status->oper.control_list_length = oper_len;
  1343. + temp = ((u64)le32_to_cpu(gcl_data->obth)) << 32;
  1344. + status->oper.base_time = le32_to_cpu(gcl_data->obtl) + temp;
  1345. + status->oper.cycle_time = le32_to_cpu(gcl_data->oct);
  1346. + status->oper.cycle_time_extension = le32_to_cpu(gcl_data->octe);
  1347. +
+ oper_basic->control_list =
+ kcalloc(oper_len, sizeof(*oper_basic->control_list), GFP_KERNEL);
  1350. + if (!oper_basic->control_list) {
  1351. + memset(cbdr, 0, sizeof(*cbdr));
  1352. + kfree(gcl_data);
  1353. + return -ENOMEM;
  1354. + }
  1355. +
  1356. + for (i = 0; i < oper_len; i++) {
  1357. + struct gce *temp_gce = gce + maxlen + i;
  1358. + struct tsn_qbv_entry *temp_entry = oper_basic->control_list + i;
  1359. +
  1360. + temp_entry->gate_state = temp_gce->gate;
  1361. + temp_entry->time_interval = le32_to_cpu(temp_gce->period);
  1362. + }
  1363. +
  1364. +exit:
  1365. + memset(cbdr, 0, sizeof(*cbdr));
  1366. + kfree(gcl_data);
  1367. + return 0;
  1368. +}
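+
+ /* Editor's sketch (not from the original patch): the repeated
+ * 32+32 bit assembly used above for config_change_time and
+ * base_time could be factored into one helper, assuming the two
+ * halves are __le32 fields as the le32_to_cpu() calls suggest:
+ */
+ static inline u64 enetc_le32x2_to_u64(__le32 lo, __le32 hi)
+ {
+ return ((u64)le32_to_cpu(hi) << 32) | le32_to_cpu(lo);
+ }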
  1369. +
  1370. +/* CBD Class 7: Stream Identity Entry Set Descriptor - Long Format */
  1371. +int enetc_cb_streamid_set(struct net_device *ndev, u32 index,
  1372. + bool en, struct tsn_cb_streamid *streamid)
  1373. +{
  1374. + struct enetc_cbd *cbdr;
  1375. + void *si_data;
  1376. + struct null_streamid_data *si_data1;
  1377. + struct smac_streamid_data *si_data2;
  1378. + struct streamid_conf *si_conf;
  1379. + struct enetc_ndev_priv *priv;
  1380. + dma_addr_t dma;
  1381. + u16 data_size, dma_size;
  1382. + int curr_cbd;
  1383. +
  1384. + if (!ndev)
  1385. + return -EINVAL;
  1386. +
  1387. + priv = netdev_priv(ndev);
  1388. +
  1389. + curr_cbd = alloc_cbdr(priv->si, &cbdr);
  1390. +
  1391. + cbdr->index = cpu_to_le16((u16)index);
  1392. + cbdr->cmd = 0;
  1393. + cbdr->cls = BDCR_CMD_STREAM_IDENTIFY;
  1394. + cbdr->status_flags = 0;
  1395. +
+ data_size = sizeof(struct null_streamid_data);
+ si_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
+ if (!si_data)
+ return -ENOMEM;
+ cbdr->length = cpu_to_le16(data_size);
  1399. +
  1400. + dma = dma_map_single(&priv->si->pdev->dev, si_data,
  1401. + data_size, DMA_FROM_DEVICE);
  1402. + if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
  1403. + netdev_err(priv->si->ndev, "DMA mapping failed!\n");
  1404. + kfree(si_data);
  1405. + return -ENOMEM;
  1406. + }
  1407. +
  1408. + cbdr->addr[0] = lower_32_bits(dma);
  1409. + cbdr->addr[1] = upper_32_bits(dma);
  1410. + si_data1 = (struct null_streamid_data *)si_data;
  1411. + si_data1->dmac[0] = 0xFF;
  1412. + si_data1->dmac[1] = 0xFF;
  1413. + si_data1->dmac[2] = 0xFF;
  1414. + si_data1->dmac[3] = 0xFF;
  1415. + si_data1->dmac[4] = 0xFF;
  1416. + si_data1->dmac[5] = 0xFF;
  1417. + si_data1->vid_vidm_tg =
  1418. + cpu_to_le16(ENETC_CBDR_SID_VID_MASK
  1419. + + ((0x3 << 14) | ENETC_CBDR_SID_VIDM));
  1420. +
  1421. + si_conf = &cbdr->sid_set;
  1422. + /* Only one port supported for one entry, set itself */
  1423. + si_conf->iports = 1 << (priv->si->pdev->devfn & 0x7);
  1424. + si_conf->id_type = 1;
  1425. + si_conf->oui[2] = 0x0;
  1426. + si_conf->oui[1] = 0x80;
  1427. + si_conf->oui[0] = 0xC2;
  1428. +
  1429. + xmit_cbdr(priv->si, curr_cbd);
  1430. +
  1431. + memset(cbdr, 0, sizeof(*cbdr));
  1432. + kfree(si_data);
  1433. +
  1434. + if (!en)
  1435. + return 0;
  1436. +
  1437. + curr_cbd = alloc_cbdr(priv->si, &cbdr);
  1438. +
  1439. + cbdr->index = cpu_to_le16((u16)index);
  1440. + cbdr->cmd = 0;
  1441. + cbdr->cls = BDCR_CMD_STREAM_IDENTIFY;
  1442. + cbdr->status_flags = 0;
  1443. +
  1444. + si_conf = &cbdr->sid_set;
  1445. + si_conf->en = 0x80;
  1446. + si_conf->stream_handle = cpu_to_le32(streamid->handle);
  1447. + si_conf->iports = 1 << (priv->si->pdev->devfn & 0x7);
  1448. + si_conf->id_type = streamid->type;
  1449. + si_conf->oui[2] = 0x0;
  1450. + si_conf->oui[1] = 0x80;
  1451. + si_conf->oui[0] = 0xC2;
  1452. +
  1453. + if (si_conf->id_type == 1) {
  1454. + data_size = sizeof(struct null_streamid_data);
  1455. + si_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
  1456. + } else if (si_conf->id_type == 2) {
  1457. + data_size = sizeof(struct smac_streamid_data);
  1458. + si_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
+ } else {
+ memset(cbdr, 0, sizeof(*cbdr));
+ return -EINVAL;
+ }
+
+ if (!si_data) {
+ memset(cbdr, 0, sizeof(*cbdr));
+ return -ENOMEM;
+ }
  1465. +
  1466. + dma_size = cpu_to_le16(data_size);
  1467. + cbdr->length = dma_size;
  1468. + cbdr->status_flags = 0;
  1469. +
  1470. + dma = dma_map_single(&priv->si->pdev->dev, si_data,
  1471. + data_size, DMA_FROM_DEVICE);
  1472. + if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
  1473. + netdev_err(priv->si->ndev, "DMA mapping failed!\n");
  1474. + memset(cbdr, 0, sizeof(*cbdr));
  1475. + kfree(si_data);
  1476. + return -ENOMEM;
  1477. + }
  1478. + cbdr->addr[0] = lower_32_bits(dma);
  1479. + cbdr->addr[1] = upper_32_bits(dma);
  1480. +
+ /* VIDM defaults to 1 (VID Match): if set (b1) the VID must
+ * match, otherwise any VID is considered a match. The VIDM
+ * setting is only used when TG is set to b01.
+ */
  1486. + if (si_conf->id_type == 1) {
  1487. + si_data1 = (struct null_streamid_data *)si_data;
  1488. + si_data1->dmac[0] = streamid->para.nid.dmac & 0xFF;
  1489. + si_data1->dmac[1] = (streamid->para.nid.dmac >> 8) & 0xFF;
  1490. + si_data1->dmac[2] = (streamid->para.nid.dmac >> 16) & 0xFF;
  1491. + si_data1->dmac[3] = (streamid->para.nid.dmac >> 24) & 0xFF;
  1492. + si_data1->dmac[4] = (streamid->para.nid.dmac >> 32) & 0xFF;
  1493. + si_data1->dmac[5] = (streamid->para.nid.dmac >> 40) & 0xFF;
  1494. + si_data1->vid_vidm_tg =
  1495. + cpu_to_le16((streamid->para.nid.vid & ENETC_CBDR_SID_VID_MASK) +
  1496. + ((((u16)(streamid->para.nid.tagged) & 0x3) << 14)
  1497. + | ENETC_CBDR_SID_VIDM));
  1498. + } else if (si_conf->id_type == 2) {
  1499. + si_data2 = (struct smac_streamid_data *)si_data;
  1500. + si_data2->smac[0] = streamid->para.sid.smac & 0xFF;
  1501. + si_data2->smac[1] = (streamid->para.sid.smac >> 8) & 0xFF;
  1502. + si_data2->smac[2] = (streamid->para.sid.smac >> 16) & 0xFF;
  1503. + si_data2->smac[3] = (streamid->para.sid.smac >> 24) & 0xFF;
  1504. + si_data2->smac[4] = (streamid->para.sid.smac >> 32) & 0xFF;
  1505. + si_data2->smac[5] = (streamid->para.sid.smac >> 40) & 0xFF;
  1506. + si_data2->vid_vidm_tg =
  1507. + cpu_to_le16((streamid->para.sid.vid & ENETC_CBDR_SID_VID_MASK) +
  1508. + ((((u16)(streamid->para.sid.tagged) & 0x3) << 14)
  1509. + | ENETC_CBDR_SID_VIDM));
  1510. + }
  1511. +
  1512. + xmit_cbdr(priv->si, curr_cbd);
  1513. +
  1514. + memset(cbdr, 0, sizeof(*cbdr));
  1515. + kfree(si_data);
  1516. +
  1517. + return 0;
  1518. +}
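+
+ /* Editor's sketch (not from the original patch): both stream
+ * identity formats above store the MAC address as six
+ * little-endian bytes extracted from a u64, so the open-coded
+ * shifts could be shared, e.g.:
+ */
+ static void enetc_mac_u64_to_bytes(u64 mac, u8 *bytes)
+ {
+ int i;
+
+ for (i = 0; i < 6; i++)
+ bytes[i] = (mac >> (8 * i)) & 0xFF;
+ }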
  1519. +
  1520. +/* CBD Class 7: Stream Identity Entry Query Descriptor - Long Format */
  1521. +int enetc_cb_streamid_get(struct net_device *ndev, u32 index,
  1522. + struct tsn_cb_streamid *streamid)
  1523. +{
  1524. + struct enetc_cbd *cbdr;
  1525. + struct streamid_query_resp *si_data;
  1526. + struct enetc_ndev_priv *priv;
  1527. + dma_addr_t dma;
  1528. + u16 data_size, dma_size;
  1529. + int curr_cbd;
  1530. + int valid;
  1531. +
  1532. + if (!ndev)
  1533. + return -EINVAL;
  1534. +
  1535. + priv = netdev_priv(ndev);
  1536. +
  1537. + curr_cbd = alloc_cbdr(priv->si, &cbdr);
  1538. +
+ cbdr->index = cpu_to_le16((u16)index);
  1540. + cbdr->cmd = 1;
  1541. + cbdr->cls = BDCR_CMD_STREAM_IDENTIFY;
  1542. + cbdr->status_flags = 0;
  1543. +
  1544. + data_size = sizeof(struct streamid_query_resp);
  1545. + si_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
  1546. + if (!si_data)
  1547. + return -ENOMEM;
  1548. +
  1549. + dma_size = cpu_to_le16(data_size);
  1550. + cbdr->length = dma_size;
  1551. + cbdr->status_flags = 0; /* long format command no ie */
  1552. +
  1553. + dma = dma_map_single(&priv->si->pdev->dev, si_data,
  1554. + data_size, DMA_FROM_DEVICE);
  1555. + if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
  1556. + netdev_err(priv->si->ndev, "DMA mapping failed!\n");
  1557. + kfree(si_data);
  1558. + return -ENOMEM;
  1559. + }
  1560. + cbdr->addr[0] = lower_32_bits(dma);
  1561. + cbdr->addr[1] = upper_32_bits(dma);
  1562. +
  1563. + xmit_cbdr(priv->si, curr_cbd);
  1564. +
  1565. + streamid->type = si_data->id_type;
  1566. +
  1567. + if (streamid->type == 1) {
  1568. + streamid->para.nid.dmac = si_data->mac[0]
  1569. + + ((u64)si_data->mac[1] << 8)
  1570. + + ((u64)si_data->mac[2] << 16)
  1571. + + ((u64)si_data->mac[3] << 24)
  1572. + + ((u64)si_data->mac[4] << 32)
  1573. + + ((u64)si_data->mac[5] << 40);
  1574. + /* VID Match. If set (b1) then the VID must match, otherwise
  1575. + * any VID is considered a match.
  1576. + */
+ streamid->para.nid.vid =
+ le16_to_cpu(si_data->vid_vidm_tg)
+ & ENETC_CBDR_SID_VID_MASK;
+ streamid->para.nid.tagged =
+ (le16_to_cpu(si_data->vid_vidm_tg) >> 14) & 0x3;
  1582. + } else if (streamid->type == 2) {
  1583. + streamid->para.sid.smac = si_data->mac[0]
  1584. + + ((u64)si_data->mac[1] << 8)
  1585. + + ((u64)si_data->mac[2] << 16)
  1586. + + ((u64)si_data->mac[3] << 24)
  1587. + + ((u64)si_data->mac[4] << 32)
  1588. + + ((u64)si_data->mac[5] << 40);
  1589. + /* VID Match. If set (b1) then the VID must match, otherwise
  1590. + * any VID is considered a match.
  1591. + */
+ streamid->para.sid.vid =
+ le16_to_cpu(si_data->vid_vidm_tg)
+ & ENETC_CBDR_SID_VID_MASK;
+ streamid->para.sid.tagged =
+ (le16_to_cpu(si_data->vid_vidm_tg) >> 14) & 0x3;
  1597. + }
  1598. +
  1599. + streamid->handle = le32_to_cpu(si_data->stream_handle);
  1600. + streamid->ifac_iport = le32_to_cpu(si_data->input_ports);
  1601. + valid = si_data->en ? 1 : 0;
  1602. +
  1603. + memset(cbdr, 0, sizeof(*cbdr));
  1604. + kfree(si_data);
  1605. +
  1606. + return valid;
  1607. +}
  1608. +
  1609. +/* CBD Class 7: Stream Identity Statistics Query Descriptor - Long Format */
  1610. +int enetc_cb_streamid_counters_get(struct net_device *ndev, u32 index,
  1611. + struct tsn_cb_streamid_counters *counters)
  1612. +{
  1613. + return 0;
  1614. +}
  1615. +
  1616. +void enetc_qci_enable(struct enetc_hw *hw)
  1617. +{
  1618. + enetc_wr(hw, ENETC_PPSFPMR, enetc_rd(hw, ENETC_PPSFPMR)
  1619. + | ENETC_PPSFPMR_PSFPEN | ENETC_PPSFPMR_VS
  1620. + | ENETC_PPSFPMR_PVC | ENETC_PPSFPMR_PVZC);
  1621. +}
  1622. +
  1623. +void enetc_qci_disable(struct enetc_hw *hw)
  1624. +{
  1625. + enetc_wr(hw, ENETC_PPSFPMR, enetc_rd(hw, ENETC_PPSFPMR)
  1626. + & ~ENETC_PPSFPMR_PSFPEN & ~ENETC_PPSFPMR_VS
  1627. + & ~ENETC_PPSFPMR_PVC & ~ENETC_PPSFPMR_PVZC);
  1628. +}
  1629. +
  1630. +/* CBD Class 8: Stream Filter Instance Set Descriptor - Short Format */
  1631. +int enetc_qci_sfi_set(struct net_device *ndev, u32 index, bool en,
  1632. + struct tsn_qci_psfp_sfi_conf *tsn_qci_sfi)
  1633. +{
  1634. + struct enetc_cbd *cbdr;
  1635. + struct sfi_conf *sfi_config;
  1636. +
  1637. + struct enetc_ndev_priv *priv = netdev_priv(ndev);
  1638. + int curr_cbd;
  1639. +
  1640. + curr_cbd = alloc_cbdr(priv->si, &cbdr);
  1641. +
  1642. + cbdr->index = cpu_to_le16(index);
  1643. + cbdr->cmd = 0;
  1644. + cbdr->cls = BDCR_CMD_STREAM_FILTER;
  1645. + cbdr->status_flags = 0x80;
  1646. + cbdr->length = cpu_to_le16(1);
  1647. +
  1648. + sfi_config = &cbdr->sfi_conf;
  1649. + if (en)
  1650. + sfi_config->en = 0x80;
  1651. +
  1652. + if (tsn_qci_sfi->stream_handle_spec >= 0) {
  1653. + sfi_config->stream_handle =
  1654. + cpu_to_le32(tsn_qci_sfi->stream_handle_spec);
  1655. + sfi_config->sthm |= 0x80;
  1656. + }
  1657. +
  1658. + sfi_config->sg_inst_table_index =
  1659. + cpu_to_le16(tsn_qci_sfi->stream_gate_instance_id);
  1660. + sfi_config->input_ports = 1 << (priv->si->pdev->devfn & 0x7);
  1661. +
  1662. + /* The priority value which may be matched against the
  1663. + * frame’s priority value to determine a match for this entry.
  1664. + */
  1665. + if (tsn_qci_sfi->priority_spec >= 0)
  1666. + sfi_config->multi |= (tsn_qci_sfi->priority_spec & 0x7) | 0x8;
  1667. +
  1668. + /* Filter Type. Identifies the contents of the MSDU/FM_INST_INDEX
  1669. + * field as being either an MSDU value or an index into the Flow
  1670. + * Meter Instance table.
  1671. + */
  1672. + if (tsn_qci_sfi->stream_filter.maximum_sdu_size != 0) {
  1673. + sfi_config->msdu =
  1674. + cpu_to_le16(tsn_qci_sfi->stream_filter.maximum_sdu_size);
  1675. + sfi_config->multi |= 0x40;
  1676. + }
  1677. +
  1678. + if (tsn_qci_sfi->stream_filter.flow_meter_instance_id >= 0) {
  1679. + sfi_config->fm_inst_table_index =
  1680. + cpu_to_le16(tsn_qci_sfi->stream_filter.flow_meter_instance_id);
  1681. + sfi_config->multi |= 0x80;
  1682. + }
  1683. +
  1684. + /* Stream blocked due to oversized frame enable. TRUE or FALSE */
  1685. + if (tsn_qci_sfi->block_oversize_enable)
  1686. + sfi_config->multi |= 0x20;
  1687. +
  1688. + /* Stream blocked due to oversized frame. TRUE or FALSE */
  1689. + if (tsn_qci_sfi->block_oversize)
  1690. + sfi_config->multi |= 0x10;
  1691. +
  1692. + xmit_cbdr(priv->si, curr_cbd);
  1693. +
  1694. + memset(cbdr, 0, sizeof(*cbdr));
  1695. + return 0;
  1696. +}
  1697. +
  1698. +/* CBD Class 8: Stream Filter Instance Query Descriptor - Short Format */
  1699. +int enetc_qci_sfi_get(struct net_device *ndev, u32 index,
  1700. + struct tsn_qci_psfp_sfi_conf *tsn_qci_sfi)
  1701. +{
  1702. + struct enetc_cbd *cbdr;
  1703. + struct sfi_conf *sfi_config;
  1704. + struct enetc_ndev_priv *priv = netdev_priv(ndev);
  1705. + int curr_cbd;
  1706. +
  1707. + curr_cbd = alloc_cbdr(priv->si, &cbdr);
  1708. +
  1709. + cbdr->index = cpu_to_le16(index);
  1710. + cbdr->cmd = 1;
  1711. + cbdr->cls = BDCR_CMD_STREAM_FILTER;
  1712. + cbdr->status_flags = 0x80;
  1713. +
  1714. + xmit_cbdr(priv->si, curr_cbd);
  1715. +
  1716. + sfi_config = &cbdr->sfi_conf;
  1717. + if (sfi_config->sthm & 0x80)
  1718. + tsn_qci_sfi->stream_handle_spec =
  1719. + le32_to_cpu(sfi_config->stream_handle);
  1720. + else
  1721. + tsn_qci_sfi->stream_handle_spec = -1;
  1722. +
  1723. + tsn_qci_sfi->stream_gate_instance_id =
  1724. + le16_to_cpu(sfi_config->sg_inst_table_index);
  1725. +
  1726. + if (sfi_config->multi & 0x8)
  1727. + tsn_qci_sfi->priority_spec =
+ sfi_config->multi & 0x7;
  1729. + else
  1730. + tsn_qci_sfi->priority_spec = -1;
  1731. +
  1732. + /* Filter Type. Identifies the contents of the MSDU/FM_INST_INDEX
  1733. + * field as being either an MSDU value or an index into the Flow
  1734. + * Meter Instance table.
  1735. + */
  1736. + if (sfi_config->multi & 0x80)
  1737. + tsn_qci_sfi->stream_filter.flow_meter_instance_id =
  1738. + le16_to_cpu(sfi_config->fm_inst_table_index);
  1739. + else
  1740. + tsn_qci_sfi->stream_filter.flow_meter_instance_id = -1;
  1741. +
  1742. + if (sfi_config->multi & 0x40)
  1743. + tsn_qci_sfi->stream_filter.maximum_sdu_size =
  1744. + le16_to_cpu(sfi_config->msdu);
  1745. +
  1746. + /* Stream blocked due to oversized frame enable. TRUE or FALSE */
  1747. + if (sfi_config->multi & 0x20)
  1748. + tsn_qci_sfi->block_oversize_enable = true;
  1749. + /* Stream blocked due to oversized frame. TRUE or FALSE */
  1750. + if (sfi_config->multi & 0x10)
  1751. + tsn_qci_sfi->block_oversize = true;
  1752. +
  1753. + if (sfi_config->en & 0x80) {
  1754. + memset(cbdr, 0, sizeof(*cbdr));
  1755. + return 1;
  1756. + }
  1757. +
  1758. + memset(cbdr, 0, sizeof(*cbdr));
  1759. + return 0;
  1760. +}
  1761. +
  1762. +/* CBD Class 8: Stream Filter Instance Query Statistics
  1763. + * Descriptor - Long Format
  1764. + */
  1765. +int enetc_qci_sfi_counters_get(struct net_device *ndev, u32 index,
  1766. + struct tsn_qci_psfp_sfi_counters *counters)
  1767. +{
  1768. + struct enetc_cbd *cbdr;
  1769. + struct enetc_ndev_priv *priv = netdev_priv(ndev);
  1770. + int curr_cbd;
  1771. + struct sfi_counter_data *sfi_counter_data;
  1772. + dma_addr_t dma;
  1773. + u16 data_size, dma_size;
  1774. +
  1775. + curr_cbd = alloc_cbdr(priv->si, &cbdr);
  1776. +
  1777. + cbdr->index = cpu_to_le16((u16)index);
  1778. + cbdr->cmd = 2;
  1779. + cbdr->cls = BDCR_CMD_STREAM_FILTER;
  1780. + cbdr->status_flags = 0;
  1781. +
  1782. + data_size = sizeof(struct sfi_counter_data);
  1783. + sfi_counter_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
  1784. + if (!sfi_counter_data)
  1785. + return -ENOMEM;
  1786. +
  1787. + dma = dma_map_single(&priv->si->pdev->dev, sfi_counter_data,
  1788. + data_size, DMA_FROM_DEVICE);
  1789. + if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
  1790. + netdev_err(priv->si->ndev, "DMA mapping failed!\n");
  1791. + kfree(sfi_counter_data);
  1792. + return -ENOMEM;
  1793. + }
  1794. + cbdr->addr[0] = lower_32_bits(dma);
  1795. + cbdr->addr[1] = upper_32_bits(dma);
  1796. +
  1797. + dma_size = cpu_to_le16(data_size);
  1798. + cbdr->length = dma_size;
  1799. +
  1800. + xmit_cbdr(priv->si, curr_cbd);
  1801. +
+ counters->matching_frames_count =
+ ((u64)le32_to_cpu(sfi_counter_data->matchh) << 32)
+ + le32_to_cpu(sfi_counter_data->matchl);
+
+ counters->not_passing_sdu_count =
+ ((u64)le32_to_cpu(sfi_counter_data->msdu_droph) << 32)
+ + le32_to_cpu(sfi_counter_data->msdu_dropl);
  1809. +
  1810. + counters->passing_sdu_count = counters->matching_frames_count
  1811. + - counters->not_passing_sdu_count;
  1812. +
  1813. + counters->not_passing_frames_count =
  1814. + ((u64)le32_to_cpu(sfi_counter_data->stream_gate_droph) << 32)
  1815. + + le32_to_cpu(sfi_counter_data->stream_gate_dropl);
  1816. +
  1817. + counters->passing_frames_count = counters->matching_frames_count
  1818. + - counters->not_passing_sdu_count
  1819. + - counters->not_passing_frames_count;
  1820. +
  1821. + counters->red_frames_count =
  1822. + ((u64)le32_to_cpu(sfi_counter_data->flow_meter_droph) << 32)
  1823. + + le32_to_cpu(sfi_counter_data->flow_meter_dropl);
  1824. +
+ memset(cbdr, 0, sizeof(*cbdr));
+ kfree(sfi_counter_data);
+ return 0;
  1827. +}
  1828. +
  1829. +/* CBD Class 9: Stream Gate Instance Table Entry Set
  1830. + * Descriptor - Short Format
  1831. + */
  1832. +int enetc_qci_sgi_set(struct net_device *ndev, u32 index,
  1833. + struct tsn_qci_psfp_sgi_conf *tsn_qci_sgi)
  1834. +{
  1835. + struct enetc_cbd *cbdr, *cbdr_sgcl;
  1836. + struct sgi_table *sgi_config;
  1837. + struct sgcl_conf *sgcl_config;
  1838. + struct sgcl_data *sgcl_data;
  1839. + struct sgce *sgce;
  1840. + struct enetc_ndev_priv *priv = netdev_priv(ndev);
  1841. +
  1842. + dma_addr_t dma;
  1843. + u16 data_size, dma_size;
  1844. + int curr_cbd, i;
  1845. +
  1846. + /* disable first */
  1847. + curr_cbd = alloc_cbdr(priv->si, &cbdr);
  1848. + memset(cbdr, 0, sizeof(*cbdr));
  1849. +
  1850. + cbdr->index = cpu_to_le16(index);
  1851. + cbdr->cmd = 0;
  1852. + cbdr->cls = BDCR_CMD_STREAM_GCL;
  1853. + cbdr->status_flags = 0x80;
  1854. +
  1855. + xmit_cbdr(priv->si, curr_cbd);
  1856. +
  1857. + if (!tsn_qci_sgi->gate_enabled) {
  1858. + memset(cbdr, 0, sizeof(*cbdr));
  1859. + return 0;
  1860. + }
  1861. +
  1862. + /* Re-enable */
  1863. + curr_cbd = alloc_cbdr(priv->si, &cbdr);
  1864. + memset(cbdr, 0, sizeof(*cbdr));
  1865. +
  1866. + cbdr->index = cpu_to_le16(index);
  1867. + cbdr->cmd = 0;
  1868. + cbdr->cls = BDCR_CMD_STREAM_GCL;
  1869. + cbdr->status_flags = 0x80;
  1870. +
  1871. + sgi_config = &cbdr->sgi_table;
  1872. +
  1873. + sgi_config->ocgtst = tsn_qci_sgi->admin.control_list_length ?
  1874. + 0x80 : (tsn_qci_sgi->admin.gate_states ? 0x80 : 0x0);
  1875. +
  1876. + sgi_config->oipv =
  1877. + tsn_qci_sgi->admin.control_list_length ?
  1878. + 0x0 : ((tsn_qci_sgi->admin.init_ipv < 0) ?
  1879. + 0x0 : ((tsn_qci_sgi->admin.init_ipv & 0x7) | 0x8));
  1880. +
  1881. + sgi_config->en = 0x80;
  1882. +
  1883. + if (tsn_qci_sgi->block_invalid_rx_enable)
  1884. + sgi_config->gset |= 0x80;
  1885. + if (tsn_qci_sgi->block_invalid_rx)
  1886. + sgi_config->gset |= 0x40;
  1887. + if (tsn_qci_sgi->block_octets_exceeded)
  1888. + sgi_config->gset |= 0x10;
  1889. + if (tsn_qci_sgi->block_octets_exceeded_enable)
  1890. + sgi_config->gset |= 0x20;
  1891. +
  1892. + xmit_cbdr(priv->si, curr_cbd);
  1893. +
  1894. + if (tsn_qci_sgi->admin.control_list_length == 0)
  1895. + goto exit;
  1896. +
  1897. + curr_cbd = alloc_cbdr(priv->si, &cbdr_sgcl);
+ memset(cbdr_sgcl, 0, sizeof(*cbdr_sgcl));
  1899. +
  1900. + cbdr_sgcl->index = cpu_to_le16(index);
  1901. + cbdr_sgcl->cmd = 1;
  1902. + cbdr_sgcl->cls = BDCR_CMD_STREAM_GCL;
  1903. + cbdr_sgcl->status_flags = 0;
  1904. +
  1905. + sgcl_config = &cbdr_sgcl->sgcl_conf;
  1906. +
+ /* tsn_qci_sgi->admin.control_list_length is not zero here */
+ if (tsn_qci_sgi->admin.control_list_length > 4) {
+ memset(cbdr, 0, sizeof(*cbdr));
+ memset(cbdr_sgcl, 0, sizeof(*cbdr_sgcl));
+ return -EINVAL;
+ }
  1910. +
  1911. + sgcl_config->acl_len =
  1912. + (tsn_qci_sgi->admin.control_list_length - 1) & 0x3;
  1913. +
  1914. + data_size = sizeof(struct sgcl_data) +
  1915. + (sgcl_config->acl_len + 1) * sizeof(struct sgce);
  1916. +
  1917. + sgcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
  1918. + if (!sgcl_data)
  1919. + return -ENOMEM;
  1920. +
  1921. + dma_size = cpu_to_le16(data_size);
  1922. + cbdr_sgcl->length = dma_size;
  1923. +
  1924. + dma = dma_map_single(&priv->si->pdev->dev,
  1925. + sgcl_data, data_size,
  1926. + DMA_FROM_DEVICE);
  1927. + if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
  1928. + netdev_err(priv->si->ndev, "DMA mapping failed!\n");
  1929. + memset(cbdr, 0, sizeof(*cbdr));
  1930. + memset(cbdr_sgcl, 0, sizeof(*cbdr_sgcl));
  1931. + kfree(sgcl_data);
  1932. + return -ENOMEM;
  1933. + }
  1934. + cbdr_sgcl->addr[0] = lower_32_bits(dma);
  1935. + cbdr_sgcl->addr[1] = upper_32_bits(dma);
  1936. +
  1937. + sgce = (struct sgce *)(sgcl_data + 1);
  1938. +
  1939. + if (tsn_qci_sgi->admin.gate_states)
  1940. + sgcl_config->agtst = 0x80;
  1941. +
  1942. + sgcl_data->ct = cpu_to_le32(tsn_qci_sgi->admin.cycle_time);
  1943. + sgcl_data->cte = cpu_to_le32(tsn_qci_sgi->admin.cycle_time_extension);
  1944. +
  1945. + if (tsn_qci_sgi->admin.init_ipv >= 0)
  1946. + sgcl_config->aipv = (tsn_qci_sgi->admin.init_ipv & 0x7) | 0x8;
  1947. +
  1948. + for (i = 0; i < tsn_qci_sgi->admin.control_list_length; i++) {
  1949. + struct tsn_qci_psfp_gcl *temp_sgcl = tsn_qci_sgi->admin.gcl + i;
  1950. + struct sgce *temp_entry = (struct sgce *)(sgce + i);
  1951. +
  1952. + if (temp_sgcl->gate_state)
  1953. + temp_entry->multi |= 0x10;
  1954. +
  1955. + if (temp_sgcl->ipv >= 0)
  1956. + temp_entry->multi |= ((temp_sgcl->ipv & 0x7) << 5)
  1957. + | 0x08;
  1958. +
  1959. + if (temp_sgcl->octet_max)
  1960. + temp_entry->multi |= 0x01;
  1961. +
  1962. + temp_entry->interval = cpu_to_le32(temp_sgcl->time_interval);
  1963. + temp_entry->msdu[0] = temp_sgcl->octet_max & 0xFF;
  1964. + temp_entry->msdu[1] = (temp_sgcl->octet_max >> 8) & 0xFF;
  1965. + temp_entry->msdu[2] = (temp_sgcl->octet_max >> 16) & 0xFF;
  1966. + }
  1967. +
  1968. + if (!tsn_qci_sgi->admin.base_time) {
  1969. + sgcl_data->btl =
  1970. + cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR0));
  1971. + sgcl_data->bth =
  1972. + cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR1));
  1973. + } else {
  1974. + u32 tempu, templ;
  1975. +
  1976. + tempu = upper_32_bits(tsn_qci_sgi->admin.base_time);
  1977. + templ = lower_32_bits(tsn_qci_sgi->admin.base_time);
  1978. + sgcl_data->bth = cpu_to_le32(tempu);
  1979. + sgcl_data->btl = cpu_to_le32(templ);
  1980. + }
  1981. +
  1982. + xmit_cbdr(priv->si, curr_cbd);
  1983. +
  1984. + memset(cbdr_sgcl, 0, sizeof(*cbdr_sgcl));
  1985. + kfree(sgcl_data);
  1986. +
  1987. +exit:
  1988. + memset(cbdr, 0, sizeof(*cbdr));
  1989. + return 0;
  1990. +}
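+
+ /* Editor's sketch (assumption, not from the patch): the two-bit
+ * list length fields used above encode length minus one; the
+ * set and get paths open-code this as (len - 1) & 0x3 and
+ * (field & 0x3) + 1. As helpers:
+ */
+ static inline u8 enetc_sgcl_len_encode(u8 len) /* 1..4 entries */
+ {
+ return (len - 1) & 0x3;
+ }
+
+ static inline u8 enetc_sgcl_len_decode(u8 field)
+ {
+ return (field & 0x3) + 1;
+ }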
  1991. +
  1992. +/* CBD Class 9: Stream Gate Instance Table Entry Query
  1993. + * Descriptor - Short Format
  1994. + */
  1995. +int enetc_qci_sgi_get(struct net_device *ndev, u32 index,
  1996. + struct tsn_qci_psfp_sgi_conf *tsn_qci_sgi)
  1997. +{
  1998. + struct enetc_cbd *cbdr, *cbdr_sgcl;
  1999. + struct sgi_table *sgi_config;
  2000. + struct sgcl_query *sgcl_query;
  2001. + struct sgcl_query_resp *sgcl_data;
  2002. + struct sgce *sgce;
  2003. + struct enetc_ndev_priv *priv = netdev_priv(ndev);
  2004. + dma_addr_t dma;
  2005. + u16 data_size, dma_size, gcl_data_stat = 0;
  2006. + u8 admin_len = 0;
  2007. + int curr_cbd, i;
  2008. +
  2009. + curr_cbd = alloc_cbdr(priv->si, &cbdr);
  2010. +
  2011. + cbdr->index = cpu_to_le16(index);
  2012. + cbdr->cmd = 2;
  2013. + cbdr->cls = BDCR_CMD_STREAM_GCL;
  2014. + cbdr->status_flags = 0x80;
  2015. +
  2016. + xmit_cbdr(priv->si, curr_cbd);
  2017. +
  2018. + sgi_config = &cbdr->sgi_table;
  2019. +
  2020. + tsn_qci_sgi->admin.gate_states = (sgi_config->ocgtst & 0x80) ?
  2021. + true : false;
  2022. + if (sgi_config->oipv & 0x08)
  2023. + tsn_qci_sgi->admin.init_ipv = sgi_config->oipv & 0x7;
  2024. + else
  2025. + tsn_qci_sgi->admin.init_ipv = -1;
  2026. +
  2027. + if (sgi_config->en & 0x80)
  2028. + tsn_qci_sgi->gate_enabled = true;
  2029. + if (sgi_config->gset & 0x80)
  2030. + tsn_qci_sgi->block_invalid_rx_enable = true;
  2031. + if (sgi_config->gset & 0x40)
  2032. + tsn_qci_sgi->block_invalid_rx = true;
  2033. + if (sgi_config->gset & 0x20)
  2034. + tsn_qci_sgi->block_octets_exceeded_enable = true;
  2035. + if (sgi_config->gset & 0x10)
  2036. + tsn_qci_sgi->block_octets_exceeded = true;
  2037. +
+ /* Check whether the admin gate control list is empty */
  2039. + if (!(sgi_config->oacl_len & 0x30)) {
  2040. + tsn_qci_sgi->admin.control_list_length = 0;
  2041. + goto exit;
  2042. + }
  2043. +
  2044. + curr_cbd = alloc_cbdr(priv->si, &cbdr_sgcl);
  2045. +
  2046. + cbdr_sgcl->index = cpu_to_le16(index);
  2047. + cbdr_sgcl->cmd = 3;
  2048. + cbdr_sgcl->cls = BDCR_CMD_STREAM_GCL;
  2049. + cbdr_sgcl->status_flags = 0;
  2050. +
  2051. + data_size = sizeof(struct sgcl_query_resp) + 4 * sizeof(struct sgce);
  2052. +
  2053. + sgcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
  2054. + if (!sgcl_data)
  2055. + return -ENOMEM;
  2056. +
  2057. + dma_size = cpu_to_le16(data_size);
  2058. + cbdr_sgcl->length = dma_size;
  2059. + cbdr_sgcl->status_flags = 0;
  2060. +
  2061. + sgcl_query = &cbdr_sgcl->sgcl_query;
  2062. +
  2063. + sgcl_query->oacl_len = 0x10;
  2064. +
  2065. + dma = dma_map_single(&priv->si->pdev->dev, sgcl_data,
  2066. + data_size, DMA_FROM_DEVICE);
  2067. + if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
  2068. + netdev_err(priv->si->ndev, "DMA mapping failed!\n");
  2069. + memset(cbdr, 0, sizeof(*cbdr));
  2070. + memset(cbdr_sgcl, 0, sizeof(*cbdr_sgcl));
  2071. + kfree(sgcl_data);
  2072. + return -ENOMEM;
  2073. + }
  2074. + cbdr_sgcl->addr[0] = lower_32_bits(dma);
  2075. + cbdr_sgcl->addr[1] = upper_32_bits(dma);
  2076. +
  2077. + xmit_cbdr(priv->si, curr_cbd);
  2078. +
  2079. + sgce = (struct sgce *)(sgcl_data + 1);
  2080. +
  2081. + gcl_data_stat = le16_to_cpu(sgcl_data->stat);
  2082. + if (gcl_data_stat & 0x10)
  2083. + tsn_qci_sgi->admin.gate_states = true;
  2084. +
  2085. + if (gcl_data_stat & 0x80)
  2086. + tsn_qci_sgi->admin.init_ipv = gcl_data_stat & 0x7;
  2087. + else
  2088. + tsn_qci_sgi->admin.init_ipv = -1;
  2089. +
+ /* admin_len could also be read from gcl_data_stat bits 5-6
+ * or from sgi_config->oacl_len
  2092. + */
  2093. + admin_len = (sgcl_query->oacl_len & 0x3) + 1;
  2094. + tsn_qci_sgi->admin.control_list_length = admin_len;
  2095. + tsn_qci_sgi->admin.cycle_time = le32_to_cpu(sgcl_data->act);
  2096. + tsn_qci_sgi->admin.cycle_time_extension = le32_to_cpu(sgcl_data->acte);
  2097. + tsn_qci_sgi->admin.base_time = ((u64)(le32_to_cpu(sgcl_data->abth))
  2098. + << 32)
  2099. + + le32_to_cpu(sgcl_data->abtl);
  2100. +
  2101. + tsn_qci_sgi->admin.gcl = kcalloc(admin_len,
  2102. + sizeof(struct tsn_qci_psfp_gcl),
  2103. + GFP_KERNEL);
  2104. + if (!tsn_qci_sgi->admin.gcl) {
  2105. + kfree(sgcl_data);
  2106. + return -ENOMEM;
  2107. + }
  2108. +
  2109. + for (i = 0; i < admin_len; i++) {
  2110. + struct tsn_qci_psfp_gcl *temp_sgcl = tsn_qci_sgi->admin.gcl + i;
  2111. + struct sgce *temp_entry = (struct sgce *)(sgce + i);
  2112. +
  2113. + if (temp_entry->multi & 0x10)
  2114. + temp_sgcl->gate_state = true;
  2115. +
  2116. + if (temp_entry->multi & 0x08)
  2117. + temp_sgcl->ipv = temp_entry->multi >> 5;
  2118. + else
  2119. + temp_sgcl->ipv = -1;
  2120. +
  2121. + temp_sgcl->time_interval = le32_to_cpu(temp_entry->interval);
  2122. +
  2123. + if (temp_entry->multi & 0x01)
  2124. + temp_sgcl->octet_max = (temp_entry->msdu[0] & 0xff)
  2125. + | (((u32)temp_entry->msdu[1] << 8) & 0xff00)
+ | (((u32)temp_entry->msdu[2] << 16) & 0xff0000);
  2127. + else
  2128. + temp_sgcl->octet_max = 0;
  2129. + }
  2130. +
  2131. + memset(cbdr_sgcl, 0, sizeof(*cbdr_sgcl));
  2132. + kfree(sgcl_data);
  2133. +
  2134. +exit:
  2135. + memset(cbdr, 0, sizeof(*cbdr));
  2136. + return 0;
  2137. +}
  2138. +
  2139. +/* CBD Class 9: Stream Gate Instance Table Entry Query Descriptor
  2140. + * CBD Class 9: Stream Gate Control List Query Descriptor
  2141. + */
  2142. +int enetc_qci_sgi_status_get(struct net_device *ndev, u16 index,
  2143. + struct tsn_psfp_sgi_status *status)
  2144. +{
  2145. + struct enetc_cbd *cbdr_sgi, *cbdr_sgcl;
  2146. + struct sgi_table *sgi_config;
  2147. + struct sgcl_query *sgcl_query;
  2148. + struct sgcl_query_resp *sgcl_data;
  2149. + struct sgce *sgce;
  2150. + struct enetc_ndev_priv *priv = netdev_priv(ndev);
  2151. + dma_addr_t dma;
  2152. + u16 data_size, dma_size, gcl_data_stat = 0;
  2153. + u8 oper_len = 0;
  2154. + int curr_cbd, i;
  2155. +
  2156. + curr_cbd = alloc_cbdr(priv->si, &cbdr_sgi);
  2157. +
  2158. + cbdr_sgi->index = cpu_to_le16(index);
  2159. + cbdr_sgi->cmd = 2;
  2160. + cbdr_sgi->cls = BDCR_CMD_STREAM_GCL;
+ cbdr_sgi->status_flags = 0x80;
+
+ /* Issue the query before reading back the response fields */
+ xmit_cbdr(priv->si, curr_cbd);
+
+ sgi_config = &cbdr_sgi->sgi_table;
+
+ if (sgi_config->gset & 0x4)
+ status->config_pending = true;
+
+ status->oper.gate_states = ((sgi_config->ocgtst & 0x80) ? true : false);
+
+ /* Check whether the oper gate control list is empty */
+ if (!(sgi_config->oacl_len & 0x30)) {
+ status->oper.control_list_length = 0;
+ goto cmd2quit;
+ }
+
  2177. +
  2178. + curr_cbd = alloc_cbdr(priv->si, &cbdr_sgcl);
  2179. +
  2180. + cbdr_sgcl->index = cpu_to_le16(index);
  2181. + cbdr_sgcl->cmd = 3;
  2182. + cbdr_sgcl->cls = BDCR_CMD_STREAM_GCL;
  2183. + cbdr_sgcl->status_flags = 0;
  2184. +
  2185. + /* Max size */
  2186. + data_size = sizeof(struct sgcl_query_resp) + 4 * sizeof(struct sgce);
  2187. +
  2188. + sgcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
  2189. + if (!sgcl_data)
  2190. + return -ENOMEM;
  2191. +
  2192. + dma_size = cpu_to_le16(data_size);
  2193. + cbdr_sgcl->length = dma_size;
  2194. + cbdr_sgcl->status_flags = 0;
  2195. +
  2196. + sgcl_query = &cbdr_sgcl->sgcl_query;
  2197. +
  2198. + sgcl_query->oacl_len = 0x20;
  2199. +
  2200. + dma = dma_map_single(&priv->si->pdev->dev, sgcl_data,
  2201. + data_size, DMA_FROM_DEVICE);
  2202. + if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
  2203. + netdev_err(priv->si->ndev, "DMA mapping failed!\n");
  2204. + memset(cbdr_sgi, 0, sizeof(*cbdr_sgi));
  2205. + memset(cbdr_sgcl, 0, sizeof(*cbdr_sgcl));
  2206. + kfree(sgcl_data);
  2207. + return -ENOMEM;
  2208. + }
  2209. + cbdr_sgcl->addr[0] = lower_32_bits(dma);
  2210. + cbdr_sgcl->addr[1] = upper_32_bits(dma);
  2211. +
  2212. + xmit_cbdr(priv->si, curr_cbd);
  2213. +
  2214. + sgce = (struct sgce *)(sgcl_data + 1);
  2215. +
+ /* oper_len could also be read from gcl_data_stat bits 5-6
+ * or from sgi_config->oacl_len
  2218. + */
  2219. + oper_len = ((sgcl_query->oacl_len & 0x0c) >> 2) + 1;
  2220. +
  2221. + /* Get Stream Gate Control List */
  2222. + status->oper.cycle_time = le32_to_cpu(sgcl_data->oct);
  2223. + status->oper.cycle_time_extension = le32_to_cpu(sgcl_data->octe);
  2224. + status->oper.base_time = le32_to_cpu(sgcl_data->obtl)
  2225. + + ((u64)le32_to_cpu(sgcl_data->obth) << 32);
  2226. + status->oper.control_list_length = oper_len;
  2227. +
  2228. + gcl_data_stat = le16_to_cpu(sgcl_data->stat);
  2229. + if (gcl_data_stat & 0x400)
+ status->oper.init_ipv = (gcl_data_stat & 0x380) >> 7;
  2231. + else
  2232. + status->oper.init_ipv = -1;
  2233. +
  2234. + if (gcl_data_stat & 0x800)
  2235. + status->oper.gate_states = true;
  2236. +
  2237. + status->oper.gcl = kcalloc(oper_len,
  2238. + sizeof(struct tsn_qci_psfp_gcl),
  2239. + GFP_KERNEL);
  2240. + if (!status->oper.gcl) {
  2241. + memset(cbdr_sgi, 0, sizeof(*cbdr_sgi));
  2242. + memset(cbdr_sgcl, 0, sizeof(*cbdr_sgcl));
  2243. + kfree(sgcl_data);
  2244. + return -ENOMEM;
  2245. + }
  2246. +
  2247. + for (i = 0; i < oper_len; i++) {
  2248. + struct tsn_qci_psfp_gcl *temp_sgcl = status->oper.gcl + i;
  2249. + struct sgce *temp_entry = (struct sgce *)(sgce + i);
  2250. +
  2251. + if (temp_entry->multi & 0x10)
  2252. + temp_sgcl->gate_state = true;
  2253. +
  2254. + if (temp_entry->multi & 0x08)
  2255. + temp_sgcl->ipv = temp_entry->multi >> 5;
  2256. + else
  2257. + temp_sgcl->ipv = -1;
  2258. +
  2259. + temp_sgcl->time_interval = le32_to_cpu(temp_entry->interval);
  2260. +
  2261. + if (temp_entry->multi & 0x01)
  2262. + temp_sgcl->octet_max = temp_entry->msdu[0]
  2263. + | ((((u32)temp_entry->msdu[1]) << 8)
  2264. + & 0xff00)
  2265. + | ((((u32)temp_entry->msdu[2]) << 16)
  2266. + & 0xff0000);
  2267. + else
  2268. + temp_sgcl->octet_max = 0;
  2269. + }
  2270. +
  2271. + status->config_change_time = le32_to_cpu(sgcl_data->cctl)
  2272. + + ((u64)le32_to_cpu(sgcl_data->ccth) << 32);
  2273. +
  2274. + memset(cbdr_sgcl, 0, sizeof(*cbdr_sgcl));
  2275. + kfree(sgcl_data);
  2276. +
  2277. +cmd2quit:
+ /* Tick granularity is reported by the SITGTGR register */
  2279. + status->tick_granularity = enetc_rd(&priv->si->hw, ENETC_SITGTGR);
  2280. +
  2281. + /* current time */
  2282. + status->current_time = get_current_time(priv->si);
  2283. +
  2284. + memset(cbdr_sgi, 0, sizeof(*cbdr_sgi));
  2285. +
  2286. + return 0;
  2287. +}
  2288. +
  2289. +/* CBD Class 10: Flow Meter Instance Set Descriptor - Short Format */
  2290. +int enetc_qci_fmi_set(struct net_device *ndev, u32 index, bool enable,
  2291. + struct tsn_qci_psfp_fmi *tsn_qci_fmi)
  2292. +{
  2293. + struct enetc_cbd *cbdr;
  2294. + struct fmi_conf *fmi_config;
  2295. +
  2296. + struct enetc_ndev_priv *priv = netdev_priv(ndev);
  2297. + int curr_cbd;
  2298. + u64 temp = 0;
  2299. +
  2300. + curr_cbd = alloc_cbdr(priv->si, &cbdr);
  2301. +
  2302. + cbdr->index = cpu_to_le16((u16)index);
  2303. + cbdr->cmd = 0;
  2304. + cbdr->cls = BDCR_CMD_FLOW_METER;
  2305. + cbdr->status_flags = 0x80;
  2306. +
  2307. + xmit_cbdr(priv->si, curr_cbd);
  2308. +
  2309. + if (!enable) {
  2310. + memset(cbdr, 0, sizeof(*cbdr));
  2311. + return 0;
  2312. + }
  2313. +
  2314. + /* Re-enable */
  2315. + curr_cbd = alloc_cbdr(priv->si, &cbdr);
  2316. + memset(cbdr, 0, sizeof(*cbdr));
  2317. + cbdr->index = cpu_to_le16((u16)index);
  2318. + cbdr->cmd = 0;
  2319. + cbdr->cls = BDCR_CMD_FLOW_METER;
  2320. + cbdr->status_flags = 0x80;
  2321. +
  2322. + fmi_config = &cbdr->fmi_conf;
  2323. + fmi_config->en = 0x80;
  2324. + if (tsn_qci_fmi->cir) {
  2325. + temp = (u64)1000 * tsn_qci_fmi->cir;
  2326. + temp = temp / 3725;
  2327. + }
  2328. + fmi_config->cir = cpu_to_le32((u32)temp);
  2329. + fmi_config->cbs = cpu_to_le32(tsn_qci_fmi->cbs);
  2330. + temp = 0;
  2331. + if (tsn_qci_fmi->eir) {
  2332. + temp = (u64)1000 * tsn_qci_fmi->eir;
  2333. + temp = temp / 3725;
  2334. + }
  2335. + fmi_config->eir = cpu_to_le32((u32)temp);
  2336. + fmi_config->ebs = cpu_to_le32(tsn_qci_fmi->ebs);
  2337. +
  2338. + if (tsn_qci_fmi->mark_red)
  2339. + fmi_config->conf |= 0x1;
  2340. +
  2341. + if (tsn_qci_fmi->mark_red_enable)
  2342. + fmi_config->conf |= 0x2;
  2343. +
  2344. + if (tsn_qci_fmi->drop_on_yellow)
  2345. + fmi_config->conf |= 0x4;
  2346. +
  2347. + if (tsn_qci_fmi->cm)
  2348. + fmi_config->conf |= 0x8;
  2349. +
  2350. + if (tsn_qci_fmi->cf)
  2351. + fmi_config->conf |= 0x10;
  2352. +
  2353. + xmit_cbdr(priv->si, curr_cbd);
  2354. +
  2355. + memset(cbdr, 0, sizeof(*cbdr));
  2356. + return 0;
  2357. +}
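+
+ /* Editor's note: the conversions above imply the hardware rate
+ * registers count in units of 3.725 Kbps (reg = rate * 1000 / 3725
+ * and back), assuming the tsn_qci_psfp_fmi rates are given in
+ * Kbps. A hedged helper for the set direction:
+ */
+ static inline u32 enetc_rate_to_fmi_units(u32 rate_kbps)
+ {
+ return div_u64((u64)rate_kbps * 1000, 3725);
+ }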
  2358. +
  2359. +/* CBD Class 10: Flow Meter Instance Query Descriptor - Short Format */
  2360. +int enetc_qci_fmi_get(struct net_device *ndev, u32 index,
  2361. + struct tsn_qci_psfp_fmi *tsn_qci_fmi,
  2362. + struct tsn_qci_psfp_fmi_counters *counters)
  2363. +{
  2364. + struct enetc_cbd *cbdr;
  2365. + struct fmi_conf *fmi_config;
  2366. + struct enetc_ndev_priv *priv = netdev_priv(ndev);
  2367. + int curr_cbd;
  2368. + u16 data_size, dma_size;
  2369. + dma_addr_t dma;
  2370. + struct fmi_query_stat_resp *fmi_counter_data;
  2371. + u64 temp = 0;
  2372. +
  2373. + curr_cbd = alloc_cbdr(priv->si, &cbdr);
  2374. +
  2375. + cbdr->index = cpu_to_le16(index);
  2376. + cbdr->cmd = 1;
  2377. + cbdr->cls = BDCR_CMD_FLOW_METER;
  2378. + cbdr->status_flags = 0x80;
  2379. +
  2380. + xmit_cbdr(priv->si, curr_cbd);
  2381. +
+ fmi_config = &cbdr->fmi_conf;
+ if (fmi_config->cir) {
+ temp = (u64)3725 * le32_to_cpu(fmi_config->cir);
+ temp = temp / 1000;
+ }
+ tsn_qci_fmi->cir = (u32)temp;
+ tsn_qci_fmi->cbs = le32_to_cpu(fmi_config->cbs);
+ temp = 0;
+ if (fmi_config->eir) {
+ temp = (u64)3725 * le32_to_cpu(fmi_config->eir);
+ temp = temp / 1000;
+ }
+ tsn_qci_fmi->eir = (u32)temp;
+ tsn_qci_fmi->ebs = le32_to_cpu(fmi_config->ebs);
  2396. +
  2397. + if (fmi_config->conf & 0x1)
  2398. + tsn_qci_fmi->mark_red = true;
  2399. +
  2400. + if (fmi_config->conf & 0x2)
  2401. + tsn_qci_fmi->mark_red_enable = true;
  2402. +
  2403. + if (fmi_config->conf & 0x4)
  2404. + tsn_qci_fmi->drop_on_yellow = true;
  2405. +
  2406. + if (fmi_config->conf & 0x8)
  2407. + tsn_qci_fmi->cm = true;
  2408. +
  2409. + if (fmi_config->conf & 0x10)
  2410. + tsn_qci_fmi->cf = true;
  2411. +
  2412. + memset(cbdr, 0, sizeof(*cbdr));
  2413. +
  2414. + /* Get counters */
  2415. + curr_cbd = alloc_cbdr(priv->si, &cbdr);
  2416. +
  2417. + cbdr->index = cpu_to_le16(index);
  2418. + cbdr->cmd = 2;
  2419. + cbdr->cls = BDCR_CMD_FLOW_METER;
  2420. + cbdr->status_flags = 0x0;
  2421. +
  2422. + data_size = sizeof(struct fmi_query_stat_resp);
  2423. + fmi_counter_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
  2424. + if (!fmi_counter_data)
  2425. + return -ENOMEM;
  2426. +
  2427. + dma = dma_map_single(&priv->si->pdev->dev, fmi_counter_data,
  2428. + data_size, DMA_FROM_DEVICE);
  2429. + if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
  2430. + netdev_err(priv->si->ndev, "DMA mapping failed!\n");
  2431. + kfree(fmi_counter_data);
  2432. + return -ENOMEM;
  2433. + }
  2434. + cbdr->addr[0] = lower_32_bits(dma);
  2435. + cbdr->addr[1] = upper_32_bits(dma);
  2436. +
  2437. + dma_size = cpu_to_le16(data_size);
  2438. + cbdr->length = dma_size;
  2439. +
  2440. + xmit_cbdr(priv->si, curr_cbd);
  2441. +
+ memcpy(counters, fmi_counter_data, sizeof(*counters));
+
+ memset(cbdr, 0, sizeof(*cbdr));
+ kfree(fmi_counter_data);
+
+ return 0;
  2445. +}
  2446. +
  2447. +int enetc_qbu_set(struct net_device *ndev, u8 ptvector)
  2448. +{
  2449. + u32 temp;
  2450. + int i;
  2451. + struct enetc_ndev_priv *priv = netdev_priv(ndev);
  2452. +
  2453. + temp = enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET);
  2454. + if (temp & ENETC_QBV_TGE)
  2455. + enetc_wr(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET,
  2456. + temp & (~ENETC_QBV_TGPE));
  2457. +
  2458. + for (i = 0; i < 8; i++) {
  2459. + /* 1 Enabled. Traffic is transmitted on the preemptive MAC. */
  2460. + temp = enetc_port_rd(&priv->si->hw, ENETC_PTCFPR(i));
  2461. +
  2462. + if ((ptvector >> i) & 0x1)
  2463. + enetc_port_wr(&priv->si->hw,
  2464. + ENETC_PTCFPR(i),
  2465. + temp | ENETC_FPE);
  2466. + else
  2467. + enetc_port_wr(&priv->si->hw,
  2468. + ENETC_PTCFPR(i),
  2469. + temp & ~ENETC_FPE);
  2470. + }
  2471. +
  2472. + return 0;
  2473. +}
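+
+ /* Editor's usage sketch (hypothetical caller, not in the patch):
+ * each bit of ptvector selects one traffic class, so marking
+ * TC0-TC3 preemptable while keeping TC4-TC7 express would be:
+ *
+ * enetc_qbu_set(ndev, 0x0F);
+ */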
  2474. +
  2475. +int enetc_qbu_get(struct net_device *ndev,
  2476. + struct tsn_preempt_status *preemptstat)
  2477. +{
  2478. + int i;
  2479. + struct enetc_ndev_priv *priv = netdev_priv(ndev);
  2480. +
  2481. + if (enetc_port_rd(&priv->si->hw, ENETC_PFPMR) & ENETC_PFPMR_PMACE) {
  2482. + preemptstat->preemption_active = true;
  2483. + if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET)
  2484. + & ENETC_QBV_TGE)
  2485. + preemptstat->hold_request = 1;
  2486. + else
  2487. + preemptstat->hold_request = 2;
  2488. + } else {
  2489. + preemptstat->preemption_active = false;
  2490. + return 0;
  2491. + }
  2492. +
  2493. + for (i = 0; i < 8; i++)
  2494. + if (enetc_port_rd(&priv->si->hw, ENETC_PTCFPR(i)) & 0x80000000)
  2495. + preemptstat->admin_state |= 1 << i;
  2496. +
  2497. + preemptstat->hold_advance =
  2498. + enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & 0xFFFF;
  2499. + preemptstat->release_advance =
  2500. + enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & 0xFFFF;
  2501. +
  2502. + return 0;
  2503. +}
  2504. +
  2505. +u32 __enetc_tsn_get_cap(struct enetc_si *si)
  2506. +{
  2507. + u32 reg = 0;
  2508. + u32 cap = 0;
  2509. +
  2510. + reg = enetc_port_rd(&si->hw, ENETC_PCAPR0);
  2511. +
  2512. + if (reg & ENETC_PCAPR0_PSFP)
  2513. + cap |= TSN_CAP_QCI;
  2514. +
  2515. + if (reg & ENETC_PCAPR0_TSN)
  2516. + cap |= TSN_CAP_QBV;
  2517. +
  2518. + if (reg & ENETC_PCAPR0_QBU)
  2519. + cap |= TSN_CAP_QBU;
  2520. +
  2521. + cap |= TSN_CAP_CBS;
  2522. + cap |= TSN_CAP_TBS;
  2523. +
  2524. + return cap;
  2525. +}
  2526. +
  2527. +u32 enetc_tsn_get_capability(struct net_device *ndev)
  2528. +{
  2529. + struct enetc_ndev_priv *priv = netdev_priv(ndev);
  2530. +
  2531. + return __enetc_tsn_get_cap(priv->si);
  2532. +}
  2533. +
  2534. +static int __enetc_get_max_cap(struct enetc_si *si,
  2535. + struct tsn_qci_psfp_stream_param *stream_para)
  2536. +{
  2537. + u32 reg = 0;
  2538. +
  2539. + /* Port stream filter capability */
  2540. + reg = enetc_port_rd(&si->hw, ENETC_PSFCAPR);
  2541. + stream_para->max_sf_instance = reg & ENETC_PSFCAPR_MSK;
+ /* Port stream gate capability */
  2543. + reg = enetc_port_rd(&si->hw, ENETC_PSGCAPR);
  2544. + stream_para->max_sg_instance = (reg & ENETC_PSGCAPR_SGIT_MSK);
  2545. + stream_para->supported_list_max = (reg & ENETC_PSGCAPR_GCL_MSK) >> 16;
  2546. + /* Port flow meter capability */
  2547. + reg = enetc_port_rd(&si->hw, ENETC_PFMCAPR);
  2548. + stream_para->max_fm_instance = reg & ENETC_PFMCAPR_MSK;
  2549. +
  2550. + return 0;
  2551. +}
  2552. +
  2553. +int enetc_get_max_cap(struct net_device *ndev,
  2554. + struct tsn_qci_psfp_stream_param *stream_para)
  2555. +{
  2556. + struct enetc_ndev_priv *priv = netdev_priv(ndev);
  2557. +
  2558. + return __enetc_get_max_cap(priv->si, stream_para);
  2559. +}
  2560. +
  2561. +static int enetc_set_cbs(struct net_device *ndev, u8 tc, u8 bw)
  2562. +{
  2563. + struct enetc_ndev_priv *priv = netdev_priv(ndev);
  2564. + struct enetc_si *si = priv->si;
  2565. + struct enetc_cbs *ecbs = si->ecbs;
  2566. + struct cbs *cbs;
  2567. +
  2568. + int bw_sum = 0;
  2569. + u32 port_transmit_rate;
  2570. + u32 port_frame_max_size;
  2571. + u8 tc_nums;
  2572. + int i;
  2573. +
  2574. + u32 max_interfrence_size;
  2575. + u32 send_slope;
  2576. + u32 hi_credit;
  2577. +
  2578. + if (!ecbs)
  2579. + return -ENOMEM;
  2580. +
  2581. + port_transmit_rate = get_ndev_speed(si->ndev);
  2582. + if (port_transmit_rate != ecbs->port_transmit_rate)
  2583. + ecbs->port_transmit_rate = port_transmit_rate;
  2584. + port_frame_max_size = ecbs->port_max_size_frame;
  2585. + tc_nums = ecbs->tc_nums;
  2586. + cbs = ecbs->cbs;
  2587. +
  2588. + if (tc >= tc_nums) {
+ dev_err(&ndev->dev, "TC must be less than %d\n", tc_nums);
  2590. + return -EINVAL;
  2591. + }
  2592. +
  2593. + if (!bw) {
  2594. + if (cbs[tc].enable) {
  2595. + /* Make sure the other TC that are numerically
  2596. + * lower than this TC have been disabled.
  2597. + */
  2598. + for (i = 0; i < tc; i++) {
  2599. + if (cbs[i].enable)
  2600. + break;
  2601. + }
  2602. + if (i < tc) {
  2603. + dev_err(&ndev->dev,
  2604. + "TC%d has been disabled first\n", i);
  2605. + return -EINVAL;
  2606. + }
  2607. + memset(&cbs[tc], 0, sizeof(*cbs));
  2608. + cbs[tc].enable = false;
  2609. + enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), 0);
  2610. + enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), 0);
  2611. + }
  2612. + return 0;
  2613. + }
  2614. +
  2615. + /* Make sure the other TC that are numerically
  2616. + * higher than this TC have been enabled.
  2617. + */
  2618. + for (i = tc_nums - 1; i > tc; i--) {
  2619. + if (!cbs[i].enable) {
  2620. + dev_err(&ndev->dev,
  2621. + "TC%d has been enabled first\n", i);
  2622. + return -EINVAL;
  2623. + }
  2624. + bw_sum += cbs[i].bw;
  2625. + }
  2626. +
  2627. + if (bw_sum + bw >= 100) {
  2628. + dev_err(&ndev->dev,
  2629. + "The sum of all CBS Bandwidth cann't exceed 100\n");
  2630. + return -EINVAL;
  2631. + }
  2632. +
  2633. + cbs[tc].bw = bw;
  2634. + cbs[tc].tc_max_sized_frame = enetc_port_rd(&si->hw, ENETC_PTCMSDUR(tc));
  2635. + cbs[tc].idle_slope = port_transmit_rate / 100 * bw;
  2636. + cbs[tc].send_slope = port_transmit_rate - cbs[tc].idle_slope;
  2637. +
+ /* For TC7, the max_interfrence_size is ENETC_MAC_MAXFRM_SIZE.
+ * For TC6, the max_interfrence_size is calculated as below:
+ *
+ * max_interfrence_size = (M0 + Ma + Ra * M0 / (R0 - Ra))
+ *
+ * For other traffic classes, for example SR class Q:
+ *
+ *                          R0 * (M0 + Ma + ... + Mp)
+ * max_interfrence_size = -----------------------------
+ *                         (R0 - Ra) + ... + (R0 - Rp)
+ */
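+ /* Editor's worked example (illustrative numbers only): with
+ * R0 = 1 Gbps, M0 = Ma = 1522 octets and Ra = 200 Mbps, the TC6
+ * case gives (1522 + 1522 + 1522 * 200 / 800) * 8 = 27392 bits,
+ * and with bw = 20 the hi_credit computed below is
+ * 27392 * 20 / 100 = 5478 bits.
+ */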
  2650. +
  2651. + if (tc == tc_nums - 1) {
  2652. + cbs[tc].max_interfrence_size = port_frame_max_size * 8;
  2653. +
  2654. + } else if (tc == tc_nums - 2) {
+ cbs[tc].max_interfrence_size = (port_frame_max_size
+ + cbs[tc + 1].tc_max_sized_frame
+ + (u64)port_frame_max_size * cbs[tc + 1].idle_slope
+ / cbs[tc + 1].send_slope) * 8;
  2659. + } else {
  2660. + max_interfrence_size = port_frame_max_size;
  2661. + send_slope = 0;
  2662. + for (i = tc + 1; i < tc_nums; i++) {
  2663. + send_slope += cbs[i].send_slope;
  2664. + max_interfrence_size += cbs[i].tc_max_sized_frame;
  2665. + }
  2666. + max_interfrence_size = ((u64)port_transmit_rate
  2667. + * max_interfrence_size) / send_slope;
  2668. + cbs[tc].max_interfrence_size = max_interfrence_size * 8;
  2669. + }
  2670. +
  2671. + cbs[tc].hi_credit = cbs[tc].max_interfrence_size * cbs[tc].bw / 100;
+ cbs[tc].lo_credit = (u64)cbs[tc].tc_max_sized_frame
+ * cbs[tc].send_slope / port_transmit_rate;
  2674. + cbs[tc].tc = tc;
  2675. +
  2676. + hi_credit = (ENETC_CLK * 100L) * (u64)cbs[tc].hi_credit
  2677. + / port_transmit_rate;
  2678. + enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), hi_credit);
  2679. +
+ /* Set the bw register and enable this traffic class */
+ enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc),
+ (cbs[tc].bw & 0x7F) | (1U << 31));
  2683. + cbs[tc].enable = true;
  2684. +
  2685. + return 0;
  2686. +}
  2687. +
  2688. +static int enetc_get_cbs(struct net_device *ndev, u8 tc)
  2689. +{
  2690. + struct enetc_ndev_priv *priv = netdev_priv(ndev);
  2691. + struct enetc_si *si = priv->si;
  2692. + struct enetc_cbs *ecbs = si->ecbs;
  2693. + struct cbs *cbs;
  2694. +
  2695. + if (!ecbs)
  2696. + return -ENOMEM;
  2697. + cbs = ecbs->cbs;
  2698. + if (tc >= ecbs->tc_nums) {
+ dev_err(&ndev->dev, "TC must be less than %d\n", ecbs->tc_nums);
  2700. + return -EINVAL;
  2701. + }
  2702. +
  2703. + return cbs[tc].bw;
  2704. +}
  2705. +
  2706. +static int enetc_set_tsd(struct net_device *ndev, struct tsn_tsd *ttsd)
  2707. +{
  2708. + return 0;
  2709. +}
  2710. +
  2711. +static int enetc_get_tsd(struct net_device *ndev, struct tsn_tsd_status *tts)
  2712. +{
  2713. + return 0;
  2714. +}
  2715. +
  2716. +static u32 get_ndev_speed(struct net_device *netdev)
  2717. +{
  2718. + struct ethtool_link_ksettings ksettings;
  2719. + int rc = -1;
  2720. +
  2721. + if (netdev->ethtool_ops->get_link_ksettings) {
  2722. + if (netdev->ethtool_ops->begin) {
  2723. + rc = netdev->ethtool_ops->begin(netdev);
  2724. + if (rc < 0)
  2725. + return 0;
  2726. + }
  2727. +
  2728. + memset(&ksettings, 0, sizeof(ksettings));
+
  2733. + rc = netdev->ethtool_ops->get_link_ksettings(netdev,
  2734. + &ksettings);
  2735. +
  2736. + if (netdev->ethtool_ops->complete)
  2737. + netdev->ethtool_ops->complete(netdev);
  2738. + }
  2739. +
  2740. + return (rc < 0) ? 0 : ksettings.base.speed;
  2741. +}
  2742. +
  2743. +static void enetc_cbs_init(struct enetc_si *si)
  2744. +{
  2745. + struct enetc_ndev_priv *priv = netdev_priv(si->ndev);
  2746. + u8 tc_nums;
  2747. +
  2748. + tc_nums = priv->num_tx_rings;
  2749. + si->ecbs = kzalloc(sizeof(*si->ecbs) +
  2750. + sizeof(struct cbs) * tc_nums, GFP_KERNEL);
  2751. + if (!si->ecbs)
  2752. + return;
  2753. +
  2754. + si->ecbs->port_max_size_frame = si->ndev->mtu + ETH_HLEN
  2755. + + VLAN_HLEN + ETH_FCS_LEN;
  2756. + si->ecbs->tc_nums = tc_nums;
  2757. + si->ecbs->port_transmit_rate = get_ndev_speed(si->ndev);
  2758. +
+ /* Default to 1 Gbps when the link speed cannot be read; this
+ * fallback is only needed on CFP emulation platforms.
+ */
+ if (!si->ecbs->port_transmit_rate)
+ si->ecbs->port_transmit_rate = 1000000000;
  2768. +}
  2769. +
  2770. +static void enetc_qbv_init(struct enetc_hw *hw)
  2771. +{
  2772. + /* Set PSPEED to be 1Gbps */
  2773. + enetc_port_wr(hw, ENETC_PMR,
  2774. + (enetc_port_rd(hw, ENETC_PMR)
  2775. + & (~ENETC_PMR_PSPEED_MASK))
  2776. + | ENETC_PMR_PSPEED_1000M);
  2777. +}
  2778. +
  2779. +void enetc_tsn_init(struct net_device *ndev)
  2780. +{
  2781. + struct enetc_ndev_priv *priv = netdev_priv(ndev);
  2782. + struct enetc_si *si = priv->si;
  2783. + u32 capability = 0;
  2784. +
  2785. + capability = __enetc_tsn_get_cap(si);
  2786. +
  2787. + if (capability & TSN_CAP_CBS)
  2788. + enetc_cbs_init(si);
  2789. +
  2790. + if (capability & TSN_CAP_QBV)
  2791. + enetc_qbv_init(&si->hw);
  2792. +
  2793. + if (capability & TSN_CAP_QCI)
  2794. + enetc_qci_enable(&si->hw);
  2795. +
  2796. + dev_info(&si->pdev->dev, "%s: setup done\n", __func__);
  2797. +}
  2798. +
  2799. +void enetc_tsn_deinit(struct net_device *ndev)
  2800. +{
  2801. + struct enetc_ndev_priv *priv = netdev_priv(ndev);
  2802. + struct enetc_si *si = priv->si;
  2803. +
  2804. + dev_info(&si->pdev->dev, "%s: release\n", __func__);
  2805. +}
  2806. +
  2807. +static struct tsn_ops enetc_tsn_ops_full = {
  2808. + .device_init = enetc_tsn_init,
  2809. + .device_deinit = enetc_tsn_deinit,
  2810. + .get_capability = enetc_tsn_get_capability,
  2811. + .qbv_set = enetc_qbv_set,
  2812. + .qbv_get = enetc_qbv_get,
  2813. + .qbv_get_status = enetc_qbv_get_status,
  2814. + .cb_streamid_set = enetc_cb_streamid_set,
  2815. + .cb_streamid_get = enetc_cb_streamid_get,
  2816. + .cb_streamid_counters_get = enetc_cb_streamid_counters_get,
  2817. + .qci_get_maxcap = enetc_get_max_cap,
  2818. + .qci_sfi_set = enetc_qci_sfi_set,
  2819. + .qci_sfi_get = enetc_qci_sfi_get,
  2820. + .qci_sfi_counters_get = enetc_qci_sfi_counters_get,
  2821. + .qci_sgi_set = enetc_qci_sgi_set,
  2822. + .qci_sgi_get = enetc_qci_sgi_get,
  2823. + .qci_sgi_status_get = enetc_qci_sgi_status_get,
  2824. + .qci_fmi_set = enetc_qci_fmi_set,
  2825. + .qci_fmi_get = enetc_qci_fmi_get,
  2826. + .qbu_set = enetc_qbu_set,
  2827. + .qbu_get = enetc_qbu_get,
  2828. + .cbs_set = enetc_set_cbs,
  2829. + .cbs_get = enetc_get_cbs,
  2830. + .tsd_set = enetc_set_tsd,
  2831. + .tsd_get = enetc_get_tsd,
  2832. +};
  2833. +
  2834. +static struct tsn_ops enetc_tsn_ops_part = {
  2835. + .device_init = enetc_tsn_init,
  2836. + .device_deinit = enetc_tsn_deinit,
  2837. + .get_capability = enetc_tsn_get_capability,
  2838. + .cb_streamid_set = enetc_cb_streamid_set,
  2839. + .cb_streamid_get = enetc_cb_streamid_get,
  2840. + .cb_streamid_counters_get = enetc_cb_streamid_counters_get,
  2841. + .qci_get_maxcap = enetc_get_max_cap,
  2842. + .qci_sfi_set = enetc_qci_sfi_set,
  2843. + .qci_sfi_get = enetc_qci_sfi_get,
  2844. + .qci_sfi_counters_get = enetc_qci_sfi_counters_get,
  2845. + .qci_sgi_set = enetc_qci_sgi_set,
  2846. + .qci_sgi_get = enetc_qci_sgi_get,
  2847. + .qci_sgi_status_get = enetc_qci_sgi_status_get,
  2848. + .qci_fmi_set = enetc_qci_fmi_set,
  2849. + .qci_fmi_get = enetc_qci_fmi_get,
  2850. +};
  2851. +
  2852. +void enetc_tsn_pf_init(struct net_device *netdev, struct pci_dev *pdev)
  2853. +{
  2854. + int port = pdev->devfn & 0x7;
  2855. +
  2856. + if (port == 1 || port == 3)
  2857. + tsn_port_register(netdev, &enetc_tsn_ops_part,
  2858. + (u16)pdev->bus->number);
  2859. + else
  2860. + tsn_port_register(netdev, &enetc_tsn_ops_full,
  2861. + (u16)pdev->bus->number);
  2862. +}
  2863. +
  2864. +void enetc_tsn_pf_deinit(struct net_device *netdev)
  2865. +{
  2866. + tsn_port_unregister(netdev);
  2867. +}
  2868. +#endif /* #if IS_ENABLED(CONFIG_ENETC_TSN) */