ifxmips_ptm_adsl.c 51 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629
  1. /******************************************************************************
  2. **
  3. ** FILE NAME : ifxmips_ptm_adsl.c
  4. ** PROJECT : UEIP
  5. ** MODULES : PTM
  6. **
  7. ** DATE : 7 Jul 2009
  8. ** AUTHOR : Xu Liang
  9. ** DESCRIPTION : PTM driver common source file (core functions for Danube/
  10. ** Amazon-SE/AR9)
  11. ** COPYRIGHT : Copyright (c) 2006
  12. ** Infineon Technologies AG
  13. ** Am Campeon 1-12, 85579 Neubiberg, Germany
  14. **
  15. ** This program is free software; you can redistribute it and/or modify
  16. ** it under the terms of the GNU General Public License as published by
  17. ** the Free Software Foundation; either version 2 of the License, or
  18. ** (at your option) any later version.
  19. **
  20. ** HISTORY
  21. ** $Date $Author $Comment
  22. ** 07 JUL 2009 Xu Liang Init Version
  23. *******************************************************************************/
  24. /*
  25. * ####################################
  26. * Head File
  27. * ####################################
  28. */
  29. /*
  30. * Common Head File
  31. */
  32. #include <linux/version.h>
  33. #include <linux/kernel.h>
  34. #include <linux/module.h>
  35. #include <linux/types.h>
  36. #include <linux/errno.h>
  37. #include <linux/proc_fs.h>
  38. #include <linux/init.h>
  39. #include <linux/ioctl.h>
  40. #include <linux/etherdevice.h>
  41. #include <linux/interrupt.h>
  42. #include <linux/netdevice.h>
  43. #include <linux/platform_device.h>
  44. #include <linux/of_device.h>
  45. #include <asm/io.h>
  46. /*
  47. * Chip Specific Head File
  48. */
  49. #include "ifxmips_ptm_adsl.h"
  50. #include <lantiq_soc.h>
  51. /*
  52. * ####################################
  53. * Kernel Version Adaption
  54. * ####################################
  55. */
/* Pre-2.6.11 kernels use the legacy MODULE_PARM interface; on newer kernels
 * map it onto module_param()/module_param_array(). */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
#define MODULE_PARM_ARRAY(a, b) module_param_array(a, int, NULL, 0)
#define MODULE_PARM(a, b) module_param(a, int, 0)
#else
#define MODULE_PARM_ARRAY(a, b) MODULE_PARM(a, b)
#endif
/*
 * ####################################
 *   Parameters to Configure PPE
 * ####################################
 */
static int write_desc_delay = 0x20; /* Write descriptor delay */
static int rx_max_packet_size = ETH_MAX_FRAME_LENGTH; /* Max packet size for RX */
static int dma_rx_descriptor_length = 24; /* Number of descriptors per DMA RX channel */
static int dma_tx_descriptor_length = 24; /* Number of descriptors per DMA TX channel */
static int eth_efmtc_crc_cfg = 0x03100710; /* default: tx_eth_crc_check: 1, tx_tc_crc_check: 1, tx_tc_crc_len = 16 */
                                           /* rx_eth_crc_present: 1, rx_eth_crc_check: 1, rx_tc_crc_check: 1, rx_tc_crc_len = 16 */
MODULE_PARM(write_desc_delay, "i");
MODULE_PARM_DESC(write_desc_delay, "PPE core clock cycles between descriptor write and effectiveness in external RAM");
MODULE_PARM(rx_max_packet_size, "i");
MODULE_PARM_DESC(rx_max_packet_size, "Max packet size in byte for downstream ethernet frames");
MODULE_PARM(dma_rx_descriptor_length, "i");
MODULE_PARM_DESC(dma_rx_descriptor_length, "Number of descriptor assigned to DMA RX channel (>16)");
MODULE_PARM(dma_tx_descriptor_length, "i");
MODULE_PARM_DESC(dma_tx_descriptor_length, "Number of descriptor assigned to DMA TX channel (>16)");
MODULE_PARM(eth_efmtc_crc_cfg, "i");
MODULE_PARM_DESC(eth_efmtc_crc_cfg, "Configuration for PTM TX/RX ethernet/efm-tc CRC");
/*
 * ####################################
 *             Definition
 * ####################################
 */
/* Pass as the "len" argument of dump_skb() to dump the whole skb payload. */
#define DUMP_SKB_LEN ~0
  90. /*
  91. * ####################################
  92. * Declaration
  93. * ####################################
  94. */
  95. /*
  96. * Network Operations
  97. */
static void ptm_setup(struct net_device *, int);
static struct net_device_stats *ptm_get_stats(struct net_device *);
static int ptm_open(struct net_device *);
static int ptm_stop(struct net_device *);
static unsigned int ptm_poll(int, unsigned int);
static int ptm_napi_poll(struct napi_struct *, int);
static int ptm_hard_start_xmit(struct sk_buff *, struct net_device *);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
static int ptm_change_mtu(struct net_device *, int);
#endif
static int ptm_ioctl(struct net_device *, struct ifreq *, int);
static void ptm_tx_timeout(struct net_device *);
/*
 * DSL Data LED
 */
static INLINE void adsl_led_flash(void);
/*
 * buffer manage functions
 */
static INLINE struct sk_buff* alloc_skb_rx(void);
//static INLINE struct sk_buff* alloc_skb_tx(unsigned int);
static INLINE struct sk_buff *get_skb_rx_pointer(unsigned int);
static INLINE int get_tx_desc(unsigned int, unsigned int *);
/*
 * Mailbox handler and signal function
 */
static INLINE int mailbox_rx_irq_handler(unsigned int);
static irqreturn_t mailbox_irq_handler(int, void *);
static INLINE void mailbox_signal(unsigned int, int);
#ifdef CONFIG_IFX_PTM_RX_TASKLET
static void do_ptm_tasklet(unsigned long);
#endif
/*
 * Debug Functions
 */
#if defined(DEBUG_DUMP_SKB) && DEBUG_DUMP_SKB
static void dump_skb(struct sk_buff *, u32, char *, int, int, int);
#else
/* compiled out when skb dumping is disabled */
#define dump_skb(skb, len, title, port, ch, is_tx) do {} while (0)
#endif
#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
static void skb_swap(struct sk_buff *);
#else
/* compiled out when the debug proc interface is disabled */
#define skb_swap(skb) do {} while (0)
#endif
/*
 * Proc File Functions
 */
static INLINE void proc_file_create(void);
static INLINE void proc_file_delete(void);
static int proc_read_version(char *, char **, off_t, int, int *, void *);
static int proc_read_wanmib(char *, char **, off_t, int, int *, void *);
static int proc_write_wanmib(struct file *, const char *, unsigned long, void *);
#if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
static int proc_read_genconf(char *, char **, off_t, int, int *, void *);
#endif
#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
static int proc_read_dbg(char *, char **, off_t, int, int *, void *);
static int proc_write_dbg(struct file *, const char *, unsigned long, void *);
#endif
/*
 * Proc Help Functions
 */
static INLINE int stricmp(const char *, const char *);
#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
static INLINE int strincmp(const char *, const char *, int);
#endif
static INLINE int ifx_ptm_version(char *);
/*
 * Init & clean-up functions
 */
static INLINE void check_parameters(void);
static INLINE int init_priv_data(void);
static INLINE void clear_priv_data(void);
static INLINE void init_tables(void);
  173. /*
* External Function
  175. */
  176. #if defined(CONFIG_IFXMIPS_DSL_CPE_MEI) || defined(CONFIG_IFXMIPS_DSL_CPE_MEI_MODULE)
  177. extern int ifx_mei_atm_showtime_check(int *is_showtime, struct port_cell_info *port_cell, void **xdata_addr);
  178. #else
  179. static inline int ifx_mei_atm_showtime_check(int *is_showtime, struct port_cell_info *port_cell, void **xdata_addr)
  180. {
  181. if ( is_showtime != NULL )
  182. *is_showtime = 0;
  183. return 0;
  184. }
  185. #endif
  186. /*
  187. * External variable
  188. */
#if defined(CONFIG_IFXMIPS_DSL_CPE_MEI) || defined(CONFIG_IFXMIPS_DSL_CPE_MEI_MODULE)
/* Hook pointers owned by the MEI driver; this driver installs its
 * showtime enter/exit callbacks into them. */
extern int (*ifx_mei_atm_showtime_enter)(struct port_cell_info *, void *);
extern int (*ifx_mei_atm_showtime_exit)(void);
#else
/* No MEI driver configured: define and export the hook pointers here so a
 * separately loaded DSL driver can still register showtime notifications. */
int (*ifx_mei_atm_showtime_enter)(struct port_cell_info *, void *) = NULL;
EXPORT_SYMBOL(ifx_mei_atm_showtime_enter);
int (*ifx_mei_atm_showtime_exit)(void) = NULL;
EXPORT_SYMBOL(ifx_mei_atm_showtime_exit);
#endif
  198. /*
  199. * ####################################
  200. * Local Variable
  201. * ####################################
  202. */
static struct ptm_priv_data g_ptm_priv_data; /* per-interface driver state (descriptors, pending skbs, stats, NAPI) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)
/* Network operations shared by both PTM net_devices.
 * NOTE(review): could likely be "static const" -- confirm no code writes it. */
static struct net_device_ops g_ptm_netdev_ops = {
    .ndo_get_stats = ptm_get_stats,
    .ndo_open = ptm_open,
    .ndo_stop = ptm_stop,
    .ndo_start_xmit = ptm_hard_start_xmit,
    .ndo_validate_addr = eth_validate_addr,
    .ndo_set_mac_address = eth_mac_addr,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
    .ndo_change_mtu = ptm_change_mtu,
#endif
    .ndo_do_ioctl = ptm_ioctl,
    .ndo_tx_timeout = ptm_tx_timeout,
};
#endif
static struct net_device *g_net_dev[2] = {0}; /* the two PTM net_devices */
static char *g_net_dev_name[2] = {"dsl0", "dslfast0"};
#ifdef CONFIG_IFX_PTM_RX_TASKLET
/* one RX tasklet per interface; the "data" member is the interface index */
static struct tasklet_struct g_ptm_tasklet[] = {
    {NULL, 0, ATOMIC_INIT(0), do_ptm_tasklet, 0},
    {NULL, 0, ATOMIC_INIT(0), do_ptm_tasklet, 1},
};
#endif
unsigned int ifx_ptm_dbg_enable = DBG_ENABLE_MASK_ERR; /* debug output mask (DBG_ENABLE_MASK_*) */
static struct proc_dir_entry* g_ptm_dir = NULL; /* /proc/driver/ifx_ptm directory entry */
static int g_showtime = 0; /* nonzero while the DSL line is in showtime */
  230. /*
  231. * ####################################
  232. * Local Function
  233. * ####################################
  234. */
/*
 * Initialize one PTM net_device (interface index ndev): hook the netdev
 * operations, register the NAPI context and assign a fixed default MAC
 * address whose last byte differs per interface.
 */
static void ptm_setup(struct net_device *dev, int ndev)
{
#if defined(CONFIG_IFXMIPS_DSL_CPE_MEI) || defined(CONFIG_IFXMIPS_DSL_CPE_MEI_MODULE)
    /* carrier is driven by DSL showtime when the MEI driver is present */
    netif_carrier_off(dev);
#endif
    /* hook network operations */
    dev->netdev_ops = &g_ptm_netdev_ops;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
    /* Allow up to 1508 bytes, for RFC4638 */
    dev->max_mtu = ETH_DATA_LEN + 8;
#endif
    /* NAPI budget of 25 packets per poll */
    netif_napi_add(dev, &g_ptm_priv_data.itf[ndev].napi, ptm_napi_poll, 25);
    dev->watchdog_timeo = ETH_WATCHDOG_TIMEOUT;
    /* fixed default MAC address */
    dev->dev_addr[0] = 0x00;
    dev->dev_addr[1] = 0x20;
    dev->dev_addr[2] = 0xda;
    dev->dev_addr[3] = 0x86;
    dev->dev_addr[4] = 0x23;
    dev->dev_addr[5] = 0x75 + ndev;
}
  255. static struct net_device_stats *ptm_get_stats(struct net_device *dev)
  256. {
  257. int ndev;
  258. for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
  259. ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);
  260. g_ptm_priv_data.itf[ndev].stats.rx_errors = WAN_MIB_TABLE[ndev].wrx_tccrc_err_pdu + WAN_MIB_TABLE[ndev].wrx_ethcrc_err_pdu;
  261. g_ptm_priv_data.itf[ndev].stats.rx_dropped = WAN_MIB_TABLE[ndev].wrx_nodesc_drop_pdu + WAN_MIB_TABLE[ndev].wrx_len_violation_drop_pdu + (WAN_MIB_TABLE[ndev].wrx_correct_pdu - g_ptm_priv_data.itf[ndev].stats.rx_packets);
  262. return &g_ptm_priv_data.itf[ndev].stats;
  263. }
  264. static int ptm_open(struct net_device *dev)
  265. {
  266. int ndev;
  267. for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
  268. ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);
  269. napi_enable(&g_ptm_priv_data.itf[ndev].napi);
  270. IFX_REG_W32_MASK(0, 1 << ndev, MBOX_IGU1_IER);
  271. netif_start_queue(dev);
  272. return 0;
  273. }
  274. static int ptm_stop(struct net_device *dev)
  275. {
  276. int ndev;
  277. for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
  278. ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);
  279. IFX_REG_W32_MASK((1 << ndev) | (1 << (ndev + 16)), 0, MBOX_IGU1_IER);
  280. napi_disable(&g_ptm_priv_data.itf[ndev].napi);
  281. netif_stop_queue(dev);
  282. return 0;
  283. }
  284. static unsigned int ptm_poll(int ndev, unsigned int work_to_do)
  285. {
  286. unsigned int work_done = 0;
  287. ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);
  288. while ( work_done < work_to_do && WRX_DMA_CHANNEL_CONFIG(ndev)->vlddes > 0 ) {
  289. if ( mailbox_rx_irq_handler(ndev) < 0 )
  290. break;
  291. work_done++;
  292. }
  293. return work_done;
  294. }
/*
 * NAPI poll callback: process up to "budget" RX packets, then decide
 * whether to complete NAPI (and re-enable the RX interrupt) or keep
 * polling. The ack/re-check sequence avoids losing an interrupt that
 * fires between the emptiness check and napi_complete().
 */
static int ptm_napi_poll(struct napi_struct *napi, int budget)
{
    int ndev;
    unsigned int work_done;

    /* map the napi context back to its interface index */
    for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != napi->dev; ndev++ );

    work_done = ptm_poll(ndev, budget);

    // interface down
    if ( !netif_running(napi->dev) ) {
        napi_complete(napi);
        return work_done;
    }

    // no more traffic
    if ( WRX_DMA_CHANNEL_CONFIG(ndev)->vlddes == 0 ) {
        // clear interrupt
        IFX_REG_W32_MASK(0, 1 << ndev, MBOX_IGU1_ISRC);
        // double check: a descriptor may have completed between the
        // emptiness test and the acknowledge above
        if ( WRX_DMA_CHANNEL_CONFIG(ndev)->vlddes == 0 ) {
            napi_complete(napi);
            // re-enable the RX interrupt that was masked in the irq handler
            IFX_REG_W32_MASK(0, 1 << ndev, MBOX_IGU1_IER);
            return work_done;
        }
    }

    // next round
    return work_done;
}
/*
 * ndo_start_xmit: queue one frame on the interface's TX descriptor ring
 * and signal the PPE firmware. Always returns NETDEV_TX_OK; frames that
 * cannot be sent (no showtime, ring full) are dropped and counted.
 */
static int ptm_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    int ndev;
    unsigned int f_full;
    int desc_base;
    register struct tx_descriptor reg_desc = {0};

    /* map net_device back to its interface index */
    for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
    ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);

    /* frames cannot be sent while the DSL line is down */
    if ( !g_showtime ) {
        err("not in showtime");
        goto PTM_HARD_START_XMIT_FAIL;
    }

    /* allocate descriptor */
    desc_base = get_tx_desc(ndev, &f_full);
    if ( f_full ) {
        /* ring is full (or about to be): stop the queue; the TX-done
         * interrupt (bit ndev+16) wakes it again from the irq handler */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
        netif_trans_update(dev);
#else
        dev->trans_start = jiffies;
#endif
        netif_stop_queue(dev);
        IFX_REG_W32_MASK(0, 1 << (ndev + 16), MBOX_IGU1_ISRC);
        IFX_REG_W32_MASK(0, 1 << (ndev + 16), MBOX_IGU1_IER);
    }
    if ( desc_base < 0 )
        goto PTM_HARD_START_XMIT_FAIL;

    /* lazily release the skb of an earlier, completed transmission that
     * used this ring slot */
    if ( g_ptm_priv_data.itf[ndev].tx_skb[desc_base] != NULL )
        dev_kfree_skb_any(g_ptm_priv_data.itf[ndev].tx_skb[desc_base]);
    g_ptm_priv_data.itf[ndev].tx_skb[desc_base] = skb;

    /* fill the descriptor: dataptr is a word (4-byte) address, byteoff the
     * offset of skb->data within the aligned burst */
    reg_desc.dataptr = (unsigned int)skb->data >> 2;
    reg_desc.datalen = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len; /* pad short frames to the Ethernet minimum */
    reg_desc.byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
    reg_desc.own = 1;
    reg_desc.c = 1;
    reg_desc.sop = reg_desc.eop = 1; /* single-descriptor frame */

    /* write descriptor to memory and write back cache */
    g_ptm_priv_data.itf[ndev].tx_desc[desc_base] = reg_desc;
    /* NOTE(review): only skb->len bytes are written back although datalen
     * may be padded up to ETH_ZLEN -- confirm the pad bytes are handled by
     * the hardware/firmware. */
    dma_cache_wback((unsigned long)skb->data, skb->len);
    wmb();

    dump_skb(skb, DUMP_SKB_LEN, (char *)__func__, ndev, ndev, 1);

    /* optional debug feature: swap MAC/IP addresses so the frame loops back */
    if ( (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_MAC_SWAP) ) {
        skb_swap(skb);
    }

    g_ptm_priv_data.itf[ndev].stats.tx_packets++;
    g_ptm_priv_data.itf[ndev].stats.tx_bytes += reg_desc.datalen;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
    netif_trans_update(dev);
#else
    dev->trans_start = jiffies;
#endif
    /* tell the firmware a new TX descriptor is ready */
    mailbox_signal(ndev, 1);
    adsl_led_flash();
    return NETDEV_TX_OK;

PTM_HARD_START_XMIT_FAIL:
    dev_kfree_skb_any(skb);
    g_ptm_priv_data.itf[ndev].stats.tx_dropped++;
    return NETDEV_TX_OK;
}
  378. #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
  379. static int ptm_change_mtu(struct net_device *dev, int mtu)
  380. {
  381. /* Allow up to 1508 bytes, for RFC4638 */
  382. if (mtu < 68 || mtu > ETH_DATA_LEN + 8)
  383. return -EINVAL;
  384. dev->mtu = mtu;
  385. return 0;
  386. }
  387. #endif
  388. static int ptm_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
  389. {
  390. int ndev;
  391. for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
  392. ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);
  393. switch ( cmd )
  394. {
  395. case IFX_PTM_MIB_CW_GET:
  396. ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxNoIdleCodewords = WAN_MIB_TABLE[ndev].wrx_nonidle_cw;
  397. ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxIdleCodewords = WAN_MIB_TABLE[ndev].wrx_idle_cw;
  398. ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxCodingViolation = WAN_MIB_TABLE[ndev].wrx_err_cw;
  399. ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifTxNoIdleCodewords = 0;
  400. ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifTxIdleCodewords = 0;
  401. break;
  402. case IFX_PTM_MIB_FRAME_GET:
  403. ((PTM_FRAME_MIB_T *)ifr->ifr_data)->RxCorrect = WAN_MIB_TABLE[ndev].wrx_correct_pdu;
  404. ((PTM_FRAME_MIB_T *)ifr->ifr_data)->TC_CrcError = WAN_MIB_TABLE[ndev].wrx_tccrc_err_pdu;
  405. ((PTM_FRAME_MIB_T *)ifr->ifr_data)->RxDropped = WAN_MIB_TABLE[ndev].wrx_nodesc_drop_pdu + WAN_MIB_TABLE[ndev].wrx_len_violation_drop_pdu;
  406. ((PTM_FRAME_MIB_T *)ifr->ifr_data)->TxSend = WAN_MIB_TABLE[ndev].wtx_total_pdu;
  407. break;
  408. case IFX_PTM_CFG_GET:
  409. ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcPresent = CFG_ETH_EFMTC_CRC->rx_eth_crc_present;
  410. ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcCheck = CFG_ETH_EFMTC_CRC->rx_eth_crc_check;
  411. ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcCheck = CFG_ETH_EFMTC_CRC->rx_tc_crc_check;
  412. ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen = CFG_ETH_EFMTC_CRC->rx_tc_crc_len;
  413. ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxEthCrcGen = CFG_ETH_EFMTC_CRC->tx_eth_crc_gen;
  414. ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcGen = CFG_ETH_EFMTC_CRC->tx_tc_crc_gen;
  415. ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen = CFG_ETH_EFMTC_CRC->tx_tc_crc_len;
  416. break;
  417. case IFX_PTM_CFG_SET:
  418. CFG_ETH_EFMTC_CRC->rx_eth_crc_present = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcPresent ? 1 : 0;
  419. CFG_ETH_EFMTC_CRC->rx_eth_crc_check = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcCheck ? 1 : 0;
  420. if ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcCheck && (((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen == 16 || ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen == 32) )
  421. {
  422. CFG_ETH_EFMTC_CRC->rx_tc_crc_check = 1;
  423. CFG_ETH_EFMTC_CRC->rx_tc_crc_len = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen;
  424. }
  425. else
  426. {
  427. CFG_ETH_EFMTC_CRC->rx_tc_crc_check = 0;
  428. CFG_ETH_EFMTC_CRC->rx_tc_crc_len = 0;
  429. }
  430. CFG_ETH_EFMTC_CRC->tx_eth_crc_gen = ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxEthCrcGen ? 1 : 0;
  431. if ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcGen && (((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen == 16 || ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen == 32) )
  432. {
  433. CFG_ETH_EFMTC_CRC->tx_tc_crc_gen = 1;
  434. CFG_ETH_EFMTC_CRC->tx_tc_crc_len = ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen;
  435. }
  436. else
  437. {
  438. CFG_ETH_EFMTC_CRC->tx_tc_crc_gen = 0;
  439. CFG_ETH_EFMTC_CRC->tx_tc_crc_len = 0;
  440. }
  441. break;
  442. default:
  443. return -EOPNOTSUPP;
  444. }
  445. return 0;
  446. }
  447. static void ptm_tx_timeout(struct net_device *dev)
  448. {
  449. int ndev;
  450. for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
  451. ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);
  452. /* disable TX irq, release skb when sending new packet */
  453. IFX_REG_W32_MASK(1 << (ndev + 16), 0, MBOX_IGU1_IER);
  454. /* wake up TX queue */
  455. netif_wake_queue(dev);
  456. return;
  457. }
/* Hook for flashing the DSL data LED on RX/TX activity; a no-op here, kept
 * so the datapaths have a single call site. */
static INLINE void adsl_led_flash(void)
{
}
/*
 * Allocate an RX buffer for the DMA engine: burst-aligned data pointer,
 * with the owning skb pointer stashed in the word just below skb->data so
 * get_skb_rx_pointer() can recover it from the descriptor's dataptr.
 * Returns NULL on allocation failure.
 */
static INLINE struct sk_buff* alloc_skb_rx(void)
{
    struct sk_buff *skb;

    /* allocate memory including trailer and padding */
    skb = dev_alloc_skb(rx_max_packet_size + RX_HEAD_MAC_ADDR_ALIGNMENT + DATA_BUFFER_ALIGNMENT);
    if ( skb != NULL ) {
        /* must be burst length alignment and reserve two more bytes for MAC address alignment */
        if ( ((unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1)) != 0 )
            skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));
        /* put skb pointer in reserved area "skb->data - 4" */
        *((struct sk_buff **)skb->data - 1) = skb;
        wmb();
        /* write back and invalidate cache for the stored skb pointer */
        dma_cache_wback_inv((unsigned long)skb->data - sizeof(skb), sizeof(skb));
        /* invalidate cache for the data area the DMA engine will fill */
        dma_cache_inv((unsigned long)skb->data, (unsigned int)skb->end - (unsigned int)skb->data);
    }
    return skb;
}
#if 0
/* Currently unused (compiled out): allocate a burst-aligned TX buffer of at
 * least "size" bytes. Kept for reference. */
static INLINE struct sk_buff* alloc_skb_tx(unsigned int size)
{
    struct sk_buff *skb;

    /* allocate memory including padding */
    size = (size + DATA_BUFFER_ALIGNMENT - 1) & ~(DATA_BUFFER_ALIGNMENT - 1);
    skb = dev_alloc_skb(size + DATA_BUFFER_ALIGNMENT);
    /* must be burst length alignment */
    if ( skb != NULL )
        skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));
    return skb;
}
#endif
/*
 * Recover the skb that owns an RX buffer. "dataptr" is the word (4-byte)
 * address taken from the RX descriptor; alloc_skb_rx() stored the skb
 * pointer in the word just below the buffer. The read goes through the
 * uncached KSEG1 segment so it sees the value committed to RAM.
 */
static INLINE struct sk_buff *get_skb_rx_pointer(unsigned int dataptr)
{
    unsigned int skb_dataptr;
    struct sk_buff *skb;

    /* word below the buffer, converted to an uncached virtual address */
    skb_dataptr = ((dataptr - 1) << 2) | KSEG1;
    skb = *(struct sk_buff **)skb_dataptr;

    /* sanity: the stored pointer must be a kernel address and must point
     * back at the buffer named by dataptr */
    ASSERT((unsigned int)skb >= KSEG0, "invalid skb - skb = %#08x, dataptr = %#08x", (unsigned int)skb, dataptr);
    ASSERT(((unsigned int)skb->data | KSEG1) == ((dataptr << 2) | KSEG1), "invalid skb - skb = %#08x, skb->data = %#08x, dataptr = %#08x", (unsigned int)skb, (unsigned int)skb->data, dataptr);

    return skb;
}
  503. static INLINE int get_tx_desc(unsigned int itf, unsigned int *f_full)
  504. {
  505. int desc_base = -1;
  506. struct ptm_itf *p_itf = &g_ptm_priv_data.itf[itf];
  507. // assume TX is serial operation
  508. // no protection provided
  509. *f_full = 1;
  510. if ( p_itf->tx_desc[p_itf->tx_desc_pos].own == 0 ) {
  511. desc_base = p_itf->tx_desc_pos;
  512. if ( ++(p_itf->tx_desc_pos) == dma_tx_descriptor_length )
  513. p_itf->tx_desc_pos = 0;
  514. if ( p_itf->tx_desc[p_itf->tx_desc_pos].own == 0 )
  515. *f_full = 0;
  516. }
  517. return desc_base;
  518. }
/*
 * Process one completed RX descriptor of channel "ch": hand the received
 * frame to the network stack, attach a fresh buffer to the descriptor and
 * return ownership to the PP32 firmware.
 */
static INLINE int mailbox_rx_irq_handler(unsigned int ch) // return: < 0 - descriptor not available, 0 - received one packet
{
    unsigned int ndev = ch;
    struct sk_buff *skb;
    struct sk_buff *new_skb;
    volatile struct rx_descriptor *desc;
    struct rx_descriptor reg_desc;
    int netif_rx_ret;

    desc = &g_ptm_priv_data.itf[ndev].rx_desc[g_ptm_priv_data.itf[ndev].rx_desc_pos];
    if ( desc->own || !desc->c ) // if PP32 hold descriptor or descriptor not completed
        return -EAGAIN;
    /* advance the ring position, wrapping at the end */
    if ( ++g_ptm_priv_data.itf[ndev].rx_desc_pos == dma_rx_descriptor_length )
        g_ptm_priv_data.itf[ndev].rx_desc_pos = 0;

    /* snapshot the descriptor, then recover the skb stashed below its buffer */
    reg_desc = *desc;
    skb = get_skb_rx_pointer(reg_desc.dataptr);

    if ( !reg_desc.err ) {
        new_skb = alloc_skb_rx();
        if ( new_skb != NULL ) {
            /* deliver the received skb and give the fresh buffer to hardware */
            skb_reserve(skb, reg_desc.byteoff);
            skb_put(skb, reg_desc.datalen);
            dump_skb(skb, DUMP_SKB_LEN, (char *)__func__, ndev, ndev, 0);
            // parse protocol header
            skb->dev = g_net_dev[ndev];
            skb->protocol = eth_type_trans(skb, skb->dev);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0))
            g_net_dev[ndev]->last_rx = jiffies;
#endif
            netif_rx_ret = netif_receive_skb(skb);
            if ( netif_rx_ret != NET_RX_DROP ) {
                g_ptm_priv_data.itf[ndev].stats.rx_packets++;
                g_ptm_priv_data.itf[ndev].stats.rx_bytes += reg_desc.datalen;
            }
            reg_desc.dataptr = ((unsigned int)new_skb->data >> 2) & 0x0FFFFFFF;
            reg_desc.byteoff = RX_HEAD_MAC_ADDR_ALIGNMENT;
        }
        /* NOTE(review): if alloc_skb_rx() fails the old buffer is recycled
         * and the packet is silently dropped without updating a drop
         * counter -- confirm this is intended. */
    }
    else
        reg_desc.err = 0; /* bad frame: clear the error flag and recycle the buffer */

    /* reset the descriptor and return ownership to PP32 */
    reg_desc.datalen = rx_max_packet_size;
    reg_desc.own = 1;
    reg_desc.c = 0;
    // update descriptor
    *desc = reg_desc;
    wmb();

    /* tell the firmware a new RX descriptor is available */
    mailbox_signal(ndev, 0);
    adsl_led_flash();
    return 0;
}
  567. static irqreturn_t mailbox_irq_handler(int irq, void *dev_id)
  568. {
  569. unsigned int isr;
  570. int i;
  571. isr = IFX_REG_R32(MBOX_IGU1_ISR);
  572. IFX_REG_W32(isr, MBOX_IGU1_ISRC);
  573. isr &= IFX_REG_R32(MBOX_IGU1_IER);
  574. while ( (i = __fls(isr)) >= 0 ) {
  575. isr ^= 1 << i;
  576. if ( i >= 16 ) {
  577. // TX
  578. IFX_REG_W32_MASK(1 << i, 0, MBOX_IGU1_IER);
  579. i -= 16;
  580. if ( i < MAX_ITF_NUMBER )
  581. netif_wake_queue(g_net_dev[i]);
  582. }
  583. else {
  584. // RX
  585. #ifdef CONFIG_IFX_PTM_RX_INTERRUPT
  586. while ( WRX_DMA_CHANNEL_CONFIG(i)->vlddes > 0 )
  587. mailbox_rx_irq_handler(i);
  588. #else
  589. IFX_REG_W32_MASK(1 << i, 0, MBOX_IGU1_IER);
  590. napi_schedule(&g_ptm_priv_data.itf[i].napi);
  591. #endif
  592. }
  593. }
  594. return IRQ_HANDLED;
  595. }
  596. static INLINE void mailbox_signal(unsigned int itf, int is_tx)
  597. {
  598. int count = 1000;
  599. if ( is_tx ) {
  600. while ( MBOX_IGU3_ISR_ISR(itf + 16) && count > 0 )
  601. count--;
  602. IFX_REG_W32(MBOX_IGU3_ISRS_SET(itf + 16), MBOX_IGU3_ISRS);
  603. }
  604. else {
  605. while ( MBOX_IGU3_ISR_ISR(itf) && count > 0 )
  606. count--;
  607. IFX_REG_W32(MBOX_IGU3_ISRS_SET(itf), MBOX_IGU3_ISRS);
  608. }
  609. ASSERT(count != 0, "MBOX_IGU3_ISR = 0x%08x", IFX_REG_R32(MBOX_IGU3_ISR));
  610. }
  611. #ifdef CONFIG_IFX_PTM_RX_TASKLET
/*
 * Tasklet-mode RX: drain up to 25 packets for interface "arg", then either
 * re-enable the RX interrupt (when the channel is empty) or reschedule the
 * tasklet. Mirrors the ack/re-check logic of ptm_napi_poll().
 */
static void do_ptm_tasklet(unsigned long arg)
{
    unsigned int work_to_do = 25;
    unsigned int work_done = 0;

    ASSERT(arg >= 0 && arg < ARRAY_SIZE(g_net_dev), "arg = %lu (wrong value)", arg);

    while ( work_done < work_to_do && WRX_DMA_CHANNEL_CONFIG(arg)->vlddes > 0 ) {
        if ( mailbox_rx_irq_handler(arg) < 0 )
            break;
        work_done++;
    }

    // interface down
    if ( !netif_running(g_net_dev[arg]) )
        return;
    // no more traffic
    if ( WRX_DMA_CHANNEL_CONFIG(arg)->vlddes == 0 ) {
        // clear interrupt
        IFX_REG_W32_MASK(0, 1 << arg, MBOX_IGU1_ISRC);
        // double check: a descriptor may have completed between the
        // emptiness test and the acknowledge above
        if ( WRX_DMA_CHANNEL_CONFIG(arg)->vlddes == 0 ) {
            // re-enable the RX interrupt that was masked in the irq handler
            IFX_REG_W32_MASK(0, 1 << arg, MBOX_IGU1_IER);
            return;
        }
    }
    // next round
    tasklet_schedule(&g_ptm_tasklet[arg]);
}
  638. #endif
  639. #if defined(DEBUG_DUMP_SKB) && DEBUG_DUMP_SKB
  640. static void dump_skb(struct sk_buff *skb, u32 len, char *title, int port, int ch, int is_tx)
  641. {
  642. int i;
  643. if ( !(ifx_ptm_dbg_enable & (is_tx ? DBG_ENABLE_MASK_DUMP_SKB_TX : DBG_ENABLE_MASK_DUMP_SKB_RX)) )
  644. return;
  645. if ( skb->len < len )
  646. len = skb->len;
  647. if ( len > rx_max_packet_size ) {
  648. printk("too big data length: skb = %08x, skb->data = %08x, skb->len = %d\n", (u32)skb, (u32)skb->data, skb->len);
  649. return;
  650. }
  651. if ( ch >= 0 )
  652. printk("%s (port %d, ch %d)\n", title, port, ch);
  653. else
  654. printk("%s\n", title);
  655. printk(" skb->data = %08X, skb->tail = %08X, skb->len = %d\n", (u32)skb->data, (u32)skb->tail, (int)skb->len);
  656. for ( i = 1; i <= len; i++ ) {
  657. if ( i % 16 == 1 )
  658. printk(" %4d:", i - 1);
  659. printk(" %02X", (int)(*((char*)skb->data + i - 1) & 0xFF));
  660. if ( i % 16 == 0 )
  661. printk("\n");
  662. }
  663. if ( (i - 1) % 16 != 0 )
  664. printk("\n");
  665. }
  666. #endif
  667. #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
  668. static void skb_swap(struct sk_buff *skb)
  669. {
  670. unsigned char tmp[8];
  671. unsigned char *p = skb->data;
  672. if ( !(p[0] & 0x01) ) { // bypass broadcast/multicast
  673. // swap MAC
  674. memcpy(tmp, p, 6);
  675. memcpy(p, p + 6, 6);
  676. memcpy(p + 6, tmp, 6);
  677. p += 12;
  678. // bypass VLAN
  679. while ( p[0] == 0x81 && p[1] == 0x00 )
  680. p += 4;
  681. // IP
  682. if ( p[0] == 0x08 && p[1] == 0x00 ) {
  683. p += 14;
  684. memcpy(tmp, p, 4);
  685. memcpy(p, p + 4, 4);
  686. memcpy(p + 4, tmp, 4);
  687. p += 8;
  688. }
  689. dma_cache_wback((unsigned long)skb->data, (unsigned long)p - (unsigned long)skb->data);
  690. }
  691. }
  692. #endif
/*
 * Create the /proc/driver/ifx_ptm/ entries using the legacy procfs API.
 * Entries: version (read-only), wanmib (read/write), dbg (read/write),
 * plus genconf and -- on AR9 only -- regs when ENABLE_FW_PROC is set.
 * Everything is compiled out when ENABLE_DBG_PROC is disabled.
 */
static INLINE void proc_file_create(void)
{
#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
    struct proc_dir_entry *res;

    g_ptm_dir = proc_mkdir("driver/ifx_ptm", NULL);

    /* read-only driver/firmware version info */
    create_proc_read_entry("version",
                            0,
                            g_ptm_dir,
                            proc_read_version,
                            NULL);

    /* WAN MIB counters: readable, writable ("clear"/"clean" resets them) */
    res = create_proc_entry("wanmib",
                            0,
                            g_ptm_dir);
    if ( res != NULL ) {
        res->read_proc = proc_read_wanmib;
        res->write_proc = proc_write_wanmib;
    }

#if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
    /* firmware general configuration dump */
    create_proc_read_entry("genconf",
                            0,
                            g_ptm_dir,
                            proc_read_genconf,
                            NULL);

#ifdef CONFIG_AR9
    /* raw register dump, AR9 family only */
    create_proc_read_entry("regs",
                            0,
                            g_ptm_dir,
                            ifx_ptm_proc_read_regs,
                            NULL);
#endif
#endif

    /* debug flag control (read current flags / write enable-disable cmds) */
    res = create_proc_entry("dbg",
                            0,
                            g_ptm_dir);
    if ( res != NULL ) {
        res->read_proc = proc_read_dbg;
        res->write_proc = proc_write_dbg;
    }
#endif
}
/*
 * Remove the /proc/driver/ifx_ptm/ entries in reverse order of creation,
 * finishing with the directory itself.  The #if structure must mirror
 * proc_file_create() so only entries that were created get removed.
 */
static INLINE void proc_file_delete(void)
{
#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
    remove_proc_entry("dbg", g_ptm_dir);
#endif

#if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
#ifdef CONFIG_AR9
    remove_proc_entry("regs", g_ptm_dir);
#endif
    remove_proc_entry("genconf", g_ptm_dir);
#endif

    remove_proc_entry("wanmib", g_ptm_dir);

    remove_proc_entry("version", g_ptm_dir);

    remove_proc_entry("driver/ifx_ptm", NULL);
}
  748. static int proc_read_version(char *buf, char **start, off_t offset, int count, int *eof, void *data)
  749. {
  750. int len = 0;
  751. len += ifx_ptm_version(buf + len);
  752. if ( offset >= len ) {
  753. *start = buf;
  754. *eof = 1;
  755. return 0;
  756. }
  757. *start = buf + offset;
  758. if ( (len -= offset) > count )
  759. return count;
  760. *eof = 1;
  761. return len;
  762. }
  763. static int proc_read_wanmib(char *page, char **start, off_t off, int count, int *eof, void *data)
  764. {
  765. int len = 0;
  766. int i;
  767. char *title[] = {
  768. "dsl0\n",
  769. "dslfast0\n"
  770. };
  771. for ( i = 0; i < ARRAY_SIZE(title); i++ ) {
  772. len += sprintf(page + off + len, title[i]);
  773. len += sprintf(page + off + len, " wrx_correct_pdu = %d\n", WAN_MIB_TABLE[i].wrx_correct_pdu);
  774. len += sprintf(page + off + len, " wrx_correct_pdu_bytes = %d\n", WAN_MIB_TABLE[i].wrx_correct_pdu_bytes);
  775. len += sprintf(page + off + len, " wrx_tccrc_err_pdu = %d\n", WAN_MIB_TABLE[i].wrx_tccrc_err_pdu);
  776. len += sprintf(page + off + len, " wrx_tccrc_err_pdu_bytes = %d\n", WAN_MIB_TABLE[i].wrx_tccrc_err_pdu_bytes);
  777. len += sprintf(page + off + len, " wrx_ethcrc_err_pdu = %d\n", WAN_MIB_TABLE[i].wrx_ethcrc_err_pdu);
  778. len += sprintf(page + off + len, " wrx_ethcrc_err_pdu_bytes = %d\n", WAN_MIB_TABLE[i].wrx_ethcrc_err_pdu_bytes);
  779. len += sprintf(page + off + len, " wrx_nodesc_drop_pdu = %d\n", WAN_MIB_TABLE[i].wrx_nodesc_drop_pdu);
  780. len += sprintf(page + off + len, " wrx_len_violation_drop_pdu = %d\n", WAN_MIB_TABLE[i].wrx_len_violation_drop_pdu);
  781. len += sprintf(page + off + len, " wrx_idle_bytes = %d\n", WAN_MIB_TABLE[i].wrx_idle_bytes);
  782. len += sprintf(page + off + len, " wrx_nonidle_cw = %d\n", WAN_MIB_TABLE[i].wrx_nonidle_cw);
  783. len += sprintf(page + off + len, " wrx_idle_cw = %d\n", WAN_MIB_TABLE[i].wrx_idle_cw);
  784. len += sprintf(page + off + len, " wrx_err_cw = %d\n", WAN_MIB_TABLE[i].wrx_err_cw);
  785. len += sprintf(page + off + len, " wtx_total_pdu = %d\n", WAN_MIB_TABLE[i].wtx_total_pdu);
  786. len += sprintf(page + off + len, " wtx_total_bytes = %d\n", WAN_MIB_TABLE[i].wtx_total_bytes);
  787. }
  788. *eof = 1;
  789. return len;
  790. }
/*
 * procfs write handler for /proc/driver/ifx_ptm/wanmib.
 * Accepts "clear" or "clean" (case-insensitive) to reset both WAN MIB
 * counter blocks; any other input is silently ignored.  Always reports
 * the full write as consumed.
 */
static int proc_write_wanmib(struct file *file, const char *buf, unsigned long count, void *data)
{
    char str[2048];
    char *p;
    int len, rlen;
    int i;

    /* copy at most sizeof(str) - 1 bytes from user space; rlen is the
     * number of bytes actually copied (copy_from_user returns the
     * uncopied remainder) */
    len = count < sizeof(str) ? count : sizeof(str) - 1;
    rlen = len - copy_from_user(str, buf, len);

    /* strip trailing whitespace/control characters and NUL-terminate */
    while ( rlen && str[rlen - 1] <= ' ' )
        rlen--;
    str[rlen] = 0;

    /* skip leading whitespace */
    for ( p = str; *p && *p <= ' '; p++, rlen-- );
    if ( !*p )
        return count;

    if ( stricmp(p, "clear") == 0 || stricmp(p, "clean") == 0 ) {
        /* reset both MIB blocks (dsl0 and dslfast0) */
        for ( i = 0; i < 2; i++ )
            memset((void*)&WAN_MIB_TABLE[i], 0, sizeof(WAN_MIB_TABLE[i]));
    }

    return count;
}
  811. #if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
/*
 * procfs read handler for /proc/driver/ifx_ptm/genconf: dump the PPE
 * general configuration registers and the WRX/WTX port and DMA channel
 * tables.  The text is rendered into a local buffer and then windowed
 * into the caller's page using the off/count read_proc protocol.
 */
static int proc_read_genconf(char *page, char **start, off_t off, int count, int *eof, void *data)
{
    int len = 0;                /* total bytes rendered so far (across calls) */
    int len_max = off + count;  /* upper bound the caller can accept */
    char *pstr;
    char str[2048];             /* local render buffer for this chunk */
    int llen = 0;               /* bytes rendered into str this call */
    int i;
    unsigned long bit;

    pstr = *start = page;

    __sync();   /* make sure we read up-to-date table contents */

    llen += sprintf(str + llen, "CFG_WAN_WRDES_DELAY (0x%08X): %d\n", (unsigned int)CFG_WAN_WRDES_DELAY, IFX_REG_R32(CFG_WAN_WRDES_DELAY));
    llen += sprintf(str + llen, "CFG_WRX_DMACH_ON (0x%08X):", (unsigned int)CFG_WRX_DMACH_ON);
    for ( i = 0, bit = 1; i < MAX_RX_DMA_CHANNEL_NUMBER; i++, bit <<= 1 )
        llen += sprintf(str + llen, " %d - %s", i, (IFX_REG_R32(CFG_WRX_DMACH_ON) & bit) ? "on " : "off");
    llen += sprintf(str + llen, "\n");
    llen += sprintf(str + llen, "CFG_WTX_DMACH_ON (0x%08X):", (unsigned int)CFG_WTX_DMACH_ON);
    for ( i = 0, bit = 1; i < MAX_TX_DMA_CHANNEL_NUMBER; i++, bit <<= 1 )
        llen += sprintf(str + llen, " %d - %s", i, (IFX_REG_R32(CFG_WTX_DMACH_ON) & bit) ? "on " : "off");
    llen += sprintf(str + llen, "\n");
    llen += sprintf(str + llen, "CFG_WRX_LOOK_BITTH (0x%08X): %d\n", (unsigned int)CFG_WRX_LOOK_BITTH, IFX_REG_R32(CFG_WRX_LOOK_BITTH));
    llen += sprintf(str + llen, "CFG_ETH_EFMTC_CRC (0x%08X): rx_tc_crc_len - %2d, rx_tc_crc_check - %s\n", (unsigned int)CFG_ETH_EFMTC_CRC, CFG_ETH_EFMTC_CRC->rx_tc_crc_len, CFG_ETH_EFMTC_CRC->rx_tc_crc_check ? " on" : "off");
    llen += sprintf(str + llen, " rx_eth_crc_check - %s, rx_eth_crc_present - %s\n", CFG_ETH_EFMTC_CRC->rx_eth_crc_check ? " on" : "off", CFG_ETH_EFMTC_CRC->rx_eth_crc_present ? " on" : "off");
    llen += sprintf(str + llen, " tx_tc_crc_len - %2d, tx_tc_crc_gen - %s\n", CFG_ETH_EFMTC_CRC->tx_tc_crc_len, CFG_ETH_EFMTC_CRC->tx_tc_crc_gen ? " on" : "off");
    llen += sprintf(str + llen, " tx_eth_crc_gen - %s\n", CFG_ETH_EFMTC_CRC->tx_eth_crc_gen ? " on" : "off");

    /* per-channel RX port and DMA ring configuration */
    llen += sprintf(str + llen, "RX Port:\n");
    for ( i = 0; i < MAX_RX_DMA_CHANNEL_NUMBER; i++ )
        llen += sprintf(str + llen, " %d (0x%08X). mfs - %5d, dmach - %d, local_state - %d, partner_state - %d\n", i, (unsigned int)WRX_PORT_CONFIG(i), WRX_PORT_CONFIG(i)->mfs, WRX_PORT_CONFIG(i)->dmach, WRX_PORT_CONFIG(i)->local_state, WRX_PORT_CONFIG(i)->partner_state);
    llen += sprintf(str + llen, "RX DMA Channel:\n");
    for ( i = 0; i < MAX_RX_DMA_CHANNEL_NUMBER; i++ )
        llen += sprintf(str + llen, " %d (0x%08X). desba - 0x%08X (0x%08X), deslen - %d, vlddes - %d\n", i, (unsigned int)WRX_DMA_CHANNEL_CONFIG(i), WRX_DMA_CHANNEL_CONFIG(i)->desba, ((unsigned int)WRX_DMA_CHANNEL_CONFIG(i)->desba << 2) | KSEG1, WRX_DMA_CHANNEL_CONFIG(i)->deslen, WRX_DMA_CHANNEL_CONFIG(i)->vlddes);

    /* per-channel TX port and DMA ring configuration */
    llen += sprintf(str + llen, "TX Port:\n");
    for ( i = 0; i < MAX_TX_DMA_CHANNEL_NUMBER; i++ )
        llen += sprintf(str + llen, " %d (0x%08X). tx_cwth2 - %d, tx_cwth1 - %d\n", i, (unsigned int)WTX_PORT_CONFIG(i), WTX_PORT_CONFIG(i)->tx_cwth2, WTX_PORT_CONFIG(i)->tx_cwth1);
    llen += sprintf(str + llen, "TX DMA Channel:\n");
    for ( i = 0; i < MAX_TX_DMA_CHANNEL_NUMBER; i++ )
        llen += sprintf(str + llen, " %d (0x%08X). desba - 0x%08X (0x%08X), deslen - %d, vlddes - %d\n", i, (unsigned int)WTX_DMA_CHANNEL_CONFIG(i), WTX_DMA_CHANNEL_CONFIG(i)->desba, ((unsigned int)WTX_DMA_CHANNEL_CONFIG(i)->desba << 2) | KSEG1, WTX_DMA_CHANNEL_CONFIG(i)->deslen, WTX_DMA_CHANNEL_CONFIG(i)->vlddes);

    /* copy the part of str that falls inside the [off, off+count) window */
    if ( len <= off && len + llen > off )
    {
        memcpy(pstr, str + off - len, len + llen - off);
        pstr += len + llen - off;
    }
    else if ( len > off )
    {
        memcpy(pstr, str, llen);
        pstr += llen;
    }
    len += llen;
    if ( len >= len_max )
        goto PROC_READ_GENCONF_OVERRUN_END;

    *eof = 1;

    return len - off;

PROC_READ_GENCONF_OVERRUN_END:
    /* window full: report only what fit, caller will read again */
    return len - llen - off;
}
  867. #endif // defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
  868. #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
  869. static int proc_read_dbg(char *page, char **start, off_t off, int count, int *eof, void *data)
  870. {
  871. int len = 0;
  872. len += sprintf(page + off + len, "error print - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_ERR) ? "enabled" : "disabled");
  873. len += sprintf(page + off + len, "debug print - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_DEBUG_PRINT) ? "enabled" : "disabled");
  874. len += sprintf(page + off + len, "assert - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_ASSERT) ? "enabled" : "disabled");
  875. len += sprintf(page + off + len, "dump rx skb - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_DUMP_SKB_RX) ? "enabled" : "disabled");
  876. len += sprintf(page + off + len, "dump tx skb - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_DUMP_SKB_TX) ? "enabled" : "disabled");
  877. len += sprintf(page + off + len, "mac swap - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_MAC_SWAP) ? "enabled" : "disabled");
  878. *eof = 1;
  879. return len;
  880. }
  881. static int proc_write_dbg(struct file *file, const char *buf, unsigned long count, void *data)
  882. {
  883. static const char *dbg_enable_mask_str[] = {
  884. " error print",
  885. " err",
  886. " debug print",
  887. " dbg",
  888. " assert",
  889. " assert",
  890. " dump rx skb",
  891. " rx",
  892. " dump tx skb",
  893. " tx",
  894. " dump init",
  895. " init",
  896. " dump qos",
  897. " qos",
  898. " mac swap",
  899. " swap",
  900. " all"
  901. };
  902. static const int dbg_enable_mask_str_len[] = {
  903. 12, 4,
  904. 12, 4,
  905. 7, 7,
  906. 12, 3,
  907. 12, 3,
  908. 10, 5,
  909. 9, 4,
  910. 9, 5,
  911. 4
  912. };
  913. unsigned int dbg_enable_mask[] = {
  914. DBG_ENABLE_MASK_ERR,
  915. DBG_ENABLE_MASK_DEBUG_PRINT,
  916. DBG_ENABLE_MASK_ASSERT,
  917. DBG_ENABLE_MASK_DUMP_SKB_RX,
  918. DBG_ENABLE_MASK_DUMP_SKB_TX,
  919. DBG_ENABLE_MASK_DUMP_INIT,
  920. DBG_ENABLE_MASK_DUMP_QOS,
  921. DBG_ENABLE_MASK_MAC_SWAP,
  922. DBG_ENABLE_MASK_ALL
  923. };
  924. char str[2048];
  925. char *p;
  926. int len, rlen;
  927. int f_enable = 0;
  928. int i;
  929. len = count < sizeof(str) ? count : sizeof(str) - 1;
  930. rlen = len - copy_from_user(str, buf, len);
  931. while ( rlen && str[rlen - 1] <= ' ' )
  932. rlen--;
  933. str[rlen] = 0;
  934. for ( p = str; *p && *p <= ' '; p++, rlen-- );
  935. if ( !*p )
  936. return 0;
  937. // debugging feature for enter/leave showtime
  938. if ( strincmp(p, "enter", 5) == 0 && ifx_mei_atm_showtime_enter != NULL )
  939. ifx_mei_atm_showtime_enter(NULL, NULL);
  940. else if ( strincmp(p, "leave", 5) == 0 && ifx_mei_atm_showtime_exit != NULL )
  941. ifx_mei_atm_showtime_exit();
  942. if ( strincmp(p, "enable", 6) == 0 ) {
  943. p += 6;
  944. f_enable = 1;
  945. }
  946. else if ( strincmp(p, "disable", 7) == 0 ) {
  947. p += 7;
  948. f_enable = -1;
  949. }
  950. else if ( strincmp(p, "help", 4) == 0 || *p == '?' ) {
  951. printk("echo <enable/disable> [err/dbg/assert/rx/tx/init/qos/swap/all] > /proc/driver/ifx_ptm/dbg\n");
  952. }
  953. if ( f_enable ) {
  954. if ( *p == 0 ) {
  955. if ( f_enable > 0 )
  956. ifx_ptm_dbg_enable |= DBG_ENABLE_MASK_ALL & ~DBG_ENABLE_MASK_MAC_SWAP;
  957. else
  958. ifx_ptm_dbg_enable &= ~DBG_ENABLE_MASK_ALL | DBG_ENABLE_MASK_MAC_SWAP;
  959. }
  960. else {
  961. do {
  962. for ( i = 0; i < ARRAY_SIZE(dbg_enable_mask_str); i++ )
  963. if ( strincmp(p, dbg_enable_mask_str[i], dbg_enable_mask_str_len[i]) == 0 ) {
  964. if ( f_enable > 0 )
  965. ifx_ptm_dbg_enable |= dbg_enable_mask[i >> 1];
  966. else
  967. ifx_ptm_dbg_enable &= ~dbg_enable_mask[i >> 1];
  968. p += dbg_enable_mask_str_len[i];
  969. break;
  970. }
  971. } while ( i < ARRAY_SIZE(dbg_enable_mask_str) );
  972. }
  973. }
  974. return count;
  975. }
  976. #endif // defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
  977. static INLINE int stricmp(const char *p1, const char *p2)
  978. {
  979. int c1, c2;
  980. while ( *p1 && *p2 )
  981. {
  982. c1 = *p1 >= 'A' && *p1 <= 'Z' ? *p1 + 'a' - 'A' : *p1;
  983. c2 = *p2 >= 'A' && *p2 <= 'Z' ? *p2 + 'a' - 'A' : *p2;
  984. if ( (c1 -= c2) )
  985. return c1;
  986. p1++;
  987. p2++;
  988. }
  989. return *p1 - *p2;
  990. }
  991. #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
  992. static INLINE int strincmp(const char *p1, const char *p2, int n)
  993. {
  994. int c1 = 0, c2;
  995. while ( n && *p1 && *p2 )
  996. {
  997. c1 = *p1 >= 'A' && *p1 <= 'Z' ? *p1 + 'a' - 'A' : *p1;
  998. c2 = *p2 >= 'A' && *p2 <= 'Z' ? *p2 + 'a' - 'A' : *p2;
  999. if ( (c1 -= c2) )
  1000. return c1;
  1001. p1++;
  1002. p2++;
  1003. n--;
  1004. }
  1005. return n ? *p1 - *p2 : c1;
  1006. }
  1007. #endif
  1008. static INLINE int ifx_ptm_version(char *buf)
  1009. {
  1010. int len = 0;
  1011. unsigned int major, minor;
  1012. ifx_ptm_get_fw_ver(&major, &minor);
  1013. len += sprintf(buf + len, "PTM %d.%d.%d", IFX_PTM_VER_MAJOR, IFX_PTM_VER_MID, IFX_PTM_VER_MINOR);
  1014. len += sprintf(buf + len, " PTM (E1) firmware version %d.%d\n", major, minor);
  1015. return len;
  1016. }
  1017. static INLINE void check_parameters(void)
  1018. {
  1019. /* There is a delay between PPE write descriptor and descriptor is */
  1020. /* really stored in memory. Host also has this delay when writing */
  1021. /* descriptor. So PPE will use this value to determine if the write */
  1022. /* operation makes effect. */
  1023. if ( write_desc_delay < 0 )
  1024. write_desc_delay = 0;
  1025. /* Because of the limitation of length field in descriptors, the packet */
  1026. /* size could not be larger than 64K minus overhead size. */
  1027. if ( rx_max_packet_size < ETH_MIN_FRAME_LENGTH )
  1028. rx_max_packet_size = ETH_MIN_FRAME_LENGTH;
  1029. else if ( rx_max_packet_size > 65536 - 1 )
  1030. rx_max_packet_size = 65536 - 1;
  1031. if ( dma_rx_descriptor_length < 2 )
  1032. dma_rx_descriptor_length = 2;
  1033. if ( dma_tx_descriptor_length < 2 )
  1034. dma_tx_descriptor_length = 2;
  1035. }
/*
 * Allocate and initialize the driver's private data: the RX/TX
 * descriptor rings and the TX sk_buff pointer array for all interfaces,
 * plus one preallocated receive sk_buff per RX descriptor.
 * Returns 0 on success, -1 on any allocation failure; the caller is
 * expected to run clear_priv_data() to release partial allocations.
 */
static INLINE int init_priv_data(void)
{
    void *p;
    int i;
    struct rx_descriptor rx_desc = {0};
    struct sk_buff *skb;
    volatile struct rx_descriptor *p_rx_desc;
    volatile struct tx_descriptor *p_tx_desc;
    struct sk_buff **ppskb;

    // clear ptm private data structure
    memset(&g_ptm_priv_data, 0, sizeof(g_ptm_priv_data));

    // allocate memory for RX descriptors (over-allocated by DESC_ALIGNMENT
    // so the ring base can be aligned below)
    p = kzalloc(MAX_ITF_NUMBER * dma_rx_descriptor_length * sizeof(struct rx_descriptor) + DESC_ALIGNMENT, GFP_KERNEL);
    if ( p == NULL )
        return -1;
    dma_cache_inv((unsigned long)p, MAX_ITF_NUMBER * dma_rx_descriptor_length * sizeof(struct rx_descriptor) + DESC_ALIGNMENT);
    g_ptm_priv_data.rx_desc_base = p;
    //p = (void *)((((unsigned int)p + DESC_ALIGNMENT - 1) & ~(DESC_ALIGNMENT - 1)) | KSEG1);

    // allocate memory for TX descriptors
    p = kzalloc(MAX_ITF_NUMBER * dma_tx_descriptor_length * sizeof(struct tx_descriptor) + DESC_ALIGNMENT, GFP_KERNEL);
    if ( p == NULL )
        return -1;
    dma_cache_inv((unsigned long)p, MAX_ITF_NUMBER * dma_tx_descriptor_length * sizeof(struct tx_descriptor) + DESC_ALIGNMENT);
    g_ptm_priv_data.tx_desc_base = p;

    // allocate memory for TX skb pointers (4 extra bytes for alignment)
    p = kzalloc(MAX_ITF_NUMBER * dma_tx_descriptor_length * sizeof(struct sk_buff *) + 4, GFP_KERNEL);
    if ( p == NULL )
        return -1;
    dma_cache_wback_inv((unsigned long)p, MAX_ITF_NUMBER * dma_tx_descriptor_length * sizeof(struct sk_buff *) + 4);
    g_ptm_priv_data.tx_skb_base = p;

    // align the ring bases (DESC_ALIGNMENT for descriptors, 4 bytes for the
    // skb pointer array) and access descriptors through KSEG1 (uncached)
    p_rx_desc = (volatile struct rx_descriptor *)((((unsigned int)g_ptm_priv_data.rx_desc_base + DESC_ALIGNMENT - 1) & ~(DESC_ALIGNMENT - 1)) | KSEG1);
    p_tx_desc = (volatile struct tx_descriptor *)((((unsigned int)g_ptm_priv_data.tx_desc_base + DESC_ALIGNMENT - 1) & ~(DESC_ALIGNMENT - 1)) | KSEG1);
    ppskb = (struct sk_buff **)(((unsigned int)g_ptm_priv_data.tx_skb_base + 3) & ~3);
    for ( i = 0; i < MAX_ITF_NUMBER; i++ ) {
        g_ptm_priv_data.itf[i].rx_desc = &p_rx_desc[i * dma_rx_descriptor_length];
        g_ptm_priv_data.itf[i].tx_desc = &p_tx_desc[i * dma_tx_descriptor_length];
        g_ptm_priv_data.itf[i].tx_skb = &ppskb[i * dma_tx_descriptor_length];
    }

    // RX descriptor template: owned by the PPE, single-buffer frame
    rx_desc.own = 1;
    rx_desc.c = 0;
    rx_desc.sop = 1;
    rx_desc.eop = 1;
    rx_desc.byteoff = RX_HEAD_MAC_ADDR_ALIGNMENT;
    rx_desc.id = 0;
    rx_desc.err = 0;
    rx_desc.datalen = rx_max_packet_size;
    for ( i = 0; i < MAX_ITF_NUMBER * dma_rx_descriptor_length; i++ ) {
        skb = alloc_skb_rx();
        if ( skb == NULL )
            return -1;
        // store the buffer address as a 28-bit dword address
        rx_desc.dataptr = ((unsigned int)skb->data >> 2) & 0x0FFFFFFF;
        p_rx_desc[i] = rx_desc;
    }

    return 0;
}
  1091. static INLINE void clear_priv_data(void)
  1092. {
  1093. int i, j;
  1094. struct sk_buff *skb;
  1095. for ( i = 0; i < MAX_ITF_NUMBER; i++ ) {
  1096. if ( g_ptm_priv_data.itf[i].tx_skb != NULL ) {
  1097. for ( j = 0; j < dma_tx_descriptor_length; j++ )
  1098. if ( g_ptm_priv_data.itf[i].tx_skb[j] != NULL )
  1099. dev_kfree_skb_any(g_ptm_priv_data.itf[i].tx_skb[j]);
  1100. }
  1101. if ( g_ptm_priv_data.itf[i].rx_desc != NULL ) {
  1102. for ( j = 0; j < dma_rx_descriptor_length; j++ ) {
  1103. if ( g_ptm_priv_data.itf[i].rx_desc[j].sop || g_ptm_priv_data.itf[i].rx_desc[j].eop ) { // descriptor initialized
  1104. skb = get_skb_rx_pointer(g_ptm_priv_data.itf[i].rx_desc[j].dataptr);
  1105. dev_kfree_skb_any(skb);
  1106. }
  1107. }
  1108. }
  1109. }
  1110. if ( g_ptm_priv_data.rx_desc_base != NULL )
  1111. kfree(g_ptm_priv_data.rx_desc_base);
  1112. if ( g_ptm_priv_data.tx_desc_base != NULL )
  1113. kfree(g_ptm_priv_data.tx_desc_base);
  1114. if ( g_ptm_priv_data.tx_skb_base != NULL )
  1115. kfree(g_ptm_priv_data.tx_skb_base);
  1116. }
/*
 * Program the PPE firmware configuration tables: clear CDM data RAM,
 * set up the general WAN registers, and fill the WRX/WTX port and DMA
 * channel configuration tables from the rings built by init_priv_data().
 */
static INLINE void init_tables(void)
{
    int i;
    volatile unsigned int *p;
    struct wrx_dma_channel_config rx_config = {0};
    struct wtx_dma_channel_config tx_config = {0};
    struct wrx_port_cfg_status rx_port_cfg = { 0 };
    struct wtx_port_cfg tx_port_cfg = { 0 };

    /*
     * CDM Block 1
     */
    IFX_REG_W32(CDM_CFG_RAM1_SET(0x00) | CDM_CFG_RAM0_SET(0x00), CDM_CFG);  // CDM block 1 must be data memory and mapped to 0x5000 (dword addr)
    p = CDM_DATA_MEMORY(0, 0);  // Clear CDM block 1
    for ( i = 0; i < CDM_DATA_MEMORY_DWLEN; i++, p++ )
        IFX_REG_W32(0, p);

    /*
     * General Registers
     */
    IFX_REG_W32(write_desc_delay, CFG_WAN_WRDES_DELAY);
    IFX_REG_W32((1 << MAX_RX_DMA_CHANNEL_NUMBER) - 1, CFG_WRX_DMACH_ON);    // enable all RX DMA channels
    IFX_REG_W32((1 << MAX_TX_DMA_CHANNEL_NUMBER) - 1, CFG_WTX_DMACH_ON);    // enable all TX DMA channels
    IFX_REG_W32(8, CFG_WRX_LOOK_BITTH); // WAN RX EFM-TC Looking Threshold
    IFX_REG_W32(eth_efmtc_crc_cfg, CFG_ETH_EFMTC_CRC);

    /*
     * WRX DMA Channel Configuration Table
     */
    rx_config.deslen = dma_rx_descriptor_length;
    rx_port_cfg.mfs = ETH_MAX_FRAME_LENGTH;
    rx_port_cfg.local_state = 0;    // looking for sync
    rx_port_cfg.partner_state = 0;  // parter receiver is out of sync
    for ( i = 0; i < MAX_RX_DMA_CHANNEL_NUMBER; i++ ) {
        // descriptor base is stored as a 28-bit dword address
        rx_config.desba = ((unsigned int)g_ptm_priv_data.itf[i].rx_desc >> 2) & 0x0FFFFFFF;
        *WRX_DMA_CHANNEL_CONFIG(i) = rx_config;

        rx_port_cfg.dmach = i;  // one dedicated DMA channel per port
        *WRX_PORT_CONFIG(i) = rx_port_cfg;
    }

    /*
     * WTX DMA Channel Configuration Table
     */
    tx_config.deslen = dma_tx_descriptor_length;
    tx_port_cfg.tx_cwth1 = 5;   // TX codeword thresholds -- values per
    tx_port_cfg.tx_cwth2 = 4;   // reference setup; confirm against fw spec
    for ( i = 0; i < MAX_TX_DMA_CHANNEL_NUMBER; i++ ) {
        tx_config.desba = ((unsigned int)g_ptm_priv_data.itf[i].tx_desc >> 2) & 0x0FFFFFFF;
        *WTX_DMA_CHANNEL_CONFIG(i) = tx_config;
        *WTX_PORT_CONFIG(i) = tx_port_cfg;
    }
}
  1165. /*
  1166. * ####################################
  1167. * Global Function
  1168. * ####################################
  1169. */
  1170. static int ptm_showtime_enter(struct port_cell_info *port_cell, void *xdata_addr)
  1171. {
  1172. int i;
  1173. g_showtime = 1;
  1174. for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
  1175. netif_carrier_on(g_net_dev[i]);
  1176. printk("enter showtime\n");
  1177. return 0;
  1178. }
  1179. static int ptm_showtime_exit(void)
  1180. {
  1181. int i;
  1182. if ( !g_showtime )
  1183. return -1;
  1184. for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
  1185. netif_carrier_off(g_net_dev[i]);
  1186. g_showtime = 0;
  1187. printk("leave showtime\n");
  1188. return 0;
  1189. }
/* Device-tree match table: exactly one PPE compatible string is selected
 * at build time depending on the target SoC family. */
static const struct of_device_id ltq_ptm_match[] = {
#ifdef CONFIG_DANUBE
    { .compatible = "lantiq,ppe-danube", .data = NULL },
#elif defined CONFIG_AMAZON_SE
    { .compatible = "lantiq,ppe-ase", .data = NULL },
#elif defined CONFIG_AR9
    { .compatible = "lantiq,ppe-arx100", .data = NULL },
#elif defined CONFIG_VR9
    { .compatible = "lantiq,ppe-xrx200", .data = NULL },
#endif
    {},
};
MODULE_DEVICE_TABLE(of, ltq_ptm_match);
  1203. /*
  1204. * ####################################
  1205. * Init/Cleanup API
  1206. * ####################################
  1207. */
  1208. /*
  1209. * Description:
  1210. * Initialize global variables, PP32, comunication structures, register IRQ
  1211. * and register device.
  1212. * Input:
  1213. * none
  1214. * Output:
  1215. * 0 --- successful
  1216. * else --- failure, usually it is negative value of error code
  1217. */
  1218. static int ltq_ptm_probe(struct platform_device *pdev)
  1219. {
  1220. int ret;
  1221. struct port_cell_info port_cell = {0};
  1222. void *xdata_addr = NULL;
  1223. int i;
  1224. char ver_str[256];
  1225. check_parameters();
  1226. ret = init_priv_data();
  1227. if ( ret != 0 ) {
  1228. err("INIT_PRIV_DATA_FAIL");
  1229. goto INIT_PRIV_DATA_FAIL;
  1230. }
  1231. ifx_ptm_init_chip(pdev);
  1232. init_tables();
  1233. for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
  1234. g_net_dev[i] = alloc_netdev(0, g_net_dev_name[i], NET_NAME_UNKNOWN, ether_setup);
  1235. if ( g_net_dev[i] == NULL )
  1236. goto ALLOC_NETDEV_FAIL;
  1237. ptm_setup(g_net_dev[i], i);
  1238. }
  1239. for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
  1240. ret = register_netdev(g_net_dev[i]);
  1241. if ( ret != 0 )
  1242. goto REGISTER_NETDEV_FAIL;
  1243. }
  1244. /* register interrupt handler */
  1245. #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
  1246. ret = request_irq(PPE_MAILBOX_IGU1_INT, mailbox_irq_handler, 0, "ptm_mailbox_isr", &g_ptm_priv_data);
  1247. #else
  1248. ret = request_irq(PPE_MAILBOX_IGU1_INT, mailbox_irq_handler, IRQF_DISABLED, "ptm_mailbox_isr", &g_ptm_priv_data);
  1249. #endif
  1250. if ( ret ) {
  1251. if ( ret == -EBUSY ) {
  1252. err("IRQ may be occupied by other driver, please reconfig to disable it.");
  1253. }
  1254. else {
  1255. err("request_irq fail");
  1256. }
  1257. goto REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL;
  1258. }
  1259. disable_irq(PPE_MAILBOX_IGU1_INT);
  1260. ret = ifx_pp32_start(0);
  1261. if ( ret ) {
  1262. err("ifx_pp32_start fail!");
  1263. goto PP32_START_FAIL;
  1264. }
  1265. IFX_REG_W32(0, MBOX_IGU1_IER);
  1266. IFX_REG_W32(~0, MBOX_IGU1_ISRC);
  1267. enable_irq(PPE_MAILBOX_IGU1_INT);
  1268. proc_file_create();
  1269. port_cell.port_num = 1;
  1270. ifx_mei_atm_showtime_check(&g_showtime, &port_cell, &xdata_addr);
  1271. if ( g_showtime ) {
  1272. ptm_showtime_enter(&port_cell, &xdata_addr);
  1273. }
  1274. ifx_mei_atm_showtime_enter = ptm_showtime_enter;
  1275. ifx_mei_atm_showtime_exit = ptm_showtime_exit;
  1276. ifx_ptm_version(ver_str);
  1277. printk(KERN_INFO "%s", ver_str);
  1278. printk("ifxmips_ptm: PTM init succeed\n");
  1279. return 0;
  1280. PP32_START_FAIL:
  1281. free_irq(PPE_MAILBOX_IGU1_INT, &g_ptm_priv_data);
  1282. REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL:
  1283. i = ARRAY_SIZE(g_net_dev);
  1284. REGISTER_NETDEV_FAIL:
  1285. while ( i-- )
  1286. unregister_netdev(g_net_dev[i]);
  1287. i = ARRAY_SIZE(g_net_dev);
  1288. ALLOC_NETDEV_FAIL:
  1289. while ( i-- ) {
  1290. free_netdev(g_net_dev[i]);
  1291. g_net_dev[i] = NULL;
  1292. }
  1293. INIT_PRIV_DATA_FAIL:
  1294. clear_priv_data();
  1295. printk("ifxmips_ptm: PTM init failed\n");
  1296. return ret;
  1297. }
  1298. /*
  1299. * Description:
  1300. * Release memory, free IRQ, and deregister device.
  1301. * Input:
  1302. * none
  1303. * Output:
  1304. * none
  1305. */
/*
 * Remove: detach the DSL showtime callbacks first so no new events can
 * arrive, then tear everything down in reverse order of probe --
 * proc entries, PP32 firmware, mailbox IRQ, net devices, chip state and
 * finally the private data allocations.
 */
static int ltq_ptm_remove(struct platform_device *pdev)
{
    int i;

    ifx_mei_atm_showtime_enter = NULL;
    ifx_mei_atm_showtime_exit = NULL;

    proc_file_delete();

    ifx_pp32_stop(0);

    free_irq(PPE_MAILBOX_IGU1_INT, &g_ptm_priv_data);

    /* unregister all devices before freeing any of them */
    for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
        unregister_netdev(g_net_dev[i]);

    for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
        free_netdev(g_net_dev[i]);
        g_net_dev[i] = NULL;
    }

    ifx_ptm_uninit_chip();

    clear_priv_data();

    return 0;
}
/* Platform driver glue; probe/remove above do the real work. */
static struct platform_driver ltq_ptm_driver = {
    .probe = ltq_ptm_probe,
    .remove = ltq_ptm_remove,
    .driver = {
        .name = "ptm",
        .owner = THIS_MODULE,
        .of_match_table = ltq_ptm_match,
    },
};

module_platform_driver(ltq_ptm_driver);

MODULE_LICENSE("GPL");