ipqess.c

// SPDX-License-Identifier: (GPL-2.0 OR ISC)
/* Copyright (c) 2014 - 2017, The Linux Foundation. All rights reserved.
 * Copyright (c) 2017 - 2018, John Crispin <[email protected]>
 * Copyright (c) 2018 - 2019, Christian Lamparter <[email protected]>
 * Copyright (c) 2020 - 2021, Gabor Juhos <[email protected]>
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all copies.
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dsa/ipq4019.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/checksum.h>
#include <net/dsa.h>
#include <net/ip6_checksum.h>

#include "ipqess.h"

#define IPQESS_RRD_SIZE 16
#define IPQESS_NEXT_IDX(X, Y) (((X) + 1) & ((Y) - 1))
#define IPQESS_TX_DMA_BUF_LEN 0x3fff

static void ipqess_w32(struct ipqess *ess, u32 reg, u32 val)
{
	writel(val, ess->hw_addr + reg);
}

static u32 ipqess_r32(struct ipqess *ess, u16 reg)
{
	return readl(ess->hw_addr + reg);
}

static void ipqess_m32(struct ipqess *ess, u32 mask, u32 val, u16 reg)
{
	u32 _val = ipqess_r32(ess, reg);

	_val &= ~mask;
	_val |= val;

	ipqess_w32(ess, reg, _val);
}

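/* Accumulate the per-queue TX/RX packet and byte counters read from the
 * hardware into ess->ipqessstats. Callers must hold ess->stats_lock,
 * which is checked via lockdep_assert_held() below.
 */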
void ipqess_update_hw_stats(struct ipqess *ess)
{
	uint32_t *p;
	u32 stat;
	int i;

	lockdep_assert_held(&ess->stats_lock);

	p = (uint32_t *)&(ess->ipqessstats);
	for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++) {
		stat = ipqess_r32(ess, IPQESS_REG_TX_STAT_PKT_Q(i));
		*p += stat;
		p++;
	}

	for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++) {
		stat = ipqess_r32(ess, IPQESS_REG_TX_STAT_BYTE_Q(i));
		*p += stat;
		p++;
	}

	for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++) {
		stat = ipqess_r32(ess, IPQESS_REG_RX_STAT_PKT_Q(i));
		*p += stat;
		p++;
	}

	for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++) {
		stat = ipqess_r32(ess, IPQESS_REG_RX_STAT_BYTE_Q(i));
		*p += stat;
		p++;
	}
}

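/* Allocate the software buffer array and the DMA-coherent descriptor ring
 * for every TX queue, program the ring base address and size into the
 * hardware, and resynchronize the software head/tail with the hardware
 * consumer index.
 */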
static int ipqess_tx_ring_alloc(struct ipqess *ess)
{
	struct device *dev = &ess->pdev->dev;
	int i;

	for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
		struct ipqess_tx_ring *tx_ring = &ess->tx_ring[i];
		size_t size;
		u32 idx;

		tx_ring->ess = ess;
		tx_ring->ring_id = i;
		tx_ring->idx = i * 4;
		tx_ring->count = IPQESS_TX_RING_SIZE;
		tx_ring->nq = netdev_get_tx_queue(ess->netdev, i);

		size = sizeof(struct ipqess_buf) * IPQESS_TX_RING_SIZE;
		tx_ring->buf = devm_kzalloc(dev, size, GFP_KERNEL);
		if (!tx_ring->buf) {
			netdev_err(ess->netdev, "buffer alloc of tx ring failed");
			return -ENOMEM;
		}

		size = sizeof(struct ipqess_tx_desc) * IPQESS_TX_RING_SIZE;
		tx_ring->hw_desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
						       GFP_KERNEL | __GFP_ZERO);
		if (!tx_ring->hw_desc) {
			netdev_err(ess->netdev, "descriptor allocation for tx ring failed");
			return -ENOMEM;
		}

		ipqess_w32(ess, IPQESS_REG_TPD_BASE_ADDR_Q(tx_ring->idx),
			   (u32)tx_ring->dma);

		idx = ipqess_r32(ess, IPQESS_REG_TPD_IDX_Q(tx_ring->idx));
		idx >>= IPQESS_TPD_CONS_IDX_SHIFT; /* need u32 here */
		idx &= 0xffff;
		tx_ring->head = tx_ring->tail = idx;

		ipqess_m32(ess, IPQESS_TPD_PROD_IDX_MASK << IPQESS_TPD_PROD_IDX_SHIFT,
			   idx, IPQESS_REG_TPD_IDX_Q(tx_ring->idx));
		ipqess_w32(ess, IPQESS_REG_TX_SW_CONS_IDX_Q(tx_ring->idx), idx);
		ipqess_w32(ess, IPQESS_REG_TPD_RING_SIZE, IPQESS_TX_RING_SIZE);
	}

	return 0;
}

static int ipqess_tx_unmap_and_free(struct device *dev, struct ipqess_buf *buf)
{
	int len = 0;

	if (buf->flags & IPQESS_DESC_SINGLE)
		dma_unmap_single(dev, buf->dma, buf->length, DMA_TO_DEVICE);
	else if (buf->flags & IPQESS_DESC_PAGE)
		dma_unmap_page(dev, buf->dma, buf->length, DMA_TO_DEVICE);

	if (buf->flags & IPQESS_DESC_LAST) {
		len = buf->skb->len;
		dev_kfree_skb_any(buf->skb);
	}

	buf->flags = 0;

	return len;
}

static void ipqess_tx_ring_free(struct ipqess *ess)
{
	int i;

	for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
		int j;

		if (ess->tx_ring[i].hw_desc)
			continue;

		for (j = 0; j < IPQESS_TX_RING_SIZE; j++) {
			struct ipqess_buf *buf = &ess->tx_ring[i].buf[j];

			ipqess_tx_unmap_and_free(&ess->pdev->dev, buf);
		}

		ess->tx_ring[i].buf = NULL;
	}
}

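/* Map a freshly allocated receive skb for DMA, publish its bus address in
 * the next free RX descriptor slot, advance the ring head, and move the
 * hardware RFD producer index along so the new buffer becomes visible to
 * the DMA engine.
 */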
static int ipqess_rx_buf_prepare(struct ipqess_buf *buf,
				 struct ipqess_rx_ring *rx_ring)
{
	/* Clean the HW DESC header, otherwise we might end up
	 * with a spurious desc because of random garbage */
	memset(buf->skb->data, 0, sizeof(struct ipqess_rx_desc));

	buf->dma = dma_map_single(rx_ring->ppdev, buf->skb->data,
				  IPQESS_RX_HEAD_BUFF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(rx_ring->ppdev, buf->dma)) {
		dev_err_once(rx_ring->ppdev,
			     "IPQESS DMA mapping failed for linear address %x",
			     buf->dma);
		dev_kfree_skb_any(buf->skb);
		buf->skb = NULL;
		return -EFAULT;
	}

	buf->length = IPQESS_RX_HEAD_BUFF_SIZE;
	rx_ring->hw_desc[rx_ring->head] = (struct ipqess_rx_desc *)buf->dma;
	rx_ring->head = (rx_ring->head + 1) % IPQESS_RX_RING_SIZE;

	ipqess_m32(rx_ring->ess, IPQESS_RFD_PROD_IDX_BITS,
		   (rx_ring->head + IPQESS_RX_RING_SIZE - 1) % IPQESS_RX_RING_SIZE,
		   IPQESS_REG_RFD_IDX_Q(rx_ring->idx));

	return 0;
}

/* locking is handled by the caller */
static int ipqess_rx_buf_alloc_napi(struct ipqess_rx_ring *rx_ring)
{
	struct ipqess_buf *buf = &rx_ring->buf[rx_ring->head];

	buf->skb = napi_alloc_skb(&rx_ring->napi_rx,
				  IPQESS_RX_HEAD_BUFF_SIZE);
	if (!buf->skb)
		return -ENOMEM;

	return ipqess_rx_buf_prepare(buf, rx_ring);
}

static int ipqess_rx_buf_alloc(struct ipqess_rx_ring *rx_ring)
{
	struct ipqess_buf *buf = &rx_ring->buf[rx_ring->head];

	buf->skb = netdev_alloc_skb_ip_align(rx_ring->ess->netdev,
					     IPQESS_RX_HEAD_BUFF_SIZE);
	if (!buf->skb)
		return -ENOMEM;

	return ipqess_rx_buf_prepare(buf, rx_ring);
}

static void ipqess_refill_work(struct work_struct *work)
{
	struct ipqess_rx_ring_refill *rx_refill = container_of(work,
		struct ipqess_rx_ring_refill, refill_work);
	struct ipqess_rx_ring *rx_ring = rx_refill->rx_ring;
	int refill = 0;

	/* don't let this loop by accident. */
	while (atomic_dec_and_test(&rx_ring->refill_count)) {
		napi_disable(&rx_ring->napi_rx);
		if (ipqess_rx_buf_alloc(rx_ring)) {
			refill++;
			dev_dbg(rx_ring->ppdev,
				"Not all buffers were reallocated");
		}
		napi_enable(&rx_ring->napi_rx);
	}

	if (atomic_add_return(refill, &rx_ring->refill_count))
		schedule_work(&rx_refill->refill_work);
}

static int ipqess_rx_ring_alloc(struct ipqess *ess)
{
	int i;

	for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
		int j;

		ess->rx_ring[i].ess = ess;
		ess->rx_ring[i].ppdev = &ess->pdev->dev;
		ess->rx_ring[i].ring_id = i;
		ess->rx_ring[i].idx = i * 2;

		ess->rx_ring[i].buf = devm_kzalloc(&ess->pdev->dev,
			sizeof(struct ipqess_buf) * IPQESS_RX_RING_SIZE,
			GFP_KERNEL);
		if (!ess->rx_ring[i].buf)
			return -ENOMEM;

		ess->rx_ring[i].hw_desc = dmam_alloc_coherent(&ess->pdev->dev,
			sizeof(struct ipqess_rx_desc) * IPQESS_RX_RING_SIZE,
			&ess->rx_ring[i].dma, GFP_KERNEL);
		if (!ess->rx_ring[i].hw_desc)
			return -ENOMEM;

		for (j = 0; j < IPQESS_RX_RING_SIZE; j++)
			if (ipqess_rx_buf_alloc(&ess->rx_ring[i]) < 0)
				return -ENOMEM;

		ess->rx_refill[i].rx_ring = &ess->rx_ring[i];
		INIT_WORK(&ess->rx_refill[i].refill_work, ipqess_refill_work);

		ipqess_w32(ess, IPQESS_REG_RFD_BASE_ADDR_Q(ess->rx_ring[i].idx),
			   (u32)(ess->rx_ring[i].dma));
	}

	ipqess_w32(ess, IPQESS_REG_RX_DESC0,
		   (IPQESS_RX_HEAD_BUFF_SIZE << IPQESS_RX_BUF_SIZE_SHIFT) |
		   (IPQESS_RX_RING_SIZE << IPQESS_RFD_RING_SIZE_SHIFT));

	return 0;
}

static void ipqess_rx_ring_free(struct ipqess *ess)
{
	int i;

	for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
		int j;

		atomic_set(&ess->rx_ring[i].refill_count, 0);
		cancel_work_sync(&ess->rx_refill[i].refill_work);

		for (j = 0; j < IPQESS_RX_RING_SIZE; j++) {
			dma_unmap_single(&ess->pdev->dev,
					 ess->rx_ring[i].buf[j].dma,
					 ess->rx_ring[i].buf[j].length,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(ess->rx_ring[i].buf[j].skb);
		}
	}
}

static struct net_device_stats *ipqess_get_stats(struct net_device *netdev)
{
	struct ipqess *ess = netdev_priv(netdev);

	spin_lock(&ess->stats_lock);
	ipqess_update_hw_stats(ess);
	spin_unlock(&ess->stats_lock);

	return &ess->stats;
}

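/* Reap completed receive descriptors up to the hardware consumer index.
 * Each frame starts with an RRD header in the first buffer; frames that
 * span multiple descriptors are stitched together through the frag_list of
 * the head skb. Consumed slots are refilled immediately, or handed to the
 * refill workqueue when allocation fails.
 */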
static int ipqess_rx_poll(struct ipqess_rx_ring *rx_ring, int budget)
{
	u32 length = 0, num_desc, tail, rx_ring_tail;
	int done = 0;

	rx_ring_tail = rx_ring->tail;

	tail = ipqess_r32(rx_ring->ess, IPQESS_REG_RFD_IDX_Q(rx_ring->idx));
	tail >>= IPQESS_RFD_CONS_IDX_SHIFT;
	tail &= IPQESS_RFD_CONS_IDX_MASK;

	while (done < budget) {
		struct sk_buff *skb;
		struct ipqess_rx_desc *rd;

		if (rx_ring_tail == tail)
			break;

		dma_unmap_single(rx_ring->ppdev,
				 rx_ring->buf[rx_ring_tail].dma,
				 rx_ring->buf[rx_ring_tail].length,
				 DMA_FROM_DEVICE);

		skb = xchg(&rx_ring->buf[rx_ring_tail].skb, NULL);
		rd = (struct ipqess_rx_desc *)skb->data;
		rx_ring_tail = IPQESS_NEXT_IDX(rx_ring_tail, IPQESS_RX_RING_SIZE);

		/* Check if RRD is valid */
		if (!(rd->rrd7 & IPQESS_RRD_DESC_VALID)) {
			num_desc = 1;
			dev_kfree_skb_any(skb);
			goto skip;
		}

		num_desc = rd->rrd1 & IPQESS_RRD_NUM_RFD_MASK;
		length = rd->rrd6 & IPQESS_RRD_PKT_SIZE_MASK;

		skb_reserve(skb, IPQESS_RRD_SIZE);
		if (num_desc > 1) {
			/* can we use build_skb here ? */
			struct sk_buff *skb_prev = NULL;
			int size_remaining;
			int i;

			skb->data_len = 0;
			skb->tail += (IPQESS_RX_HEAD_BUFF_SIZE - IPQESS_RRD_SIZE);
			skb->len = skb->truesize = length;
			size_remaining = length - (IPQESS_RX_HEAD_BUFF_SIZE - IPQESS_RRD_SIZE);

			for (i = 1; i < num_desc; i++) {
				/* TODO: use build_skb ? */
				struct sk_buff *skb_temp = rx_ring->buf[rx_ring_tail].skb;

				dma_unmap_single(rx_ring->ppdev,
						 rx_ring->buf[rx_ring_tail].dma,
						 rx_ring->buf[rx_ring_tail].length,
						 DMA_FROM_DEVICE);

				skb_put(skb_temp, min(size_remaining, IPQESS_RX_HEAD_BUFF_SIZE));
				if (skb_prev)
					skb_prev->next = rx_ring->buf[rx_ring_tail].skb;
				else
					skb_shinfo(skb)->frag_list = rx_ring->buf[rx_ring_tail].skb;
				skb_prev = rx_ring->buf[rx_ring_tail].skb;
				rx_ring->buf[rx_ring_tail].skb->next = NULL;

				skb->data_len += rx_ring->buf[rx_ring_tail].skb->len;
				size_remaining -= rx_ring->buf[rx_ring_tail].skb->len;

				rx_ring_tail = IPQESS_NEXT_IDX(rx_ring_tail, IPQESS_RX_RING_SIZE);
			}
		} else {
			skb_put(skb, length);
		}

		skb->dev = rx_ring->ess->netdev;
		skb->protocol = eth_type_trans(skb, rx_ring->ess->netdev);
		skb_record_rx_queue(skb, rx_ring->ring_id);

		if (rd->rrd6 & IPQESS_RRD_CSUM_FAIL_MASK)
			skb_checksum_none_assert(skb);
		else
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (rd->rrd7 & IPQESS_RRD_CVLAN) {
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rd->rrd4);
		} else if (rd->rrd1 & IPQESS_RRD_SVLAN) {
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), rd->rrd4);
		}

		napi_gro_receive(&rx_ring->napi_rx, skb);

		/* TODO: do we need to have these here ? */
		rx_ring->ess->stats.rx_packets++;
		rx_ring->ess->stats.rx_bytes += length;

		done++;
skip:
		num_desc += atomic_xchg(&rx_ring->refill_count, 0);
		while (num_desc) {
			if (ipqess_rx_buf_alloc_napi(rx_ring)) {
				num_desc = atomic_add_return(num_desc,
							     &rx_ring->refill_count);
				if (num_desc >= ((4 * IPQESS_RX_RING_SIZE + 6) / 7))
					schedule_work(&rx_ring->ess->rx_refill[rx_ring->ring_id].refill_work);
				break;
			}
			num_desc--;
		}
	}

	ipqess_w32(rx_ring->ess, IPQESS_REG_RX_SW_CONS_IDX_Q(rx_ring->idx),
		   rx_ring_tail);
	rx_ring->tail = rx_ring_tail;

	return done;
}

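/* Reclaim transmit descriptors that the hardware has consumed: unmap the
 * buffers, free the completed skbs, report the completed work to BQL and
 * wake the queue if it was stopped.
 */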
static int ipqess_tx_complete(struct ipqess_tx_ring *tx_ring, int budget)
{
	u32 tail;
	int done = 0;
	int total = 0, ret;

	tail = ipqess_r32(tx_ring->ess, IPQESS_REG_TPD_IDX_Q(tx_ring->idx));
	tail >>= IPQESS_TPD_CONS_IDX_SHIFT;
	tail &= IPQESS_TPD_CONS_IDX_MASK;

	while ((tx_ring->tail != tail) && (done < budget)) {
		//pr_info("freeing txq:%d tail:%d tailbuf:%p\n", tx_ring->idx, tx_ring->tail, &tx_ring->buf[tx_ring->tail]);
		ret = ipqess_tx_unmap_and_free(&tx_ring->ess->pdev->dev,
					       &tx_ring->buf[tx_ring->tail]);
		tx_ring->tail = IPQESS_NEXT_IDX(tx_ring->tail, tx_ring->count);
		if (ret) {
			total += ret;
			done++;
		}
	}

	ipqess_w32(tx_ring->ess,
		   IPQESS_REG_TX_SW_CONS_IDX_Q(tx_ring->idx),
		   tx_ring->tail);

	if (netif_tx_queue_stopped(tx_ring->nq)) {
		netdev_dbg(tx_ring->ess->netdev, "waking up tx queue %d\n",
			   tx_ring->idx);
		netif_tx_wake_queue(tx_ring->nq);
	}

	netdev_tx_completed_queue(tx_ring->nq, done, total);

	return done;
}

static int ipqess_tx_napi(struct napi_struct *napi, int budget)
{
	struct ipqess_tx_ring *tx_ring = container_of(napi, struct ipqess_tx_ring,
						      napi_tx);
	u32 tx_status;
	int work_done = 0;

	tx_status = ipqess_r32(tx_ring->ess, IPQESS_REG_TX_ISR);
	tx_status &= BIT(tx_ring->idx);

	work_done = ipqess_tx_complete(tx_ring, budget);

	ipqess_w32(tx_ring->ess, IPQESS_REG_TX_ISR, tx_status);

	if (likely(work_done < budget)) {
		if (napi_complete_done(napi, work_done))
			ipqess_w32(tx_ring->ess,
				   IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx), 0x1);
	}

	return work_done;
}

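/* RX NAPI poll handler. The ISR bit for this queue is acknowledged before
 * every pass and re-checked afterwards, so packets that arrive while a
 * pass is running trigger another pass; the queue interrupt is unmasked
 * again only once the poll completes under budget.
 */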
static int ipqess_rx_napi(struct napi_struct *napi, int budget)
{
	struct ipqess_rx_ring *rx_ring = container_of(napi, struct ipqess_rx_ring,
						      napi_rx);
	struct ipqess *ess = rx_ring->ess;
	int remain_budget = budget;
	int rx_done;
	u32 rx_mask = BIT(rx_ring->idx);
	u32 status;

poll_again:
	ipqess_w32(ess, IPQESS_REG_RX_ISR, rx_mask);
	rx_done = ipqess_rx_poll(rx_ring, remain_budget);

	if (rx_done == remain_budget)
		return budget;

	status = ipqess_r32(ess, IPQESS_REG_RX_ISR);
	if (status & rx_mask) {
		remain_budget -= rx_done;
		goto poll_again;
	}

	if (napi_complete_done(napi, rx_done + budget - remain_budget))
		ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(rx_ring->idx), 0x1);

	return rx_done + budget - remain_budget;
}

static irqreturn_t ipqess_interrupt_tx(int irq, void *priv)
{
	struct ipqess_tx_ring *tx_ring = (struct ipqess_tx_ring *) priv;

	if (likely(napi_schedule_prep(&tx_ring->napi_tx))) {
		ipqess_w32(tx_ring->ess,
			   IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx),
			   0x0);
		__napi_schedule(&tx_ring->napi_tx);
	}

	return IRQ_HANDLED;
}

static irqreturn_t ipqess_interrupt_rx(int irq, void *priv)
{
	struct ipqess_rx_ring *rx_ring = (struct ipqess_rx_ring *) priv;

	if (likely(napi_schedule_prep(&rx_ring->napi_rx))) {
		ipqess_w32(rx_ring->ess,
			   IPQESS_REG_RX_INT_MASK_Q(rx_ring->idx),
			   0x0);
		__napi_schedule(&rx_ring->napi_rx);
	}

	return IRQ_HANDLED;
}

static void ipqess_irq_enable(struct ipqess *ess)
{
	int i;

	ipqess_w32(ess, IPQESS_REG_RX_ISR, 0xff);
	ipqess_w32(ess, IPQESS_REG_TX_ISR, 0xffff);
	for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
		ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(ess->rx_ring[i].idx), 1);
		ipqess_w32(ess, IPQESS_REG_TX_INT_MASK_Q(ess->tx_ring[i].idx), 1);
	}
}

static void ipqess_irq_disable(struct ipqess *ess)
{
	int i;

	for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
		ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(ess->rx_ring[i].idx), 0);
		ipqess_w32(ess, IPQESS_REG_TX_INT_MASK_Q(ess->tx_ring[i].idx), 0);
	}
}

static int __init ipqess_init(struct net_device *netdev)
{
	struct ipqess *ess = netdev_priv(netdev);
	struct device_node *of_node = ess->pdev->dev.of_node;

	return phylink_of_phy_connect(ess->phylink, of_node, 0);
}

static void ipqess_uninit(struct net_device *netdev)
{
	struct ipqess *ess = netdev_priv(netdev);

	phylink_disconnect_phy(ess->phylink);
}

static int ipqess_open(struct net_device *netdev)
{
	struct ipqess *ess = netdev_priv(netdev);
	int i;

	for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
		napi_enable(&ess->tx_ring[i].napi_tx);
		napi_enable(&ess->rx_ring[i].napi_rx);
	}
	ipqess_irq_enable(ess);
	phylink_start(ess->phylink);
	netif_tx_start_all_queues(netdev);

	return 0;
}

static int ipqess_stop(struct net_device *netdev)
{
	struct ipqess *ess = netdev_priv(netdev);
	int i;

	netif_tx_stop_all_queues(netdev);
	phylink_stop(ess->phylink);
	ipqess_irq_disable(ess);
	for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
		napi_disable(&ess->tx_ring[i].napi_tx);
		napi_disable(&ess->rx_ring[i].napi_rx);
	}

	return 0;
}

static int ipqess_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct ipqess *ess = netdev_priv(netdev);

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return phylink_mii_ioctl(ess->phylink, ifr, cmd);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

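/* Number of free TPD slots in a TX ring. The ring size is a power of two
 * and one slot is always kept unused so that a full ring can be told apart
 * from an empty one.
 */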
static inline u16 ipqess_tx_desc_available(struct ipqess_tx_ring *tx_ring)
{
	u16 count = 0;

	if (tx_ring->tail <= tx_ring->head)
		count = IPQESS_TX_RING_SIZE;

	count += tx_ring->tail - tx_ring->head - 1;

	return count;
}

static inline int ipqess_cal_txd_req(struct sk_buff *skb)
{
	int tpds;

	/* one TPD for the header, and one for each fragment */
	tpds = 1 + skb_shinfo(skb)->nr_frags;
	if (skb_is_gso(skb) && skb_is_gso_v6(skb)) {
		/* for LSOv2 one extra TPD is needed */
		tpds++;
	}

	return tpds;
}

static struct ipqess_buf *ipqess_get_tx_buffer(struct ipqess_tx_ring *tx_ring,
					       struct ipqess_tx_desc *desc)
{
	return &tx_ring->buf[desc - tx_ring->hw_desc];
}

static struct ipqess_tx_desc *ipqess_tx_desc_next(struct ipqess_tx_ring *tx_ring)
{
	struct ipqess_tx_desc *desc;

	desc = &tx_ring->hw_desc[tx_ring->head];
	tx_ring->head = IPQESS_NEXT_IDX(tx_ring->head, tx_ring->count);

	return desc;
}

static void ipqess_rollback_tx(struct ipqess *eth,
			       struct ipqess_tx_desc *first_desc, int ring_id)
{
	struct ipqess_tx_ring *tx_ring = &eth->tx_ring[ring_id];
	struct ipqess_buf *buf;
	struct ipqess_tx_desc *desc = NULL;
	u16 start_index, index;

	start_index = first_desc - tx_ring->hw_desc;

	index = start_index;
	while (index != tx_ring->head) {
		desc = &tx_ring->hw_desc[index];
		buf = &tx_ring->buf[index];
		ipqess_tx_unmap_and_free(&eth->pdev->dev, buf);
		memset(desc, 0, sizeof(struct ipqess_tx_desc));
		if (++index == tx_ring->count)
			index = 0;
	}
	tx_ring->head = start_index;
}

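/* When the skb carries an IPQ4019 DSA tag, translate the tag data stored in
 * skb_shared_info into the TPD port bitmap and FROM_CPU bits of word3.
 */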
static bool ipqess_process_dsa_tag_sh(struct sk_buff *skb, u32 *word3)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	struct ipq40xx_dsa_tag_data *tag_data;

	if (shinfo->dsa_tag_proto != DSA_TAG_PROTO_IPQ4019)
		return false;

	tag_data = (struct ipq40xx_dsa_tag_data *)shinfo->dsa_tag_data;

	pr_debug("SH tag @ %08x, dp:%02x from_cpu:%u\n",
		 (u32)tag_data, tag_data->dp, tag_data->from_cpu);

	*word3 |= tag_data->dp << IPQESS_TPD_PORT_BITMAP_SHIFT;
	if (tag_data->from_cpu)
		*word3 |= BIT(IPQESS_TPD_FROM_CPU_SHIFT);

	return true;
}

static void ipqess_get_dp_info(struct ipqess *ess, struct sk_buff *skb,
			       u32 *word3)
{
	if (netdev_uses_dsa(ess->netdev)) {
		if (ipqess_process_dsa_tag_sh(skb, word3))
			return;
	}

	*word3 |= 0x3e << IPQESS_TPD_PORT_BITMAP_SHIFT;
}

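/* Build the TPD chain for one frame: fill in the offload words for TSO or
 * partial checksum, request VLAN insertion when a tag is present, DMA-map
 * the linear part and every page fragment into consecutive descriptors and
 * mark the last one with EOP. On a mapping failure all descriptors claimed
 * so far are rolled back.
 */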
static int ipqess_tx_map_and_fill(struct ipqess_tx_ring *tx_ring, struct sk_buff *skb)
{
	struct ipqess_buf *buf = NULL;
	struct platform_device *pdev = tx_ring->ess->pdev;
	struct ipqess_tx_desc *desc = NULL, *first_desc = NULL;
	u32 word1 = 0, word3 = 0, lso_word1 = 0, svlan_tag = 0;
	u16 len;
	int i;

	ipqess_get_dp_info(tx_ring->ess, skb, &word3);

	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
			lso_word1 |= IPQESS_TPD_IPV4_EN;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
								 ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		} else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			lso_word1 |= IPQESS_TPD_LSO_V2_EN;
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							       &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		}

		lso_word1 |= IPQESS_TPD_LSO_EN |
			     ((skb_shinfo(skb)->gso_size & IPQESS_TPD_MSS_MASK) << IPQESS_TPD_MSS_SHIFT) |
			     (skb_transport_offset(skb) << IPQESS_TPD_HDR_SHIFT);
	} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		u8 css, cso;

		cso = skb_checksum_start_offset(skb);
		css = cso + skb->csum_offset;

		word1 |= (IPQESS_TPD_CUSTOM_CSUM_EN);
		word1 |= (cso >> 1) << IPQESS_TPD_HDR_SHIFT;
		word1 |= ((css >> 1) << IPQESS_TPD_CUSTOM_CSUM_SHIFT);
	}

	if (skb_vlan_tag_present(skb)) {
		switch (skb->vlan_proto) {
		case htons(ETH_P_8021Q):
			word3 |= BIT(IPQESS_TX_INS_CVLAN);
			word3 |= skb_vlan_tag_get(skb) << IPQESS_TX_CVLAN_TAG_SHIFT;
			break;
		case htons(ETH_P_8021AD):
			word1 |= BIT(IPQESS_TX_INS_SVLAN);
			svlan_tag = skb_vlan_tag_get(skb);
			break;
		default:
			dev_err(&pdev->dev, "no ctag or stag present\n");
			goto vlan_tag_error;
		}
	}

	if (eth_type_vlan(skb->protocol))
		word1 |= IPQESS_TPD_VLAN_TAGGED;

	if (skb->protocol == htons(ETH_P_PPP_SES))
		word1 |= IPQESS_TPD_PPPOE_EN;

	len = skb_headlen(skb);

	first_desc = desc = ipqess_tx_desc_next(tx_ring);

	if (lso_word1 & IPQESS_TPD_LSO_V2_EN) {
		desc->addr = cpu_to_le16(skb->len);
		desc->word1 = word1 | lso_word1;
		desc->svlan_tag = svlan_tag;
		desc->word3 = word3;
		desc = ipqess_tx_desc_next(tx_ring);
	}

	buf = ipqess_get_tx_buffer(tx_ring, desc);
	buf->length = len;
	buf->dma = dma_map_single(&pdev->dev,
				  skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, buf->dma))
		goto dma_error;

	desc->addr = cpu_to_le32(buf->dma);
	desc->len = cpu_to_le16(len);

	buf->flags |= IPQESS_DESC_SINGLE;
	desc->word1 = word1 | lso_word1;
	desc->svlan_tag = svlan_tag;
	desc->word3 = word3;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		len = skb_frag_size(frag);
		desc = ipqess_tx_desc_next(tx_ring);
		buf = ipqess_get_tx_buffer(tx_ring, desc);
		buf->length = len;
		buf->flags |= IPQESS_DESC_PAGE;
		buf->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, buf->dma))
			goto dma_error;

		desc->addr = cpu_to_le32(buf->dma);
		desc->len = cpu_to_le16(len);
		desc->svlan_tag = svlan_tag;
		desc->word1 = word1 | lso_word1;
		desc->word3 = word3;
	}

	desc->word1 |= 1 << IPQESS_TPD_EOP_SHIFT;

	buf->skb = skb;
	buf->flags |= IPQESS_DESC_LAST;

	return 0;

dma_error:
	ipqess_rollback_tx(tx_ring->ess, first_desc, tx_ring->ring_id);
	dev_err(&pdev->dev, "TX DMA map failed\n");

vlan_tag_error:
	return -ENOMEM;
}

static inline void ipqess_kick_tx(struct ipqess_tx_ring *tx_ring)
{
	/* Ensure that all TPDs have been written completely */
	dma_wmb();

	/* update software producer index */
	ipqess_w32(tx_ring->ess, IPQESS_REG_TPD_IDX_Q(tx_ring->idx),
		   tx_ring->head);
}

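/* ndo_start_xmit handler. If the selected ring does not have enough free
 * descriptors for the frame, the queue is stopped and NETDEV_TX_BUSY is
 * returned; otherwise the frame is mapped into the ring and the hardware
 * producer index is kicked unless more frames are about to follow.
 */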
static netdev_tx_t ipqess_xmit(struct sk_buff *skb,
			       struct net_device *netdev)
{
	struct ipqess *ess = netdev_priv(netdev);
	struct ipqess_tx_ring *tx_ring;
	int avail;
	int tx_num;
	int ret;

	tx_ring = &ess->tx_ring[skb_get_queue_mapping(skb)];
	tx_num = ipqess_cal_txd_req(skb);
	avail = ipqess_tx_desc_available(tx_ring);
	if (avail < tx_num) {
		netdev_dbg(netdev,
			   "stopping tx queue %d, avail=%d req=%d im=%x\n",
			   tx_ring->idx, avail, tx_num,
			   ipqess_r32(tx_ring->ess,
				      IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx)));
		netif_tx_stop_queue(tx_ring->nq);
		ipqess_w32(tx_ring->ess, IPQESS_REG_TX_INT_MASK_Q(tx_ring->idx), 0x1);
		ipqess_kick_tx(tx_ring);
		return NETDEV_TX_BUSY;
	}

	ret = ipqess_tx_map_and_fill(tx_ring, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		ess->stats.tx_errors++;
		goto err_out;
	}

	ess->stats.tx_packets++;
	ess->stats.tx_bytes += skb->len;
	netdev_tx_sent_queue(tx_ring->nq, skb->len);

	if (!netdev_xmit_more() || netif_xmit_stopped(tx_ring->nq))
		ipqess_kick_tx(tx_ring);

err_out:
	return NETDEV_TX_OK;
}

static int ipqess_set_mac_address(struct net_device *netdev, void *p)
{
	int ret = eth_mac_addr(netdev, p);
	struct ipqess *ess = netdev_priv(netdev);
	const char *macaddr = netdev->dev_addr;

	if (ret)
		return ret;

//	spin_lock_bh(&mac->hw->page_lock);
	ipqess_w32(ess, IPQESS_REG_MAC_CTRL1,
		   (macaddr[0] << 8) | macaddr[1]);
	ipqess_w32(ess, IPQESS_REG_MAC_CTRL0,
		   (macaddr[2] << 24) | (macaddr[3] << 16) |
		   (macaddr[4] << 8) | macaddr[5]);
//	spin_unlock_bh(&mac->hw->page_lock);

	return 0;
}

static void ipqess_tx_timeout(struct net_device *netdev, unsigned int txq_id)
{
	struct ipqess *ess = netdev_priv(netdev);
	struct ipqess_tx_ring *tr = &ess->tx_ring[txq_id];

	netdev_warn(netdev, "hardware queue %d appears to be stuck\n",
		    tr->idx);

	/* TODO: dump hardware queue */
}

static const struct net_device_ops ipqess_axi_netdev_ops = {
	.ndo_init = ipqess_init,
	.ndo_uninit = ipqess_uninit,
	.ndo_open = ipqess_open,
	.ndo_stop = ipqess_stop,
	.ndo_do_ioctl = ipqess_do_ioctl,
	.ndo_start_xmit = ipqess_xmit,
	.ndo_get_stats = ipqess_get_stats,
	.ndo_set_mac_address = ipqess_set_mac_address,
	.ndo_tx_timeout = ipqess_tx_timeout,
};

static void ipqess_hw_stop(struct ipqess *ess)
{
	int i;

	/* disable all RX queue IRQs */
	for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++)
		ipqess_w32(ess, IPQESS_REG_RX_INT_MASK_Q(i), 0);

	/* disable all TX queue IRQs */
	for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++)
		ipqess_w32(ess, IPQESS_REG_TX_INT_MASK_Q(i), 0);

	/* disable all other IRQs */
	ipqess_w32(ess, IPQESS_REG_MISC_IMR, 0);
	ipqess_w32(ess, IPQESS_REG_WOL_IMR, 0);

	/* clear the IRQ status registers */
	ipqess_w32(ess, IPQESS_REG_RX_ISR, 0xff);
	ipqess_w32(ess, IPQESS_REG_TX_ISR, 0xffff);
	ipqess_w32(ess, IPQESS_REG_MISC_ISR, 0x1fff);
	ipqess_w32(ess, IPQESS_REG_WOL_ISR, 0x1);
	ipqess_w32(ess, IPQESS_REG_WOL_CTRL, 0);

	/* disable RX and TX queues */
	ipqess_m32(ess, IPQESS_RXQ_CTRL_EN_MASK, 0, IPQESS_REG_RXQ_CTRL);
	ipqess_m32(ess, IPQESS_TXQ_CTRL_TXQ_EN, 0, IPQESS_REG_TXQ_CTRL);
}

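/* Bring the DMA engine into a known state: quiesce it, program interrupt
 * moderation, VLAN TPIDs, burst sizes, RSS and load-balancing tables,
 * allocate the RX and TX rings, and finally enable the TX and RX queues.
 */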
static int ipqess_hw_init(struct ipqess *ess)
{
	u32 tmp;
	int i, err;

	ipqess_hw_stop(ess);

	ipqess_m32(ess, BIT(IPQESS_INTR_SW_IDX_W_TYP_SHIFT),
		   IPQESS_INTR_SW_IDX_W_TYPE << IPQESS_INTR_SW_IDX_W_TYP_SHIFT,
		   IPQESS_REG_INTR_CTRL);

	/* enable IRQ delay slot */
	ipqess_w32(ess, IPQESS_REG_IRQ_MODRT_TIMER_INIT,
		   (IPQESS_TX_IMT << IPQESS_IRQ_MODRT_TX_TIMER_SHIFT) |
		   (IPQESS_RX_IMT << IPQESS_IRQ_MODRT_RX_TIMER_SHIFT));

	/* Set Customer and Service VLAN TPIDs */
	ipqess_w32(ess, IPQESS_REG_VLAN_CFG,
		   (ETH_P_8021Q << IPQESS_VLAN_CFG_CVLAN_TPID_SHIFT) |
		   (ETH_P_8021AD << IPQESS_VLAN_CFG_SVLAN_TPID_SHIFT));

	/* Configure the TX Queue bursting */
	ipqess_w32(ess, IPQESS_REG_TXQ_CTRL,
		   (IPQESS_TPD_BURST << IPQESS_TXQ_NUM_TPD_BURST_SHIFT) |
		   (IPQESS_TXF_BURST << IPQESS_TXQ_TXF_BURST_NUM_SHIFT) |
		   IPQESS_TXQ_CTRL_TPD_BURST_EN);

	/* Set RSS type */
	ipqess_w32(ess, IPQESS_REG_RSS_TYPE,
		   IPQESS_RSS_TYPE_IPV4TCP | IPQESS_RSS_TYPE_IPV6_TCP |
		   IPQESS_RSS_TYPE_IPV4_UDP | IPQESS_RSS_TYPE_IPV6UDP |
		   IPQESS_RSS_TYPE_IPV4 | IPQESS_RSS_TYPE_IPV6);

	/* Set RFD ring burst and threshold */
	ipqess_w32(ess, IPQESS_REG_RX_DESC1,
		   (IPQESS_RFD_BURST << IPQESS_RXQ_RFD_BURST_NUM_SHIFT) |
		   (IPQESS_RFD_THR << IPQESS_RXQ_RFD_PF_THRESH_SHIFT) |
		   (IPQESS_RFD_LTHR << IPQESS_RXQ_RFD_LOW_THRESH_SHIFT));

	/* Set Rx FIFO
	 * - threshold to start to DMA data to host
	 */
	ipqess_w32(ess, IPQESS_REG_RXQ_CTRL,
		   IPQESS_FIFO_THRESH_128_BYTE | IPQESS_RXQ_CTRL_RMV_VLAN);

	err = ipqess_rx_ring_alloc(ess);
	if (err)
		return err;

	err = ipqess_tx_ring_alloc(ess);
	if (err)
		return err;

	/* Load all of the ring base addresses above into the dma engine */
	ipqess_m32(ess, 0, BIT(IPQESS_LOAD_PTR_SHIFT),
		   IPQESS_REG_TX_SRAM_PART);

	/* Disable TX FIFO low watermark and high watermark */
	ipqess_w32(ess, IPQESS_REG_TXF_WATER_MARK, 0);

	/* Configure RSS indirection table.
	 * 128 hash entries will be configured in the following
	 * pattern: hash{0,1,2,3} = {Q0,Q2,Q4,Q6} respectively,
	 * and so on
	 */
	for (i = 0; i < IPQESS_NUM_IDT; i++)
		ipqess_w32(ess, IPQESS_REG_RSS_IDT(i), IPQESS_RSS_IDT_VALUE);

	/* Configure load balance mapping table.
	 * 4 table entries will be configured according to the
	 * following pattern: load_balance{0,1,2,3} = {Q0,Q1,Q3,Q4}
	 * respectively.
	 */
	ipqess_w32(ess, IPQESS_REG_LB_RING, IPQESS_LB_REG_VALUE);

	/* Configure Virtual queue for Tx rings */
	ipqess_w32(ess, IPQESS_REG_VQ_CTRL0, IPQESS_VQ_REG_VALUE);
	ipqess_w32(ess, IPQESS_REG_VQ_CTRL1, IPQESS_VQ_REG_VALUE);

	/* Configure Max AXI Burst write size to 128 bytes */
	ipqess_w32(ess, IPQESS_REG_AXIW_CTRL_MAXWRSIZE,
		   IPQESS_AXIW_MAXWRSIZE_VALUE);

	/* Enable TX queues */
	ipqess_m32(ess, 0, IPQESS_TXQ_CTRL_TXQ_EN, IPQESS_REG_TXQ_CTRL);

	/* Enable RX queues */
	tmp = 0;
	for (i = 0; i < IPQESS_NETDEV_QUEUES; i++)
		tmp |= IPQESS_RXQ_CTRL_EN(ess->rx_ring[i].idx);

	ipqess_m32(ess, IPQESS_RXQ_CTRL_EN_MASK, tmp, IPQESS_REG_RXQ_CTRL);

	return 0;
}

static void ipqess_validate(struct phylink_config *config,
			    unsigned long *supported,
			    struct phylink_link_state *state)
{
	struct ipqess *ess = container_of(config, struct ipqess, phylink_config);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	if (state->interface != PHY_INTERFACE_MODE_INTERNAL) {
		dev_err(&ess->pdev->dev, "unsupported interface mode: %d\n",
			state->interface);
		linkmode_zero(supported);
		return;
	}

	phylink_set_port_modes(mask);
	phylink_set(mask, 1000baseT_Full);
	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	linkmode_and(supported, supported, mask);
	linkmode_and(state->advertising, state->advertising, mask);
}

static void ipqess_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	/* TODO */
}

static void ipqess_mac_link_down(struct phylink_config *config,
				 unsigned int mode,
				 phy_interface_t interface)
{
	/* TODO */
}

static void ipqess_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy, unsigned int mode,
			       phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	/* TODO */
}

static struct phylink_mac_ops ipqess_phylink_mac_ops = {
	.validate = ipqess_validate,
	.mac_config = ipqess_mac_config,
	.mac_link_up = ipqess_mac_link_up,
	.mac_link_down = ipqess_mac_link_down,
};

static void ipqess_cleanup(struct ipqess *ess)
{
	ipqess_hw_stop(ess);
	unregister_netdev(ess->netdev);

	ipqess_tx_ring_free(ess);
	ipqess_rx_ring_free(ess);

	if (!IS_ERR_OR_NULL(ess->phylink))
		phylink_destroy(ess->phylink);
}

static void ess_reset(struct ipqess *ess)
{
	reset_control_assert(ess->ess_rst);
	mdelay(10);
	reset_control_deassert(ess->ess_rst);

	/* Waiting for all inner tables to be flushed and reinitialized.
	 * This takes between 5 and 10 ms.
	 */
	mdelay(10);
}

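/* Platform probe: allocate the multi-queue net_device, fetch the MAC
 * address, MMIO region, clock and reset line from the device tree, create
 * the phylink instance, register the netdev, initialize the hardware, and
 * hook up the per-queue NAPI contexts and TX/RX interrupts.
 */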
static int ipqess_axi_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ipqess *ess;
	struct net_device *netdev;
	struct resource *res;
	int i, err = 0;

	netdev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct ipqess),
					 IPQESS_NETDEV_QUEUES,
					 IPQESS_NETDEV_QUEUES);
	if (!netdev)
		return -ENOMEM;

	ess = netdev_priv(netdev);
	ess->netdev = netdev;
	ess->pdev = pdev;
	spin_lock_init(&ess->stats_lock);
	SET_NETDEV_DEV(netdev, &pdev->dev);
	platform_set_drvdata(pdev, netdev);

	err = of_get_mac_address(np, netdev->dev_addr);
	if (err == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	if (err) {
		random_ether_addr(netdev->dev_addr);
		dev_info(&ess->pdev->dev, "generated random MAC address %pM\n",
			 netdev->dev_addr);
		netdev->addr_assign_type = NET_ADDR_RANDOM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ess->hw_addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ess->hw_addr)) {
		err = PTR_ERR(ess->hw_addr);
		goto err_out;
	}

	ess->ess_clk = of_clk_get_by_name(np, "ess_clk");
	if (IS_ERR(ess->ess_clk)) {
		dev_err(&pdev->dev, "Failed to get ess_clk\n");
		return PTR_ERR(ess->ess_clk);
	}

	ess->ess_rst = devm_reset_control_get(&pdev->dev, "ess_rst");
	if (IS_ERR(ess->ess_rst)) {
		dev_err(&pdev->dev, "Failed to get ess_rst control!\n");
		return PTR_ERR(ess->ess_rst);
	}

	clk_prepare_enable(ess->ess_clk);

	ess_reset(ess);

	ess->phylink_config.dev = &netdev->dev;
	ess->phylink_config.type = PHYLINK_NETDEV;
	ess->phylink_config.pcs_poll = true;

	ess->phylink = phylink_create(&ess->phylink_config,
				      of_fwnode_handle(np),
				      PHY_INTERFACE_MODE_INTERNAL,
				      &ipqess_phylink_mac_ops);
	if (IS_ERR(ess->phylink)) {
		err = PTR_ERR(ess->phylink);
		goto err_out;
	}

	for (i = 0; i < IPQESS_MAX_TX_QUEUE; i++) {
		ess->tx_irq[i] = platform_get_irq(pdev, i);
		scnprintf(ess->tx_irq_names[i], sizeof(ess->tx_irq_names[i]),
			  "%s:txq%d", pdev->name, i);
	}

	for (i = 0; i < IPQESS_MAX_RX_QUEUE; i++) {
		ess->rx_irq[i] = platform_get_irq(pdev, i + IPQESS_MAX_TX_QUEUE);
		scnprintf(ess->rx_irq_names[i], sizeof(ess->rx_irq_names[i]),
			  "%s:rxq%d", pdev->name, i);
	}

#undef NETIF_F_TSO6
#define NETIF_F_TSO6 0
	netdev->netdev_ops = &ipqess_axi_netdev_ops;
	netdev->features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_TSO | NETIF_F_TSO6 |
			   NETIF_F_GRO | NETIF_F_SG;
	/* feature change is not supported yet */
	netdev->hw_features = 0;
	netdev->vlan_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_RXCSUM |
				NETIF_F_TSO | NETIF_F_TSO6 |
				NETIF_F_GRO;
	netdev->watchdog_timeo = 5 * HZ;
	netdev->base_addr = (u32) ess->hw_addr;
	netdev->max_mtu = 9000;
	netdev->gso_max_segs = IPQESS_TX_RING_SIZE / 2;

	ipqess_set_ethtool_ops(netdev);

	err = register_netdev(netdev);
	if (err)
		goto err_out;

	err = ipqess_hw_init(ess);
	if (err)
		goto err_out;

	dev_set_threaded(netdev, true);

	for (i = 0; i < IPQESS_NETDEV_QUEUES; i++) {
		int qid;

		netif_tx_napi_add(netdev, &ess->tx_ring[i].napi_tx,
				  ipqess_tx_napi, 64);
		netif_napi_add(netdev,
			       &ess->rx_ring[i].napi_rx,
			       ipqess_rx_napi, 64);

		qid = ess->tx_ring[i].idx;
		err = devm_request_irq(&ess->netdev->dev, ess->tx_irq[qid],
				       ipqess_interrupt_tx, 0, ess->tx_irq_names[qid],
				       &ess->tx_ring[i]);
		if (err)
			goto err_out;

		qid = ess->rx_ring[i].idx;
		err = devm_request_irq(&ess->netdev->dev, ess->rx_irq[qid],
				       ipqess_interrupt_rx, 0, ess->rx_irq_names[qid],
				       &ess->rx_ring[i]);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	ipqess_cleanup(ess);
	return err;
}

static int ipqess_axi_remove(struct platform_device *pdev)
{
	const struct net_device *netdev = platform_get_drvdata(pdev);
	struct ipqess *ess = netdev_priv(netdev);

	ipqess_cleanup(ess);

	return 0;
}

static const struct of_device_id ipqess_of_mtable[] = {
	{.compatible = "qcom,ipq4019-ess-edma" },
	{}
};
MODULE_DEVICE_TABLE(of, ipqess_of_mtable);

static struct platform_driver ipqess_axi_driver = {
	.driver = {
		.name = "ipqess-edma",
		.of_match_table = ipqess_of_mtable,
	},
	.probe = ipqess_axi_probe,
	.remove = ipqess_axi_remove,
};

module_platform_driver(ipqess_axi_driver);

MODULE_AUTHOR("Qualcomm Atheros Inc");
MODULE_AUTHOR("John Crispin <[email protected]>");
MODULE_AUTHOR("Christian Lamparter <[email protected]>");
MODULE_AUTHOR("Gabor Juhos <[email protected]>");
MODULE_LICENSE("GPL");