072-v5.12-0002-net-broadcom-bcm4908enet-add-BCM4908-controller-driv.patch

From 4feffeadbcb2e5b11cbbf191a33c245b74a5837b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <[email protected]>
Date: Sun, 7 Feb 2021 23:26:32 +0100
Subject: [PATCH] net: broadcom: bcm4908enet: add BCM4908 controller driver
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The BCM4908 SoC family uses an Ethernet controller that includes UniMAC
but uses a different DMA engine (than other controllers) and requires
different programming.

Signed-off-by: Rafał Miłecki <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
---
 MAINTAINERS                                 |   9 +
 drivers/net/ethernet/broadcom/Kconfig       |   8 +
 drivers/net/ethernet/broadcom/Makefile      |   1 +
 drivers/net/ethernet/broadcom/bcm4908enet.c | 676 ++++++++++++++++++++
 drivers/net/ethernet/broadcom/bcm4908enet.h |  96 +++
 5 files changed, 790 insertions(+)
 create mode 100644 drivers/net/ethernet/broadcom/bcm4908enet.c
 create mode 100644 drivers/net/ethernet/broadcom/bcm4908enet.h
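
Note (not part of the upstream patch): the driver below describes each DMA
buffer with a 32-bit control word plus a 32-bit address. As a quick
reference, here is a minimal sketch of how that control word is packed from
the DMA_CTL_* definitions added in bcm4908enet.h; enet_bd_ctl() is a
hypothetical helper, written only for illustration:

	static u32 enet_bd_ctl(unsigned int len, bool own, bool wrap)
	{
		/* Buffer length lives in bits 27:16 (DMA_CTL_LEN_DESC_BUFLENGTH) */
		u32 ctl = len << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;

		if (own)
			ctl |= DMA_CTL_STATUS_OWN;	/* hand descriptor to the DMA engine */
		if (wrap)
			ctl |= DMA_CTL_STATUS_WRAP;	/* last descriptor in the ring */

		return ctl;
	}

TX descriptors additionally set DMA_CTL_STATUS_SOP, DMA_CTL_STATUS_EOP and
DMA_CTL_STATUS_APPEND_CRC, as bcm4908enet_start_xmit() below shows.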
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3207,6 +3207,15 @@ F: Documentation/devicetree/bindings/mip
 F: arch/mips/bcm47xx/*
 F: arch/mips/include/asm/mach-bcm47xx/*
 
+BROADCOM BCM4908 ETHERNET DRIVER
+M: Rafał Miłecki <[email protected]>
+M: [email protected]
+L: [email protected]
+S: Maintained
+F: Documentation/devicetree/bindings/net/brcm,bcm4908enet.yaml
+F: drivers/net/ethernet/broadcom/bcm4908enet.*
+F: drivers/net/ethernet/broadcom/unimac.h
+
 BROADCOM BCM5301X ARM ARCHITECTURE
 M: Hauke Mehrtens <[email protected]>
 M: Rafał Miłecki <[email protected]>
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -51,6 +51,14 @@ config B44_PCI
 	depends on B44_PCI_AUTOSELECT && B44_PCICORE_AUTOSELECT
 	default y
 
+config BCM4908ENET
+	tristate "Broadcom BCM4908 internal mac support"
+	depends on ARCH_BCM4908 || COMPILE_TEST
+	default y
+	help
+	  This driver supports the Ethernet controller integrated into
+	  Broadcom BCM4908 family SoCs.
+
 config BCM63XX_ENET
 	tristate "Broadcom 63xx internal mac support"
 	depends on BCM63XX
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -4,6 +4,7 @@
 #
 
 obj-$(CONFIG_B44) += b44.o
+obj-$(CONFIG_BCM4908ENET) += bcm4908enet.o
 obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
 obj-$(CONFIG_BCMGENET) += genet/
 obj-$(CONFIG_BNX2) += bnx2.o
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcm4908enet.c
@@ -0,0 +1,676 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Rafał Miłecki <[email protected]>
+ */
+
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "bcm4908enet.h"
+#include "unimac.h"
+
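+/* Per the register map in bcm4908enet.h, this controller uses DMA channel 0
+ * for RX and channel 1 for TX; each channel has a 0x10-byte config block and
+ * a 0x10-byte state RAM block (ENET_DMA_CH*_CFG / ENET_DMA_CH*_STATE_RAM).
+ */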
+#define ENET_DMA_CH_RX_CFG ENET_DMA_CH0_CFG
+#define ENET_DMA_CH_TX_CFG ENET_DMA_CH1_CFG
+#define ENET_DMA_CH_RX_STATE_RAM ENET_DMA_CH0_STATE_RAM
+#define ENET_DMA_CH_TX_STATE_RAM ENET_DMA_CH1_STATE_RAM
+
+#define ENET_TX_BDS_NUM 200
+#define ENET_RX_BDS_NUM 200
+#define ENET_RX_BDS_NUM_MAX 8192
+
+#define ENET_DMA_INT_DEFAULTS (ENET_DMA_CH_CFG_INT_DONE | \
+			       ENET_DMA_CH_CFG_INT_NO_DESC | \
+			       ENET_DMA_CH_CFG_INT_BUFF_DONE)
+#define ENET_DMA_MAX_BURST_LEN 8 /* in 64 bit words */
+
+#define ENET_MTU_MIN 60
+#define ENET_MTU_MAX 1500 /* Is it possible to support 2044? */
+#define ENET_MTU_MAX_EXTRA_SIZE 32 /* L2 */
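+/* Each RX buffer is sized ENET_MTU_MAX + ENET_MTU_MAX_EXTRA_SIZE so a
+ * maximum-size frame plus its L2 overhead fits in a single descriptor
+ * (see bcm4908enet_dma_alloc_rx_buf()).
+ */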
+
+struct bcm4908enet_dma_ring_bd {
+	__le32 ctl;
+	__le32 addr;
+} __packed;
+
+struct bcm4908enet_dma_ring_slot {
+	struct sk_buff *skb;
+	unsigned int len;
+	dma_addr_t dma_addr;
+};
+
+struct bcm4908enet_dma_ring {
+	int is_tx;
+	int read_idx;
+	int write_idx;
+	int length;
+	u16 cfg_block;
+	u16 st_ram_block;
+
+	union {
+		void *cpu_addr;
+		struct bcm4908enet_dma_ring_bd *buf_desc;
+	};
+	dma_addr_t dma_addr;
+
+	struct bcm4908enet_dma_ring_slot *slots;
+};
+
+struct bcm4908enet {
+	struct device *dev;
+	struct net_device *netdev;
+	struct napi_struct napi;
+	void __iomem *base;
+
+	struct bcm4908enet_dma_ring tx_ring;
+	struct bcm4908enet_dma_ring rx_ring;
+};
+
+/***
+ * R/W ops
+ */
+
+static inline u32 enet_read(struct bcm4908enet *enet, u16 offset)
+{
+	return readl(enet->base + offset);
+}
+
+static inline void enet_write(struct bcm4908enet *enet, u16 offset, u32 value)
+{
+	writel(value, enet->base + offset);
+}
+
+static inline void enet_maskset(struct bcm4908enet *enet, u16 offset, u32 mask, u32 set)
+{
+	u32 val;
+
+	WARN_ON(set & ~mask);
+
+	val = enet_read(enet, offset);
+	val = (val & ~mask) | (set & mask);
+	enet_write(enet, offset, val);
+}
+
+static inline void enet_set(struct bcm4908enet *enet, u16 offset, u32 set)
+{
+	enet_maskset(enet, offset, set, set);
+}
+
+static inline u32 enet_umac_read(struct bcm4908enet *enet, u16 offset)
+{
+	return enet_read(enet, ENET_UNIMAC + offset);
+}
+
+static inline void enet_umac_write(struct bcm4908enet *enet, u16 offset, u32 value)
+{
+	enet_write(enet, ENET_UNIMAC + offset, value);
+}
+
+static inline void enet_umac_maskset(struct bcm4908enet *enet, u16 offset, u32 mask, u32 set)
+{
+	enet_maskset(enet, ENET_UNIMAC + offset, mask, set);
+}
+
+static inline void enet_umac_set(struct bcm4908enet *enet, u16 offset, u32 set)
+{
+	enet_set(enet, ENET_UNIMAC + offset, set);
+}
+
+/***
+ * Helpers
+ */
+
+static void bcm4908enet_intrs_on(struct bcm4908enet *enet)
+{
+	enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_MASK, ENET_DMA_INT_DEFAULTS);
+}
+
+static void bcm4908enet_intrs_off(struct bcm4908enet *enet)
+{
+	enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_MASK, 0);
+}
+
+static void bcm4908enet_intrs_ack(struct bcm4908enet *enet)
+{
+	enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_STAT, ENET_DMA_INT_DEFAULTS);
+}
+
+/***
+ * DMA
+ */
+
+static int bcm4908_dma_alloc_buf_descs(struct bcm4908enet *enet, struct bcm4908enet_dma_ring *ring)
+{
+	int size = ring->length * sizeof(struct bcm4908enet_dma_ring_bd);
+	struct device *dev = enet->dev;
+
+	ring->cpu_addr = dma_alloc_coherent(dev, size, &ring->dma_addr, GFP_KERNEL);
+	if (!ring->cpu_addr)
+		return -ENOMEM;
+
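+	/* The DMA engine appears to require a 64-byte aligned descriptor
+	 * ring; dma_alloc_coherent() should already satisfy this, so treat
+	 * misalignment as a hard error rather than working around it.
+	 */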
+	if (((uintptr_t)ring->cpu_addr) & (0x40 - 1)) {
+		dev_err(dev, "Invalid DMA ring alignment\n");
+		goto err_free_buf_descs;
+	}
+
+	ring->slots = kzalloc(ring->length * sizeof(*ring->slots), GFP_KERNEL);
+	if (!ring->slots)
+		goto err_free_buf_descs;
+
+	memset(ring->cpu_addr, 0, size);
+
+	ring->read_idx = 0;
+	ring->write_idx = 0;
+
+	return 0;
+
+err_free_buf_descs:
+	dma_free_coherent(dev, size, ring->cpu_addr, ring->dma_addr);
+	return -ENOMEM;
+}
+
+static void bcm4908enet_dma_free(struct bcm4908enet *enet)
+{
+	struct bcm4908enet_dma_ring *tx_ring = &enet->tx_ring;
+	struct bcm4908enet_dma_ring *rx_ring = &enet->rx_ring;
+	struct device *dev = enet->dev;
+	int size;
+
+	size = rx_ring->length * sizeof(struct bcm4908enet_dma_ring_bd);
+	if (rx_ring->cpu_addr)
+		dma_free_coherent(dev, size, rx_ring->cpu_addr, rx_ring->dma_addr);
+	kfree(rx_ring->slots);
+
+	size = tx_ring->length * sizeof(struct bcm4908enet_dma_ring_bd);
+	if (tx_ring->cpu_addr)
+		dma_free_coherent(dev, size, tx_ring->cpu_addr, tx_ring->dma_addr);
+	kfree(tx_ring->slots);
+}
+
+static int bcm4908enet_dma_alloc(struct bcm4908enet *enet)
+{
+	struct bcm4908enet_dma_ring *tx_ring = &enet->tx_ring;
+	struct bcm4908enet_dma_ring *rx_ring = &enet->rx_ring;
+	struct device *dev = enet->dev;
+	int err;
+
+	tx_ring->length = ENET_TX_BDS_NUM;
+	tx_ring->is_tx = 1;
+	tx_ring->cfg_block = ENET_DMA_CH_TX_CFG;
+	tx_ring->st_ram_block = ENET_DMA_CH_TX_STATE_RAM;
+	err = bcm4908_dma_alloc_buf_descs(enet, tx_ring);
+	if (err) {
+		dev_err(dev, "Failed to alloc TX buf descriptors: %d\n", err);
+		return err;
+	}
+
+	rx_ring->length = ENET_RX_BDS_NUM;
+	rx_ring->is_tx = 0;
+	rx_ring->cfg_block = ENET_DMA_CH_RX_CFG;
+	rx_ring->st_ram_block = ENET_DMA_CH_RX_STATE_RAM;
+	err = bcm4908_dma_alloc_buf_descs(enet, rx_ring);
+	if (err) {
+		dev_err(dev, "Failed to alloc RX buf descriptors: %d\n", err);
+		bcm4908enet_dma_free(enet);
+		return err;
+	}
+
+	return 0;
+}
+
+static void bcm4908enet_dma_reset(struct bcm4908enet *enet)
+{
+	struct bcm4908enet_dma_ring *rings[] = { &enet->rx_ring, &enet->tx_ring };
+	int i;
+
+	/* Disable the DMA controller and channel */
+	for (i = 0; i < ARRAY_SIZE(rings); i++)
+		enet_write(enet, rings[i]->cfg_block + ENET_DMA_CH_CFG, 0);
+	enet_maskset(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_MASTER_EN, 0);
+
+	/* Reset channels state */
+	for (i = 0; i < ARRAY_SIZE(rings); i++) {
+		struct bcm4908enet_dma_ring *ring = rings[i];
+
+		enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR, 0);
+		enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_STATE_DATA, 0);
+		enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_DESC_LEN_STATUS, 0);
+		enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_DESC_BASE_BUFPTR, 0);
+	}
+}
+
+static int bcm4908enet_dma_alloc_rx_buf(struct bcm4908enet *enet, unsigned int idx)
+{
+	struct bcm4908enet_dma_ring_bd *buf_desc = &enet->rx_ring.buf_desc[idx];
+	struct bcm4908enet_dma_ring_slot *slot = &enet->rx_ring.slots[idx];
+	struct device *dev = enet->dev;
+	u32 tmp;
+	int err;
+
+	slot->len = ENET_MTU_MAX + ENET_MTU_MAX_EXTRA_SIZE;
+
+	slot->skb = netdev_alloc_skb(enet->netdev, slot->len);
+	if (!slot->skb)
+		return -ENOMEM;
+
+	slot->dma_addr = dma_map_single(dev, slot->skb->data, slot->len, DMA_FROM_DEVICE);
+	err = dma_mapping_error(dev, slot->dma_addr);
+	if (err) {
+		dev_err(dev, "Failed to map DMA buffer: %d\n", err);
+		kfree_skb(slot->skb);
+		slot->skb = NULL;
+		return err;
+	}
+
+	tmp = slot->len << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
+	tmp |= DMA_CTL_STATUS_OWN;
+	if (idx == enet->rx_ring.length - 1)
+		tmp |= DMA_CTL_STATUS_WRAP;
+	buf_desc->ctl = cpu_to_le32(tmp);
+	buf_desc->addr = cpu_to_le32(slot->dma_addr);
+
+	return 0;
+}
+
+static void bcm4908enet_dma_ring_init(struct bcm4908enet *enet,
+				      struct bcm4908enet_dma_ring *ring)
+{
+	int reset_channel = 0; /* We support only 1 main channel (with TX and RX) */
+	int reset_subch = ring->is_tx ? 1 : 0;
+
+	/* Reset the DMA channel */
+	enet_write(enet, ENET_DMA_CTRL_CHANNEL_RESET, BIT(reset_channel * 2 + reset_subch));
+	enet_write(enet, ENET_DMA_CTRL_CHANNEL_RESET, 0);
+
+	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG, 0);
+	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_MAX_BURST, ENET_DMA_MAX_BURST_LEN);
+	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_MASK, 0);
+
+	enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR,
+		   (uint32_t)ring->dma_addr);
+}
+
+static void bcm4908enet_dma_uninit(struct bcm4908enet *enet)
+{
+	struct bcm4908enet_dma_ring *rx_ring = &enet->rx_ring;
+	struct bcm4908enet_dma_ring_slot *slot;
+	struct device *dev = enet->dev;
+	int i;
+
+	for (i = rx_ring->length - 1; i >= 0; i--) {
+		slot = &rx_ring->slots[i];
+		if (!slot->skb)
+			continue;
+		dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_FROM_DEVICE);
+		kfree_skb(slot->skb);
+		slot->skb = NULL;
+	}
+}
+
+static int bcm4908enet_dma_init(struct bcm4908enet *enet)
+{
+	struct bcm4908enet_dma_ring *rx_ring = &enet->rx_ring;
+	struct device *dev = enet->dev;
+	int err;
+	int i;
+
+	for (i = 0; i < rx_ring->length; i++) {
+		err = bcm4908enet_dma_alloc_rx_buf(enet, i);
+		if (err) {
+			dev_err(dev, "Failed to alloc RX buffer: %d\n", err);
+			bcm4908enet_dma_uninit(enet);
+			return err;
+		}
+	}
+
+	bcm4908enet_dma_ring_init(enet, &enet->tx_ring);
+	bcm4908enet_dma_ring_init(enet, &enet->rx_ring);
+
+	return 0;
+}
+
+static void bcm4908enet_dma_tx_ring_enable(struct bcm4908enet *enet,
+					   struct bcm4908enet_dma_ring *ring)
+{
+	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE);
+}
+
+static void bcm4908enet_dma_tx_ring_disable(struct bcm4908enet *enet,
+					    struct bcm4908enet_dma_ring *ring)
+{
+	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG, 0);
+}
+
+static void bcm4908enet_dma_rx_ring_enable(struct bcm4908enet *enet,
+					   struct bcm4908enet_dma_ring *ring)
+{
+	enet_set(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE);
+}
+
+static void bcm4908enet_dma_rx_ring_disable(struct bcm4908enet *enet,
+					    struct bcm4908enet_dma_ring *ring)
+{
+	unsigned long deadline;
+	u32 tmp;
+
+	enet_maskset(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE, 0);
+
+	deadline = jiffies + usecs_to_jiffies(2000);
+	do {
+		tmp = enet_read(enet, ring->cfg_block + ENET_DMA_CH_CFG);
+		if (!(tmp & ENET_DMA_CH_CFG_ENABLE))
+			return;
+		enet_maskset(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE, 0);
+		usleep_range(10, 30);
+	} while (!time_after_eq(jiffies, deadline));
+
+	dev_warn(enet->dev, "Timeout waiting for DMA RX stop\n");
+}
+
+/***
+ * Ethernet driver
+ */
+
+static void bcm4908enet_gmac_init(struct bcm4908enet *enet)
+{
+	u32 cmd;
+
+	cmd = enet_umac_read(enet, UMAC_CMD);
+	enet_umac_write(enet, UMAC_CMD, cmd | CMD_SW_RESET);
+	enet_umac_write(enet, UMAC_CMD, cmd & ~CMD_SW_RESET);
+
+	enet_set(enet, ENET_FLUSH, ENET_FLUSH_RXFIFO_FLUSH | ENET_FLUSH_TXFIFO_FLUSH);
+	enet_maskset(enet, ENET_FLUSH, ENET_FLUSH_RXFIFO_FLUSH | ENET_FLUSH_TXFIFO_FLUSH, 0);
+
+	enet_set(enet, ENET_MIB_CTRL, ENET_MIB_CTRL_CLR_MIB);
+	enet_maskset(enet, ENET_MIB_CTRL, ENET_MIB_CTRL_CLR_MIB, 0);
+
+	cmd = enet_umac_read(enet, UMAC_CMD);
+	cmd &= ~(CMD_SPEED_MASK << CMD_SPEED_SHIFT);
+	cmd &= ~CMD_TX_EN;
+	cmd &= ~CMD_RX_EN;
+	cmd |= CMD_SPEED_1000 << CMD_SPEED_SHIFT;
+	enet_umac_write(enet, UMAC_CMD, cmd);
+
+	enet_maskset(enet, ENET_GMAC_STATUS,
+		     ENET_GMAC_STATUS_ETH_SPEED_MASK |
+		     ENET_GMAC_STATUS_HD |
+		     ENET_GMAC_STATUS_AUTO_CFG_EN |
+		     ENET_GMAC_STATUS_LINK_UP,
+		     ENET_GMAC_STATUS_ETH_SPEED_1000 |
+		     ENET_GMAC_STATUS_AUTO_CFG_EN |
+		     ENET_GMAC_STATUS_LINK_UP);
+}
+
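+/* The hard IRQ handler below only masks and acks the DMA interrupts and
+ * defers all work to NAPI; bcm4908enet_poll() unmasks them again once the
+ * RX ring has been drained.
+ */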
+static irqreturn_t bcm4908enet_irq_handler(int irq, void *dev_id)
+{
+	struct bcm4908enet *enet = dev_id;
+
+	bcm4908enet_intrs_off(enet);
+	bcm4908enet_intrs_ack(enet);
+
+	napi_schedule(&enet->napi);
+
+	return IRQ_HANDLED;
+}
+
+static int bcm4908enet_open(struct net_device *netdev)
+{
+	struct bcm4908enet *enet = netdev_priv(netdev);
+	struct device *dev = enet->dev;
+	int err;
+
+	err = request_irq(netdev->irq, bcm4908enet_irq_handler, 0, "enet", enet);
+	if (err) {
+		dev_err(dev, "Failed to request IRQ %d: %d\n", netdev->irq, err);
+		return err;
+	}
+
+	bcm4908enet_gmac_init(enet);
+	bcm4908enet_dma_reset(enet);
+	bcm4908enet_dma_init(enet);
+
+	enet_umac_set(enet, UMAC_CMD, CMD_TX_EN | CMD_RX_EN);
+
+	enet_set(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_MASTER_EN);
+	enet_maskset(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_FLOWC_CH1_EN, 0);
+	bcm4908enet_dma_rx_ring_enable(enet, &enet->rx_ring);
+
+	napi_enable(&enet->napi);
+	netif_carrier_on(netdev);
+	netif_start_queue(netdev);
+
+	bcm4908enet_intrs_ack(enet);
+	bcm4908enet_intrs_on(enet);
+
+	return 0;
+}
+
+static int bcm4908enet_stop(struct net_device *netdev)
+{
+	struct bcm4908enet *enet = netdev_priv(netdev);
+
+	netif_stop_queue(netdev);
+	netif_carrier_off(netdev);
+	napi_disable(&enet->napi);
+
+	bcm4908enet_dma_rx_ring_disable(enet, &enet->rx_ring);
+	bcm4908enet_dma_tx_ring_disable(enet, &enet->tx_ring);
+
+	bcm4908enet_dma_uninit(enet);
+
+	free_irq(enet->netdev->irq, enet);
+
+	return 0;
+}
+
+static int bcm4908enet_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct bcm4908enet *enet = netdev_priv(netdev);
+	struct bcm4908enet_dma_ring *ring = &enet->tx_ring;
+	struct bcm4908enet_dma_ring_slot *slot;
+	struct device *dev = enet->dev;
+	struct bcm4908enet_dma_ring_bd *buf_desc;
+	int free_buf_descs;
+	u32 tmp;
+
+	/* Free transmitted skbs */
+	while (ring->read_idx != ring->write_idx) {
+		buf_desc = &ring->buf_desc[ring->read_idx];
+		if (le32_to_cpu(buf_desc->ctl) & DMA_CTL_STATUS_OWN)
+			break;
+		slot = &ring->slots[ring->read_idx];
+
+		dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE);
+		dev_kfree_skb(slot->skb);
+		if (++ring->read_idx == ring->length)
+			ring->read_idx = 0;
+	}
+
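+	/* read_idx == write_idx means "all descriptors free" here, so the
+	 * ring must never fill up completely; requiring two free descriptors
+	 * below keeps the full and empty states distinguishable.
+	 */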
+	/* Don't use the last empty buf descriptor */
+	if (ring->read_idx <= ring->write_idx)
+		free_buf_descs = ring->read_idx - ring->write_idx + ring->length;
+	else
+		free_buf_descs = ring->read_idx - ring->write_idx;
+	if (free_buf_descs < 2)
+		return NETDEV_TX_BUSY;
+
+	/* Hardware removes OWN bit after sending data */
+	buf_desc = &ring->buf_desc[ring->write_idx];
+	if (unlikely(le32_to_cpu(buf_desc->ctl) & DMA_CTL_STATUS_OWN)) {
+		netif_stop_queue(netdev);
+		return NETDEV_TX_BUSY;
+	}
+
+	slot = &ring->slots[ring->write_idx];
+	slot->skb = skb;
+	slot->len = skb->len;
+	slot->dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, slot->dma_addr)))
+		return NETDEV_TX_BUSY;
+
+	tmp = skb->len << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
+	tmp |= DMA_CTL_STATUS_OWN;
+	tmp |= DMA_CTL_STATUS_SOP;
+	tmp |= DMA_CTL_STATUS_EOP;
+	tmp |= DMA_CTL_STATUS_APPEND_CRC;
+	if (ring->write_idx + 1 == ring->length - 1)
+		tmp |= DMA_CTL_STATUS_WRAP;
+
+	buf_desc->addr = cpu_to_le32((uint32_t)slot->dma_addr);
+	buf_desc->ctl = cpu_to_le32(tmp);
+
+	bcm4908enet_dma_tx_ring_enable(enet, &enet->tx_ring);
+
+	if (++ring->write_idx == ring->length - 1)
+		ring->write_idx = 0;
+	enet->netdev->stats.tx_bytes += skb->len;
+	enet->netdev->stats.tx_packets++;
+
+	return NETDEV_TX_OK;
+}
+
+static int bcm4908enet_poll(struct napi_struct *napi, int weight)
+{
+	struct bcm4908enet *enet = container_of(napi, struct bcm4908enet, napi);
+	struct device *dev = enet->dev;
+	int handled = 0;
+
+	while (handled < weight) {
+		struct bcm4908enet_dma_ring_bd *buf_desc;
+		struct bcm4908enet_dma_ring_slot slot;
+		u32 ctl;
+		int len;
+		int err;
+
+		buf_desc = &enet->rx_ring.buf_desc[enet->rx_ring.read_idx];
+		ctl = le32_to_cpu(buf_desc->ctl);
+		if (ctl & DMA_CTL_STATUS_OWN)
+			break;
+
+		slot = enet->rx_ring.slots[enet->rx_ring.read_idx];
+
+		/* Provide new buffer before unpinning the old one */
+		err = bcm4908enet_dma_alloc_rx_buf(enet, enet->rx_ring.read_idx);
+		if (err)
+			break;
+
+		if (++enet->rx_ring.read_idx == enet->rx_ring.length)
+			enet->rx_ring.read_idx = 0;
+
+		len = (ctl & DMA_CTL_LEN_DESC_BUFLENGTH) >> DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
+
+		if (len < ENET_MTU_MIN ||
+		    (ctl & (DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP)) != (DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP)) {
+			enet->netdev->stats.rx_dropped++;
+			break;
+		}
+
+		dma_unmap_single(dev, slot.dma_addr, slot.len, DMA_FROM_DEVICE);
+
+		skb_put(slot.skb, len - 4 + 2);
+		slot.skb->protocol = eth_type_trans(slot.skb, enet->netdev);
+		netif_receive_skb(slot.skb);
+
+		enet->netdev->stats.rx_packets++;
+		enet->netdev->stats.rx_bytes += len;
+
+		handled++;
+	}
+
+	if (handled < weight) {
+		napi_complete_done(napi, handled);
+		bcm4908enet_intrs_on(enet);
+	}
+
+	return handled;
+}
+
+static const struct net_device_ops bcm96xx_netdev_ops = {
+	.ndo_open = bcm4908enet_open,
+	.ndo_stop = bcm4908enet_stop,
+	.ndo_start_xmit = bcm4908enet_start_xmit,
+	.ndo_set_mac_address = eth_mac_addr,
+};
+
+static int bcm4908enet_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct net_device *netdev;
+	struct bcm4908enet *enet;
+	int err;
+
+	netdev = devm_alloc_etherdev(dev, sizeof(*enet));
+	if (!netdev)
+		return -ENOMEM;
+
+	enet = netdev_priv(netdev);
+	enet->dev = dev;
+	enet->netdev = netdev;
+
+	enet->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(enet->base)) {
+		dev_err(dev, "Failed to map registers: %ld\n", PTR_ERR(enet->base));
+		return PTR_ERR(enet->base);
+	}
+
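+	/* The "rx" interrupt name must match the device tree binding
+	 * (Documentation/devicetree/bindings/net/brcm,bcm4908enet.yaml,
+	 * referenced in the MAINTAINERS entry above).
+	 */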
+	netdev->irq = platform_get_irq_byname(pdev, "rx");
+	if (netdev->irq < 0)
+		return netdev->irq;
+
+	dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+
+	err = bcm4908enet_dma_alloc(enet);
+	if (err)
+		return err;
+
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+	eth_hw_addr_random(netdev);
+	netdev->netdev_ops = &bcm96xx_netdev_ops;
+	netdev->min_mtu = ETH_ZLEN;
+	netdev->mtu = ENET_MTU_MAX;
+	netdev->max_mtu = ENET_MTU_MAX;
+	netif_napi_add(netdev, &enet->napi, bcm4908enet_poll, 64);
+
+	err = register_netdev(netdev);
+	if (err) {
+		bcm4908enet_dma_free(enet);
+		return err;
+	}
+
+	platform_set_drvdata(pdev, enet);
+
+	return 0;
+}
+
+static int bcm4908enet_remove(struct platform_device *pdev)
+{
+	struct bcm4908enet *enet = platform_get_drvdata(pdev);
+
+	unregister_netdev(enet->netdev);
+	netif_napi_del(&enet->napi);
+	bcm4908enet_dma_free(enet);
+
+	return 0;
+}
+
+static const struct of_device_id bcm4908enet_of_match[] = {
+	{ .compatible = "brcm,bcm4908enet" },
+	{},
+};
+
+static struct platform_driver bcm4908enet_driver = {
+	.driver = {
+		.name = "bcm4908enet",
+		.of_match_table = bcm4908enet_of_match,
+	},
+	.probe = bcm4908enet_probe,
+	.remove = bcm4908enet_remove,
+};
+module_platform_driver(bcm4908enet_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, bcm4908enet_of_match);
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcm4908enet.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __BCM4908ENET_H
+#define __BCM4908ENET_H
+
+#define ENET_CONTROL 0x000
+#define ENET_MIB_CTRL 0x004
+#define ENET_MIB_CTRL_CLR_MIB 0x00000001
+#define ENET_RX_ERR_MASK 0x008
+#define ENET_MIB_MAX_PKT_SIZE 0x00C
+#define ENET_MIB_MAX_PKT_SIZE_VAL 0x00003fff
+#define ENET_DIAG_OUT 0x01c
+#define ENET_ENABLE_DROP_PKT 0x020
+#define ENET_IRQ_ENABLE 0x024
+#define ENET_IRQ_ENABLE_OVFL 0x00000001
+#define ENET_GMAC_STATUS 0x028
+#define ENET_GMAC_STATUS_ETH_SPEED_MASK 0x00000003
+#define ENET_GMAC_STATUS_ETH_SPEED_10 0x00000000
+#define ENET_GMAC_STATUS_ETH_SPEED_100 0x00000001
+#define ENET_GMAC_STATUS_ETH_SPEED_1000 0x00000002
+#define ENET_GMAC_STATUS_HD 0x00000004
+#define ENET_GMAC_STATUS_AUTO_CFG_EN 0x00000008
+#define ENET_GMAC_STATUS_LINK_UP 0x00000010
+#define ENET_IRQ_STATUS 0x02c
+#define ENET_IRQ_STATUS_OVFL 0x00000001
+#define ENET_OVERFLOW_COUNTER 0x030
+#define ENET_FLUSH 0x034
+#define ENET_FLUSH_RXFIFO_FLUSH 0x00000001
+#define ENET_FLUSH_TXFIFO_FLUSH 0x00000002
+#define ENET_RSV_SELECT 0x038
+#define ENET_BP_FORCE 0x03c
+#define ENET_BP_FORCE_FORCE 0x00000001
+#define ENET_DMA_RX_OK_TO_SEND_COUNT 0x040
+#define ENET_DMA_RX_OK_TO_SEND_COUNT_VAL 0x0000000f
+#define ENET_TX_CRC_CTRL 0x044
+#define ENET_MIB 0x200
+#define ENET_UNIMAC 0x400
+#define ENET_DMA 0x800
+#define ENET_DMA_CONTROLLER_CFG 0x800
+#define ENET_DMA_CTRL_CFG_MASTER_EN 0x00000001
+#define ENET_DMA_CTRL_CFG_FLOWC_CH1_EN 0x00000002
+#define ENET_DMA_CTRL_CFG_FLOWC_CH3_EN 0x00000004
+#define ENET_DMA_FLOWCTL_CH1_THRESH_LO 0x804
+#define ENET_DMA_FLOWCTL_CH1_THRESH_HI 0x808
+#define ENET_DMA_FLOWCTL_CH1_ALLOC 0x80c
+#define ENET_DMA_FLOWCTL_CH1_ALLOC_FORCE 0x80000000
+#define ENET_DMA_FLOWCTL_CH3_THRESH_LO 0x810
+#define ENET_DMA_FLOWCTL_CH3_THRESH_HI 0x814
+#define ENET_DMA_FLOWCTL_CH3_ALLOC 0x818
+#define ENET_DMA_FLOWCTL_CH5_THRESH_LO 0x81C
+#define ENET_DMA_FLOWCTL_CH5_THRESH_HI 0x820
+#define ENET_DMA_FLOWCTL_CH5_ALLOC 0x824
+#define ENET_DMA_FLOWCTL_CH7_THRESH_LO 0x828
+#define ENET_DMA_FLOWCTL_CH7_THRESH_HI 0x82C
+#define ENET_DMA_FLOWCTL_CH7_ALLOC 0x830
+#define ENET_DMA_CTRL_CHANNEL_RESET 0x834
+#define ENET_DMA_CTRL_CHANNEL_DEBUG 0x838
+#define ENET_DMA_CTRL_GLOBAL_INTERRUPT_STATUS 0x840
+#define ENET_DMA_CTRL_GLOBAL_INTERRUPT_MASK 0x844
+#define ENET_DMA_CH0_CFG 0xa00 /* RX */
+#define ENET_DMA_CH1_CFG 0xa10 /* TX */
+#define ENET_DMA_CH0_STATE_RAM 0xc00 /* RX */
+#define ENET_DMA_CH1_STATE_RAM 0xc10 /* TX */
+
+#define ENET_DMA_CH_CFG 0x00 /* assorted configuration */
+#define ENET_DMA_CH_CFG_ENABLE 0x00000001 /* set to enable channel */
+#define ENET_DMA_CH_CFG_PKT_HALT 0x00000002 /* idle after an EOP flag is detected */
+#define ENET_DMA_CH_CFG_BURST_HALT 0x00000004 /* idle after finish current memory burst */
+#define ENET_DMA_CH_CFG_INT_STAT 0x04 /* interrupts control and status */
+#define ENET_DMA_CH_CFG_INT_MASK 0x08 /* interrupts mask */
+#define ENET_DMA_CH_CFG_INT_BUFF_DONE 0x00000001 /* buffer done */
+#define ENET_DMA_CH_CFG_INT_DONE 0x00000002 /* packet xfer complete */
+#define ENET_DMA_CH_CFG_INT_NO_DESC 0x00000004 /* no valid descriptors */
+#define ENET_DMA_CH_CFG_INT_RX_ERROR 0x00000008 /* rxdma detect client protocol error */
+#define ENET_DMA_CH_CFG_MAX_BURST 0x0c /* max burst length permitted */
+#define ENET_DMA_CH_CFG_MAX_BURST_DESCSIZE_SEL 0x00040000 /* DMA Descriptor Size Selection */
+#define ENET_DMA_CH_CFG_SIZE 0x10
+
+#define ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR 0x00 /* descriptor ring start address */
+#define ENET_DMA_CH_STATE_RAM_STATE_DATA 0x04 /* state/bytes done/ring offset */
+#define ENET_DMA_CH_STATE_RAM_DESC_LEN_STATUS 0x08 /* buffer descriptor status and len */
  826. +#define ENET_DMA_CH_STATE_RAM_DESC_BASE_BUFPTR 0x0c /* buffer descrpitor current processing */
  827. +#define ENET_DMA_CH_STATE_RAM_SIZE 0x10
  828. +
  829. +#define DMA_CTL_STATUS_APPEND_CRC 0x00000100
  830. +#define DMA_CTL_STATUS_APPEND_BRCM_TAG 0x00000200
  831. +#define DMA_CTL_STATUS_PRIO 0x00000C00 /* Prio for Tx */
  832. +#define DMA_CTL_STATUS_WRAP 0x00001000 /* */
  833. +#define DMA_CTL_STATUS_SOP 0x00002000 /* first buffer in packet */
  834. +#define DMA_CTL_STATUS_EOP 0x00004000 /* last buffer in packet */
  835. +#define DMA_CTL_STATUS_OWN 0x00008000 /* cleared by DMA, set by SW */
  836. +#define DMA_CTL_LEN_DESC_BUFLENGTH 0x0fff0000
  837. +#define DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT 16
  838. +#define DMA_CTL_LEN_DESC_MULTICAST 0x40000000
  839. +#define DMA_CTL_LEN_DESC_USEFPM 0x80000000
  840. +
  841. +#endif