170-0002-net-broadcom-bcm4908_enet-add-BCM4908-controller-dri.patch 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844
  1. From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <[email protected]>
  2. Date: Fri, 5 Feb 2021 21:59:51 +0100
  3. Subject: [PATCH 2/2] net: broadcom: bcm4908_enet: add BCM4908 controller
  4. driver
  5. MIME-Version: 1.0
  6. Content-Type: text/plain; charset=UTF-8
  7. Content-Transfer-Encoding: 8bit
  8. BCM4908 SoCs family has integrated Ethernel controller that includes
  9. UniMAC but uses different DMA engine (than other controllers) and
  10. requires different programming.
  11. Ethernet controller in BCM4908 is always connected to the internal SF2
  12. switch's port and uses fixed link.
  13. Signed-off-by: Rafał Miłecki <[email protected]>
  14. ---
  15. MAINTAINERS | 9 +
  16. drivers/net/ethernet/broadcom/Kconfig | 8 +
  17. drivers/net/ethernet/broadcom/Makefile | 1 +
  18. drivers/net/ethernet/broadcom/bcm4908_enet.c | 671 +++++++++++++++++++
  19. drivers/net/ethernet/broadcom/bcm4908_enet.h | 96 +++
  20. 5 files changed, 785 insertions(+)
  21. create mode 100644 drivers/net/ethernet/broadcom/bcm4908_enet.c
  22. create mode 100644 drivers/net/ethernet/broadcom/bcm4908_enet.h
  23. --- a/MAINTAINERS
  24. +++ b/MAINTAINERS
  25. @@ -3207,6 +3207,15 @@ F: Documentation/devicetree/bindings/mip
  26. F: arch/mips/bcm47xx/*
  27. F: arch/mips/include/asm/mach-bcm47xx/*
  28. +BROADCOM BCM4908 ETHERNET DRIVER
  29. +M: Rafał Miłecki <[email protected]>
  30. +M: [email protected]
  31. +L: [email protected]
  32. +S: Maintained
  33. +F: Documentation/devicetree/bindings/net/brcm,bcm4908-enet.yaml
  34. +F: drivers/net/ethernet/broadcom/bcm4908_enet.*
  35. +F: drivers/net/ethernet/broadcom/unimac.h
  36. +
  37. BROADCOM BCM5301X ARM ARCHITECTURE
  38. M: Hauke Mehrtens <[email protected]>
  39. M: Rafał Miłecki <[email protected]>
  40. --- a/drivers/net/ethernet/broadcom/Kconfig
  41. +++ b/drivers/net/ethernet/broadcom/Kconfig
  42. @@ -51,6 +51,14 @@ config B44_PCI
  43. depends on B44_PCI_AUTOSELECT && B44_PCICORE_AUTOSELECT
  44. default y
  45. +config BCM4908_ENET
  46. + tristate "Broadcom BCM4908 internal mac support"
  47. + depends on ARCH_BCM4908 || COMPILE_TEST
  48. + default ARCH_BCM4908
  49. + help
  50. + This driver supports Ethernet controller integrated into Broadcom
  51. + BCM4908 family SoCs.
  52. +
  53. config BCM63XX_ENET
  54. tristate "Broadcom 63xx internal mac support"
  55. depends on BCM63XX
  56. --- a/drivers/net/ethernet/broadcom/Makefile
  57. +++ b/drivers/net/ethernet/broadcom/Makefile
  58. @@ -4,6 +4,7 @@
  59. #
  60. obj-$(CONFIG_B44) += b44.o
  61. +obj-$(CONFIG_BCM4908_ENET) += bcm4908_enet.o
  62. obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
  63. obj-$(CONFIG_BCMGENET) += genet/
  64. obj-$(CONFIG_BNX2) += bnx2.o
  65. --- /dev/null
  66. +++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
  67. @@ -0,0 +1,671 @@
  68. +// SPDX-License-Identifier: GPL-2.0-only
  69. +/*
  70. + * Copyright (C) 2021 Rafał Miłecki <[email protected]>
  71. + */
  72. +
  73. +#include <linux/delay.h>
  74. +#include <linux/etherdevice.h>
  75. +#include <linux/interrupt.h>
  76. +#include <linux/module.h>
  77. +#include <linux/of.h>
  78. +#include <linux/platform_device.h>
  79. +#include <linux/slab.h>
  80. +#include <linux/string.h>
  81. +
  82. +#include "bcm4908_enet.h"
  83. +#include "unimac.h"
  84. +
  85. +#define ENET_DMA_CH_RX_CFG ENET_DMA_CH0_CFG
  86. +#define ENET_DMA_CH_TX_CFG ENET_DMA_CH1_CFG
  87. +#define ENET_DMA_CH_RX_STATE_RAM ENET_DMA_CH0_STATE_RAM
  88. +#define ENET_DMA_CH_TX_STATE_RAM ENET_DMA_CH1_STATE_RAM
  89. +
  90. +#define ENET_TX_BDS_NUM 200
  91. +#define ENET_RX_BDS_NUM 200
  92. +#define ENET_RX_BDS_NUM_MAX 8192
  93. +
  94. +#define ENET_DMA_INT_DEFAULTS (ENET_DMA_CH_CFG_INT_DONE | \
  95. + ENET_DMA_CH_CFG_INT_NO_DESC | \
  96. + ENET_DMA_CH_CFG_INT_BUFF_DONE)
  97. +#define ENET_DMA_MAX_BURST_LEN 8 /* in 64 bit words */
  98. +
  99. +#define ENET_MTU_MIN 60
  100. +#define ENET_MTU_MAX 1500 /* Is it possible to support 2044? */
  101. +#define ENET_MTU_MAX_EXTRA_SIZE 32 /* L2 */
  102. +
/* Hardware buffer descriptor as the DMA engine reads it: little-endian,
 * packed, control/status word followed by the 32-bit buffer address. */
struct bcm4908_enet_dma_ring_bd {
	__le32 ctl;
	__le32 addr;
} __packed;
  107. +
/* Per-descriptor CPU-side bookkeeping: the skb backing the buffer, its
 * mapped length, and the DMA address used for unmapping. */
struct bcm4908_enet_dma_ring_slot {
	struct sk_buff *skb;	/* owned by the ring until handed to the stack / freed */
	unsigned int len;	/* length passed to dma_map_single() */
	dma_addr_t dma_addr;
};
  113. +
/* One DMA ring (TX or RX): register block offsets, descriptor array in
 * coherent memory, and the matching slot bookkeeping array. */
struct bcm4908_enet_dma_ring {
	int is_tx;		/* 1 for the TX ring, 0 for RX */
	int read_idx;		/* next descriptor to reclaim/receive */
	int write_idx;		/* next descriptor to fill (TX) */
	int length;		/* number of descriptors */
	u16 cfg_block;		/* ENET_DMA_CHx_CFG register block offset */
	u16 st_ram_block;	/* ENET_DMA_CHx_STATE_RAM register block offset */

	union {
		void *cpu_addr;
		struct bcm4908_enet_dma_ring_bd *buf_desc;
	};
	dma_addr_t dma_addr;	/* device-visible address of buf_desc[] */

	struct bcm4908_enet_dma_ring_slot *slots;
};
  130. +
/* Driver private state, stored as netdev_priv() of the net_device. */
struct bcm4908_enet {
	struct device *dev;
	struct net_device *netdev;
	struct napi_struct napi;
	void __iomem *base;	/* mapped controller registers */

	struct bcm4908_enet_dma_ring tx_ring;
	struct bcm4908_enet_dma_ring rx_ring;
};
  140. +
  141. +/***
  142. + * R/W ops
  143. + */
  144. +
  145. +static u32 enet_read(struct bcm4908_enet *enet, u16 offset)
  146. +{
  147. + return readl(enet->base + offset);
  148. +}
  149. +
  150. +static void enet_write(struct bcm4908_enet *enet, u16 offset, u32 value)
  151. +{
  152. + writel(value, enet->base + offset);
  153. +}
  154. +
  155. +static void enet_maskset(struct bcm4908_enet *enet, u16 offset, u32 mask, u32 set)
  156. +{
  157. + u32 val;
  158. +
  159. + WARN_ON(set & ~mask);
  160. +
  161. + val = enet_read(enet, offset);
  162. + val = (val & ~mask) | (set & mask);
  163. + enet_write(enet, offset, val);
  164. +}
  165. +
  166. +static void enet_set(struct bcm4908_enet *enet, u16 offset, u32 set)
  167. +{
  168. + enet_maskset(enet, offset, set, set);
  169. +}
  170. +
  171. +static u32 enet_umac_read(struct bcm4908_enet *enet, u16 offset)
  172. +{
  173. + return enet_read(enet, ENET_UNIMAC + offset);
  174. +}
  175. +
  176. +static void enet_umac_write(struct bcm4908_enet *enet, u16 offset, u32 value)
  177. +{
  178. + enet_write(enet, ENET_UNIMAC + offset, value);
  179. +}
  180. +
  181. +static void enet_umac_set(struct bcm4908_enet *enet, u16 offset, u32 set)
  182. +{
  183. + enet_set(enet, ENET_UNIMAC + offset, set);
  184. +}
  185. +
  186. +/***
  187. + * Helpers
  188. + */
  189. +
  190. +static void bcm4908_enet_intrs_on(struct bcm4908_enet *enet)
  191. +{
  192. + enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_MASK, ENET_DMA_INT_DEFAULTS);
  193. +}
  194. +
  195. +static void bcm4908_enet_intrs_off(struct bcm4908_enet *enet)
  196. +{
  197. + enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_MASK, 0);
  198. +}
  199. +
  200. +static void bcm4908_enet_intrs_ack(struct bcm4908_enet *enet)
  201. +{
  202. + enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_STAT, ENET_DMA_INT_DEFAULTS);
  203. +}
  204. +
  205. +/***
  206. + * DMA
  207. + */
  208. +
  209. +static int bcm4908_dma_alloc_buf_descs(struct bcm4908_enet *enet,
  210. + struct bcm4908_enet_dma_ring *ring)
  211. +{
  212. + int size = ring->length * sizeof(struct bcm4908_enet_dma_ring_bd);
  213. + struct device *dev = enet->dev;
  214. +
  215. + ring->cpu_addr = dma_alloc_coherent(dev, size, &ring->dma_addr, GFP_KERNEL);
  216. + if (!ring->cpu_addr)
  217. + return -ENOMEM;
  218. +
  219. + if (((uintptr_t)ring->cpu_addr) & (0x40 - 1)) {
  220. + dev_err(dev, "Invalid DMA ring alignment\n");
  221. + goto err_free_buf_descs;
  222. + }
  223. +
  224. + ring->slots = kzalloc(ring->length * sizeof(*ring->slots), GFP_KERNEL);
  225. + if (!ring->slots)
  226. + goto err_free_buf_descs;
  227. +
  228. + ring->read_idx = 0;
  229. + ring->write_idx = 0;
  230. +
  231. + return 0;
  232. +
  233. +err_free_buf_descs:
  234. + dma_free_coherent(dev, size, ring->cpu_addr, ring->dma_addr);
  235. + return -ENOMEM;
  236. +}
  237. +
  238. +static void bcm4908_enet_dma_free(struct bcm4908_enet *enet)
  239. +{
  240. + struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;
  241. + struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
  242. + struct device *dev = enet->dev;
  243. + int size;
  244. +
  245. + size = rx_ring->length * sizeof(struct bcm4908_enet_dma_ring_bd);
  246. + if (rx_ring->cpu_addr)
  247. + dma_free_coherent(dev, size, rx_ring->cpu_addr, rx_ring->dma_addr);
  248. + kfree(rx_ring->slots);
  249. +
  250. + size = tx_ring->length * sizeof(struct bcm4908_enet_dma_ring_bd);
  251. + if (tx_ring->cpu_addr)
  252. + dma_free_coherent(dev, size, tx_ring->cpu_addr, tx_ring->dma_addr);
  253. + kfree(tx_ring->slots);
  254. +}
  255. +
  256. +static int bcm4908_enet_dma_alloc(struct bcm4908_enet *enet)
  257. +{
  258. + struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;
  259. + struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
  260. + struct device *dev = enet->dev;
  261. + int err;
  262. +
  263. + tx_ring->length = ENET_TX_BDS_NUM;
  264. + tx_ring->is_tx = 1;
  265. + tx_ring->cfg_block = ENET_DMA_CH_TX_CFG;
  266. + tx_ring->st_ram_block = ENET_DMA_CH_TX_STATE_RAM;
  267. + err = bcm4908_dma_alloc_buf_descs(enet, tx_ring);
  268. + if (err) {
  269. + dev_err(dev, "Failed to alloc TX buf descriptors: %d\n", err);
  270. + return err;
  271. + }
  272. +
  273. + rx_ring->length = ENET_RX_BDS_NUM;
  274. + rx_ring->is_tx = 0;
  275. + rx_ring->cfg_block = ENET_DMA_CH_RX_CFG;
  276. + rx_ring->st_ram_block = ENET_DMA_CH_RX_STATE_RAM;
  277. + err = bcm4908_dma_alloc_buf_descs(enet, rx_ring);
  278. + if (err) {
  279. + dev_err(dev, "Failed to alloc RX buf descriptors: %d\n", err);
  280. + bcm4908_enet_dma_free(enet);
  281. + return err;
  282. + }
  283. +
  284. + return 0;
  285. +}
  286. +
  287. +static void bcm4908_enet_dma_reset(struct bcm4908_enet *enet)
  288. +{
  289. + struct bcm4908_enet_dma_ring *rings[] = { &enet->rx_ring, &enet->tx_ring };
  290. + int i;
  291. +
  292. + /* Disable the DMA controller and channel */
  293. + for (i = 0; i < ARRAY_SIZE(rings); i++)
  294. + enet_write(enet, rings[i]->cfg_block + ENET_DMA_CH_CFG, 0);
  295. + enet_maskset(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_MASTER_EN, 0);
  296. +
  297. + /* Reset channels state */
  298. + for (i = 0; i < ARRAY_SIZE(rings); i++) {
  299. + struct bcm4908_enet_dma_ring *ring = rings[i];
  300. +
  301. + enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR, 0);
  302. + enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_STATE_DATA, 0);
  303. + enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_DESC_LEN_STATUS, 0);
  304. + enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_DESC_BASE_BUFPTR, 0);
  305. + }
  306. +}
  307. +
  308. +static int bcm4908_enet_dma_alloc_rx_buf(struct bcm4908_enet *enet, unsigned int idx)
  309. +{
  310. + struct bcm4908_enet_dma_ring_bd *buf_desc = &enet->rx_ring.buf_desc[idx];
  311. + struct bcm4908_enet_dma_ring_slot *slot = &enet->rx_ring.slots[idx];
  312. + struct device *dev = enet->dev;
  313. + u32 tmp;
  314. + int err;
  315. +
  316. + slot->len = ENET_MTU_MAX + ENET_MTU_MAX_EXTRA_SIZE;
  317. +
  318. + slot->skb = netdev_alloc_skb(enet->netdev, slot->len);
  319. + if (!slot->skb)
  320. + return -ENOMEM;
  321. +
  322. + slot->dma_addr = dma_map_single(dev, slot->skb->data, slot->len, DMA_FROM_DEVICE);
  323. + err = dma_mapping_error(dev, slot->dma_addr);
  324. + if (err) {
  325. + dev_err(dev, "Failed to map DMA buffer: %d\n", err);
  326. + kfree_skb(slot->skb);
  327. + slot->skb = NULL;
  328. + return err;
  329. + }
  330. +
  331. + tmp = slot->len << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
  332. + tmp |= DMA_CTL_STATUS_OWN;
  333. + if (idx == enet->rx_ring.length - 1)
  334. + tmp |= DMA_CTL_STATUS_WRAP;
  335. + buf_desc->ctl = cpu_to_le32(tmp);
  336. + buf_desc->addr = cpu_to_le32(slot->dma_addr);
  337. +
  338. + return 0;
  339. +}
  340. +
  341. +static void bcm4908_enet_dma_ring_init(struct bcm4908_enet *enet,
  342. + struct bcm4908_enet_dma_ring *ring)
  343. +{
  344. + int reset_channel = 0; /* We support only 1 main channel (with TX and RX) */
  345. + int reset_subch = ring->is_tx ? 1 : 0;
  346. +
  347. + /* Reset the DMA channel */
  348. + enet_write(enet, ENET_DMA_CTRL_CHANNEL_RESET, BIT(reset_channel * 2 + reset_subch));
  349. + enet_write(enet, ENET_DMA_CTRL_CHANNEL_RESET, 0);
  350. +
  351. + enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG, 0);
  352. + enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_MAX_BURST, ENET_DMA_MAX_BURST_LEN);
  353. + enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_MASK, 0);
  354. +
  355. + enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR,
  356. + (uint32_t)ring->dma_addr);
  357. +}
  358. +
  359. +static void bcm4908_enet_dma_uninit(struct bcm4908_enet *enet)
  360. +{
  361. + struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
  362. + struct bcm4908_enet_dma_ring_slot *slot;
  363. + struct device *dev = enet->dev;
  364. + int i;
  365. +
  366. + for (i = rx_ring->length - 1; i >= 0; i--) {
  367. + slot = &rx_ring->slots[i];
  368. + if (!slot->skb)
  369. + continue;
  370. + dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_FROM_DEVICE);
  371. + kfree_skb(slot->skb);
  372. + slot->skb = NULL;
  373. + }
  374. +}
  375. +
  376. +static int bcm4908_enet_dma_init(struct bcm4908_enet *enet)
  377. +{
  378. + struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
  379. + struct device *dev = enet->dev;
  380. + int err;
  381. + int i;
  382. +
  383. + for (i = 0; i < rx_ring->length; i++) {
  384. + err = bcm4908_enet_dma_alloc_rx_buf(enet, i);
  385. + if (err) {
  386. + dev_err(dev, "Failed to alloc RX buffer: %d\n", err);
  387. + bcm4908_enet_dma_uninit(enet);
  388. + return err;
  389. + }
  390. + }
  391. +
  392. + bcm4908_enet_dma_ring_init(enet, &enet->tx_ring);
  393. + bcm4908_enet_dma_ring_init(enet, &enet->rx_ring);
  394. +
  395. + return 0;
  396. +}
  397. +
  398. +static void bcm4908_enet_dma_tx_ring_enable(struct bcm4908_enet *enet,
  399. + struct bcm4908_enet_dma_ring *ring)
  400. +{
  401. + enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE);
  402. +}
  403. +
  404. +static void bcm4908_enet_dma_tx_ring_disable(struct bcm4908_enet *enet,
  405. + struct bcm4908_enet_dma_ring *ring)
  406. +{
  407. + enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG, 0);
  408. +}
  409. +
  410. +static void bcm4908_enet_dma_rx_ring_enable(struct bcm4908_enet *enet,
  411. + struct bcm4908_enet_dma_ring *ring)
  412. +{
  413. + enet_set(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE);
  414. +}
  415. +
  416. +static void bcm4908_enet_dma_rx_ring_disable(struct bcm4908_enet *enet,
  417. + struct bcm4908_enet_dma_ring *ring)
  418. +{
  419. + unsigned long deadline;
  420. + u32 tmp;
  421. +
  422. + enet_maskset(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE, 0);
  423. +
  424. + deadline = jiffies + usecs_to_jiffies(2000);
  425. + do {
  426. + tmp = enet_read(enet, ring->cfg_block + ENET_DMA_CH_CFG);
  427. + if (!(tmp & ENET_DMA_CH_CFG_ENABLE))
  428. + return;
  429. + enet_maskset(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE, 0);
  430. + usleep_range(10, 30);
  431. + } while (!time_after_eq(jiffies, deadline));
  432. +
  433. + dev_warn(enet->dev, "Timeout waiting for DMA TX stop\n");
  434. +}
  435. +
  436. +/***
  437. + * Ethernet driver
  438. + */
  439. +
  440. +static void bcm4908_enet_gmac_init(struct bcm4908_enet *enet)
  441. +{
  442. + u32 cmd;
  443. +
  444. + cmd = enet_umac_read(enet, UMAC_CMD);
  445. + enet_umac_write(enet, UMAC_CMD, cmd | CMD_SW_RESET);
  446. + enet_umac_write(enet, UMAC_CMD, cmd & ~CMD_SW_RESET);
  447. +
  448. + enet_set(enet, ENET_FLUSH, ENET_FLUSH_RXFIFO_FLUSH | ENET_FLUSH_TXFIFO_FLUSH);
  449. + enet_maskset(enet, ENET_FLUSH, ENET_FLUSH_RXFIFO_FLUSH | ENET_FLUSH_TXFIFO_FLUSH, 0);
  450. +
  451. + enet_set(enet, ENET_MIB_CTRL, ENET_MIB_CTRL_CLR_MIB);
  452. + enet_maskset(enet, ENET_MIB_CTRL, ENET_MIB_CTRL_CLR_MIB, 0);
  453. +
  454. + cmd = enet_umac_read(enet, UMAC_CMD);
  455. + cmd &= ~(CMD_SPEED_MASK << CMD_SPEED_SHIFT);
  456. + cmd &= ~CMD_TX_EN;
  457. + cmd &= ~CMD_RX_EN;
  458. + cmd |= CMD_SPEED_1000 << CMD_SPEED_SHIFT;
  459. + enet_umac_write(enet, UMAC_CMD, cmd);
  460. +
  461. + enet_maskset(enet, ENET_GMAC_STATUS,
  462. + ENET_GMAC_STATUS_ETH_SPEED_MASK |
  463. + ENET_GMAC_STATUS_HD |
  464. + ENET_GMAC_STATUS_AUTO_CFG_EN |
  465. + ENET_GMAC_STATUS_LINK_UP,
  466. + ENET_GMAC_STATUS_ETH_SPEED_1000 |
  467. + ENET_GMAC_STATUS_AUTO_CFG_EN |
  468. + ENET_GMAC_STATUS_LINK_UP);
  469. +}
  470. +
  471. +static irqreturn_t bcm4908_enet_irq_handler(int irq, void *dev_id)
  472. +{
  473. + struct bcm4908_enet *enet = dev_id;
  474. +
  475. + bcm4908_enet_intrs_off(enet);
  476. + bcm4908_enet_intrs_ack(enet);
  477. +
  478. + napi_schedule(&enet->napi);
  479. +
  480. + return IRQ_HANDLED;
  481. +}
  482. +
  483. +static int bcm4908_enet_open(struct net_device *netdev)
  484. +{
  485. + struct bcm4908_enet *enet = netdev_priv(netdev);
  486. + struct device *dev = enet->dev;
  487. + int err;
  488. +
  489. + err = request_irq(netdev->irq, bcm4908_enet_irq_handler, 0, "enet", enet);
  490. + if (err) {
  491. + dev_err(dev, "Failed to request IRQ %d: %d\n", netdev->irq, err);
  492. + return err;
  493. + }
  494. +
  495. + bcm4908_enet_gmac_init(enet);
  496. + bcm4908_enet_dma_reset(enet);
  497. + bcm4908_enet_dma_init(enet);
  498. +
  499. + enet_umac_set(enet, UMAC_CMD, CMD_TX_EN | CMD_RX_EN);
  500. +
  501. + enet_set(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_MASTER_EN);
  502. + enet_maskset(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_FLOWC_CH1_EN, 0);
  503. + bcm4908_enet_dma_rx_ring_enable(enet, &enet->rx_ring);
  504. +
  505. + napi_enable(&enet->napi);
  506. + netif_carrier_on(netdev);
  507. + netif_start_queue(netdev);
  508. +
  509. + bcm4908_enet_intrs_ack(enet);
  510. + bcm4908_enet_intrs_on(enet);
  511. +
  512. + return 0;
  513. +}
  514. +
  515. +static int bcm4908_enet_stop(struct net_device *netdev)
  516. +{
  517. + struct bcm4908_enet *enet = netdev_priv(netdev);
  518. +
  519. + netif_stop_queue(netdev);
  520. + netif_carrier_off(netdev);
  521. + napi_disable(&enet->napi);
  522. +
  523. + bcm4908_enet_dma_rx_ring_disable(enet, &enet->rx_ring);
  524. + bcm4908_enet_dma_tx_ring_disable(enet, &enet->tx_ring);
  525. +
  526. + bcm4908_enet_dma_uninit(enet);
  527. +
  528. + free_irq(enet->netdev->irq, enet);
  529. +
  530. + return 0;
  531. +}
  532. +
  533. +static int bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_device *netdev)
  534. +{
  535. + struct bcm4908_enet *enet = netdev_priv(netdev);
  536. + struct bcm4908_enet_dma_ring *ring = &enet->tx_ring;
  537. + struct bcm4908_enet_dma_ring_slot *slot;
  538. + struct device *dev = enet->dev;
  539. + struct bcm4908_enet_dma_ring_bd *buf_desc;
  540. + int free_buf_descs;
  541. + u32 tmp;
  542. +
  543. + /* Free transmitted skbs */
  544. + while (ring->read_idx != ring->write_idx) {
  545. + buf_desc = &ring->buf_desc[ring->read_idx];
  546. + if (le32_to_cpu(buf_desc->ctl) & DMA_CTL_STATUS_OWN)
  547. + break;
  548. + slot = &ring->slots[ring->read_idx];
  549. +
  550. + dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE);
  551. + dev_kfree_skb(slot->skb);
  552. + if (++ring->read_idx == ring->length)
  553. + ring->read_idx = 0;
  554. + }
  555. +
  556. + /* Don't use the last empty buf descriptor */
  557. + if (ring->read_idx <= ring->write_idx)
  558. + free_buf_descs = ring->read_idx - ring->write_idx + ring->length;
  559. + else
  560. + free_buf_descs = ring->read_idx - ring->write_idx;
  561. + if (free_buf_descs < 2)
  562. + return NETDEV_TX_BUSY;
  563. +
  564. + /* Hardware removes OWN bit after sending data */
  565. + buf_desc = &ring->buf_desc[ring->write_idx];
  566. + if (unlikely(le32_to_cpu(buf_desc->ctl) & DMA_CTL_STATUS_OWN)) {
  567. + netif_stop_queue(netdev);
  568. + return NETDEV_TX_BUSY;
  569. + }
  570. +
  571. + slot = &ring->slots[ring->write_idx];
  572. + slot->skb = skb;
  573. + slot->len = skb->len;
  574. + slot->dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
  575. + if (unlikely(dma_mapping_error(dev, slot->dma_addr)))
  576. + return NETDEV_TX_BUSY;
  577. +
  578. + tmp = skb->len << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
  579. + tmp |= DMA_CTL_STATUS_OWN;
  580. + tmp |= DMA_CTL_STATUS_SOP;
  581. + tmp |= DMA_CTL_STATUS_EOP;
  582. + tmp |= DMA_CTL_STATUS_APPEND_CRC;
  583. + if (ring->write_idx + 1 == ring->length - 1)
  584. + tmp |= DMA_CTL_STATUS_WRAP;
  585. +
  586. + buf_desc->addr = cpu_to_le32((uint32_t)slot->dma_addr);
  587. + buf_desc->ctl = cpu_to_le32(tmp);
  588. +
  589. + bcm4908_enet_dma_tx_ring_enable(enet, &enet->tx_ring);
  590. +
  591. + if (++ring->write_idx == ring->length - 1)
  592. + ring->write_idx = 0;
  593. + enet->netdev->stats.tx_bytes += skb->len;
  594. + enet->netdev->stats.tx_packets++;
  595. +
  596. + return NETDEV_TX_OK;
  597. +}
  598. +
  599. +static int bcm4908_enet_poll(struct napi_struct *napi, int weight)
  600. +{
  601. + struct bcm4908_enet *enet = container_of(napi, struct bcm4908_enet, napi);
  602. + struct device *dev = enet->dev;
  603. + int handled = 0;
  604. +
  605. + while (handled < weight) {
  606. + struct bcm4908_enet_dma_ring_bd *buf_desc;
  607. + struct bcm4908_enet_dma_ring_slot slot;
  608. + u32 ctl;
  609. + int len;
  610. + int err;
  611. +
  612. + buf_desc = &enet->rx_ring.buf_desc[enet->rx_ring.read_idx];
  613. + ctl = le32_to_cpu(buf_desc->ctl);
  614. + if (ctl & DMA_CTL_STATUS_OWN)
  615. + break;
  616. +
  617. + slot = enet->rx_ring.slots[enet->rx_ring.read_idx];
  618. +
  619. + /* Provide new buffer before unpinning the old one */
  620. + err = bcm4908_enet_dma_alloc_rx_buf(enet, enet->rx_ring.read_idx);
  621. + if (err)
  622. + break;
  623. +
  624. + if (++enet->rx_ring.read_idx == enet->rx_ring.length)
  625. + enet->rx_ring.read_idx = 0;
  626. +
  627. + len = (ctl & DMA_CTL_LEN_DESC_BUFLENGTH) >> DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
  628. +
  629. + if (len < ENET_MTU_MIN ||
  630. + (ctl & (DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP)) != (DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP)) {
  631. + kfree(slot.skb);
  632. + enet->netdev->stats.rx_dropped++;
  633. + break;
  634. + }
  635. +
  636. + dma_unmap_single(dev, slot.dma_addr, slot.len, DMA_FROM_DEVICE);
  637. +
  638. + skb_put(slot.skb, len - ETH_FCS_LEN);
  639. + slot.skb->protocol = eth_type_trans(slot.skb, enet->netdev);
  640. + netif_receive_skb(slot.skb);
  641. +
  642. + enet->netdev->stats.rx_packets++;
  643. + enet->netdev->stats.rx_bytes += len;
  644. + }
  645. +
  646. + if (handled < weight) {
  647. + napi_complete_done(napi, handled);
  648. + bcm4908_enet_intrs_on(enet);
  649. + }
  650. +
  651. + return handled;
  652. +}
  653. +
  654. +static const struct net_device_ops bcm4908_enet_netdev_ops = {
  655. + .ndo_open = bcm4908_enet_open,
  656. + .ndo_stop = bcm4908_enet_stop,
  657. + .ndo_start_xmit = bcm4908_enet_start_xmit,
  658. + .ndo_set_mac_address = eth_mac_addr,
  659. +};
  660. +
  661. +static int bcm4908_enet_probe(struct platform_device *pdev)
  662. +{
  663. + struct device *dev = &pdev->dev;
  664. + struct net_device *netdev;
  665. + struct bcm4908_enet *enet;
  666. + int err;
  667. +
  668. + netdev = devm_alloc_etherdev(dev, sizeof(*enet));
  669. + if (!netdev)
  670. + return -ENOMEM;
  671. +
  672. + enet = netdev_priv(netdev);
  673. + enet->dev = dev;
  674. + enet->netdev = netdev;
  675. +
  676. + enet->base = devm_platform_ioremap_resource(pdev, 0);
  677. + if (IS_ERR(enet->base)) {
  678. + dev_err(dev, "Failed to map registers: %ld\n", PTR_ERR(enet->base));
  679. + return PTR_ERR(enet->base);
  680. + }
  681. +
  682. + netdev->irq = platform_get_irq(pdev, 0);
  683. + if (netdev->irq < 0)
  684. + return netdev->irq;
  685. +
  686. + dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
  687. +
  688. + err = bcm4908_enet_dma_alloc(enet);
  689. + if (err)
  690. + return err;
  691. +
  692. + SET_NETDEV_DEV(netdev, &pdev->dev);
  693. + eth_hw_addr_random(netdev);
  694. + netdev->netdev_ops = &bcm4908_enet_netdev_ops;
  695. + netdev->min_mtu = ETH_ZLEN;
  696. + netdev->mtu = ENET_MTU_MAX;
  697. + netdev->max_mtu = ENET_MTU_MAX;
  698. + netif_napi_add(netdev, &enet->napi, bcm4908_enet_poll, 64);
  699. +
  700. + err = register_netdev(netdev);
  701. + if (err) {
  702. + bcm4908_enet_dma_free(enet);
  703. + return err;
  704. + }
  705. +
  706. + platform_set_drvdata(pdev, enet);
  707. +
  708. + return 0;
  709. +}
  710. +
  711. +static int bcm4908_enet_remove(struct platform_device *pdev)
  712. +{
  713. + struct bcm4908_enet *enet = platform_get_drvdata(pdev);
  714. +
  715. + unregister_netdev(enet->netdev);
  716. + netif_napi_del(&enet->napi);
  717. + bcm4908_enet_dma_free(enet);
  718. +
  719. + return 0;
  720. +}
  721. +
  722. +static const struct of_device_id bcm4908_enet_of_match[] = {
  723. + { .compatible = "brcm,bcm4908-enet"},
  724. + {},
  725. +};
  726. +
  727. +static struct platform_driver bcm4908_enet_driver = {
  728. + .driver = {
  729. + .name = "bcm4908_enet",
  730. + .of_match_table = bcm4908_enet_of_match,
  731. + },
  732. + .probe = bcm4908_enet_probe,
  733. + .remove = bcm4908_enet_remove,
  734. +};
  735. +module_platform_driver(bcm4908_enet_driver);
  736. +
  737. +MODULE_LICENSE("GPL v2");
  738. +MODULE_DEVICE_TABLE(of, bcm4908_enet_of_match);
  739. --- /dev/null
  740. +++ b/drivers/net/ethernet/broadcom/bcm4908_enet.h
  741. @@ -0,0 +1,96 @@
  742. +/* SPDX-License-Identifier: GPL-2.0-only */
  743. +#ifndef __BCM4908_ENET_H
  744. +#define __BCM4908_ENET_H
  745. +
  746. +#define ENET_CONTROL 0x000
  747. +#define ENET_MIB_CTRL 0x004
  748. +#define ENET_MIB_CTRL_CLR_MIB 0x00000001
  749. +#define ENET_RX_ERR_MASK 0x008
  750. +#define ENET_MIB_MAX_PKT_SIZE 0x00C
  751. +#define ENET_MIB_MAX_PKT_SIZE_VAL 0x00003fff
  752. +#define ENET_DIAG_OUT 0x01c
  753. +#define ENET_ENABLE_DROP_PKT 0x020
  754. +#define ENET_IRQ_ENABLE 0x024
  755. +#define ENET_IRQ_ENABLE_OVFL 0x00000001
  756. +#define ENET_GMAC_STATUS 0x028
  757. +#define ENET_GMAC_STATUS_ETH_SPEED_MASK 0x00000003
  758. +#define ENET_GMAC_STATUS_ETH_SPEED_10 0x00000000
  759. +#define ENET_GMAC_STATUS_ETH_SPEED_100 0x00000001
  760. +#define ENET_GMAC_STATUS_ETH_SPEED_1000 0x00000002
  761. +#define ENET_GMAC_STATUS_HD 0x00000004
  762. +#define ENET_GMAC_STATUS_AUTO_CFG_EN 0x00000008
  763. +#define ENET_GMAC_STATUS_LINK_UP 0x00000010
  764. +#define ENET_IRQ_STATUS 0x02c
  765. +#define ENET_IRQ_STATUS_OVFL 0x00000001
  766. +#define ENET_OVERFLOW_COUNTER 0x030
  767. +#define ENET_FLUSH 0x034
  768. +#define ENET_FLUSH_RXFIFO_FLUSH 0x00000001
  769. +#define ENET_FLUSH_TXFIFO_FLUSH 0x00000002
  770. +#define ENET_RSV_SELECT 0x038
  771. +#define ENET_BP_FORCE 0x03c
  772. +#define ENET_BP_FORCE_FORCE 0x00000001
  773. +#define ENET_DMA_RX_OK_TO_SEND_COUNT 0x040
  774. +#define ENET_DMA_RX_OK_TO_SEND_COUNT_VAL 0x0000000f
  775. +#define ENET_TX_CRC_CTRL 0x044
  776. +#define ENET_MIB 0x200
  777. +#define ENET_UNIMAC 0x400
  778. +#define ENET_DMA 0x800
  779. +#define ENET_DMA_CONTROLLER_CFG 0x800
  780. +#define ENET_DMA_CTRL_CFG_MASTER_EN 0x00000001
  781. +#define ENET_DMA_CTRL_CFG_FLOWC_CH1_EN 0x00000002
  782. +#define ENET_DMA_CTRL_CFG_FLOWC_CH3_EN 0x00000004
  783. +#define ENET_DMA_FLOWCTL_CH1_THRESH_LO 0x804
  784. +#define ENET_DMA_FLOWCTL_CH1_THRESH_HI 0x808
  785. +#define ENET_DMA_FLOWCTL_CH1_ALLOC 0x80c
  786. +#define ENET_DMA_FLOWCTL_CH1_ALLOC_FORCE 0x80000000
  787. +#define ENET_DMA_FLOWCTL_CH3_THRESH_LO 0x810
  788. +#define ENET_DMA_FLOWCTL_CH3_THRESH_HI 0x814
  789. +#define ENET_DMA_FLOWCTL_CH3_ALLOC 0x818
  790. +#define ENET_DMA_FLOWCTL_CH5_THRESH_LO 0x81C
  791. +#define ENET_DMA_FLOWCTL_CH5_THRESH_HI 0x820
  792. +#define ENET_DMA_FLOWCTL_CH5_ALLOC 0x824
  793. +#define ENET_DMA_FLOWCTL_CH7_THRESH_LO 0x828
  794. +#define ENET_DMA_FLOWCTL_CH7_THRESH_HI 0x82C
  795. +#define ENET_DMA_FLOWCTL_CH7_ALLOC 0x830
  796. +#define ENET_DMA_CTRL_CHANNEL_RESET 0x834
  797. +#define ENET_DMA_CTRL_CHANNEL_DEBUG 0x838
  798. +#define ENET_DMA_CTRL_GLOBAL_INTERRUPT_STATUS 0x840
  799. +#define ENET_DMA_CTRL_GLOBAL_INTERRUPT_MASK 0x844
  800. +#define ENET_DMA_CH0_CFG 0xa00 /* RX */
  801. +#define ENET_DMA_CH1_CFG 0xa10 /* TX */
  802. +#define ENET_DMA_CH0_STATE_RAM 0xc00 /* RX */
  803. +#define ENET_DMA_CH1_STATE_RAM 0xc10 /* TX */
  804. +
  805. +#define ENET_DMA_CH_CFG 0x00 /* assorted configuration */
  806. +#define ENET_DMA_CH_CFG_ENABLE 0x00000001 /* set to enable channel */
  807. +#define ENET_DMA_CH_CFG_PKT_HALT 0x00000002 /* idle after an EOP flag is detected */
  808. +#define ENET_DMA_CH_CFG_BURST_HALT 0x00000004 /* idle after finish current memory burst */
  809. +#define ENET_DMA_CH_CFG_INT_STAT 0x04 /* interrupts control and status */
  810. +#define ENET_DMA_CH_CFG_INT_MASK 0x08 /* interrupts mask */
  811. +#define ENET_DMA_CH_CFG_INT_BUFF_DONE 0x00000001 /* buffer done */
  812. +#define ENET_DMA_CH_CFG_INT_DONE 0x00000002 /* packet xfer complete */
  813. +#define ENET_DMA_CH_CFG_INT_NO_DESC 0x00000004 /* no valid descriptors */
  814. +#define ENET_DMA_CH_CFG_INT_RX_ERROR 0x00000008 /* rxdma detect client protocol error */
  815. +#define ENET_DMA_CH_CFG_MAX_BURST 0x0c /* max burst length permitted */
  816. +#define ENET_DMA_CH_CFG_MAX_BURST_DESCSIZE_SEL 0x00040000 /* DMA Descriptor Size Selection */
  817. +#define ENET_DMA_CH_CFG_SIZE 0x10
  818. +
  819. +#define ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR 0x00 /* descriptor ring start address */
  820. +#define ENET_DMA_CH_STATE_RAM_STATE_DATA 0x04 /* state/bytes done/ring offset */
  821. +#define ENET_DMA_CH_STATE_RAM_DESC_LEN_STATUS 0x08 /* buffer descriptor status and len */
  822. +#define ENET_DMA_CH_STATE_RAM_DESC_BASE_BUFPTR 0x0c /* buffer descrpitor current processing */
  823. +#define ENET_DMA_CH_STATE_RAM_SIZE 0x10
  824. +
  825. +#define DMA_CTL_STATUS_APPEND_CRC 0x00000100
  826. +#define DMA_CTL_STATUS_APPEND_BRCM_TAG 0x00000200
  827. +#define DMA_CTL_STATUS_PRIO 0x00000C00 /* Prio for Tx */
  828. +#define DMA_CTL_STATUS_WRAP 0x00001000 /* */
  829. +#define DMA_CTL_STATUS_SOP 0x00002000 /* first buffer in packet */
  830. +#define DMA_CTL_STATUS_EOP 0x00004000 /* last buffer in packet */
  831. +#define DMA_CTL_STATUS_OWN 0x00008000 /* cleared by DMA, set by SW */
  832. +#define DMA_CTL_LEN_DESC_BUFLENGTH 0x0fff0000
  833. +#define DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT 16
  834. +#define DMA_CTL_LEN_DESC_MULTICAST 0x40000000
  835. +#define DMA_CTL_LEN_DESC_USEFPM 0x80000000
  836. +
  837. +#endif