Просмотр исходного кода

kernel/siflower: Restore kernel files for v6.6

This is an automatically generated commit which aids following Kernel patch
history, as git will see the move and copy as a rename thus defeating the
purpose.

For the original discussion see:
https://lists.openwrt.org/pipermail/openwrt-devel/2023-October/041673.html

Signed-off-by: Zhu Yujie <[email protected]>
Link: https://github.com/openwrt/openwrt/pull/20555
Signed-off-by: Christian Marangi <[email protected]>
Zhu Yujie 5 месяцев назад
Родитель
Commit
dc9436728a
61 измененных файлов с 13618 добавлено и 0 удалено
  1. 9 0
      target/linux/siflower/files-6.6/arch/mips/include/asm/mach-siflower/kmalloc.h
  2. 35 0
      target/linux/siflower/files-6.6/drivers/clk/siflower/Kconfig
  3. 3 0
      target/linux/siflower/files-6.6/drivers/clk/siflower/Makefile
  4. 170 0
      target/linux/siflower/files-6.6/drivers/clk/siflower/clk-sf19a2890-periph.c
  5. 416 0
      target/linux/siflower/files-6.6/drivers/clk/siflower/clk-sf19a2890.c
  6. 808 0
      target/linux/siflower/files-6.6/drivers/clk/siflower/clk-sf21-topcrm.c
  7. 346 0
      target/linux/siflower/files-6.6/drivers/gpio/gpio-siflower.c
  8. 53 0
      target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/Kconfig
  9. 11 0
      target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/Makefile
  10. 152 0
      target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/dma.h
  11. 59 0
      target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/dpns.h
  12. 782 0
      target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/eth.h
  13. 76 0
      target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/sf_dpns.c
  14. 427 0
      target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/sf_dpns_debugfs.c
  15. 50 0
      target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/sf_dpns_se.c
  16. 77 0
      target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/sf_dpns_se.h
  17. 247 0
      target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/sf_dpns_tmu.c
  18. 315 0
      target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/sf_dpns_tmu.h
  19. 1618 0
      target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/sfxgmac-dma.c
  20. 31 0
      target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/sfxgmac-ext.h
  21. 1324 0
      target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/sfxgmac.c
  22. 621 0
      target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/sfxpcs.c
  23. 251 0
      target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/sfxpcs.h
  24. 193 0
      target/linux/siflower/files-6.6/drivers/net/ethernet/stmicro/stmmac/dwmac-sf19a2890.c
  25. 877 0
      target/linux/siflower/files-6.6/drivers/net/phy/siflower.c
  26. 361 0
      target/linux/siflower/files-6.6/drivers/pci/controller/dwc/pcie-sf21.c
  27. 23 0
      target/linux/siflower/files-6.6/drivers/phy/siflower/Kconfig
  28. 3 0
      target/linux/siflower/files-6.6/drivers/phy/siflower/Makefile
  29. 145 0
      target/linux/siflower/files-6.6/drivers/phy/siflower/phy-sf19a2890-usb.c
  30. 335 0
      target/linux/siflower/files-6.6/drivers/phy/siflower/phy-sf21-pcie.c
  31. 115 0
      target/linux/siflower/files-6.6/drivers/phy/siflower/phy-sf21-usb.c
  32. 515 0
      target/linux/siflower/files-6.6/drivers/pinctrl/pinctrl-sf19a2890.c
  33. 131 0
      target/linux/siflower/files-6.6/drivers/reset/reset-sf19a2890-periph.c
  34. 142 0
      target/linux/siflower/files-6.6/drivers/reset/reset-sf21.c
  35. 531 0
      target/linux/siflower/files-6.6/drivers/spi/spi-sf21-qspi.c
  36. 27 0
      target/linux/siflower/files-6.6/include/dt-bindings/clock/siflower,sf19a2890-clk.h
  37. 43 0
      target/linux/siflower/files-6.6/include/dt-bindings/clock/siflower,sf21-topcrm.h
  38. 65 0
      target/linux/siflower/files-6.6/include/dt-bindings/pinctrl/siflower,sf21-iomux.h
  39. 29 0
      target/linux/siflower/files-6.6/include/dt-bindings/reset/siflower,sf21-reset.h
  40. 180 0
      target/linux/siflower/patches-6.6/001-net-phy-c45-add-genphy_c45_pma_read_ext_abilities-fu.patch
  41. 49 0
      target/linux/siflower/patches-6.6/002-net-phy-Optimize-phy-speed-mask-to-be-compatible-to-.patch
  42. 753 0
      target/linux/siflower/patches-6.6/003-net-phy-Add-driver-for-Motorcomm-yt8821-2.5G-etherne.patch
  43. 58 0
      target/linux/siflower/patches-6.6/004-mips-add-support-for-Siflower-SF19A2890.patch
  44. 30 0
      target/linux/siflower/patches-6.6/005-clk-add-drivers-for-siflower-socs.patch
  45. 37 0
      target/linux/siflower/patches-6.6/006-reset-add-support-for-sf19a2890.patch
  46. 42 0
      target/linux/siflower/patches-6.6/007-gpio-add-support-for-siflower-socs.patch
  47. 38 0
      target/linux/siflower/patches-6.6/008-pinctrl-add-driver-for-siflower-sf19a2890.patch
  48. 37 0
      target/linux/siflower/patches-6.6/009-stmmac-add-support-for-sf19a2890.patch
  49. 30 0
      target/linux/siflower/patches-6.6/010-phy-add-support-for-Siflower-USB-PHYs.patch
  50. 35 0
      target/linux/siflower/patches-6.6/011-usb-dwc2-add-support-for-Siflower-SF19A2890.patch
  51. 66 0
      target/linux/siflower/patches-6.6/012-usb-dwc2-handle-OTG-interrupt-regardless-of-GINTSTS.patch
  52. 31 0
      target/linux/siflower/patches-6.6/013-riscv-add-Siflower-RISC-V-SoC-family-Kconfig-support.patch
  53. 46 0
      target/linux/siflower/patches-6.6/014-riscv-add-an-option-for-efficient-unaligned-access.patch
  54. 33 0
      target/linux/siflower/patches-6.6/015-reset-add-support-for-sf21a6826-sf21h8898.patch
  55. 41 0
      target/linux/siflower/patches-6.6/016-spi-spi-mem-allow-gpio-cs-in-spi_mem_exec_op.patch
  56. 46 0
      target/linux/siflower/patches-6.6/017-spi-add-support-for-sf21-qspi.patch
  57. 41 0
      target/linux/siflower/patches-6.6/018-pci-dw-pcie-add-support-for-sf21-pcie.patch
  58. 30 0
      target/linux/siflower/patches-6.6/019-net-phy-add-support-for-Siflower-SF23P1211-SF23P1240.patch
  59. 27 0
      target/linux/siflower/patches-6.6/020-net-ethernet-add-support-for-Siflower-DPNS.patch
  60. 266 0
      target/linux/siflower/sf19a2890/config-6.6
  61. 286 0
      target/linux/siflower/sf21/config-6.6

+ 9 - 0
target/linux/siflower/files-6.6/arch/mips/include/asm/mach-siflower/kmalloc.h

@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MACH_SIFLOWER_KMALLOC_H
+#define __ASM_MACH_SIFLOWER_KMALLOC_H
+
+#ifdef CONFIG_DMA_NONCOHERENT
+/* Minimum kmalloc alignment for DMA buffers; presumably the L1 cache
+ * line size of these SoCs — TODO confirm against the CPU manual. */
+#define ARCH_DMA_MINALIGN	32
+#endif
+
+#endif /* __ASM_MACH_SIFLOWER_KMALLOC_H */

+ 35 - 0
target/linux/siflower/files-6.6/drivers/clk/siflower/Kconfig

@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: GPL-2.0
+# Clock drivers for Siflower SoCs: SF19A2890 (MIPS) and SF21 (RISC-V) parts.
+
+menuconfig CLK_SIFLOWER
+	bool "Siflower SoC driver support"
+	depends on MIPS || RISCV || COMPILE_TEST
+	help
+	  SoC drivers for Siflower Linux-capable SoCs.
+
+if CLK_SIFLOWER
+
+config CLK_SF19A2890
+	bool "Clock driver for Siflower CLK_SF19A2890"
+	depends on MIPS || COMPILE_TEST
+	help
+	  Supports the Top Clock Module found in SF19A2890. If this
+	  kernel is meant to run on a Siflower SF19A2890 SoC,
+	  enable this driver.
+
+config CLK_SF19A2890_PERIPH
+	bool "Clock driver for Siflower SF19A2890 peripheral clock gates"
+	depends on MIPS || RISCV || COMPILE_TEST
+	help
+	  Supports the clock gates for various peripherals in SF19A2890.
+	  If this kernel is meant to run on a Siflower SF19A2890 SoC,
+	  enable this driver.
+
+config CLK_SF21_TOPCRM
+	bool "Clock driver for Siflower SF21A6826/SF21H8898 Top Clock & Reset Module"
+	depends on RISCV || COMPILE_TEST
+	help
+	  Supports the Top Clock & Reset Module IP block found in SF21A6826.
+	  If this kernel is meant to run on a Siflower SF21A6826 SoC,
+	  enable this driver.
+
+endif

+ 3 - 0
target/linux/siflower/files-6.6/drivers/clk/siflower/Makefile

@@ -0,0 +1,3 @@
+# Siflower clock drivers: one object per Kconfig symbol above.
+obj-$(CONFIG_CLK_SF19A2890) += clk-sf19a2890.o
+obj-$(CONFIG_CLK_SF19A2890_PERIPH) += clk-sf19a2890-periph.o
+obj-$(CONFIG_CLK_SF21_TOPCRM) += clk-sf21-topcrm.o

+ 170 - 0
target/linux/siflower/files-6.6/drivers/clk/siflower/clk-sf19a2890-periph.c

@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/of_clk.h>
+#include <linux/of.h>
+#include <linux/spinlock.h>
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#define REG_GATE 0x0
+/*
+ * A shared 'Bus Output Enable' signal for all APB peripherals. The peripheral
+ * only responds to bus requests if its dedicated clock is enabled and this
+ * shared BOE is set.
+ */
+#define REG_BOE 0x8
+#define BOE_EN GENMASK(1, 0)
+
+/* One gate clock: controls bit 'idx' of the REG_GATE register at 'base'. */
+struct sf19a2890_periphclk {
+	void __iomem *base;
+	struct clk_hw hw;
+	u32 idx;
+};
+
+/* Per-controller state; clk_data has a flexible array and must stay last. */
+struct sf19a2890_periphclk_priv {
+	struct sf19a2890_periphclk *gates;
+	struct clk_hw_onecell_data clk_data;
+};
+
+/* Map a clk_hw back to its containing gate descriptor. */
+static inline struct sf19a2890_periphclk *hw_to_periphclk(struct clk_hw *hw)
+{
+	return container_of(hw, struct sf19a2890_periphclk, hw);
+}
+
+/* Set this gate's bit and assert the shared Bus Output Enable. */
+static int sf19a2890_periphclk_enable(struct clk_hw *hw)
+{
+	struct sf19a2890_periphclk *priv = hw_to_periphclk(hw);
+	u32 reg = readl(priv->base + REG_GATE);
+
+	writel(reg | BIT(priv->idx), priv->base + REG_GATE);
+	writel(BOE_EN, priv->base + REG_BOE);
+	return 0;
+}
+
+/* Clear this gate's bit; drop the shared BOE only once all gates are off. */
+static void sf19a2890_periphclk_disable(struct clk_hw *hw)
+{
+	struct sf19a2890_periphclk *priv = hw_to_periphclk(hw);
+	u32 reg = readl(priv->base + REG_GATE);
+
+	reg &= ~BIT(priv->idx);
+	writel(reg, priv->base + REG_GATE);
+	if (reg == 0)
+		writel(0, priv->base + REG_BOE);
+}
+
+static int sf19a2890_periphclk_is_enabled(struct clk_hw *hw)
+{
+	struct sf19a2890_periphclk *priv = hw_to_periphclk(hw);
+
+	return !!(readl(priv->base + REG_GATE) & BIT(priv->idx));
+}
+
+/* NOTE(review): enable/disable do unlocked read-modify-write of REG_GATE;
+ * presumably serialized by the clk framework's enable lock — confirm. */
+static const struct clk_ops sf19a28_periphclk_ops = {
+	.enable = sf19a2890_periphclk_enable,
+	.disable = sf19a2890_periphclk_disable,
+	.is_enabled = sf19a2890_periphclk_is_enabled,
+};
+
+/*
+ * Register one gate clock per entry of "clocks"/"clock-output-names".
+ * "siflower,valid-gates" is a bitmask of usable REG_GATE bits (default:
+ * the low num_clks bits); "siflower,critical-gates" marks gates that
+ * must never be disabled.
+ */
+static void __init sf19a2890_periphclk_init(struct device_node *node)
+{
+	struct clk_init_data init = {};
+	struct sf19a2890_periphclk_priv *priv;
+	u32 reg, valid_gates, critical_gates;
+	int num_clks, i, ret, idx;
+	const char *name, *parent;
+	void __iomem *base;
+
+	num_clks = of_count_phandle_with_args(node, "clocks", "#clock-cells");
+	if (num_clks < 1 || num_clks > 32)
+		return;
+
+	ret = of_property_read_u32(node, "siflower,valid-gates", &valid_gates);
+	if (ret < 0)
+		valid_gates = BIT(num_clks) - 1;
+
+	ret = of_property_read_u32(node, "siflower,critical-gates", &critical_gates);
+	if (ret < 0)
+		critical_gates = 0;
+
+	priv = kzalloc(struct_size(priv, clk_data.hws, num_clks), GFP_KERNEL);
+	if (!priv)
+		return;
+
+	priv->clk_data.num = num_clks;
+
+	priv->gates = kcalloc(num_clks, sizeof(struct sf19a2890_periphclk),
+			      GFP_KERNEL);
+	if (!priv->gates)
+		goto err1;
+
+	base = of_iomap(node, 0);
+	if (!base) {
+		pr_err("failed to map resources.\n");
+		goto err2;
+	}
+
+	/* clear unused higher bits for BOE check in disable call. */
+	reg = readl(base + REG_GATE);
+	reg &= valid_gates;
+	writel(reg, base + REG_GATE);
+
+	for (i = 0, idx = 0; i < num_clks && idx < 32; i++, idx++) {
+		ret = of_property_read_string_index(node, "clock-output-names",
+						    i, &name);
+		if (ret != 0) {
+			pr_err("failed to read output name for the %dth gate.\n",
+			       i);
+			goto err3;
+		}
+		parent = of_clk_get_parent_name(node, i);
+		if (!parent) {
+			pr_err("failed to get parent clock for the %dth gate.\n",
+			       i);
+			goto err3;
+		}
+
+		/* Skip register bits absent from the valid-gates mask. */
+		while (!(valid_gates & BIT(idx))) {
+			idx++;
+			if (idx >= 32) {
+				pr_err("too few valid gates.");
+				goto err3;
+			}
+		}
+
+		priv->gates[i].base = base;
+		priv->gates[i].idx = idx;
+		init.name = name;
+		init.ops = &sf19a28_periphclk_ops;
+		init.parent_names = &parent;
+		init.num_parents = 1;
+		init.flags = (critical_gates & BIT(idx)) ? CLK_IS_CRITICAL : 0;
+		priv->gates[i].hw.init = &init;
+
+		ret = clk_hw_register(NULL, &priv->gates[i].hw);
+		if (ret) {
+			pr_err("failed to register the %dth gate: %d.\n", i,
+			       ret);
+			goto err3;
+		}
+		priv->clk_data.hws[i] = &priv->gates[i].hw;
+	}
+
+	ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
+				     &priv->clk_data);
+	if (ret) {
+		pr_err("failed to add hw provider.\n");
+		goto err3;
+	}
+	return;
+err3:
+	/*
+	 * These hws were registered with clk_hw_register(), so release them
+	 * with clk_hw_unregister(): clk_hw_unregister_gate() would kfree()
+	 * a struct clk_gate container these hws are not embedded in.
+	 */
+	for (i--; i >= 0; i--)
+		clk_hw_unregister(priv->clk_data.hws[i]);
+	iounmap(base);
+err2:
+	kfree(priv->gates);
+err1:
+	kfree(priv);
+}
+
+CLK_OF_DECLARE(sf19a2890_periphclk, "siflower,sf19a2890-periph-clk",
+	       sf19a2890_periphclk_init);

+ 416 - 0
target/linux/siflower/files-6.6/drivers/clk/siflower/clk-sf19a2890.c

@@ -0,0 +1,416 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/compiler.h>
+#include <linux/clk-provider.h>
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/bug.h>
+#include <dt-bindings/clock/siflower,sf19a2890-clk.h>
+
+#define REG_PLL_BASE		0x0
+#define REG_PLL_PD		0x0
+#define  PLL_PD			BIT(0)
+#define  PLL_PD_VCO		BIT(1)
+#define  PLL_PD_POSTDIV		BIT(2)
+#define  PLL_PD_4PHASE		BIT(3)
+#define  PLL_PD_DAC		BIT(4)
+#define  PLL_PD_DSM		BIT(5)
+
+/*
+ * PLL_PARAM is a 48-bit value put into 6 registers, 8-bit per register:
+ * REFDIV = PLL_PARAM[47:42]
+ * POSTDIV2 = PLL_PARAM[41:39]
+ * POSTDIV1 = PLL_PARAM[38:36]
+ * FRAC = PLL_PARAM[35:12]
+ * FBDIV = PLL_PARAM[11:0]
+ */
+
+#define REG_PLL_PARAM(_x)	(0x4 + (_x) * 4)
+#define  PLL_REFDIV_HI		47
+#define  PLL_REFDIV_LO		42
+#define  PLL_POSTDIV2_HI	41
+#define  PLL_POSTDIV2_LO	39
+#define  PLL_POSTDIV1_HI	38
+#define  PLL_POSTDIV1_LO	36
+#define  PLL_FRAC_HI		35
+#define  PLL_FRAC_LO		12
+#define  PLL_FRAC_BITS		(PLL_FRAC_HI - PLL_FRAC_LO + 1)
+#define  PLL_FBDIV_HI		11
+#define  PLL_FBDIV_LO		0
+
+#define REG_PLL_CFG 0x1c
+#define  PLL_CFG_BYPASS		BIT(0)
+#define  PLL_CFG_SRC		GENMASK(2, 1)
+#define  PLL_CFG_OCLK_SEL	BIT(3)
+#define  PLL_CFG_OCLK_GATE	BIT(4)
+#define  PLL_CFG_LOAD_DIVS	BIT(5)
+
+#define REG_PLL_LOCK 0x20
+
+/*
+ * Read-only register indicating the value of the hardware clock source
+ * override pin. When the first bit of this register is set, PLL clock
+ * source is forced to the 40M oscillator regardless of PLL_CFG_SRC
+ * value.
+ */
+#define REG_PLL_SRC_OVERRIDE 0x24
+
+/* State shared by every clock in this driver: MMIO base + register lock. */
+struct sf_clk_common {
+	void __iomem *base;
+	spinlock_t *lock;
+	struct clk_hw hw;
+};
+
+/* A PLL or mux+divider instance at register block 'offset' from base. */
+struct sf19a2890_clk {
+	struct sf_clk_common common;
+	ulong offset;
+};
+
+#define SF_CLK_COMMON(_name, _parents, _op, _flags)                           \
+	{                                                                     \
+		.hw.init = CLK_HW_INIT_PARENTS(_name, _parents, _op, _flags), \
+	}
+
+static inline struct sf_clk_common *hw_to_sf_clk_common(struct clk_hw *hw)
+{
+	return container_of(hw, struct sf_clk_common, hw);
+}
+
+static inline struct sf19a2890_clk *cmn_to_clk(struct sf_clk_common *cmn_priv)
+{
+	return container_of(cmn_priv, struct sf19a2890_clk, common);
+}
+
+/* MMIO accessors; 'reg' is a byte offset from the controller base. */
+static inline u32 sf_readl(struct sf_clk_common *priv, u32 reg)
+{
+	return readl(priv->base + reg);
+}
+
+static inline void sf_writel(struct sf_clk_common *priv, u32 reg, u32 val)
+{
+	return writel(val, priv->base + reg);
+}
+
+/* Read-modify-write: clear 'clr' bits, then set 'set' bits. */
+static inline void sf_rmw(struct sf_clk_common *priv, u32 reg, u32 clr, u32 set)
+{
+	u32 val = sf_readl(priv, reg);
+
+	val &= ~clr;
+	val |= set;
+	sf_writel(priv, reg, val);
+}
+
+/*
+ * Extract bits [hi:lo] of the 48-bit PLL_PARAM value, which is stored
+ * as six 8-bit chunks in consecutive 32-bit registers (PARAM(0) holds
+ * bits 7:0). Assumes each PARAM register reads back only 8 valid bits
+ * — TODO confirm; stray upper bits would corrupt the middle chunks.
+ */
+static u32 sf_pll_param_get(struct sf19a2890_clk *priv, u32 hi, u32 lo)
+{
+	struct sf_clk_common *cmn = &priv->common;
+	u32 ret = 0;
+	int reg_hi = hi / 8;		/* register index holding bit 'hi' */
+	int reg_lo = lo / 8;		/* register index holding bit 'lo' */
+	u32 reg_hi_pos = hi % 8;	/* bit position inside that register */
+	u32 reg_lo_pos = lo % 8;
+	int i;
+
+	if (reg_hi == reg_lo) {
+		/* Field lives entirely within one 8-bit chunk. */
+		u32 mask = (BIT(reg_hi_pos - reg_lo_pos + 1)) - 1;
+		u32 reg_val =
+			sf_readl(cmn, priv->offset + REG_PLL_PARAM(reg_hi));
+		return (reg_val >> reg_lo_pos) & mask;
+	}
+
+	/* Top partial chunk, then whole chunks, then the bottom partial. */
+	ret = sf_readl(cmn, priv->offset + REG_PLL_PARAM(reg_hi)) &
+	      (BIT(reg_hi_pos + 1) - 1);
+	for (i = reg_hi - 1; i > reg_lo; i--)
+		ret = (ret << 8) |
+		      sf_readl(cmn, priv->offset + REG_PLL_PARAM(i));
+	ret = (ret << (8 - reg_lo_pos)) |
+	      (sf_readl(cmn, priv->offset + REG_PLL_PARAM(reg_lo)) >>
+	       reg_lo_pos);
+
+	return ret;
+}
+
+/*
+ * Rate = (parent / REFDIV) * FBDIV [+ fractional part when the DSM is
+ * powered up] / POSTDIV1 / POSTDIV2.
+ */
+static unsigned long sf19a28_pll_recalc_rate(struct clk_hw *hw,
+					     unsigned long parent_rate)
+{
+	struct sf_clk_common *cmn_priv = hw_to_sf_clk_common(hw);
+	struct sf19a2890_clk *priv = cmn_to_clk(cmn_priv);
+	u32 refdiv = sf_pll_param_get(priv, PLL_REFDIV_HI, PLL_REFDIV_LO);
+	u32 fbdiv = sf_pll_param_get(priv, PLL_FBDIV_HI, PLL_FBDIV_LO);
+	u32 postdiv1 = sf_pll_param_get(priv, PLL_POSTDIV1_HI, PLL_POSTDIV1_LO);
+	u32 postdiv2 = sf_pll_param_get(priv, PLL_POSTDIV2_HI, PLL_POSTDIV2_LO);
+	/*
+	 * Fix: read this PLL's power-down register at its register block.
+	 * The previous code passed the PLL_PD bit mask (1) as the offset,
+	 * reading a meaningless byte offset and ignoring priv->offset.
+	 */
+	u32 pll_pd = sf_readl(cmn_priv, priv->offset + REG_PLL_PD);
+	u32 ref = parent_rate / refdiv;
+	u32 rate = ref * fbdiv;
+	u32 frac;
+	u64 frac_rate;
+
+	/* PLL_PD_DSM clear = delta-sigma modulator on = fractional mode. */
+	if (!(pll_pd & PLL_PD_DSM)) {
+		frac = sf_pll_param_get(priv, PLL_FRAC_HI, PLL_FRAC_LO);
+		frac_rate = ((u64)rate * frac) >> PLL_FRAC_BITS;
+		rate += frac_rate;
+	}
+	rate = rate / postdiv1 / postdiv2;
+	return rate;
+}
+
+/*
+ * Parent 0 is "osc12m", parent 1 is "osc40m". The hardware override pin
+ * (REG_PLL_SRC_OVERRIDE) forces the 40 MHz oscillator regardless of the
+ * PLL_CFG_SRC field.
+ */
+static u8 sf19a28_pll_get_parent(struct clk_hw *hw)
+{
+	struct sf_clk_common *cmn_priv = hw_to_sf_clk_common(hw);
+	struct sf19a2890_clk *priv = cmn_to_clk(cmn_priv);
+	u32 cfg;
+
+	if (sf_readl(cmn_priv, priv->offset + REG_PLL_SRC_OVERRIDE))
+		return 1;
+	cfg = sf_readl(cmn_priv, priv->offset + REG_PLL_CFG);
+	return (FIELD_GET(PLL_CFG_SRC, cfg) == 1);
+}
+
+/* Read-only ops: this driver observes the PLLs but never reprograms them. */
+static const struct clk_ops sf19a28_pll_ops = {
+	.recalc_rate = sf19a28_pll_recalc_rate,
+	.get_parent = sf19a28_pll_get_parent,
+};
+
+static const char * const clk_pll_parents[] = { "osc12m", "osc40m" };
+
+/* Declare one PLL; _offset is its register block offset from REG_PLL_BASE. */
+#define SF19A28_PLL(_name, _offset, _flags)				\
+	struct sf19a2890_clk _name = {					\
+		.common = SF_CLK_COMMON(#_name, clk_pll_parents,	\
+					&sf19a28_pll_ops, _flags),	\
+		.offset = REG_PLL_BASE + _offset,			\
+	}
+
+static SF19A28_PLL(pll_cpu, 0x0, 0);
+static SF19A28_PLL(pll_ddr, 0x40, 0);
+static SF19A28_PLL(pll_cmn, 0x80, 0);
+
+#define REG_MUXDIV_BASE		0x400
+#define REG_MUXDIV_CFG		0x0
+#define  MUXDIV_USE_NCO		BIT(3)
+#define  MUXDIV_SRC_SEL		GENMASK(2, 0)
+#define REG_MUXDIV_RATIO	0x4
+#define  MUXDIV_RATIO_MAX	0xff
+#define REG_MUXDIV_NCO_V	0x8
+#define REG_MUXDIV_EN		0xc
+#define REG_MUXDIV_XN_DIV_RATIO	0x10
+#define  MUXDIV_XN_DIV_MAX	3
+
+/* Output rate = parent / (RATIO + 1). */
+static unsigned long sf19a28_muxdiv_recalc_rate(struct clk_hw *hw,
+						unsigned long parent_rate)
+{
+	struct sf_clk_common *cmn_priv = hw_to_sf_clk_common(hw);
+	struct sf19a2890_clk *priv = cmn_to_clk(cmn_priv);
+	u32 div;
+
+	div = sf_readl(cmn_priv, priv->offset + REG_MUXDIV_RATIO) + 1;
+	return parent_rate / div;
+}
+
+/*
+ * Pick the divider (1..MUXDIV_RATIO_MAX + 1) whose result is closest to
+ * the requested rate, without reparenting.
+ * Fix: marked static — it is referenced only via sf19a28_muxdiv_ops in
+ * this file, and every sibling op is static.
+ */
+static int sf19a28_muxdiv_determine_rate(struct clk_hw *hw,
+					 struct clk_rate_request *req)
+{
+	unsigned int div;
+
+	div = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate);
+	if (!div)
+		div = 1;
+	else if (div > MUXDIV_RATIO_MAX + 1)
+		div = MUXDIV_RATIO_MAX + 1;
+
+	req->rate = req->best_parent_rate / div;
+	return 0;
+}
+
+/* Program RATIO = div - 1 for the divider chosen by determine_rate. */
+static int sf19a28_muxdiv_set_rate(struct clk_hw *hw, unsigned long rate,
+				   unsigned long parent_rate)
+{
+	struct sf_clk_common *cmn_priv = hw_to_sf_clk_common(hw);
+	struct sf19a2890_clk *priv = cmn_to_clk(cmn_priv);
+	unsigned int div;
+
+	/* Clamp to the 8-bit RATIO field (divider 1..256). */
+	div = DIV_ROUND_CLOSEST(parent_rate, rate);
+	if (div < 1)
+		div = 1;
+	else if (div > MUXDIV_RATIO_MAX + 1)
+		div = MUXDIV_RATIO_MAX + 1;
+	div -= 1;
+
+	sf_writel(cmn_priv, priv->offset + REG_MUXDIV_RATIO, div);
+
+	return 0;
+}
+
+/* The EN register is a simple 0/1 gate for the muxdiv output. */
+static int sf19a28_muxdiv_enable(struct clk_hw *hw)
+{
+	struct sf_clk_common *cmn_priv = hw_to_sf_clk_common(hw);
+	struct sf19a2890_clk *priv = cmn_to_clk(cmn_priv);
+
+	sf_writel(cmn_priv, priv->offset + REG_MUXDIV_EN, 1);
+	return 0;
+}
+
+static void sf19a28_muxdiv_disable(struct clk_hw *hw)
+{
+	struct sf_clk_common *cmn_priv = hw_to_sf_clk_common(hw);
+	struct sf19a2890_clk *priv = cmn_to_clk(cmn_priv);
+
+	sf_writel(cmn_priv, priv->offset + REG_MUXDIV_EN, 0);
+}
+
+static int sf19a28_muxdiv_is_enabled(struct clk_hw *hw)
+{
+	struct sf_clk_common *cmn_priv = hw_to_sf_clk_common(hw);
+	struct sf19a2890_clk *priv = cmn_to_clk(cmn_priv);
+
+	return !!sf_readl(cmn_priv, priv->offset + REG_MUXDIV_EN);
+}
+
+/*
+ * Map the 3-bit SRC_SEL field to an index into clk_muxdiv_parents:
+ * 0..2 select the PLLs, 4 selects osc12m; any other value (set_parent
+ * writes 6) is reported as osc40m.
+ */
+static u8 sf19a28_muxdiv_get_parent(struct clk_hw *hw)
+{
+	struct sf_clk_common *cmn_priv = hw_to_sf_clk_common(hw);
+	struct sf19a2890_clk *priv = cmn_to_clk(cmn_priv);
+	u32 cfg = sf_readl(cmn_priv, priv->offset + REG_MUXDIV_CFG);
+	u32 src = FIELD_GET(MUXDIV_SRC_SEL, cfg);
+
+	if (src <= 2)
+		return src;
+	if (src == 4)
+		return 3;
+	return 4;
+}
+
+/*
+ * Inverse of get_parent: index 3 (osc12m) -> SRC_SEL 4, index 4
+ * (osc40m) -> SRC_SEL 6.
+ * NOTE(review): this writes the whole CFG register, clearing
+ * MUXDIV_USE_NCO as a side effect — confirm that is intended.
+ */
+static int sf19a28_muxdiv_set_parent(struct clk_hw *hw, u8 index)
+{
+	struct sf_clk_common *cmn_priv = hw_to_sf_clk_common(hw);
+	struct sf19a2890_clk *priv = cmn_to_clk(cmn_priv);
+	u32 src;
+
+	if (index <= 2)
+		src = index;
+	else if (index == 3)
+		src = 4;
+	else
+		src = 6;
+	sf_writel(cmn_priv, priv->offset + REG_MUXDIV_CFG, src);
+	return 0;
+}
+
+static const char * const clk_muxdiv_parents[] = { "pll_cpu", "pll_ddr", "pll_cmn",
+					    "osc12m", "osc40m" };
+
+static const struct clk_ops sf19a28_muxdiv_ops = {
+	.recalc_rate = sf19a28_muxdiv_recalc_rate,
+	.determine_rate = sf19a28_muxdiv_determine_rate,
+	.set_rate = sf19a28_muxdiv_set_rate,
+	.enable = sf19a28_muxdiv_enable,
+	.disable = sf19a28_muxdiv_disable,
+	.is_enabled = sf19a28_muxdiv_is_enabled,
+	.get_parent = sf19a28_muxdiv_get_parent,
+	.set_parent = sf19a28_muxdiv_set_parent,
+};
+
+/* Declare one mux+divider at register block _offset from REG_MUXDIV_BASE. */
+#define SF19A28_MUXDIV(_name, _offset, _flags)				\
+	struct sf19a2890_clk _name = {					\
+		.common = SF_CLK_COMMON(#_name, clk_muxdiv_parents,	\
+					&sf19a28_muxdiv_ops, _flags),	\
+		.offset = REG_MUXDIV_BASE + _offset,			\
+	}
+
+/* Bus/CPU/memory dividers are critical: gating them would hang the SoC. */
+static SF19A28_MUXDIV(muxdiv_bus1, 0x0, CLK_IS_CRITICAL);
+static SF19A28_MUXDIV(muxdiv_bus2, 0x20, CLK_IS_CRITICAL);
+static SF19A28_MUXDIV(muxdiv_bus3, 0x40, CLK_IS_CRITICAL);
+static SF19A28_MUXDIV(muxdiv_cpu, 0x100, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT);
+static SF19A28_MUXDIV(muxdiv_pbus, 0x120, CLK_IS_CRITICAL);
+static SF19A28_MUXDIV(muxdiv_mem_phy, 0x140, CLK_IS_CRITICAL);
+static SF19A28_MUXDIV(muxdiv_uart, 0x180, 0);
+static SF19A28_MUXDIV(muxdiv_eth_ref, 0x200, 0);
+static SF19A28_MUXDIV(muxdiv_eth_byp_ref, 0x220, 0);
+static SF19A28_MUXDIV(muxdiv_eth_tsu, 0x240, 0);
+static SF19A28_MUXDIV(muxdiv_gmac_byp_ref, 0x260, 0);
+static SF19A28_MUXDIV(muxdiv_m6250_0, 0x280, 0);
+static SF19A28_MUXDIV(muxdiv_m6250_1, 0x2a0, 0);
+static SF19A28_MUXDIV(muxdiv_wlan24_plf, 0x2c0, 0);
+static SF19A28_MUXDIV(muxdiv_wlan5_plf, 0x2e0, 0);
+static SF19A28_MUXDIV(muxdiv_usbphy_ref, 0x300, 0);
+static SF19A28_MUXDIV(muxdiv_tclk, 0x320, 0);
+static SF19A28_MUXDIV(muxdiv_npu_pe, 0x340, 0);
+
+/* dt-binding index -> clk_hw table handed to of_clk_add_hw_provider(). */
+static struct clk_hw_onecell_data sf19a2890_hw_clks = {
+	.num = CLK_SF19A2890_MAX,
+	.hws = {
+		[CLK_PLL_CPU] = &pll_cpu.common.hw,
+		[CLK_PLL_DDR] = &pll_ddr.common.hw,
+		[CLK_PLL_CMN] = &pll_cmn.common.hw,
+		[CLK_MUXDIV_BUS1] = &muxdiv_bus1.common.hw,
+		[CLK_MUXDIV_BUS2] = &muxdiv_bus2.common.hw,
+		[CLK_MUXDIV_BUS3] = &muxdiv_bus3.common.hw,
+		[CLK_MUXDIV_CPU] = &muxdiv_cpu.common.hw,
+		[CLK_MUXDIV_PBUS] = &muxdiv_pbus.common.hw,
+		[CLK_MUXDIV_MEM_PHY] = &muxdiv_mem_phy.common.hw,
+		[CLK_MUXDIV_UART] = &muxdiv_uart.common.hw,
+		[CLK_MUXDIV_ETH_REF] = &muxdiv_eth_ref.common.hw,
+		[CLK_MUXDIV_ETH_BYP_REF] = &muxdiv_eth_byp_ref.common.hw,
+		[CLK_MUXDIV_ETH_TSU] = &muxdiv_eth_tsu.common.hw,
+		[CLK_MUXDIV_GMAC_BYP_REF] = &muxdiv_gmac_byp_ref.common.hw,
+		[CLK_MUXDIV_M6250_0] = &muxdiv_m6250_0.common.hw,
+		[CLK_MUXDIV_M6250_1] = &muxdiv_m6250_1.common.hw,
+		[CLK_MUXDIV_WLAN24_PLF] = &muxdiv_wlan24_plf.common.hw,
+		[CLK_MUXDIV_WLAN5_PLF] = &muxdiv_wlan5_plf.common.hw,
+		[CLK_MUXDIV_USBPHY_REF] = &muxdiv_usbphy_ref.common.hw,
+		[CLK_MUXDIV_TCLK] = &muxdiv_tclk.common.hw,
+		[CLK_MUXDIV_NPU_PE_CLK] = &muxdiv_npu_pe.common.hw,
+	},
+};
+
+
+/* Controller state; base/lock are shared into each sf_clk_common at init. */
+struct sf19a2890_clk_ctrl {
+	void __iomem *base;
+	spinlock_t lock;
+};
+
+/*
+ * Map the controller, register every clock in sf19a2890_hw_clks and
+ * expose them through a onecell provider. Runs at of_clk init time;
+ * on failure, fully unwind (unregister, unmap, free) instead of
+ * leaking the controller state.
+ */
+static void __init sf19a2890_clk_init(struct device_node *node)
+{
+	struct sf19a2890_clk_ctrl *ctrl;
+	int i, ret;
+
+	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+	if (!ctrl)
+		return;
+
+	ctrl->base = of_iomap(node, 0);
+	if (!ctrl->base) {
+		pr_err("failed to map resources.\n");
+		kfree(ctrl);	/* fix: don't leak ctrl on map failure */
+		return;
+	}
+
+	spin_lock_init(&ctrl->lock);
+
+	for (i = 0; i < sf19a2890_hw_clks.num; i++) {
+		struct clk_hw *hw = sf19a2890_hw_clks.hws[i];
+		struct sf_clk_common *common;
+
+		if (!hw)
+			continue;
+		common = hw_to_sf_clk_common(hw);
+		common->base = ctrl->base;
+		common->lock = &ctrl->lock;
+		ret = clk_hw_register(NULL, hw);
+		if (ret) {
+			pr_err("Couldn't register clock %d: %d\n", i, ret);
+			goto err;
+		}
+	}
+
+	ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, &sf19a2890_hw_clks);
+	if (ret) {
+		pr_err("failed to add hw provider.\n");
+		goto err;
+	}
+	return;
+err:
+	/* Unregister the clocks [0, i) that were registered before failing. */
+	while (--i >= 0) {
+		if (sf19a2890_hw_clks.hws[i])
+			clk_hw_unregister(sf19a2890_hw_clks.hws[i]);
+	}
+	iounmap(ctrl->base);
+	kfree(ctrl);
+}
+
+CLK_OF_DECLARE(sf19a2890_clk, "siflower,sf19a2890-clk", sf19a2890_clk_init);

+ 808 - 0
target/linux/siflower/files-6.6/drivers/clk/siflower/clk-sf21-topcrm.c

@@ -0,0 +1,808 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/compiler.h>
+#include <linux/clk-provider.h>
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/rational.h>
+#include <linux/spinlock.h>
+#include <linux/bug.h>
+#include <dt-bindings/clock/siflower,sf21-topcrm.h>
+
+/* State shared by every clock in this driver: MMIO base + register lock. */
+struct sf_clk_common {
+	void __iomem	*base;
+	spinlock_t	*lock;
+	struct clk_hw	hw;
+};
+
+#define SF_CLK_COMMON(_name, _parents, _op, _flags)			\
+	{								\
+		.hw.init = CLK_HW_INIT_PARENTS(_name, _parents,		\
+						    _op, _flags),	\
+	}
+
+static inline struct sf_clk_common *hw_to_sf_clk_common(struct clk_hw *hw)
+{
+	return container_of(hw, struct sf_clk_common, hw);
+}
+
+/* MMIO accessors; 'reg' is a byte offset from the controller base. */
+static inline u32 sf_readl(struct sf_clk_common *priv, u32 reg)
+{
+	return readl(priv->base + reg);
+}
+
+static inline void sf_writel(struct sf_clk_common *priv, u32 reg, u32 val)
+{
+	return writel(val, priv->base + reg);
+}
+
+/* Read-modify-write: clear 'clr' bits, then set 'set' bits. Caller locks. */
+static inline void sf_rmw(struct sf_clk_common *priv, u32 reg, u32 clr, u32 set)
+{
+	u32 val;
+
+	val = sf_readl(priv, reg);
+	val &= ~clr;
+	val |= set;
+	sf_writel(priv, reg, val);
+}
+
+#define PLL_CMN_CFG1		0x0
+#define  PLL_CMN_BYPASS		BIT(27)
+#define  PLL_CMN_PD		BIT(26)
+#define  PLL_CMN_FBDIV		GENMASK(25, 14)
+#define  PLL_CMN_FBDIV_BITS	(25 - 14 + 1)
+#define  PLL_CMN_POSTDIV_PD	BIT(13)
+#define  PLL_CMN_VCO_PD		BIT(12)
+#define  PLL_CMN_POSTDIV1	GENMASK(11, 9)
+#define  PLL_CMN_POSTDIV2	GENMASK(8, 6)
+#define  PLL_CMN_REFDIV		GENMASK(5, 0)
+#define  PLL_CMN_REFDIV_BITS	6
+
+#define PLL_CMN_LOCK		0xc8
+#define PLL_DDR_LOCK		0xcc
+#define PLL_PCIE_LOCK		0xd4
+
+/* Config writes only take effect after pulsing the matching LOAD bit. */
+#define CFG_LOAD		0x100
+#define  CFG_LOAD_PCIE_PLL	BIT(4)
+#define  CFG_LOAD_DDR_PLL	BIT(2)
+#define  CFG_LOAD_CMN_PLL	BIT(1)
+#define  CFG_LOAD_DIV		BIT(0)
+
+/* VCO rate = (parent / REFDIV) * FBDIV.
+ * NOTE(review): a REFDIV field of 0 would divide by zero here. */
+static unsigned long sf21_cmnpll_vco_recalc_rate(struct clk_hw *hw,
+						      unsigned long parent_rate)
+{
+	struct sf_clk_common *priv = hw_to_sf_clk_common(hw);
+	u32 cfg = sf_readl(priv, PLL_CMN_CFG1);
+	unsigned long refdiv = FIELD_GET(PLL_CMN_REFDIV, cfg);
+	unsigned long fbdiv = FIELD_GET(PLL_CMN_FBDIV, cfg);
+
+	return (parent_rate / refdiv) * fbdiv;
+}
+
+/* Find the FBDIV/REFDIV pair best approximating rate / parent_rate. */
+static long sf21_cmnpll_vco_round_rate(struct clk_hw *hw,
+					    unsigned long rate,
+					    unsigned long *parent_rate)
+{
+	unsigned long fbdiv, refdiv;
+
+	rational_best_approximation(rate, *parent_rate,
+				    BIT(PLL_CMN_FBDIV_BITS) - 1,
+				    BIT(PLL_CMN_REFDIV_BITS) - 1, &fbdiv,
+				    &refdiv);
+	return (*parent_rate / refdiv) * fbdiv;
+}
+
+/*
+ * Reprogram the common PLL VCO:
+ *  1. assert BYPASS and latch (CFG_LOAD pulse),
+ *  2. write the new REFDIV/FBDIV (clearing the power-down bit) and latch,
+ *  3. busy-wait for lock, then deassert BYPASS and latch again.
+ *
+ * NOTE(review): the lock poll is unbounded and runs under a spinlock
+ * with IRQs off; a PLL that never locks would hang the CPU — consider
+ * a bounded poll (iopoll.h is already included).
+ */
+static int sf21_cmnpll_vco_set_rate(struct clk_hw *hw, unsigned long rate,
+					 unsigned long parent_rate)
+{
+	struct sf_clk_common *priv = hw_to_sf_clk_common(hw);
+	unsigned long flags;
+	unsigned long fbdiv, refdiv;
+
+	rational_best_approximation(rate, parent_rate,
+				    BIT(PLL_CMN_FBDIV_BITS) - 1,
+				    BIT(PLL_CMN_REFDIV_BITS) - 1, &fbdiv,
+				    &refdiv);
+
+	spin_lock_irqsave(priv->lock, flags);
+
+	sf_rmw(priv, PLL_CMN_CFG1, 0, PLL_CMN_BYPASS);
+	sf_writel(priv, CFG_LOAD, CFG_LOAD_CMN_PLL);
+	sf_writel(priv, CFG_LOAD, 0);
+
+	sf_rmw(priv, PLL_CMN_CFG1, PLL_CMN_REFDIV | PLL_CMN_FBDIV | PLL_CMN_PD,
+	       FIELD_PREP(PLL_CMN_REFDIV, refdiv) |
+		       FIELD_PREP(PLL_CMN_FBDIV, fbdiv));
+	sf_writel(priv, CFG_LOAD, CFG_LOAD_CMN_PLL);
+	sf_writel(priv, CFG_LOAD, 0);
+
+	while (!(sf_readl(priv, PLL_CMN_LOCK) & 1))
+		cpu_relax();
+
+	sf_rmw(priv, PLL_CMN_CFG1, PLL_CMN_BYPASS, 0);
+	sf_writel(priv, CFG_LOAD, CFG_LOAD_CMN_PLL);
+	sf_writel(priv, CFG_LOAD, 0);
+
+	spin_unlock_irqrestore(priv->lock, flags);
+	return 0;
+}
+
+static const struct clk_ops sf21_cmnpll_vco_ops = {
+	.recalc_rate = sf21_cmnpll_vco_recalc_rate,
+	.round_rate = sf21_cmnpll_vco_round_rate,
+	.set_rate = sf21_cmnpll_vco_set_rate,
+};
+
+/* Single reference input, "xin25m" (presumably a 25 MHz crystal). */
+static const char *const clk_pll_parents[] = { "xin25m" };
+
+static struct sf_clk_common cmnpll_vco = SF_CLK_COMMON(
+	"cmnpll_vco", clk_pll_parents, &sf21_cmnpll_vco_ops, 0);
+
+/*
+ * Split an overall divider into two factors (each 1..range, first >=
+ * second) whose product best approximates parent_rate / rate. The
+ * chosen factors are stored in *diva / *divb; the achieved rate is
+ * returned.
+ */
+static unsigned long sf21_dualdiv_round_rate(
+	unsigned long rate, unsigned long parent_rate,
+	unsigned int range, unsigned int *diva, unsigned int *divb)
+{
+	unsigned int target = DIV_ROUND_CLOSEST(parent_rate, rate);
+	unsigned int best, d1, d2, prod, diff;
+
+	*diva = 1;
+	*divb = 1;
+	if (target <= 1)
+		return parent_rate;
+
+	/* Distance of the initial d1 = d2 = 1 candidate. */
+	best = target - 1;
+
+	for (d1 = 1; d1 <= range; d1++) {
+		/* Best partner for d1, capped so that d2 <= d1. */
+		d2 = DIV_ROUND_CLOSEST(target, d1);
+		if (d2 > d1)
+			d2 = d1;
+
+		prod = d1 * d2;
+		diff = prod > target ? prod - target : target - prod;
+
+		if (diff < best) {
+			best = diff;
+			*diva = d1;
+			*divb = d2;
+		}
+		if (!diff)
+			break;	/* exact factorization found */
+	}
+
+	return parent_rate / *diva / *divb;
+}
+
+static long sf21_cmnpll_postdiv_round_rate(struct clk_hw *hw,
+						unsigned long rate,
+						unsigned long *parent_rate)
+{
+	unsigned int diva, divb;
+
+	/* Both post-dividers are 3-bit fields, hence range = 7. */
+	return sf21_dualdiv_round_rate(rate, *parent_rate, 7, &diva,
+					    &divb);
+}
+
+/* Program POSTDIV1/POSTDIV2 and pulse CFG_LOAD to latch them. */
+static int sf21_cmnpll_postdiv_set_rate(struct clk_hw *hw,
+					     unsigned long rate,
+					     unsigned long parent_rate)
+{
+	struct sf_clk_common *priv = hw_to_sf_clk_common(hw);
+	unsigned int diva, divb;
+	unsigned long flags;
+
+	sf21_dualdiv_round_rate(rate, parent_rate, 7, &diva, &divb);
+
+	spin_lock_irqsave(priv->lock, flags);
+	sf_rmw(priv, PLL_CMN_CFG1, PLL_CMN_POSTDIV1 | PLL_CMN_POSTDIV2,
+	       FIELD_PREP(PLL_CMN_POSTDIV1, diva) |
+		       FIELD_PREP(PLL_CMN_POSTDIV2, divb));
+	sf_writel(priv, CFG_LOAD, CFG_LOAD_CMN_PLL);
+	sf_writel(priv, CFG_LOAD, 0);
+	spin_unlock_irqrestore(priv->lock, flags);
+	return 0;
+}
+
+/*
+ * Output = VCO / POSTDIV1 / POSTDIV2.
+ * NOTE(review): zero-valued divider fields would divide by zero here.
+ */
+static unsigned long
+sf21_cmnpll_postdiv_recalc_rate(struct clk_hw *hw,
+				     unsigned long parent_rate)
+{
+	struct sf_clk_common *priv = hw_to_sf_clk_common(hw);
+	u32 cfg = sf_readl(priv, PLL_CMN_CFG1);
+	unsigned long div1 = FIELD_GET(PLL_CMN_POSTDIV1, cfg);
+	unsigned long div2 = FIELD_GET(PLL_CMN_POSTDIV2, cfg);
+
+	return parent_rate / div1 / div2;
+}
+
+static const struct clk_ops sf21_cmnpll_postdiv_ops = {
+	.recalc_rate = sf21_cmnpll_postdiv_recalc_rate,
+	.round_rate = sf21_cmnpll_postdiv_round_rate,
+	.set_rate = sf21_cmnpll_postdiv_set_rate,
+};
+
+static const char *const clk_cmnpll_postdiv_parents[] = { "cmnpll_vco" };
+
+static struct sf_clk_common cmnpll_postdiv =
+	SF_CLK_COMMON("cmnpll_postdiv", clk_cmnpll_postdiv_parents,
+		      &sf21_cmnpll_postdiv_ops, 0);
+
+#define PLL_DDR_CFG1		0x18
+#define  PLL_DDR_BYPASS		BIT(23)
+#define  PLL_DDR_PLLEN		BIT(22)
+#define  PLL_DDR_4PHASEEN	BIT(21)
+#define  PLL_DDR_POSTDIVEN	BIT(20)
+#define  PLL_DDR_DSMEN		BIT(19)
+#define  PLL_DDR_DACEN		BIT(18)
+#define  PLL_DDR_DSKEWCALBYP	BIT(17)
+#define  PLL_DDR_DSKEWCALCNT	GENMASK(16, 14)
+#define  PLL_DDR_DSKEWCALEN	BIT(13)
+#define  PLL_DDR_DSKEWCALIN	GENMASK(12, 1)
+#define  PLL_DDR_DSKEWFASTCAL	BIT(0)
+
+#define PLL_DDR_CFG2		0x1c
+#define  PLL_DDR_POSTDIV1	GENMASK(29, 27)
+#define  PLL_DDR_POSTDIV2	GENMASK(26, 24)
+#define  PLL_DDR_FRAC		GENMASK(23, 0)
+
+#define PLL_DDR_CFG3		0x20
+#define  PLL_DDR_FBDIV		GENMASK(17, 6)
+#define  PLL_DDR_REFDIV		GENMASK(5, 0)
+
+/*
+ * Report the DDR PLL output rate from the hardware:
+ *   rate = (ref / REFDIV) * FBDIV / POSTDIV1 / POSTDIV2
+ * This clock is read-only (no enable/set_rate ops) — DDR timing is
+ * assumed to be configured by firmware.
+ * NOTE(review): assumes REFDIV and both POSTDIV fields are non-zero;
+ * confirm reset defaults.
+ */
+static unsigned long
+sf21_ddrpll_postdiv_recalc_rate(struct clk_hw *hw,
+				     unsigned long parent_rate)
+{
+	struct sf_clk_common *priv = hw_to_sf_clk_common(hw);
+	u32 cfg2 = sf_readl(priv, PLL_DDR_CFG2);
+	u32 postdiv1 = FIELD_GET(PLL_DDR_POSTDIV1, cfg2);
+	u32 postdiv2 = FIELD_GET(PLL_DDR_POSTDIV2, cfg2);
+	u32 cfg3 = sf_readl(priv, PLL_DDR_CFG3);
+	u32 fbdiv = FIELD_GET(PLL_DDR_FBDIV, cfg3);
+	u32 refdiv = FIELD_GET(PLL_DDR_REFDIV, cfg3);
+
+	return (parent_rate / refdiv) * fbdiv / postdiv1 / postdiv2;
+}
+
+static const struct clk_ops sf21_ddrpll_postdiv_ops = {
+	.recalc_rate = sf21_ddrpll_postdiv_recalc_rate,
+};
+
+static struct sf_clk_common ddrpll_postdiv = SF_CLK_COMMON(
+	"ddrpll_postdiv", clk_pll_parents, &sf21_ddrpll_postdiv_ops, 0);
+
+#define PLL_PCIE_CFG1		0x4c
+#define  PLL_PCIE_PLLEN		BIT(31)
+#define  PLL_PCIE_POSTDIV0PRE	BIT(30)
+#define  PLL_PCIE_REFDIV	GENMASK(29, 24)
+#define  PLL_PCIE_FRAC		GENMASK(23, 0)
+
+#define PLL_PCIE_CFG2		0x50
+#define  PLL_PCIE_FOUTEN(i)	BIT(28 + (i))
+#define  PLL_PCIE_BYPASS(i)	BIT(24 + (i))
+#define  PLL_PCIE_PDIVA_OFFS(i)	(21 - 6 * (i))
+#define  PLL_PCIE_PDIVB_OFFS(i)	(18 - 6 * (i))
+#define  PLL_PCIE_PDIV_MASK	GENMASK(2, 0)
+
+#define PLL_PCIE_CFG3		0x54
+#define  PLL_PCIE_DSKEWFASTCAL	BIT(31)
+#define  PLL_PCIE_DACEN		BIT(30)
+#define  PLL_PCIE_DSMEN		BIT(29)
+#define  PLL_PCIE_DSKEWCALEN	BIT(28)
+#define  PLL_PCIE_DSKEWCALBYP	BIT(27)
+#define  PLL_PCIE_DSKEWCALCNT	GENMASK(26, 24)
+#define  PLL_PCIE_DSKEWCALIN	GENMASK(23, 12)
+#define  PLL_PCIE_FBDIV		GENMASK(11, 0)
+
+/*
+ * Report the PCIe PLL VCO rate: (ref / REFDIV) * FBDIV / 4.
+ * NOTE(review): the fixed /4 presumably reflects an internal
+ * post-scaler of this PLL — confirm against the datasheet.  Assumes
+ * REFDIV is non-zero.
+ */
+static unsigned long
+sf21_pciepll_vco_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+	struct sf_clk_common *priv = hw_to_sf_clk_common(hw);
+	u32 cfg1 = sf_readl(priv, PLL_PCIE_CFG1);
+	unsigned long refdiv = FIELD_GET(PLL_PCIE_REFDIV, cfg1);
+	u32 cfg3 = sf_readl(priv, PLL_PCIE_CFG3);
+	unsigned long fbdiv = FIELD_GET(PLL_PCIE_FBDIV, cfg3);
+
+	return (parent_rate / refdiv) * fbdiv / 4;
+}
+
+/*
+ * Power up the PCIe PLL, latch the enable via CFG_LOAD_PCIE_PLL, and
+ * busy-wait for the lock indication (PLL_PCIE_LOCK is defined earlier
+ * in this file).
+ * NOTE(review): the lock poll is unbounded and runs with interrupts
+ * disabled under priv->lock — a PLL that never locks hangs the CPU;
+ * consider adding a timeout.
+ */
+static int sf21_pciepll_vco_enable(struct clk_hw *hw)
+{
+	struct sf_clk_common *priv = hw_to_sf_clk_common(hw);
+	unsigned long flags;
+
+	spin_lock_irqsave(priv->lock, flags);
+	sf_rmw(priv, PLL_PCIE_CFG1, 0, PLL_PCIE_PLLEN);
+	sf_writel(priv, CFG_LOAD, CFG_LOAD_PCIE_PLL);
+	sf_writel(priv, CFG_LOAD, 0);
+	while (!(sf_readl(priv, PLL_PCIE_LOCK)))
+		;
+	spin_unlock_irqrestore(priv->lock, flags);
+	return 0;
+}
+
+/* Power down the PCIe PLL and latch the change via CFG_LOAD_PCIE_PLL. */
+static void sf21_pciepll_vco_disable(struct clk_hw *hw)
+{
+	struct sf_clk_common *priv = hw_to_sf_clk_common(hw);
+	unsigned long flags;
+
+	spin_lock_irqsave(priv->lock, flags);
+	sf_rmw(priv, PLL_PCIE_CFG1, PLL_PCIE_PLLEN, 0);
+	sf_writel(priv, CFG_LOAD, CFG_LOAD_PCIE_PLL);
+	sf_writel(priv, CFG_LOAD, 0);
+	spin_unlock_irqrestore(priv->lock, flags);
+}
+
+static const struct clk_ops sf21_pciepll_vco_ops = {
+	.enable = sf21_pciepll_vco_enable,
+	.disable = sf21_pciepll_vco_disable,
+	.recalc_rate = sf21_pciepll_vco_recalc_rate,
+};
+
+static struct sf_clk_common pciepll_vco =
+	SF_CLK_COMMON("pciepll_vco", clk_pll_parents,
+		      &sf21_pciepll_vco_ops, CLK_SET_RATE_GATE);
+
+struct sf21_pciepll_fout {
+	struct sf_clk_common common;
+	u8 index;
+};
+
+/* Ungate PCIe PLL output "index" (FOUTEN bit) and latch via CFG_LOAD. */
+static int sf21_pciepll_fout_enable(struct clk_hw *hw)
+{
+	struct sf_clk_common *cmn_priv = hw_to_sf_clk_common(hw);
+	struct sf21_pciepll_fout *priv =
+		container_of(cmn_priv, struct sf21_pciepll_fout, common);
+	unsigned long flags;
+
+	spin_lock_irqsave(cmn_priv->lock, flags);
+	sf_rmw(cmn_priv, PLL_PCIE_CFG2, 0, PLL_PCIE_FOUTEN(priv->index));
+	sf_writel(cmn_priv, CFG_LOAD, CFG_LOAD_PCIE_PLL);
+	sf_writel(cmn_priv, CFG_LOAD, 0);
+	spin_unlock_irqrestore(cmn_priv->lock, flags);
+	return 0;
+}
+
+/* Gate PCIe PLL output "index" (clear FOUTEN) and latch via CFG_LOAD. */
+static void sf21_pciepll_fout_disable(struct clk_hw *hw)
+{
+	struct sf_clk_common *cmn_priv = hw_to_sf_clk_common(hw);
+	struct sf21_pciepll_fout *priv =
+		container_of(cmn_priv, struct sf21_pciepll_fout, common);
+	unsigned long flags;
+
+	spin_lock_irqsave(cmn_priv->lock, flags);
+	sf_rmw(cmn_priv, PLL_PCIE_CFG2, PLL_PCIE_FOUTEN(priv->index), 0);
+	sf_writel(cmn_priv, CFG_LOAD, CFG_LOAD_PCIE_PLL);
+	sf_writel(cmn_priv, CFG_LOAD, 0);
+	spin_unlock_irqrestore(cmn_priv->lock, flags);
+}
+
+/*
+ * Round to the nearest rate achievable with two cascaded dividers,
+ * each up to 8 (the 3-bit PDIV fields store value - 1).
+ */
+static long sf21_pciepll_fout_round_rate(struct clk_hw *hw,
+					      unsigned long rate,
+					      unsigned long *parent_rate)
+{
+	unsigned int diva, divb;
+
+	return sf21_dualdiv_round_rate(rate, *parent_rate, 8, &diva,
+					    &divb);
+}
+
+/*
+ * Program the two 3-bit post-dividers of PCIe PLL output "index".
+ * The hardware fields hold (divider - 1); changes take effect after
+ * pulsing CFG_LOAD_PCIE_PLL.  Always returns 0.
+ */
+static int sf21_pciepll_fout_set_rate(struct clk_hw *hw,
+					   unsigned long rate,
+					   unsigned long parent_rate)
+{
+	struct sf_clk_common *cmn_priv = hw_to_sf_clk_common(hw);
+	struct sf21_pciepll_fout *priv =
+		container_of(cmn_priv, struct sf21_pciepll_fout, common);
+	unsigned int diva, divb;
+	unsigned long flags;
+
+	sf21_dualdiv_round_rate(rate, parent_rate, 8, &diva, &divb);
+
+	spin_lock_irqsave(cmn_priv->lock, flags);
+	sf_rmw(cmn_priv, PLL_PCIE_CFG2,
+	       (PLL_PCIE_PDIV_MASK << PLL_PCIE_PDIVA_OFFS(priv->index)) |
+		       (PLL_PCIE_PDIV_MASK << PLL_PCIE_PDIVB_OFFS(priv->index)),
+	       ((diva - 1) << PLL_PCIE_PDIVA_OFFS(priv->index)) |
+		       ((divb - 1) << PLL_PCIE_PDIVB_OFFS(priv->index)));
+	sf_writel(cmn_priv, CFG_LOAD, CFG_LOAD_PCIE_PLL);
+	sf_writel(cmn_priv, CFG_LOAD, 0);
+	spin_unlock_irqrestore(cmn_priv->lock, flags);
+	return 0;
+}
+
+/*
+ * Report the rate of PCIe PLL output "index": the VCO rate divided by
+ * the two post-dividers (fields store value - 1, hence the + 1).
+ */
+static unsigned long
+sf21_pciepll_fout_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+	struct sf_clk_common *cmn_priv = hw_to_sf_clk_common(hw);
+	struct sf21_pciepll_fout *priv =
+		container_of(cmn_priv, struct sf21_pciepll_fout, common);
+	int idx = priv->index;
+	u32 cfg2 = sf_readl(cmn_priv, PLL_PCIE_CFG2);
+	ulong pdiva = (cfg2 >> PLL_PCIE_PDIVA_OFFS(idx)) & PLL_PCIE_PDIV_MASK;
+	ulong pdivb = (cfg2 >> PLL_PCIE_PDIVB_OFFS(idx)) & PLL_PCIE_PDIV_MASK;
+
+	return parent_rate / (pdiva + 1) / (pdivb + 1);
+}
+
+static const struct clk_ops sf21_pciepll_fout_ops = {
+	.enable = sf21_pciepll_fout_enable,
+	.disable = sf21_pciepll_fout_disable,
+	.recalc_rate = sf21_pciepll_fout_recalc_rate,
+	.round_rate = sf21_pciepll_fout_round_rate,
+	.set_rate = sf21_pciepll_fout_set_rate,
+};
+
+static const char * const clk_pciepll_fout_parents[] = { "pciepll_vco" };
+
+#define SF21_PCIEPLL_FOUT(_name, _idx, _flags)			\
+	struct sf21_pciepll_fout _name = {				\
+		.common = SF_CLK_COMMON(#_name,				\
+					clk_pciepll_fout_parents,	\
+					&sf21_pciepll_fout_ops,	\
+					_flags),			\
+		.index = _idx,						\
+	}
+
+static SF21_PCIEPLL_FOUT(pciepll_fout0, 0, 0);
+static SF21_PCIEPLL_FOUT(pciepll_fout1, 1, 0);
+static SF21_PCIEPLL_FOUT(pciepll_fout2, 2, 0);
+static SF21_PCIEPLL_FOUT(pciepll_fout3, 3, 0);
+
+struct sf21_clk_muxdiv {
+	struct sf_clk_common common;
+	u16 mux;
+	u16 en;
+	u8 div_reg;
+	u8 div_offs;
+};
+
+#define CRM_CLK_SEL(_x)		((_x) * 4 + 0x80)
+#define CRM_CLK_EN		0x8c
+#define CRM_CLK_DIV(_x)		((_x) * 4 + 0x94)
+#define  CRM_CLK_DIV_MASK	GENMASK(7, 0)
+
+/*
+ * Report the muxdiv output rate: parent rate divided by the 8-bit
+ * hardware divider (stored as value - 1, so the range is 1..256).
+ */
+static unsigned long sf21_muxdiv_recalc_rate(struct clk_hw *hw,
+						  unsigned long parent_rate)
+{
+	struct sf_clk_common *cmn_priv = hw_to_sf_clk_common(hw);
+	struct sf21_clk_muxdiv *priv =
+		container_of(cmn_priv, struct sf21_clk_muxdiv, common);
+	ulong div_reg = CRM_CLK_DIV(priv->div_reg);
+	u16 div_offs = priv->div_offs;
+	u16 div_val = (sf_readl(cmn_priv, div_reg) >> div_offs) &
+		      CRM_CLK_DIV_MASK;
+	div_val += 1;
+	return parent_rate / div_val;
+}
+
+/*
+ * Pick the divider (1..256) that best approximates the requested rate
+ * from the already-selected parent, and report the resulting rate back
+ * through @req.  Always returns 0.
+ */
+static int sf21_muxdiv_determine_rate(struct clk_hw *hw,
+					   struct clk_rate_request *req)
+{
+	unsigned int div;
+
+	/* A zero rate request would divide by zero; clamp to max divider. */
+	if (!req->rate)
+		div = CRM_CLK_DIV_MASK + 1;
+	else
+		div = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate);
+	if (!div)
+		div = 1;
+	else if (div > CRM_CLK_DIV_MASK + 1)
+		div = CRM_CLK_DIV_MASK + 1;
+
+	req->rate = req->best_parent_rate / div;
+	return 0;
+}
+
+/*
+ * Program the 8-bit divider (stored as value - 1) for the requested
+ * rate and latch it by pulsing CFG_LOAD_DIV.  Clamping mirrors
+ * sf21_muxdiv_determine_rate().  Always returns 0.
+ */
+static int sf21_muxdiv_set_rate(struct clk_hw *hw, unsigned long rate,
+				     unsigned long parent_rate)
+{
+	struct sf_clk_common *cmn_priv = hw_to_sf_clk_common(hw);
+	struct sf21_clk_muxdiv *priv =
+		container_of(cmn_priv, struct sf21_clk_muxdiv, common);
+	ulong div_reg = CRM_CLK_DIV(priv->div_reg);
+	u16 div_offs = priv->div_offs;
+	unsigned long flags;
+	unsigned int div;
+
+	/* A zero rate would divide by zero; clamp to the max divider. */
+	if (!rate)
+		div = CRM_CLK_DIV_MASK + 1;
+	else
+		div = DIV_ROUND_CLOSEST(parent_rate, rate);
+	if (div < 1)
+		div = 1;
+	else if (div > CRM_CLK_DIV_MASK + 1)
+		div = CRM_CLK_DIV_MASK + 1;
+	div -= 1;
+
+	spin_lock_irqsave(cmn_priv->lock, flags);
+	sf_rmw(cmn_priv, div_reg, CRM_CLK_DIV_MASK << div_offs,
+	       div << div_offs);
+	sf_writel(cmn_priv, CFG_LOAD, CFG_LOAD_DIV);
+	sf_writel(cmn_priv, CFG_LOAD, 0);
+	spin_unlock_irqrestore(cmn_priv->lock, flags);
+	return 0;
+}
+
+/* Set this clock's gate bit in CRM_CLK_EN and latch via CFG_LOAD_DIV. */
+static int sf21_muxdiv_enable(struct clk_hw *hw)
+{
+	struct sf_clk_common *cmn_priv = hw_to_sf_clk_common(hw);
+	struct sf21_clk_muxdiv *priv =
+		container_of(cmn_priv, struct sf21_clk_muxdiv, common);
+	unsigned long flags;
+
+	spin_lock_irqsave(cmn_priv->lock, flags);
+	sf_rmw(cmn_priv, CRM_CLK_EN, 0, BIT(priv->en));
+	sf_writel(cmn_priv, CFG_LOAD, CFG_LOAD_DIV);
+	sf_writel(cmn_priv, CFG_LOAD, 0);
+	spin_unlock_irqrestore(cmn_priv->lock, flags);
+	return 0;
+}
+
+/* Clear this clock's gate bit in CRM_CLK_EN and latch via CFG_LOAD_DIV. */
+static void sf21_muxdiv_disable(struct clk_hw *hw)
+{
+	struct sf_clk_common *cmn_priv = hw_to_sf_clk_common(hw);
+	struct sf21_clk_muxdiv *priv =
+		container_of(cmn_priv, struct sf21_clk_muxdiv, common);
+	unsigned long flags;
+
+	spin_lock_irqsave(cmn_priv->lock, flags);
+	sf_rmw(cmn_priv, CRM_CLK_EN, BIT(priv->en), 0);
+	sf_writel(cmn_priv, CFG_LOAD, CFG_LOAD_DIV);
+	sf_writel(cmn_priv, CFG_LOAD, 0);
+	spin_unlock_irqrestore(cmn_priv->lock, flags);
+}
+
+/* Report whether this clock's gate bit is currently set in CRM_CLK_EN. */
+static int sf21_muxdiv_is_enabled(struct clk_hw *hw)
+{
+	struct sf_clk_common *cmn_priv = hw_to_sf_clk_common(hw);
+	struct sf21_clk_muxdiv *priv =
+		container_of(cmn_priv, struct sf21_clk_muxdiv, common);
+
+	return !!(sf_readl(cmn_priv, CRM_CLK_EN) & BIT(priv->en));
+}
+
+/*
+ * Return the active parent index (0 or 1).  The mux select bits are
+ * packed 32 per CRM_CLK_SEL register; "mux" is the absolute bit number.
+ */
+static u8 sf21_muxdiv_get_parent(struct clk_hw *hw)
+{
+	struct sf_clk_common *cmn_priv = hw_to_sf_clk_common(hw);
+	struct sf21_clk_muxdiv *priv =
+		container_of(cmn_priv, struct sf21_clk_muxdiv, common);
+	ulong mux_reg = CRM_CLK_SEL(priv->mux / 32);
+	u16 mux_offs = priv->mux % 32;
+	u32 reg_val = sf_readl(cmn_priv, mux_reg);
+
+	return reg_val & BIT(mux_offs) ? 1 : 0;
+}
+
+/*
+ * Select parent 0 or 1 by clearing/setting the mux bit, then latch the
+ * change via CFG_LOAD_DIV.  Always returns 0.
+ */
+static int sf21_muxdiv_set_parent(struct clk_hw *hw, u8 index)
+{
+	struct sf_clk_common *cmn_priv = hw_to_sf_clk_common(hw);
+	struct sf21_clk_muxdiv *priv =
+		container_of(cmn_priv, struct sf21_clk_muxdiv, common);
+	ulong mux_reg = CRM_CLK_SEL(priv->mux / 32);
+	u16 mux_offs = priv->mux % 32;
+	unsigned long flags;
+
+	spin_lock_irqsave(cmn_priv->lock, flags);
+	if (index)
+		sf_rmw(cmn_priv, mux_reg, 0, BIT(mux_offs));
+	else
+		sf_rmw(cmn_priv, mux_reg, BIT(mux_offs), 0);
+
+	sf_writel(cmn_priv, CFG_LOAD, CFG_LOAD_DIV);
+	sf_writel(cmn_priv, CFG_LOAD, 0);
+	spin_unlock_irqrestore(cmn_priv->lock, flags);
+	return 0;
+}
+
+static const struct clk_ops sf21_clk_muxdiv_ops = {
+	.enable = sf21_muxdiv_enable,
+	.disable = sf21_muxdiv_disable,
+	.is_enabled = sf21_muxdiv_is_enabled,
+	.recalc_rate = sf21_muxdiv_recalc_rate,
+	.determine_rate = sf21_muxdiv_determine_rate,
+	.set_rate = sf21_muxdiv_set_rate,
+	.get_parent = sf21_muxdiv_get_parent,
+	.set_parent = sf21_muxdiv_set_parent,
+};
+
+#define SF21_MUXDIV(_name, _parents,				\
+			 _mux, _div_reg, _div_offs, _en, _flags)	\
+	struct sf21_clk_muxdiv _name = {				\
+		.common = SF_CLK_COMMON(#_name, _parents,		\
+					&sf21_clk_muxdiv_ops,	\
+					_flags),			\
+		.mux = _mux,						\
+		.en = _en,						\
+		.div_reg = _div_reg,					\
+		.div_offs = _div_offs,					\
+	}
+
+static const char *const clk_periph_parents[] = { "cmnpll_postdiv",
+						  "ddrpll_postdiv" };
+static const char *const clk_ddr_parents[] = { "ddrpll_postdiv",
+					       "cmnpll_postdiv" };
+static const char *const clk_gmac_usb_parents[] = { "cmnpll_vco",
+						    "ddrpll_postdiv" };
+
+static SF21_MUXDIV(muxdiv_cpu, clk_periph_parents, 1, 0, 0, 0, CLK_IGNORE_UNUSED);
+static SF21_MUXDIV(muxdiv_pic, clk_periph_parents, 3, 3, 16, 1, CLK_IGNORE_UNUSED);
+static SF21_MUXDIV(muxdiv_axi, clk_periph_parents, 5, 0, 8, 2, CLK_IS_CRITICAL);
+static SF21_MUXDIV(muxdiv_ahb, clk_periph_parents, 7, 0, 16, 3, CLK_IS_CRITICAL);
+static SF21_MUXDIV(muxdiv_apb, clk_periph_parents, 9, 0, 24, 4, CLK_IS_CRITICAL);
+static SF21_MUXDIV(muxdiv_uart, clk_periph_parents, 11, 1, 0, 5, 0);
+static SF21_MUXDIV(muxdiv_iram, clk_periph_parents, 13, 1, 8, 6, 0);
+static SF21_MUXDIV(muxdiv_npu, clk_periph_parents, 17, 1, 24, 8, 0);
+static SF21_MUXDIV(muxdiv_ddrphy, clk_ddr_parents, 19, 2, 0, 9, CLK_IS_CRITICAL);
+static SF21_MUXDIV(muxdiv_ddr_bypass, clk_ddr_parents, 21, 3, 0, 10, CLK_IS_CRITICAL);
+static SF21_MUXDIV(muxdiv_ethtsu, clk_periph_parents, 25, 2, 16, 12, 0);
+static SF21_MUXDIV(muxdiv_gmac_byp_ref, clk_gmac_usb_parents, 27, 2, 24, 13, 0);
+static SF21_MUXDIV(muxdiv_usb, clk_gmac_usb_parents, 33, 1, 16, 24, 0);
+static SF21_MUXDIV(muxdiv_usbphy, clk_gmac_usb_parents, 35, 2, 8, 25, 0);
+static SF21_MUXDIV(muxdiv_serdes_csr, clk_periph_parents, 47, 5, 0, 20, 0);
+static SF21_MUXDIV(muxdiv_crypt_csr, clk_periph_parents, 49, 5, 8, 21, 0);
+static SF21_MUXDIV(muxdiv_crypt_app, clk_periph_parents, 51, 5, 16, 22, 0);
+static SF21_MUXDIV(muxdiv_irom, clk_periph_parents, 53, 5, 24, 23, CLK_IS_CRITICAL);
+
+/* Plain mux (no divider): pass the best parent rate through unchanged. */
+static int sf21_mux_determine_rate(struct clk_hw *hw,
+					struct clk_rate_request *req)
+{
+	req->rate = req->best_parent_rate;
+	return 0;
+}
+
+static const struct clk_ops sf21_clk_mux_ops = {
+	.get_parent = sf21_muxdiv_get_parent,
+	.set_parent = sf21_muxdiv_set_parent,
+	.determine_rate = sf21_mux_determine_rate,
+};
+
+#define SF21_MUX(_name, _parents, _mux, _flags)			\
+	struct sf21_clk_muxdiv _name = {				\
+		.common = SF_CLK_COMMON(#_name, _parents,		\
+					&sf21_clk_mux_ops,		\
+					_flags),			\
+		.mux = _mux,						\
+		.en = 0,						\
+		.div_reg = 0,						\
+		.div_offs = 0,						\
+	}
+
+static const char * const clk_boot_parents[] = { "muxdiv_irom", "xin25m" };
+
+static SF21_MUX(mux_boot, clk_boot_parents, 30, CLK_IS_CRITICAL);
+
+static const struct clk_ops sf21_clk_div_ops = {
+	.recalc_rate = sf21_muxdiv_recalc_rate,
+	.determine_rate = sf21_muxdiv_determine_rate,
+	.set_rate = sf21_muxdiv_set_rate,
+};
+
+#define SF21_DIV(_name, _parents, _div_reg, _div_offs, _flags)	\
+	struct sf21_clk_muxdiv _name = {				\
+		.common = SF_CLK_COMMON(#_name, _parents,		\
+					&sf21_clk_div_ops,		\
+					_flags),			\
+		.mux = 0,						\
+		.en = 0,						\
+		.div_reg = _div_reg,					\
+		.div_offs = _div_offs,					\
+	}
+
+static SF21_DIV(div_pvt, clk_pll_parents, 3, 8, 0);
+static SF21_DIV(div_pll_test, clk_pll_parents, 3, 24, 0);
+
+static const struct clk_ops sf21_clk_gate_ops = {
+	.enable = sf21_muxdiv_enable,
+	.disable = sf21_muxdiv_disable,
+	.is_enabled = sf21_muxdiv_is_enabled,
+};
+
+#define SF21_GATE(_name, _parents, _en, _flags)			\
+	struct sf21_clk_muxdiv _name = {				\
+		.common = SF_CLK_COMMON(#_name,				\
+					_parents,			\
+					&sf21_clk_gate_ops,	\
+					_flags),			\
+		.mux = 0,						\
+		.en = _en,						\
+		.div_reg = 0,						\
+		.div_offs = 0,						\
+	}
+
+static const char * const clk_pcie_parents[] = { "pciepll_fout1" };
+
+static SF21_GATE(pcie_refclk_n, clk_pcie_parents, 15, 0);
+static SF21_GATE(pcie_refclk_p, clk_pcie_parents, 16, 0);
+
+static struct clk_hw_onecell_data sf21_hw_clks = {
+	.num = CLK_MAX,
+	.hws = {
+		[CLK_CMNPLL_VCO] = &cmnpll_vco.hw,
+		[CLK_CMNPLL_POSTDIV] = &cmnpll_postdiv.hw,
+		[CLK_DDRPLL_POSTDIV] = &ddrpll_postdiv.hw,
+		[CLK_PCIEPLL_VCO] = &pciepll_vco.hw,
+		[CLK_PCIEPLL_FOUT0] = &pciepll_fout0.common.hw,
+		[CLK_PCIEPLL_FOUT1] = &pciepll_fout1.common.hw,
+		[CLK_PCIEPLL_FOUT2] = &pciepll_fout2.common.hw,
+		[CLK_PCIEPLL_FOUT3] = &pciepll_fout3.common.hw,
+		[CLK_CPU] = &muxdiv_cpu.common.hw,
+		[CLK_PIC] = &muxdiv_pic.common.hw,
+		[CLK_AXI] = &muxdiv_axi.common.hw,
+		[CLK_AHB] = &muxdiv_ahb.common.hw,
+		[CLK_APB] = &muxdiv_apb.common.hw,
+		[CLK_UART] = &muxdiv_uart.common.hw,
+		[CLK_IRAM] = &muxdiv_iram.common.hw,
+		[CLK_NPU] = &muxdiv_npu.common.hw,
+		[CLK_DDRPHY_REF] = &muxdiv_ddrphy.common.hw,
+		[CLK_DDR_BYPASS] = &muxdiv_ddr_bypass.common.hw,
+		[CLK_ETHTSU] = &muxdiv_ethtsu.common.hw,
+		[CLK_GMAC_BYP_REF] = &muxdiv_gmac_byp_ref.common.hw,
+		[CLK_USB] = &muxdiv_usb.common.hw,
+		[CLK_USBPHY] = &muxdiv_usbphy.common.hw,
+		[CLK_SERDES_CSR] = &muxdiv_serdes_csr.common.hw,
+		[CLK_CRYPT_CSR] = &muxdiv_crypt_csr.common.hw,
+		[CLK_CRYPT_APP] = &muxdiv_crypt_app.common.hw,
+		[CLK_IROM] = &muxdiv_irom.common.hw,
+		[CLK_BOOT] = &mux_boot.common.hw,
+		[CLK_PVT] = &div_pvt.common.hw,
+		[CLK_PLL_TEST] = &div_pll_test.common.hw,
+		[CLK_PCIE_REFN] = &pcie_refclk_n.common.hw,
+		[CLK_PCIE_REFP] = &pcie_refclk_p.common.hw,
+	}
+};
+
+struct sf21_clk_ctrl {
+	void __iomem *base;
+	spinlock_t lock;
+};
+
+/*
+ * Early (CLK_OF_DECLARE) init of the SF21 top clock/reset module:
+ * map the register block, attach base/lock to every static clock,
+ * register them, and expose them through a onecell provider.
+ * On failure, previously registered clocks are unregistered and all
+ * resources are released (the original code leaked "ctrl" here).
+ */
+static void __init sf21_topcrm_init(struct device_node *node)
+{
+	struct sf21_clk_ctrl *ctrl;
+	int i, ret;
+
+	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+	if (!ctrl)
+		return;
+
+	ctrl->base = of_iomap(node, 0);
+	if (!ctrl->base) {
+		pr_err("failed to map resources.\n");
+		kfree(ctrl);
+		return;
+	}
+
+	spin_lock_init(&ctrl->lock);
+
+	for (i = 0; i < sf21_hw_clks.num; i++) {
+		struct clk_hw *hw = sf21_hw_clks.hws[i];
+		struct sf_clk_common *common;
+
+		if (!hw)
+			continue;
+		common = hw_to_sf_clk_common(hw);
+		common->base = ctrl->base;
+		common->lock = &ctrl->lock;
+		ret = clk_hw_register(NULL, hw);
+		if (ret) {
+			pr_err("Couldn't register clock %d\n", i);
+			goto err;
+		}
+	}
+
+	ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
+				     &sf21_hw_clks);
+	if (ret) {
+		pr_err("failed to add hw provider.\n");
+		goto err;
+	}
+	return;
+err:
+	/* Undo the clocks registered so far, then release everything. */
+	while (--i >= 0)
+		if (sf21_hw_clks.hws[i])
+			clk_hw_unregister(sf21_hw_clks.hws[i]);
+	iounmap(ctrl->base);
+	kfree(ctrl);
+}
+
+CLK_OF_DECLARE(sf21_topcrm, "siflower,sf21-topcrm", sf21_topcrm_init);

+ 346 - 0
target/linux/siflower/files-6.6/drivers/gpio/gpio-siflower.c

@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/pinctrl/consumer.h>
+#include <linux/clk.h>
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <asm/div64.h>
+
+#define GPIO_IR(n)	(0x40 * (n) + 0x00)
+#define GPIO_OR(n)	(0x40 * (n) + 0x04)
+#define GPIO_OEN(n)	(0x40 * (n) + 0x08)
+#define GPIO_IMR(n)	(0x40 * (n) + 0x0c)
+#define GPIO_GPIMR(n)	(0x40 * (n) + 0x10)
+#define GPIO_PIR(n)	(0x40 * (n) + 0x14)
+#define GPIO_ITR(n)	(0x40 * (n) + 0x18)
+#define GPIO_IFR(n)	(0x40 * (n) + 0x1c)
+#define GPIO_ICR(n)	(0x40 * (n) + 0x20)
+#define GPIO_GPxIR(n)	(0x4 * (n) + 0x4000)
+
+#define GPIOS_PER_GROUP	16
+
+struct sf_gpio_priv {
+	struct gpio_chip gc;
+	void __iomem *base;
+	struct clk *clk;
+	struct reset_control *rstc;
+	unsigned int irq[];
+};
+
+/* Relaxed MMIO read of a GPIO controller register. */
+static u32 sf_gpio_rd(struct sf_gpio_priv *priv, unsigned long reg)
+{
+	return readl_relaxed(priv->base + reg);
+}
+
+/* Relaxed MMIO write to a GPIO controller register. */
+static void sf_gpio_wr(struct sf_gpio_priv *priv, unsigned long reg,
+		       u32 val)
+{
+	writel_relaxed(val, priv->base + reg);
+}
+
+/*
+ * Read the input level of one line.  Registers are per-pin (0x40
+ * stride), so the raw IR value is returned directly — presumably it
+ * reads as 0/1; confirm with the datasheet.
+ */
+static int sf_gpio_get_value(struct gpio_chip *gc, unsigned int offset)
+{
+	struct sf_gpio_priv *priv = gpiochip_get_data(gc);
+
+	return sf_gpio_rd(priv, GPIO_IR(offset));
+}
+
+/* Drive the output level of one line via its per-pin OR register. */
+static void sf_gpio_set_value(struct gpio_chip *gc, unsigned int offset,
+			      int value)
+{
+	struct sf_gpio_priv *priv = gpiochip_get_data(gc);
+
+	sf_gpio_wr(priv, GPIO_OR(offset), value);
+}
+
+/* OEN is an active-low output enable: nonzero means the line is an input. */
+static int sf_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+	struct sf_gpio_priv *priv = gpiochip_get_data(gc);
+
+	return sf_gpio_rd(priv, GPIO_OEN(offset)) ?
+		GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT;
+}
+
+/* Switch a line to input by setting its (active-low) output enable. */
+static int sf_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
+{
+	struct sf_gpio_priv *priv = gpiochip_get_data(gc);
+
+	sf_gpio_wr(priv, GPIO_OEN(offset), 1);
+	return 0;
+}
+
+/*
+ * Switch a line to output.  The level is written first so the pin
+ * does not glitch to a stale value when the driver is enabled.
+ */
+static int sf_gpio_direction_output(struct gpio_chip *gc, unsigned int offset,
+				    int value)
+{
+	struct sf_gpio_priv *priv = gpiochip_get_data(gc);
+
+	sf_gpio_wr(priv, GPIO_OR(offset), value);
+	sf_gpio_wr(priv, GPIO_OEN(offset), 0);
+	return 0;
+}
+
+/*
+ * Program the input-debounce filter for one line.
+ * Hardware relation: (ICR + 1) * IFR filter cycles at clk/4, so the
+ * cycle count is debounce_us * clkfreq_mhz / 4.  ICR is pinned at its
+ * maximum (0xff) and IFR takes the rounded-up remainder; anything
+ * above 0xff00 cycles would overflow IFR and is rejected.
+ */
+static int sf_gpio_set_debounce(struct gpio_chip *gc, unsigned int offset,
+				u32 debounce)
+{
+	struct sf_gpio_priv *priv = gpiochip_get_data(gc);
+	unsigned long freq = clk_get_rate(priv->clk);
+	u64 mul;
+
+	/* (ICR + 1) * IFR = debounce_us * clkfreq_mhz / 4 */
+	mul = (u64)debounce * freq;
+	do_div(mul, 1000000 * 4);
+	if (mul > 0xff00)
+		return -EINVAL;
+
+	sf_gpio_wr(priv, GPIO_ICR(offset), 0xff);
+	sf_gpio_wr(priv, GPIO_IFR(offset), DIV_ROUND_UP(mul, 0x100));
+
+	return 0;
+}
+
+/*
+ * .set_config hook: handle debounce in this driver, delegate every
+ * other pinconf parameter to the generic pinctrl path.
+ */
+static int sf_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+			      unsigned long config)
+{
+	switch (pinconf_to_config_param(config)) {
+	case PIN_CONFIG_INPUT_DEBOUNCE:
+		return sf_gpio_set_debounce(gc, offset,
+			pinconf_to_config_argument(config));
+	default:
+		return gpiochip_generic_config(gc, offset, config);
+	}
+}
+
+/* Acknowledge a pending line interrupt by clearing its PIR register. */
+static void sf_gpio_irq_ack(struct irq_data *data)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+	struct sf_gpio_priv *priv = gpiochip_get_data(gc);
+	unsigned long offset = irqd_to_hwirq(data);
+
+	sf_gpio_wr(priv, GPIO_PIR(offset), 0);
+}
+
+/*
+ * Mask one line's interrupt: IMR gates the line itself, GPIMR gates
+ * its contribution to the per-group summary interrupt.
+ */
+static void sf_gpio_irq_mask(struct irq_data *data)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+	struct sf_gpio_priv *priv = gpiochip_get_data(gc);
+	unsigned long offset = irqd_to_hwirq(data);
+
+	sf_gpio_wr(priv, GPIO_IMR(offset), 1);
+	sf_gpio_wr(priv, GPIO_GPIMR(offset), 1);
+}
+
+/* Unmask one line's interrupt (inverse of sf_gpio_irq_mask). */
+static void sf_gpio_irq_unmask(struct irq_data *data)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+	struct sf_gpio_priv *priv = gpiochip_get_data(gc);
+	unsigned long offset = irqd_to_hwirq(data);
+
+	sf_gpio_wr(priv, GPIO_IMR(offset), 0);
+	sf_gpio_wr(priv, GPIO_GPIMR(offset), 0);
+}
+
+/* We are actually setting the parents' affinity. */
+static int sf_gpio_irq_set_affinity(struct irq_data *data,
+				    const struct cpumask *dest, bool force)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+	unsigned long offset = irqd_to_hwirq(data);
+	const struct cpumask *pdest;
+	struct irq_desc *pdesc;
+	struct irq_data *pdata;
+	unsigned int group;
+	int ret;
+
+	/* Find the parent IRQ and call its irq_set_affinity */
+	group = offset / GPIOS_PER_GROUP;
+	if (group >= gc->irq.num_parents)
+		return -EINVAL;
+
+	pdesc = irq_to_desc(gc->irq.parents[group]);
+	if (!pdesc)
+		return -EINVAL;
+
+	pdata = irq_desc_get_irq_data(pdesc);
+	if (!pdata->chip->irq_set_affinity)
+		return -EINVAL;
+
+	ret = pdata->chip->irq_set_affinity(pdata, dest, force);
+	if (ret < 0)
+		return ret;
+
+	/* Copy its effective_affinity back */
+	pdest = irq_data_get_effective_affinity_mask(pdata);
+	irq_data_update_effective_affinity(data, pdest);
+	return ret;
+}
+
+/*
+ * Translate a generic IRQ trigger type into the controller's ITR value
+ * and pick the matching flow handler (level vs edge).
+ * Encoding inferred from the mapping below — confirm with the
+ * datasheet: bit2 = rising edge, bit1 = falling edge, bit0 = level
+ * polarity (1 = active high, 0 = active low).
+ */
+static int sf_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+	struct sf_gpio_priv *priv = gpiochip_get_data(gc);
+	unsigned long offset = irqd_to_hwirq(data);
+	u32 val;
+
+	switch (flow_type) {
+	case IRQ_TYPE_EDGE_RISING:
+		val = 4;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		val = 2;
+		break;
+	case IRQ_TYPE_EDGE_BOTH:
+		val = 6;
+		break;
+	case IRQ_TYPE_LEVEL_HIGH:
+		val = 1;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		val = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+	sf_gpio_wr(priv, GPIO_ITR(offset), val);
+
+	if (flow_type & IRQ_TYPE_LEVEL_MASK)
+		irq_set_handler_locked(data, handle_level_irq);
+	else
+		irq_set_handler_locked(data, handle_edge_irq);
+
+	return 0;
+}
+
+static const struct irq_chip sf_gpio_irqchip = {
+	.name			= KBUILD_MODNAME,
+	.irq_ack		= sf_gpio_irq_ack,
+	.irq_mask		= sf_gpio_irq_mask,
+	.irq_unmask		= sf_gpio_irq_unmask,
+	.irq_set_affinity	= sf_gpio_irq_set_affinity,
+	.irq_set_type		= sf_gpio_irq_set_type,
+	.flags			= IRQCHIP_IMMUTABLE,
+	GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+/*
+ * Chained handler for one group's parent interrupt: the group index is
+ * recovered from the parent IRQ number (parents are assumed to be
+ * consecutive, starting at priv->irq[0]), then every pending bit in
+ * the group's GPxIR status register is dispatched to the child domain.
+ */
+static void sf_gpio_irq_handler(struct irq_desc *desc)
+{
+	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+	struct irq_chip *ic = irq_desc_get_chip(desc);
+	struct sf_gpio_priv *priv = gpiochip_get_data(gc);
+	unsigned int irq = irq_desc_get_irq(desc);
+	unsigned int group = irq - priv->irq[0];
+	unsigned long pending;
+	unsigned int n;
+
+	chained_irq_enter(ic, desc);
+
+	pending = sf_gpio_rd(priv, GPIO_GPxIR(group));
+	for_each_set_bit(n, &pending, GPIOS_PER_GROUP) {
+		generic_handle_domain_irq(gc->irq.domain,
+					  n + group * GPIOS_PER_GROUP);
+	}
+
+	chained_irq_exit(ic, desc);
+}
+
+/*
+ * Probe: map registers, enable the bus clock, release the reset and
+ * register one gpio_chip covering "ngpios" lines.  Each group of 16
+ * GPIOs has its own parent interrupt, collected into priv->irq[].
+ * Fixes over the original: the duplicated platform_set_drvdata() call
+ * is dropped, and the reset line is re-asserted if probe fails after
+ * it was deasserted.
+ */
+static int sf_gpio_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct sf_gpio_priv *priv;
+	struct gpio_irq_chip *girq;
+	struct gpio_chip *gc;
+	u32 ngpios, ngroups;
+	int ret, i;
+
+	ret = of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpios);
+	if (ret)
+		return ret;
+
+	ngroups = DIV_ROUND_UP(ngpios, GPIOS_PER_GROUP);
+	priv = devm_kzalloc(dev, struct_size(priv, irq, ngroups), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, priv);
+
+	priv->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(priv->base))
+		return PTR_ERR(priv->base);
+
+	priv->clk = devm_clk_get_enabled(dev, NULL);
+	if (IS_ERR(priv->clk))
+		return PTR_ERR(priv->clk);
+
+	priv->rstc = devm_reset_control_get_optional(&pdev->dev, NULL);
+	if (IS_ERR(priv->rstc))
+		return PTR_ERR(priv->rstc);
+
+	ret = reset_control_deassert(priv->rstc);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < ngroups; i++) {
+		ret = platform_get_irq(pdev, i);
+		if (ret < 0)
+			goto err_assert;
+
+		priv->irq[i] = ret;
+	}
+
+	gc = &priv->gc;
+	gc->label = KBUILD_MODNAME;
+	gc->parent = dev;
+	gc->owner = THIS_MODULE;
+	gc->request = gpiochip_generic_request;
+	gc->free = gpiochip_generic_free;
+	gc->get_direction = sf_gpio_get_direction;
+	gc->direction_input = sf_gpio_direction_input;
+	gc->direction_output = sf_gpio_direction_output;
+	gc->get = sf_gpio_get_value;
+	gc->set = sf_gpio_set_value;
+	gc->set_config = sf_gpio_set_config;
+	gc->base = -1;
+	gc->ngpio = ngpios;
+
+	girq = &gc->irq;
+	gpio_irq_chip_set_chip(girq, &sf_gpio_irqchip);
+	girq->num_parents = ngroups;
+	girq->parents = priv->irq;
+	girq->parent_handler = sf_gpio_irq_handler;
+	girq->default_type = IRQ_TYPE_NONE;
+	girq->handler = handle_bad_irq;
+
+	ret = devm_gpiochip_add_data(dev, gc, priv);
+	if (ret)
+		goto err_assert;
+	return 0;
+
+err_assert:
+	/* Put the block back into reset; remove() will not run on failure. */
+	reset_control_assert(priv->rstc);
+	return ret;
+}
+
+/*
+ * Unbind: put the controller back into reset.  The gpiochip, clock and
+ * mappings are devm-managed and released by the core afterwards.
+ */
+static void sf_gpio_remove(struct platform_device *pdev)
+{
+	struct sf_gpio_priv *priv = platform_get_drvdata(pdev);
+
+	reset_control_assert(priv->rstc);
+}
+
+static const struct of_device_id sf_gpio_ids[] = {
+	{ .compatible = "siflower,sf19a2890-gpio" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, sf_gpio_ids);
+
+static struct platform_driver sf_gpio_driver = {
+	.probe		= sf_gpio_probe,
+	.remove_new	= sf_gpio_remove,
+	.driver = {
+		.name		= "siflower_gpio",
+		.owner		= THIS_MODULE,
+		.of_match_table	= sf_gpio_ids,
+	},
+};
+module_platform_driver(sf_gpio_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Qingfang Deng <[email protected]>");
+MODULE_DESCRIPTION("GPIO driver for SiFlower SoCs");

+ 53 - 0
target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/Kconfig

@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Siflower network device configuration
+#
+
+config NET_VENDOR_SIFLOWER
+	bool "Siflower Ethernet"
+	default y
+	depends on ARCH_SIFLOWER
+
+if NET_VENDOR_SIFLOWER
+
+config NET_SIFLOWER_ETH_DPNS
+	tristate "Siflower DPNS driver"
+	help
+	  Support the Dataplane network subsystem of SiFlower SF21A6826/SF21H8898 SoC.
+
+config NET_SIFLOWER_ETH_XGMAC
+	tristate "Siflower Ethernet MAC driver"
+	depends on NET_SIFLOWER_ETH_DPNS
+	select MII
+	select PAGE_POOL
+	select PHYLINK
+	select NET_SIFLOWER_ETH_DMA
+	select NET_SIFLOWER_ETH_XPCS
+	help
+	  Support the Ethernet controller of SiFlower SF21A6826/SF21H8898 SoC.
+
+config NET_SIFLOWER_ETH_DMA
+	tristate "Siflower Ethernet DMA driver"
+	depends on NET_SIFLOWER_ETH_DPNS
+	select PAGE_POOL
+	help
+	  Support the Ethernet DMA engine of SiFlower SF21A6826/SF21H8898 SoC.
+
+config NET_SIFLOWER_ETH_XPCS
+	tristate "Siflower Ethernet XPCS driver"
+	depends on NET_SIFLOWER_ETH_DPNS
+	select PAGE_POOL
+	help
+	  Support the PCS block of SiFlower SF21A6826/SF21H8898 SoC.
+
+if NET_SIFLOWER_ETH_DMA
+
+config NET_SIFLOWER_ETH_USE_INTERNAL_SRAM
+	bool "Use internal SRAM for DMA descriptors"
+	select SRAM
+	help
+	  Use internal SRAM instead of system memory for DMA descriptors.
+
+endif # NET_SIFLOWER_ETH_DMA
+
+endif # NET_VENDOR_SIFLOWER

+ 11 - 0
target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/Makefile

@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the siflower soc network device drivers.
+#
+
+# NOTE(review): the composite module "sf_dpns" lists sf_dpns.o among its own
+# members; kbuild rejects a composite object whose name matches a member —
+# confirm this builds, or rename the main object (e.g. sf_dpns_main.o).
+sf_dpns-objs := sf_dpns.o sf_dpns_debugfs.o sf_dpns_tmu.o sf_dpns_se.o
+obj-$(CONFIG_NET_SIFLOWER_ETH_DPNS) += sf_dpns.o
+
+obj-$(CONFIG_NET_SIFLOWER_ETH_XGMAC) += sfxgmac.o
+obj-$(CONFIG_NET_SIFLOWER_ETH_DMA) += sfxgmac-dma.o
+obj-$(CONFIG_NET_SIFLOWER_ETH_XPCS) += sfxpcs.o

+ 152 - 0
target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/dma.h

@@ -0,0 +1,152 @@
+#ifndef _SFXGMAC_DMA_H
+#define _SFXGMAC_DMA_H
+
+#include <linux/clk.h>
+#include <linux/genalloc.h>
+#include <linux/if_vlan.h>
+#include <linux/mfd/syscon.h>
+#include <linux/netdevice.h>
+#include <linux/regmap.h>
+
+#ifdef CONFIG_NET_SIFLOWER_ETH_USE_INTERNAL_SRAM
+#define DMA_TX_SIZE	512
+#define DMA_RX_SIZE	512
+#else
+#define DMA_TX_SIZE	2048
+#define DMA_RX_SIZE	2048
+#endif
+#define DPNS_HOST_PORT	6
+#define DPNS_MAX_PORT	27
+#define DMA_CH_MAX	4
+#define DMA_CH_DISABLE	4
+#define DMA_OVPORT_CH	4
+#define SZ_1_5K		0x00000600
+#define SZ_3K		0x00000C00
+
+/* Either 8 (64-bit) or 16 (128-bit), configured in RTL */
+#define DMA_DATAWIDTH	8
+
+/* extra header room for skb to wifi */
+#define NET_WIFI_HEADERROOM_EXTRA SKB_DATA_ALIGN(32)
+
+/* Padding at the beginning of the allocated buffer, passed into skb_reserve */
+#define BUF_PAD		(NET_SKB_PAD + NET_IP_ALIGN + NET_WIFI_HEADERROOM_EXTRA)
+
+/* RX Buffer size, calculated by MTU + eth header + double VLAN tag + FCS */
+#define BUF_SIZE(x)	((x) + ETH_HLEN + VLAN_HLEN * 2 + ETH_FCS_LEN)
+
+/* RX Buffer alloc size, with padding and skb_shared_info, passed into
+ * page_pool_dev_alloc_frag */
+#define BUF_SIZE_ALLOC(x)	(SKB_DATA_ALIGN(BUF_SIZE(x) + BUF_PAD) + \
+				SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+/* RX Buffer size programmed into RBSZ field, must be multiple of datawidth */
+#define BUF_SIZE_ALIGN(x)	ALIGN(BUF_SIZE(x) + NET_IP_ALIGN, DMA_DATAWIDTH)
+
+/* Maximum value of the RBSZ field */
+#define BUF_SIZE_ALIGN_MAX	ALIGN_DOWN(FIELD_MAX(XGMAC_RBSZ), DMA_DATAWIDTH)
+
+/* TODO: use mtu in priv */
+#define BUF_SIZE_DEFAULT    SKB_MAX_HEAD(BUF_PAD)
+#define BUF_SIZE_DEFAULT_ALIGN  ALIGN(BUF_SIZE_DEFAULT, DMA_DATAWIDTH)
+
+/* skb handled by dpns flag */
+#define SF_DPNS_FLAG        47
+
+/* skb handled by dpns need to be forwarded */
+#define SF_CB_DPNS_FORWARD             22
+
+struct xgmac_dma_desc {
+	__le32 des0;
+	__le32 des1;
+	__le32 des2;
+	__le32 des3;
+};
+
+struct xgmac_skb_cb {
+	u8 id;
+	bool fastmode;
+};
+
+#define XGMAC_SKB_CB(skb)	((struct xgmac_skb_cb *)(skb)->cb)
+
+struct xgmac_tx_info {
+	dma_addr_t buf;
+	bool map_as_page;
+	unsigned len;
+	bool last_segment;
+};
+
+struct xgmac_txq {
+	struct xgmac_dma_desc *dma_tx ____cacheline_aligned_in_smp;
+	struct sk_buff **tx_skbuff;
+	struct xgmac_tx_info *tx_skbuff_dma;
+	unsigned int cur_tx;
+	unsigned int dirty_tx;
+	dma_addr_t dma_tx_phy;
+	dma_addr_t tx_tail_addr;
+	spinlock_t lock;
+	struct napi_struct napi ____cacheline_aligned_in_smp;
+	u32 idx;
+	u32 irq;
+	bool is_busy;
+};
+
+struct xgmac_dma_rx_buffer {
+	struct page *page;
+	unsigned int offset;
+};
+
+struct xgmac_rxq {
+	struct xgmac_dma_desc *dma_rx ____cacheline_aligned_in_smp;
+	struct page_pool *page_pool;
+	struct xgmac_dma_rx_buffer *buf_pool;
+	unsigned int cur_rx;
+	unsigned int dirty_rx;
+	dma_addr_t dma_rx_phy;
+	dma_addr_t rx_tail_addr;
+	struct napi_struct napi ____cacheline_aligned_in_smp;
+	u32 idx;
+	u32 irq;
+};
+
+enum {
+	DMA_CLK_AXI,
+	DMA_CLK_NPU,
+	DMA_CLK_CSR,
+	DMA_NUM_CLKS
+};
+
+struct xgmac_dma_priv {
+	void __iomem		*ioaddr;
+	struct device		*dev;
+	struct clk_bulk_data	clks[DMA_NUM_CLKS];
+	struct net_device	napi_dev;
+	/* RX Queue */
+	struct xgmac_rxq	rxq[DMA_CH_MAX];
+
+	/* TX Queue */
+	struct xgmac_txq	txq[DMA_CH_MAX];
+
+	/* associated net devices (vports) */
+	struct net_device	*ndevs[DPNS_MAX_PORT];
+
+	struct regmap		*ethsys;
+	refcount_t		refcnt;
+	u32			irq;
+	u8			ifindex;
+#ifdef CONFIG_NET_SIFLOWER_ETH_USE_INTERNAL_SRAM
+	struct gen_pool		*genpool;
+#endif
+	u16			rx_alloc_size;
+	u16			rx_buffer_size;
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_PAGE_POOL_STATS)
+	struct dentry		*dbgdir;
+#endif
+};
+
+netdev_tx_t xgmac_dma_xmit_fast(struct sk_buff *skb, struct net_device *dev);
+int xgmac_dma_open(struct xgmac_dma_priv *priv, struct net_device *dev, u8 id);
+int xgmac_dma_stop(struct xgmac_dma_priv *priv, struct net_device *dev, u8 id);
+
+#endif

+ 59 - 0
target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/dpns.h

@@ -0,0 +1,59 @@
+#ifndef __SF_DPNS_H__
+#define __SF_DPNS_H__
+#include <asm/mmio.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/reset.h>
+
+#define PKT_ERR_STG_CFG2		0x80038
+#define  ARP_REPLY_ERR_OP		GENMASK(18, 16)
+#define  ARP_REPLY_ERR_DROP		BIT(18)
+#define  ARP_REPLY_ERR_UP		BIT(17)
+#define  ARP_REPLY_ERR_FWD		BIT(16)
+#define  ARP_REQ_ERR_OP			GENMASK(14, 12)
+#define  ARP_REQ_ERR_DROP		BIT(14)
+#define  ARP_REQ_ERR_UP			BIT(13)
+#define  ARP_REQ_ERR_FWD		BIT(12)
+
+#define  PKT_ERR_ACTION_DROP		BIT(2)
+#define  PKT_ERR_ACTION_UP		BIT(1)
+#define  PKT_ERR_ACTION_FWD		BIT(0)
+
+#define NPU_MIB_BASE			0x380000
+#define NPU_MIB(x)			(NPU_MIB_BASE + (x) * 4)
+#define NPU_MIB_PKT_RCV_PORT(x)		(NPU_MIB_BASE + 0x2000 + (x) * 4)
+#define NPU_MIB_NCI_RD_DATA2		(NPU_MIB_BASE + 0x301c)
+#define NPU_MIB_NCI_RD_DATA3		(NPU_MIB_BASE + 0x3020)
+
/* Driver state for the SF21 DPNS (NPU) stub driver. */
struct dpns_priv {
	void __iomem *ioaddr;		/* NPU register base (MMIO) */
	struct clk *clk;		/* functional clock, enabled for the device lifetime at probe */
	struct reset_control *npu_rst;	/* the "npu" reset line; pulsed at probe, asserted on remove */
	struct device *dev;
	struct dentry *debugfs;		/* debugfs root, removed recursively on unbind */
};
+
/* Read the 32-bit NPU register at byte offset @reg. */
static inline u32 dpns_r32(struct dpns_priv *priv, unsigned reg)
{
	return readl(priv->ioaddr + reg);
}
+
/* Write @val to the 32-bit NPU register at byte offset @reg. */
static inline void dpns_w32(struct dpns_priv *priv, unsigned reg, u32 val)
{
	writel(val, priv->ioaddr + reg);
}
+
+static inline void dpns_rmw(struct dpns_priv *priv, unsigned reg, u32 clr,
+			    u32 set)
+{
+	u32 val = dpns_r32(priv, reg);
+	val &= ~clr;
+	val |= set;
+	dpns_w32(priv, reg, val);
+}
+
+int dpns_se_init(struct dpns_priv *priv);
+int dpns_tmu_init(struct dpns_priv *priv);
+void sf_dpns_debugfs_init(struct dpns_priv *priv);
+
+#endif /* __SF_DPNS_H__ */

+ 782 - 0
target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/eth.h

@@ -0,0 +1,782 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+ * Copyright (c) 2022 SiFlower Ltd.
+ */
+#ifndef _SIFLOWER_ETH_H
+#define _SIFLOWER_ETH_H
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/netdevice.h>
+
+struct phylink_pcs *xpcs_port_get(struct platform_device *, unsigned int);
+void xpcs_port_put(struct platform_device *);
+
/* MMIO helpers: @p is any private struct with an 'ioaddr' register base. */
#define reg_read(p, reg)		readl((p)->ioaddr + (reg))
#define reg_write(p, reg, val)		writel(val, (p)->ioaddr + (reg))
/* Read-modify-write: clear @clear bits, then set @set bits (not atomic). */
#define reg_rmw(p, reg, clear, set)				\
	do {							\
		void __iomem *ioaddr = (p)->ioaddr + (reg);	\
		u32 val = readl(ioaddr);		\
								\
		val &= ~(clear);				\
		val |= (set);					\
		writel(val, ioaddr);			\
	} while (0)
#define reg_set(p, reg, set)		reg_rmw(p, reg, 0, set)
#define reg_clear(p, reg, clear)	reg_rmw(p, reg, clear, 0)

/* Recover the net_device from its NETDEV_ALIGN-ed private area; relies on
 * the alloc_netdev layout placing priv directly after the aligned netdev. */
#define priv_to_netdev(p) \
	((struct net_device*)((void*)(p) - ALIGN(sizeof(struct net_device), NETDEV_ALIGN)))

/* Extract bits [19:14] of a register offset as a block/instance id. */
#define offset_to_id(addr)	FIELD_GET(GENMASK(19, 14), addr)
+
+/* Maximum L2 frame size, including FCS */
+#define MAX_FRAME_SIZE		16383
+#define TSO_MAX_BUFF_SIZE	MAX_FRAME_SIZE
+
+/* Ethernet sysm defines */
+#define ETHSYS_MAC(n)		((n) * 4)
+#define ETHSYS_PHY_INTF_SEL	GENMASK(17, 16)
+#define ETHSYS_PHY_INTF_RGMII   FIELD_PREP(ETHSYS_PHY_INTF_SEL, 1)
+#define ETHSYS_MAC5_CTRL        0xdc
+#define MAC5_PHY_INTF_SEL       GENMASK(17, 16)
+#define MAC5_RX_DELAY_EN        BIT(24)
+#define MAC5_RX_DELAY           GENMASK(23, 16)
+#define MAC5_TX_DELAY_EN        BIT(8)
+#define MAC5_TX_DELAY           GENMASK(7, 0)
+#define MAC5_DELAY_MASK         (MAC5_TX_DELAY_EN | MAC5_TX_DELAY | MAC5_RX_DELAY | MAC5_RX_DELAY_EN)
+#define MAC5_DELAY_STEP         49
+#define MAC5_DELAY_DEFAULT      (0x41 * MAC5_DELAY_STEP)
+#define ETHSYS_QSG_CTRL		0x6c
+#define ETHSYS_SG_CTRL		0x70
+#define ETHSYS_REF_RPT_EN	BIT(10)
+#define ETHSYS_RST          0x88
+#define ETHSYS_RST_MAC5	    BIT(9)
+#define ETHSYS_RATIO_LOAD   0xec
+#define ETHSYS_NPU_BYPASS	0x8c
+#define ETHSYS_RX_QUEUE_ENABLE	0xb4
+#define ETHSYS_MRI_Q_MODE	GENMASK(31, 30)
+#define ETHSYS_MRI_OVPORT_MAX	GENMASK(21, 16)
+#define ETHSYS_MRI_OVPORT_MIN	GENMASK(13, 8)
+#define ETHSYS_MRI_Q_EN	0xb8
+#define ETHSYS_MRI_OVPORT_TOP_PRIO	GENMASK(5, 0)
+#define XGMAC_MTL_RXQ_DMA_MAP0	0x00001030
+#define ETHSYS_TX_DIS		0xd4
+#define ETHSYS_QS_SGMII_STATUS		0x128
+#define XGMAC_PORT_DH(n)      (BIT(12) >> ((n)*4))
+#define XGMAC_PORT_LINK(n)    (BIT(13)  >> ((n)*4))
+#define XGMAC_PORT0_SPD_MASK    GENMASK(3, 2)
+
+#define GMAC_HI_REG_AE		BIT(31)
+
+/* XGMAC Registers */
+#define XGMAC_TX_CONFIG			0x00000000
+#define XGMAC_CONFIG_SS_OFF		29
+#define XGMAC_CONFIG_SS_MASK		GENMASK(31, 29)
+#define XGMAC_CONFIG_SS_10000		(0x0 << XGMAC_CONFIG_SS_OFF)
+#define XGMAC_CONFIG_SS_2500_GMII	(0x2 << XGMAC_CONFIG_SS_OFF)
+#define XGMAC_CONFIG_SS_1000_GMII	(0x3 << XGMAC_CONFIG_SS_OFF)
+#define XGMAC_CONFIG_SS_100_MII		(0x4 << XGMAC_CONFIG_SS_OFF)
+#define XGMAC_CONFIG_SS_5000		(0x5 << XGMAC_CONFIG_SS_OFF)
+#define XGMAC_CONFIG_SS_2500		(0x6 << XGMAC_CONFIG_SS_OFF)
+#define XGMAC_CONFIG_SS_10_MII		(0x7 << XGMAC_CONFIG_SS_OFF)
+#define XGMAC_CONFIG_SARC		GENMASK(22, 20)
+#define XGMAC_CONFIG_SARC_SHIFT		20
+#define XGMAC_CONFIG_JD			BIT(16)
+#define XGMAC_CONFIG_IFP		BIT(11)
/* Inter-packet-gap encodings for the XGMAC_TX_CONFIG register; each name
 * carries the gap size (88/80/72/64 — presumably bit times, confirm with
 * the XGMAC databook). XGMAC_CORE_INIT_TX uses the 88 setting. */
enum inter_packet_gap {
	XGMAC_CONTROL_IPG_88 = 0x00000100,
	XGMAC_CONTROL_IPG_80 = 0x00000200,
	XGMAC_CONTROL_IPG_72 = 0x00000300,
	XGMAC_CONTROL_IPG_64 = 0x00000400,
};
+#define XGMAC_CONFIG_TE			BIT(0)
+#define XGMAC_CORE_INIT_TX		(XGMAC_CONTROL_IPG_88)
+#define XGMAC_RX_CONFIG			0x00000004
+#define XGMAC_CONFIG_ARPEN		BIT(31)
+#define XGMAC_CONFIG_GPSL		GENMASK(29, 16)
+#define XGMAC_CONFIG_GPSL_SHIFT		16
+#define XGMAC_CONFIG_HDSMS		GENMASK(14, 12)
+#define XGMAC_CONFIG_HDSMS_SHIFT	12
+#define XGMAC_CONFIG_HDSMS_256		(0x2 << XGMAC_CONFIG_HDSMS_SHIFT)
+#define XGMAC_CONFIG_S2KP		BIT(11)
+#define XGMAC_CONFIG_LM			BIT(10)
+#define XGMAC_CONFIG_IPC		BIT(9)
+#define XGMAC_CONFIG_JE			BIT(8)
+#define XGMAC_CONFIG_WD			BIT(7)
+#define XGMAC_CONFIG_GPSLCE		BIT(6)
+#define XGMAC_CONFIG_DCRCC		BIT(3)
+#define XGMAC_CONFIG_CST		BIT(2)
+#define XGMAC_CONFIG_ACS		BIT(1)
+#define XGMAC_CONFIG_RE			BIT(0)
+#define XGMAC_CORE_INIT_RX		(XGMAC_CONFIG_ACS | XGMAC_CONFIG_CST |\
+					XGMAC_CONFIG_IPC)
+#define XGMAC_PACKET_FILTER		0x00000008
+#define XGMAC_FILTER_RA			BIT(31)
+#define XGMAC_FILTER_IPFE		BIT(20)
+#define XGMAC_FILTER_VTFE		BIT(16)
+#define XGMAC_FILTER_HPF		BIT(10)
+#define XGMAC_FILTER_PCF		BIT(7)
+#define XGMAC_FILTER_PM			BIT(4)
+#define XGMAC_FILTER_HMC		BIT(2)
+#define XGMAC_FILTER_PR			BIT(0)
+#define XGMAC_WD_JB_TIMEOUT		0xc
+#define XGMAC_PJE			BIT(24)
+#define XGMAC_JTO			GENMASK(19, 16)
+#define XGMAC_PWE			BIT(8)
+#define XGMAC_WTO			GENMASK(3, 0)
+#define XGMAC_HASH_TABLE(x)		(0x00000010 + (x) * 4)
+#define XGMAC_MAX_HASH_TABLE		8
+#define XGMAC_VLAN_TAG			0x00000050
+#define XGMAC_VLAN_EDVLP		BIT(26)
+#define XGMAC_VLAN_VTHM			BIT(25)
+#define XGMAC_VLAN_DOVLTC		BIT(20)
+#define XGMAC_VLAN_ESVL			BIT(18)
+#define XGMAC_VLAN_ETV			BIT(16)
+#define XGMAC_VLAN_VID			GENMASK(15, 0)
+#define XGMAC_VLAN_HASH_TABLE		0x00000058
+#define XGMAC_VLAN_INCL			0x00000060
+#define XGMAC_VLAN_VLTI			BIT(20)
+#define XGMAC_VLAN_CSVL			BIT(19)
+#define XGMAC_VLAN_VLC			GENMASK(17, 16)
+#define XGMAC_VLAN_VLC_SHIFT		16
+#define XGMAC_RXQ_CTRL0			0x000000a0
+#define XGMAC_RXQEN(x)			GENMASK((x) * 2 + 1, (x) * 2)
+#define XGMAC_RXQEN_SHIFT(x)		((x) * 2)
+#define XGMAC_RXQ_CTRL1			0x000000a4
+#define XGMAC_RQ			GENMASK(7, 4)
+#define XGMAC_RQ_SHIFT			4
+#define XGMAC_RXQ_CTRL2			0x000000a8
+#define XGMAC_RXQ_CTRL3			0x000000ac
+#define XGMAC_PSRQ(x)			GENMASK((x) * 8 + 7, (x) * 8)
+#define XGMAC_PSRQ_SHIFT(x)		((x) * 8)
+#define XGMAC_INT_STATUS		0x000000b0
+#define XGMAC_RGMII_LS			BIT(27)
+#define XGMAC_RGMII_SPD			GENMASK(26, 25)
+#define XGMAC_RGMII_DM			BIT(24)
+#define XGMAC_LS			GENMASK(25, 24)
+#define XGMAC_MMCRXIPIS			BIT(11)
+#define XGMAC_MMCTXIS			BIT(10)
+#define XGMAC_MMCRXIS			BIT(9)
+#define XGMAC_MMCIS			(XGMAC_MMCRXIPIS | XGMAC_MMCTXIS | XGMAC_MMCRXIS)
+#define XGMAC_LPIIS			BIT(5)
+#define XGMAC_PMTIS			BIT(4)
+#define XGMAC_SMI			BIT(1)
+#define XGMAC_LSI			BIT(0)
+#define XGMAC_INT_EN			0x000000b4
+#define XGMAC_TSIE			BIT(12)
+#define XGMAC_LPIIE			BIT(5)
+#define XGMAC_PMTIE			BIT(4)
+#define XGMAC_INT_DEFAULT_EN		(XGMAC_LPIIE | XGMAC_PMTIE)
+#define XGMAC_Qx_TX_FLOW_CTRL(x)	(0x00000070 + (x) * 4)
+#define XGMAC_PT			GENMASK(31, 16)
+#define XGMAC_PT_SHIFT			16
+#define XGMAC_TFE			BIT(1)
+#define XGMAC_RX_FLOW_CTRL		0x00000090
+#define XGMAC_UP			BIT(1)
+#define XGMAC_RFE			BIT(0)
+#define XGMAC_PMT			0x000000c0
+#define XGMAC_GLBLUCAST			BIT(9)
+#define XGMAC_RWKPKTEN			BIT(2)
+#define XGMAC_MGKPKTEN			BIT(1)
+#define XGMAC_PWRDWN			BIT(0)
+#define XGMAC_LPI_CTRL			0x000000d0
+#define XGMAC_TXCGE			BIT(21)
+#define XGMAC_LPIATE			BIT(20)
+#define XGMAC_LPITXA			BIT(19)
+#define XGMAC_PLS			BIT(17)
+#define XGMAC_LPITXEN			BIT(16)
+#define XGMAC_RLPIEX			BIT(3)
+#define XGMAC_RLPIEN			BIT(2)
+#define XGMAC_TLPIEX			BIT(1)
+#define XGMAC_TLPIEN			BIT(0)
+#define XGMAC_LPI_TIMER_CTRL		0x000000d4
+#define XGMAC_LPI_LST			GENMASK(25, 16)
+#define XGMAC_LPI_LST_DEFAULT		1000
+#define XGMAC_LPI_TWT			GENMASK(15, 0)
+#define XGMAC_LPI_TWT_DEFAULT		30
+#define XGMAC_LPI_AUTO_EN		0x000000d8
+#define XGMAC_LPI_AUTO_EN_MAX		0xffff8
+#define XGMAC_LPI_AUTO_EN_DEFAULT	10000
+#define XGMAC_LPI_1US			0x000000dc
+#define XGMAC_VERSION			0x00000110
+#define XGMAC_VERSION_USER		GENMASK(23, 16)
+#define XGMAC_VERSION_ID_MASK		GENMASK(15, 0)
+#define XGMAC_VERSION_ID		0x7631
+#define XGMAC_HW_FEATURE0		0x0000011c
+#define XGMAC_HWFEAT_SAVLANINS		BIT(27)
+#define XGMAC_HWFEAT_RXCOESEL		BIT(16)
+#define XGMAC_HWFEAT_TXCOESEL		BIT(14)
+#define XGMAC_HWFEAT_EEESEL		BIT(13)
+#define XGMAC_HWFEAT_TSSEL		BIT(12)
+#define XGMAC_HWFEAT_AVSEL		BIT(11)
+#define XGMAC_HWFEAT_RAVSEL		BIT(10)
+#define XGMAC_HWFEAT_ARPOFFSEL		BIT(9)
+#define XGMAC_HWFEAT_MMCSEL		BIT(8)
+#define XGMAC_HWFEAT_MGKSEL		BIT(7)
+#define XGMAC_HWFEAT_RWKSEL		BIT(6)
+#define XGMAC_HWFEAT_VLHASH		BIT(4)
+#define XGMAC_HWFEAT_GMIISEL		BIT(1)
+#define XGMAC_HW_FEATURE1		0x00000120
+#define XGMAC_HWFEAT_L3L4FNUM		GENMASK(30, 27)
+#define XGMAC_HWFEAT_HASHTBLSZ		GENMASK(25, 24)
+#define XGMAC_HWFEAT_RSSEN		BIT(20)
+#define XGMAC_HWFEAT_TSOEN		BIT(18)
+#define XGMAC_HWFEAT_SPHEN		BIT(17)
+#define XGMAC_HWFEAT_ADDR64		GENMASK(15, 14)
+#define XGMAC_HWFEAT_TXFIFOSIZE		GENMASK(10, 6)
+#define XGMAC_HWFEAT_RXFIFOSIZE		GENMASK(4, 0)
+#define XGMAC_HW_FEATURE2		0x00000124
+#define XGMAC_HWFEAT_PPSOUTNUM		GENMASK(26, 24)
+#define XGMAC_HWFEAT_TXCHCNT		GENMASK(21, 18)
+#define XGMAC_HWFEAT_RXCHCNT		GENMASK(15, 12)
+#define XGMAC_HWFEAT_TXQCNT		GENMASK(9, 6)
+#define XGMAC_HWFEAT_RXQCNT		GENMASK(3, 0)
+#define XGMAC_HW_FEATURE3		0x00000128
+#define XGMAC_HWFEAT_TBSSEL		BIT(27)
+#define XGMAC_HWFEAT_FPESEL		BIT(26)
+#define XGMAC_HWFEAT_ESTWID		GENMASK(24, 23)
+#define XGMAC_HWFEAT_ESTDEP		GENMASK(22, 20)
+#define XGMAC_HWFEAT_ESTSEL		BIT(19)
+#define XGMAC_HWFEAT_ASP		GENMASK(15, 14)
+#define XGMAC_HWFEAT_DVLAN		BIT(13)
+#define XGMAC_HWFEAT_FRPES		GENMASK(12, 11)
+#define XGMAC_HWFEAT_FRPPB		GENMASK(10, 9)
+#define XGMAC_HWFEAT_FRPSEL		BIT(3)
+#define XGMAC_MAC_EXT_CONFIG		0x00000140
+#define XGMAC_HD			BIT(24)
+#define XGMAC_MAC_DPP_FSM_INT_STATUS	0x00000150
+#define XGMAC_MAC_FSM_CONTROL		0x00000158
+#define XGMAC_PRTYEN			BIT(1)
+#define XGMAC_TMOUTEN			BIT(0)
+#define XGMAC_FPE_CTRL_STS		0x00000280
+#define XGMAC_EFPE			BIT(0)
+#define XGMAC_ADDRx_HIGH(x)		(0x00000300 + (x) * 0x8)
+#define XGMAC_ADDR_MAX			32
+#define XGMAC_AE			BIT(31)
+#define XGMAC_DCS			GENMASK(19, 16)
+#define XGMAC_DCS_SHIFT			16
+#define XGMAC_ADDRx_LOW(x)		(0x00000304 + (x) * 0x8)
+#define XGMAC_L3L4_ADDR_CTRL		0x00000c00
+#define XGMAC_IDDR			GENMASK(15, 8)
+#define XGMAC_IDDR_SHIFT		8
+#define XGMAC_IDDR_FNUM			4
+#define XGMAC_TT			BIT(1)
+#define XGMAC_XB			BIT(0)
+#define XGMAC_L3L4_DATA			0x00000c04
+#define XGMAC_L3L4_CTRL			0x0
+#define XGMAC_L4DPIM0			BIT(21)
+#define XGMAC_L4DPM0			BIT(20)
+#define XGMAC_L4SPIM0			BIT(19)
+#define XGMAC_L4SPM0			BIT(18)
+#define XGMAC_L4PEN0			BIT(16)
+#define XGMAC_L3HDBM0			GENMASK(15, 11)
+#define XGMAC_L3HSBM0			GENMASK(10, 6)
+#define XGMAC_L3DAIM0			BIT(5)
+#define XGMAC_L3DAM0			BIT(4)
+#define XGMAC_L3SAIM0			BIT(3)
+#define XGMAC_L3SAM0			BIT(2)
+#define XGMAC_L3PEN0			BIT(0)
+#define XGMAC_L4_ADDR			0x1
+#define XGMAC_L4DP0			GENMASK(31, 16)
+#define XGMAC_L4DP0_SHIFT		16
+#define XGMAC_L4SP0			GENMASK(15, 0)
+#define XGMAC_L3_ADDR0			0x4
+#define XGMAC_L3_ADDR1			0x5
+#define XGMAC_L3_ADDR2			0x6
+#define XGMAC_L3_ADDR3			0x7
+#define XGMAC_ARP_ADDR			0x00000c10
+#define XGMAC_RSS_CTRL			0x00000c80
+#define XGMAC_UDP4TE			BIT(3)
+#define XGMAC_TCP4TE			BIT(2)
+#define XGMAC_IP2TE			BIT(1)
+#define XGMAC_RSSE			BIT(0)
+#define XGMAC_RSS_ADDR			0x00000c88
+#define XGMAC_RSSIA_SHIFT		8
+#define XGMAC_ADDRT			BIT(2)
+#define XGMAC_CT			BIT(1)
+#define XGMAC_OB			BIT(0)
+#define XGMAC_RSS_DATA			0x00000c8c
+#define XGMAC_TIMESTAMP_STATUS		0x00000d20
+#define XGMAC_TXTSC			BIT(15)
+#define XGMAC_TXTIMESTAMP_NSEC		0x00000d30
+#define XGMAC_TXTSSTSLO			GENMASK(30, 0)
+#define XGMAC_TXTIMESTAMP_SEC		0x00000d34
+#define XGMAC_PPS_CONTROL		0x00000d70
+#define XGMAC_PPS_MAXIDX(x)		((((x) + 1) * 8) - 1)
+#define XGMAC_PPS_MINIDX(x)		((x) * 8)
+#define XGMAC_PPSx_MASK(x)		\
+	GENMASK(XGMAC_PPS_MAXIDX(x), XGMAC_PPS_MINIDX(x))
+#define XGMAC_TRGTMODSELx(x, val)	\
+	GENMASK(XGMAC_PPS_MAXIDX(x) - 1, XGMAC_PPS_MAXIDX(x) - 2) & \
+	((val) << (XGMAC_PPS_MAXIDX(x) - 2))
+#define XGMAC_PPSCMDx(x, val)		\
+	GENMASK(XGMAC_PPS_MINIDX(x) + 3, XGMAC_PPS_MINIDX(x)) & \
+	((val) << XGMAC_PPS_MINIDX(x))
+#define XGMAC_PPSCMD_START		0x2
+#define XGMAC_PPSCMD_STOP		0x5
+#define XGMAC_PPSEN0			BIT(4)
+#define XGMAC_PPSx_TARGET_TIME_SEC(x)	(0x00000d80 + (x) * 0x10)
+#define XGMAC_PPSx_TARGET_TIME_NSEC(x)	(0x00000d84 + (x) * 0x10)
+#define XGMAC_TRGTBUSY0			BIT(31)
+#define XGMAC_PPSx_INTERVAL(x)		(0x00000d88 + (x) * 0x10)
+#define XGMAC_PPSx_WIDTH(x)		(0x00000d8c + (x) * 0x10)
+
+#define XGMAC_MDIO_ADDR			0x00000200
+#define XGMAC_MDIO_DATA			0x00000204
+#define XGMAC_MDIO_INT_STATUS		0x00000214
+#define XGMAC_MDIO_INT_EN		0x00000218
+#define XGMAC_MDIO_INT_EN_SINGLE	BIT(12)
+#define XGMAC_MDIO_C22P			0x00000220
+
+/* MDIO defines */
+#define MII_GMAC_PA			GENMASK(15, 11)
+#define MII_GMAC_RA			GENMASK(10, 6)
+#define MII_GMAC_CR			GENMASK(5, 2)
+#define MII_GMAC_WRITE			BIT(1)
+#define MII_GMAC_BUSY			BIT(0)
+#define MII_DATA_MASK			GENMASK(15, 0)
+#define MII_XGMAC_DA			GENMASK(25, 21)
+#define MII_XGMAC_PA			GENMASK(20, 16)
+#define MII_XGMAC_RA			GENMASK(15, 0)
+#define MII_XGMAC_BUSY			BIT(22)
+#define MII_XGMAC_CR			GENMASK(21, 19)
+#define MII_XGMAC_SADDR			BIT(18)
+#define MII_XGMAC_CMD_SHIFT		16
+#define MII_XGMAC_WRITE			(1 << MII_XGMAC_CMD_SHIFT)
+#define MII_XGMAC_READ			(3 << MII_XGMAC_CMD_SHIFT)
+#define MII_XGMAC_PSE			BIT(30)
+#define MII_XGMAC_CRS			BIT(31)
+
+/* XGMAC MMC Registers */
+#define MMC_XGMAC_CONTROL		0x800
+#define MMC_XGMAC_CONTROL_RSTONRD	BIT(2)
+#define MMC_XGMAC_CONTROL_RESET		BIT(0)
+#define MMC_XGMAC_RX_INT_EN		0x80c
+#define MMC_XGMAC_TX_INT_EN		0x810
+
+#define MMC_XGMAC_TX_OCTET_GB		0x814
+#define MMC_XGMAC_TX_PKT_GB		0x81c
+#define MMC_XGMAC_TX_BROAD_PKT_G	0x824
+#define MMC_XGMAC_TX_MULTI_PKT_G	0x82c
+#define MMC_XGMAC_TX_64OCT_GB		0x834
+#define MMC_XGMAC_TX_65OCT_GB		0x83c
+#define MMC_XGMAC_TX_128OCT_GB		0x844
+#define MMC_XGMAC_TX_256OCT_GB		0x84c
+#define MMC_XGMAC_TX_512OCT_GB		0x854
+#define MMC_XGMAC_TX_1024OCT_GB		0x85c
+#define MMC_XGMAC_TX_UNI_PKT_GB		0x864
+#define MMC_XGMAC_TX_MULTI_PKT_GB	0x86c
+#define MMC_XGMAC_TX_BROAD_PKT_GB	0x874
+#define MMC_XGMAC_TX_UNDER		0x87c
+#define MMC_XGMAC_TX_OCTET_G		0x884
+#define MMC_XGMAC_TX_PKT_G		0x88c
+#define MMC_XGMAC_TX_PAUSE		0x894
+#define MMC_XGMAC_TX_VLAN_PKT_G		0x89c
+#define MMC_XGMAC_TX_LPI_USEC		0x8a4
+#define MMC_XGMAC_TX_LPI_TRAN		0x8a8
+
+#define MMC_XGMAC_RX_PKT_GB		0x900
+#define MMC_XGMAC_RX_OCTET_GB		0x908
+#define MMC_XGMAC_RX_OCTET_G		0x910
+#define MMC_XGMAC_RX_BROAD_PKT_G	0x918
+#define MMC_XGMAC_RX_MULTI_PKT_G	0x920
+#define MMC_XGMAC_RX_CRC_ERR		0x928
+#define MMC_XGMAC_RX_RUNT_ERR		0x930
+#define MMC_XGMAC_RX_JABBER_ERR		0x934
+#define MMC_XGMAC_RX_UNDER		0x938
+#define MMC_XGMAC_RX_OVER		0x93c
+#define MMC_XGMAC_RX_64OCT_GB		0x940
+#define MMC_XGMAC_RX_65OCT_GB		0x948
+#define MMC_XGMAC_RX_128OCT_GB		0x950
+#define MMC_XGMAC_RX_256OCT_GB		0x958
+#define MMC_XGMAC_RX_512OCT_GB		0x960
+#define MMC_XGMAC_RX_1024OCT_GB		0x968
+#define MMC_XGMAC_RX_UNI_PKT_G		0x970
+#define MMC_XGMAC_RX_LENGTH_ERR		0x978
+#define MMC_XGMAC_RX_RANGE		0x980
+#define MMC_XGMAC_RX_PAUSE		0x988
+#define MMC_XGMAC_RX_FIFOOVER_PKT	0x990
+#define MMC_XGMAC_RX_VLAN_PKT_GB	0x998
+#define MMC_XGMAC_RX_WATCHDOG_ERR	0x9a0
+#define MMC_XGMAC_RX_LPI_USEC		0x9a4
+#define MMC_XGMAC_RX_LPI_TRAN		0x9a8
+#define MMC_XGMAC_RX_DISCARD_PKT_GB	0x9ac
+#define MMC_XGMAC_RX_DISCARD_OCT_GB	0x9b4
+#define MMC_XGMAC_RX_ALIGN_ERR_PKT	0x9bc
+
+#define MMC_XGMAC_TX_SINGLE_COL_G	0xa40
+#define MMC_XGMAC_TX_MULTI_COL_G	0xa44
+#define MMC_XGMAC_TX_DEFER		0xa48
+#define MMC_XGMAC_TX_LATE_COL		0xa4c
+#define MMC_XGMAC_TX_EXCESSIVE_COL	0xa50
+#define MMC_XGMAC_TX_CARRIER		0xa54
+#define MMC_XGMAC_TX_EXCESSIVE_DEFER	0xa58
+
+#define MMC_XGMAC_RX_IPC_INTR_MASK	0xa5c
+
+#define MMC_XGMAC_RX_IPV4_PKT_G		0xa64
+#define MMC_XGMAC_RX_IPV4_HDRERR_PKT	0xa6c
+#define MMC_XGMAC_RX_IPV4_NOPAY_PKT	0xa74
+#define MMC_XGMAC_RX_IPV4_FRAG_PKT	0xa7c
+#define MMC_XGMAC_RX_IPV4_UDSBL_PKT	0xa84
+#define MMC_XGMAC_RX_IPV6_PKT_G		0xa8c
+#define MMC_XGMAC_RX_IPV6_HDRERR_PKT	0xa94
+#define MMC_XGMAC_RX_IPV6_NOPAY_PKT	0xa9c
+#define MMC_XGMAC_RX_UDP_PKT_G		0xaa4
+#define MMC_XGMAC_RX_UDP_ERR_PKT	0xaac
+#define MMC_XGMAC_RX_TCP_PKT_G		0xab4
+#define MMC_XGMAC_RX_TCP_ERR_PKT	0xabc
+#define MMC_XGMAC_RX_ICMP_PKT_G		0xac4
+#define MMC_XGMAC_RX_ICMP_ERR_PKT	0xacc
+#define MMC_XGMAC_RX_IPV4_OCTET_G	0xad4
+#define MMC_XGMAC_RX_IPV4_HDRERR_OCTET	0xadc
+#define MMC_XGMAC_RX_IPV4_NOPAY_OCTET	0xae4
+#define MMC_XGMAC_RX_IPV4_FRAG_OCTET	0xaec
+#define MMC_XGMAC_RX_IPV4_UDSBL_OCTET	0xaf4
+#define MMC_XGMAC_RX_IPV6_OCTET_G	0xafc
+#define MMC_XGMAC_RX_IPV6_HDRERR_OCTET	0xb04
+#define MMC_XGMAC_RX_IPV6_NOPAY_OCTET	0xb0c
+#define MMC_XGMAC_RX_UDP_OCTET_G	0xb14
+#define MMC_XGMAC_RX_UDP_ERR_OCTET	0xb1c
+#define MMC_XGMAC_RX_TCP_OCTET_G	0xb24
+#define MMC_XGMAC_RX_TCP_ERR_OCTET	0xb2c
+#define MMC_XGMAC_RX_ICMP_OCTET_G	0xb34
+#define MMC_XGMAC_RX_ICMP_ERR_OCTET	0xb3c
+
+/* MTL Registers */
+#define XGMAC_MTL_OPMODE		0x00001000
+#define XGMAC_FRPE			BIT(15)
+#define XGMAC_ETSALG			GENMASK(6, 5)
+#define XGMAC_WRR			(0x0 << 5)
+#define XGMAC_WFQ			(0x1 << 5)
+#define XGMAC_DWRR			(0x2 << 5)
+#define XGMAC_RAA			BIT(2)
+#define XGMAC_FTS			BIT(1)
+#define XGMAC_MTL_INT_STATUS		0x00001020
+#define XGMAC_MTL_RXQ_DMA_MAP0		0x00001030
+#define XGMAC_MTL_RXQ_DMA_MAP1		0x00001034
+#define XGMAC_QxMDMACH(x)		GENMASK((x) * 8 + 7, (x) * 8)
+#define XGMAC_QxMDMACH_SHIFT(x)		((x) * 8)
+#define XGMAC_QDDMACH			BIT(7)
+#define XGMAC_TC_PRTY_MAP0		0x00001040
+#define XGMAC_TC_PRTY_MAP1		0x00001044
+#define XGMAC_PSTC(x)			GENMASK((x) * 8 + 7, (x) * 8)
+#define XGMAC_PSTC_SHIFT(x)		((x) * 8)
+#define XGMAC_MTL_EST_CONTROL		0x00001050
+#define XGMAC_PTOV			GENMASK(31, 23)
+#define XGMAC_PTOV_SHIFT		23
+#define XGMAC_SSWL			BIT(1)
+#define XGMAC_EEST			BIT(0)
+#define XGMAC_MTL_EST_GCL_CONTROL	0x00001080
+#define XGMAC_BTR_LOW			0x0
+#define XGMAC_BTR_HIGH			0x1
+#define XGMAC_CTR_LOW			0x2
+#define XGMAC_CTR_HIGH			0x3
+#define XGMAC_TER			0x4
+#define XGMAC_LLR			0x5
+#define XGMAC_ADDR_SHIFT		8
+#define XGMAC_GCRR			BIT(2)
+#define XGMAC_SRWO			BIT(0)
+#define XGMAC_MTL_EST_GCL_DATA		0x00001084
+#define XGMAC_MTL_RXP_CONTROL_STATUS	0x000010a0
+#define XGMAC_RXPI			BIT(31)
+#define XGMAC_NPE			GENMASK(23, 16)
+#define XGMAC_NVE			GENMASK(7, 0)
+#define XGMAC_MTL_RXP_IACC_CTRL_ST	0x000010b0
+#define XGMAC_STARTBUSY			BIT(31)
+#define XGMAC_WRRDN			BIT(16)
+#define XGMAC_ADDR			GENMASK(9, 0)
+#define XGMAC_MTL_RXP_IACC_DATA		0x000010b4
+#define XGMAC_MTL_ECC_CONTROL		0x000010c0
+#define XGMAC_MTL_SAFETY_INT_STATUS	0x000010c4
+#define XGMAC_MEUIS			BIT(1)
+#define XGMAC_MECIS			BIT(0)
+#define XGMAC_MTL_ECC_INT_ENABLE	0x000010c8
+#define XGMAC_RPCEIE			BIT(12)
+#define XGMAC_ECEIE			BIT(8)
+#define XGMAC_RXCEIE			BIT(4)
+#define XGMAC_TXCEIE			BIT(0)
+#define XGMAC_MTL_ECC_INT_STATUS	0x000010cc
+#define XGMAC_MTL_TXQ_OPMODE(x)		(0x00001100 + 0x80 * (x))
+#define XGMAC_TQS			GENMASK(25, 16)
+#define XGMAC_TQS_SHIFT			16
+#define XGMAC_Q2TCMAP			GENMASK(10, 8)
+#define XGMAC_Q2TCMAP_SHIFT		8
+#define XGMAC_TTC			GENMASK(6, 4)
+#define XGMAC_TTC_SHIFT			4
+#define XGMAC_TXQEN			GENMASK(3, 2)
+#define XGMAC_TXQEN_SHIFT		2
+#define XGMAC_TSF			BIT(1)
+#define XGMAC_FTQ			BIT(0)
+#define XGMAC_MTL_TXQ_DEBUG(x)		(0x00001108 + 0x80 * (x))
+#define XGMAC_TRCPSTS			BIT(5)
+#define XGMAC_TXQSTS			BIT(4)
+#define XGMAC_TWCSTS			BIT(3)
+#define XGMAC_TRCSTS			GENMASK(2, 1)
+#define XGMAC_TCPAUSED			BIT(0)
+#define XGMAC_MTL_TCx_ETS_CONTROL(x)	(0x00001110 + 0x80 * (x))
+#define XGMAC_MTL_TCx_QUANTUM_WEIGHT(x)	(0x00001118 + 0x80 * (x))
+#define XGMAC_MTL_TCx_SENDSLOPE(x)	(0x0000111c + 0x80 * (x))
+#define XGMAC_MTL_TCx_HICREDIT(x)	(0x00001120 + 0x80 * (x))
+#define XGMAC_MTL_TCx_LOCREDIT(x)	(0x00001124 + 0x80 * (x))
+#define XGMAC_CC			BIT(3)
+#define XGMAC_TSA			GENMASK(1, 0)
+#define XGMAC_SP			(0x0 << 0)
+#define XGMAC_CBS			(0x1 << 0)
+#define XGMAC_ETS			(0x2 << 0)
+#define XGMAC_MTL_RXQ_OPMODE(x)		(0x00001140 + 0x80 * (x))
+#define XGMAC_RQS			GENMASK(25, 16)
+#define XGMAC_RQS_SHIFT			16
+#define XGMAC_EHFC			BIT(7)
+#define XGMAC_RSF			BIT(5)
+#define XGMAC_RTC			GENMASK(1, 0)
+#define XGMAC_RTC_SHIFT			0
+#define XGMAC_MTL_RXQ_OVF_CNT(x)		(0x00001144 + 0x80 * (x))
+#define XGMAC_MISCNTOVF			BIT(31)
+#define XGMAC_MISPKTCNT			GENMASK(26, 16)
+#define XGMAC_OVFCNTOVF			BIT(15)
+#define XGMAC_OVFPKTCNT			GENMASK(10, 0)
+#define XGMAC_MTL_RXQ_DEBUG(x)			(0x00001148 + 0x80 * (x))
+#define XGMAC_PRXQ				GENMASK(29, 16)
+#define XGMAC_RXQSTS			GENMASK(5, 4)
+#define XGMAC_RRCSTS			GENMASK(2, 1)
+#define XGMAC_RWCSTS			BIT(0)
+#define XGMAC_MTL_RXQ_FLOW_CONTROL(x)	(0x00001150 + 0x80 * (x))
+#define XGMAC_RFD			GENMASK(31, 17)
+#define XGMAC_RFD_SHIFT			17
+#define XGMAC_RFA			GENMASK(15, 1)
+#define XGMAC_RFA_SHIFT			1
+#define XGMAC_MTL_QINTEN(x)		(0x00001170 + 0x80 * (x))
+#define XGMAC_RXOIE			BIT(16)
+#define XGMAC_MTL_QINT_STATUS(x)	(0x00001174 + 0x80 * (x))
+#define XGMAC_RXOVFIS			BIT(16)
+#define XGMAC_ABPSIS			BIT(1)
+#define XGMAC_TXUNFIS			BIT(0)
+#define XGMAC_MAC_REGSIZE		(XGMAC_MTL_QINT_STATUS(15) / 4)
+
+#define XGMAC_DMA_MODE			0x00003000
+#define XGMAC_INTM			GENMASK(13, 12)
+#define XGMAC_SWR			BIT(0)
+#define XGMAC_DMA_SYSBUS_MODE		0x00003004
+#define XGMAC_WR_OSR_LMT		GENMASK(29, 24)
+#define XGMAC_WR_OSR_LMT_SHIFT		24
+#define XGMAC_RD_OSR_LMT		GENMASK(21, 16)
+#define XGMAC_RD_OSR_LMT_SHIFT		16
+#define XGMAC_EN_LPI			BIT(15)
+#define XGMAC_LPI_XIT_PKT		BIT(14)
+#define XGMAC_AAL			BIT(12)
+#define XGMAC_EAME			BIT(11)
+#define XGMAC_BLEN			GENMASK(7, 1)
+#define XGMAC_BLEN256			BIT(7)
+#define XGMAC_BLEN128			BIT(6)
+#define XGMAC_BLEN64			BIT(5)
+#define XGMAC_BLEN32			BIT(4)
+#define XGMAC_BLEN16			BIT(3)
+#define XGMAC_BLEN8			BIT(2)
+#define XGMAC_BLEN4			BIT(1)
+#define XGMAC_UNDEF			BIT(0)
+#define XGMAC_DMA_INT_STATUS		0x00003008
+#define XGMAC_MTLIS			BIT(16)
+#define XGMAC_DMA_DEBUG_STATUS(x)	(0x00003020 + 4 * (x))
+#define XGMAC_DMA_DBG_STS3_RDAS			BIT(0)
+#define XGMAC_DMA_DBG_STS1_TDAS			BIT(0)
+#define XGMAC_TX_EDMA_CTRL		0x00003040
+#define XGMAC_TEDM			GENMASK(31, 30)
+#define XGMAC_TDPS			GENMASK(29, 0)
+#define XGMAC_RX_EDMA_CTRL		0x00003044
+#define XGMAC_REDM			GENMASK(31, 30)
+#define XGMAC_RDPS			GENMASK(29, 0)
+#define XGMAC_DMA_TBS_CTRL0		0x00003054
+#define XGMAC_DMA_TBS_CTRL1		0x00003058
+#define XGMAC_DMA_TBS_CTRL2		0x0000305c
+#define XGMAC_DMA_TBS_CTRL3		0x00003060
+#define XGMAC_FTOS			GENMASK(31, 8)
+#define XGMAC_FTOV			BIT(0)
+#define XGMAC_DEF_FTOS			(XGMAC_FTOS | XGMAC_FTOV)
+#define XGMAC_DMA_SAFETY_INT_STATUS	0x00003064
+#define XGMAC_MCSIS			BIT(31)
+#define XGMAC_MSUIS			BIT(29)
+#define XGMAC_MSCIS			BIT(28)
+#define XGMAC_DEUIS			BIT(1)
+#define XGMAC_DECIS			BIT(0)
+#define XGMAC_DMA_ECC_INT_ENABLE	0x00003068
+#define XGMAC_DCEIE			BIT(1)
+#define XGMAC_TCEIE			BIT(0)
+#define XGMAC_DMA_ECC_INT_STATUS	0x0000306c
+#define XGMAC_DMA_CH_CONTROL(x)		(0x00003100 + 0x80 * (x))
+#define XGMAC_SPH			BIT(24)
+#define XGMAC_PBLx8			BIT(16)
+#define XGMAC_DMA_CH_TX_CONTROL(x)	(0x00003104 + 0x80 * (x))
+#define XGMAC_EDSE			BIT(28)
+#define XGMAC_TxPBL			GENMASK(21, 16)
+#define XGMAC_TxPBL_SHIFT		16
+#define XGMAC_TSE			BIT(12)
+#define XGMAC_OSP			BIT(4)
+#define XGMAC_TXST			BIT(0)
+#define XGMAC_DMA_CH_RX_CONTROL(x)	(0x00003108 + 0x80 * (x))
+#define XGMAC_RPF			BIT(31)
+#define XGMAC_RxPBL			GENMASK(21, 16)
+#define XGMAC_RxPBL_SHIFT		16
+#define XGMAC_RBSZ			GENMASK(14, 1)
+#define XGMAC_RBSZ_SHIFT		1
+#define XGMAC_RXST			BIT(0)
+#define XGMAC_DMA_CH_TxDESC_LADDR(x)	(0x00003114 + 0x80 * (x))
+#define XGMAC_DMA_CH_RxDESC_LADDR(x)	(0x0000311c + 0x80 * (x))
+#define XGMAC_DMA_CH_TxDESC_TAIL_LPTR(x)	(0x00003124 + 0x80 * (x))
+#define XGMAC_DMA_CH_RxDESC_TAIL_LPTR(x)	(0x0000312c + 0x80 * (x))
+#define XGMAC_DMA_CH_TxDESC_RING_LEN(x)		(0x00003130 + 0x80 * (x))
+#define XGMAC_DMA_CH_RxDESC_RING_LEN(x)		(0x00003134 + 0x80 * (x))
+#define XGMAC_OWRQ                     GENMASK(26, 24)
+#define XGMAC_DMA_CH_INT_EN(x)		(0x00003138 + 0x80 * (x))
+#define XGMAC_NIE			BIT(15)
+#define XGMAC_AIE			BIT(14)
+#define XGMAC_CDEE			BIT(13)
+#define XGMAC_FBEE			BIT(12)
+#define XGMAC_DDEE			BIT(9)
+#define XGMAC_RSE			BIT(8)
+#define XGMAC_RBUE			BIT(7)
+#define XGMAC_RIE			BIT(6)
+#define XGMAC_TBUE			BIT(2)
+#define XGMAC_TXSE			BIT(1)
+#define XGMAC_TIE			BIT(0)
+#define XGMAC_DMA_INT_DEFAULT_EN	(XGMAC_DMA_INT_NORMAL_EN | \
+				XGMAC_DMA_INT_ABNORMAL_EN)
+#define XGMAC_DMA_INT_NORMAL_EN		(XGMAC_NIE | XGMAC_TIE | XGMAC_RIE)
+#define XGMAC_DMA_INT_ABNORMAL_EN	(XGMAC_AIE | XGMAC_RBUE | XGMAC_CDEE | XGMAC_DDEE | XGMAC_FBEE)
+#define XGMAC_DMA_CH_Rx_WATCHDOG(x)	(0x0000313c + 0x80 * (x))
+#define XGMAC_PSEL                     BIT(31)
+#define XGMAC_RBCT                     GENMASK(25, 16)
+#define XGMAC_RWTU                     GENMASK(13, 12)
+#define XGMAC_RWT			GENMASK(7, 0)
+#define XGMAC_DMA_CH_CUR_TxDESC_LADDR(x)	(0x00003144 + 0x80 * (x))
+#define XGMAC_DMA_CH_CUR_RxDESC_LADDR(x)	(0x0000314c + 0x80 * (x))
+#define XGMAC_DMA_CH_CUR_TxBUFF_LADDR(x)	(0x00003154 + 0x80 * (x))
+#define XGMAC_DMA_CH_CUR_RxBUFF_LADDR(x)	(0x0000315c + 0x80 * (x))
+#define XGMAC_DMA_CH_STATUS(x)		(0x00003160 + 0x80 * (x))
+#define XGMAC_NIS			BIT(15)
+#define XGMAC_AIS			BIT(14)
+#define XGMAC_CDE			BIT(13)
+#define XGMAC_FBE			BIT(12)
+#define XGMAC_DDE			BIT(9)
+#define XGMAC_RPS			BIT(8)
+#define XGMAC_RBU			BIT(7)
+#define XGMAC_RI			BIT(6)
+#define XGMAC_TBU			BIT(2)
+#define XGMAC_TPS			BIT(1)
+#define XGMAC_TI			BIT(0)
+#define XGMAC_DMA_CH_DEBUG_STATUS(x)	(0x00003164 + 0x80 * (x))
+#define XGMAC_RDTS			GENMASK(27, 19)
+#define XGMAC_RDFS			GENMASK(18, 16)
+#define XGMAC_TDTS			GENMASK(11, 8)
+#define XGMAC_TDRS			GENMASK(7, 6)
+#define XGMAC_TDXS			GENMASK(5, 3)
+#define XGMAC_TDFS			GENMASK(2, 0)
+#define XGMAC_REGSIZE			((0x0000317c + (0x80 * 15)) / 4)
+
+#define XGMAC_DMA_STATUS_MSK_COMMON	(XGMAC_NIS | XGMAC_AIS | XGMAC_FBE)
+#define XGMAC_DMA_STATUS_MSK_RX		(XGMAC_RBU | XGMAC_RI | \
+					 XGMAC_DMA_STATUS_MSK_COMMON)
+#define XGMAC_DMA_STATUS_MSK_TX		(XGMAC_TBU | XGMAC_TPS | XGMAC_TI | \
+					 XGMAC_DMA_STATUS_MSK_COMMON)
+
+/* Descriptors */
+#define XGMAC_TDES0_LTV			BIT(31)
+#define XGMAC_TDES0_LT			GENMASK(7, 0)
+#define XGMAC_TDES1_LT			GENMASK(31, 8)
+#define XGMAC_TDES2_IVT			GENMASK(31, 16)
+#define XGMAC_TDES2_IVT_SHIFT		16
+#define XGMAC_TDES2_IOC			BIT(31)
+#define XGMAC_TDES2_TTSE		BIT(30)
+#define XGMAC_TDES2_B2L			GENMASK(29, 16)
+#define XGMAC_TDES2_B2L_SHIFT		16
+#define XGMAC_TDES2_VTIR		GENMASK(15, 14)
+#define XGMAC_TDES2_VTIR_SHIFT		14
+#define XGMAC_TDES2_B1L			GENMASK(13, 0)
+#define XGMAC_TDES3_OWN			BIT(31)
+#define XGMAC_TDES3_CTXT		BIT(30)
+#define XGMAC_TDES3_FD			BIT(29)
+#define XGMAC_TDES3_LD			BIT(28)
+#define XGMAC_TDES3_CPC			GENMASK(27, 26)
+#define XGMAC_TDES3_CPC_SHIFT		26
+#define XGMAC_TDES3_TCMSSV		BIT(26)
+#define XGMAC_TDES3_SAIC		GENMASK(25, 23)
+#define XGMAC_TDES3_SAIC_SHIFT		23
+#define XGMAC_TDES3_TBSV		BIT(24)
+#define XGMAC_TDES3_THL			GENMASK(22, 19)
+#define XGMAC_TDES3_THL_SHIFT		19
+#define XGMAC_TDES3_IVTIR		GENMASK(19, 18)
+#define XGMAC_TDES3_IVTIR_SHIFT		18
+#define XGMAC_TDES3_TSE			BIT(18)
+#define XGMAC_TDES3_IVLTV		BIT(17)
+#define XGMAC_TDES3_CIC			GENMASK(17, 16)
+#define XGMAC_TDES3_CIC_SHIFT		16
+#define XGMAC_TDES3_TPL			GENMASK(17, 0)
+#define XGMAC_TDES3_VLTV		BIT(16)
+#define XGMAC_TDES3_VT			GENMASK(15, 0)
+#define XGMAC_TDES3_FL			GENMASK(14, 0)
+#define XGMAC_TDES3_PIDV		BIT(25)
+#define XGMAC_TDES0_QUEUE_ID	GENMASK(19, 17)
+#define XGMAC_TDES0_FAST_MODE	BIT(16)
+#define XGMAC_TDES0_OVPORT		GENMASK(12, 8)
+#define XGMAC_TDES0_IVPORT		GENMASK(4, 0)
+
+
+#define XGMAC_RDES0_IP_FRAG  	GENMASK(31, 30)
/* Values of the RDES0 XGMAC_RDES0_IP_FRAG field (IP fragment position). */
enum {
	FRAG_NONE   = 0,	/* packet is not fragmented */
	FRAG_FIRST  = 1,
	FRAG_MIDDLE = 2,
	FRAG_LAST   = 3,
};
+#define XGMAC_RDES0_L3_TYPE		GENMASK(29, 28)
/* Values of the RDES0 XGMAC_RDES0_L3_TYPE field (parsed L3 protocol). */
enum {
	L3_TYPE_IPV4    = 0,
	L3_TYPE_IPV6    = 1,
	L3_TYPE_IPIP6   = 2,	/* IPv4 carried inside IPv6 */
	L3_TYPE_UNKNOWN = 3,
};
+#define XGMAC_RDES0_L4_TYPE		GENMASK(27, 26)
/* Values of the RDES0 XGMAC_RDES0_L4_TYPE field (parsed L4 protocol). */
enum {
	L4_TYPE_TCP     = 0,
	L4_TYPE_UDP     = 1,
	L4_TYPE_ICMP    = 2,
	L4_TYPE_UNKNOWN = 3,
};
+#define XGMAC_RDES0_RPT_INDEX	GENMASK(25, 22)
+#define XGMAC_RDES0_STA_INDEX	GENMASK(21, 12)
+#define XGMAC_RDES0_OVPORT   	GENMASK(11, 6)
+#define XGMAC_RDES0_IVPORT   	GENMASK(5, 0)
+#define XGMAC_RDES2_DFRAG		BIT(31)
+#define XGMAC_RDES2_OVID		GENMASK(27, 16)
+#define XGMAC_RDES3_OWN			BIT(31)
+#define XGMAC_RDES3_CTXT		BIT(30)
+#define XGMAC_RDES3_FD			BIT(29)
+#define XGMAC_RDES3_LD			BIT(28)
+#define XGMAC_RDES3_CDA			BIT(27)
+#define XGMAC_RDES3_RSV			BIT(26)
+#define XGMAC_RDES3_TCI_PRI		GENMASK(22, 20)
+#define XGMAC_RDES3_ET			GENMASK(19, 16)
+#define XGMAC_RDES3_ES			BIT(15)
+#define XGMAC_RDES3_PL			GENMASK(13, 0)
+
+#define XGMAC_RDES0_SPORT   	GENMASK(31, 16)
+#define XGMAC_RDES0_ETH_TYPE	GENMASK(15, 0)
+#define XGMAC_RDES1_UP_REASON   GENMASK(31, 24)
+#define XGMAC_RDES1_RXHASH      GENMASK(23, 8)
+#define XGMAC_RDES1_TNP     	BIT(6)
+#define XGMAC_RDES1_DSCP        GENMASK(5, 0)
+#define XGMAC_RDES2_SMAC_0_31   GENMASK(31, 0)
+#define XGMAC_RDES3_SMAC_32_47  GENMASK(15, 0)
+#define XGMAC_RDES3_PKT_TYPE    GENMASK(17, 16)
/* Values of the RDES3 XGMAC_RDES3_PKT_TYPE field (destination class). */
enum {
	PKT_TYPE_UCAST   = 0,
	PKT_TYPE_MCAST   = 1,
	PKT_TYPE_UNKNOWN = 2,
	PKT_TYPE_BCAST   = 3,
};
+#define XGMAC_RDES3_IOC			BIT(30)
+
+#endif

+ 76 - 0
target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/sf_dpns.c

@@ -0,0 +1,76 @@
+#include "linux/delay.h"
+#include "linux/reset.h"
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include "dpns.h"
+
+
+
/*
 * Probe: map the NPU registers, enable the clock, pulse the NPU reset,
 * then initialize the switch engine (SE) and traffic manager (TMU) and
 * expose debugfs entries. All resources are devm-managed.
 */
static int dpns_probe(struct platform_device *pdev)
{
	struct dpns_priv *priv;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	priv->dev = &pdev->dev;
	priv->ioaddr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->ioaddr))
		return PTR_ERR(priv->ioaddr);

	/* Clock stays enabled for the device's lifetime (devm variant). */
	priv->clk = devm_clk_get_enabled(priv->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	priv->npu_rst = devm_reset_control_get_exclusive(priv->dev, "npu");
	if (IS_ERR(priv->npu_rst))
		return PTR_ERR(priv->npu_rst);

	/* Pulse the reset to start from a known hardware state.
	 * NOTE(review): assert and deassert back-to-back with no delay —
	 * confirm the reset controller meets the minimum assertion time. */
	reset_control_assert(priv->npu_rst);
	reset_control_deassert(priv->npu_rst);

	ret = dpns_se_init(priv);
	if (ret)
		return dev_err_probe(priv->dev, ret, "failed to initialize SE.\n");

	ret = dpns_tmu_init(priv);
	if (ret)
		return dev_err_probe(priv->dev, ret, "failed to initialize TMU.\n");

	sf_dpns_debugfs_init(priv);
	platform_set_drvdata(pdev, priv);
	return 0;
}
+
+static void dpns_remove(struct platform_device *pdev) {
+	struct dpns_priv *priv = platform_get_drvdata(pdev);
+	debugfs_remove_recursive(priv->debugfs);
+	reset_control_assert(priv->npu_rst);
+}
+
/* Device-tree match table (SF21A6826/SF21H8898 NPU block). */
static const struct of_device_id dpns_match[] = {
	{ .compatible = "siflower,sf21-dpns" },
	{},
};
MODULE_DEVICE_TABLE(of, dpns_match);
+
/* Platform driver registration; .remove_new is the void-returning
 * remove callback introduced for platform drivers. */
static struct platform_driver dpns_driver = {
	.probe	= dpns_probe,
	.remove_new = dpns_remove,
	.driver	= {
		.name		= "sfdpns",
		.of_match_table	= dpns_match,
	},
};
module_platform_driver(dpns_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Qingfang Deng <[email protected]>");
+MODULE_DESCRIPTION("NPU stub driver for SF21A6826/SF21H8898 SoC");

+ 427 - 0
target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/sf_dpns_debugfs.c

@@ -0,0 +1,427 @@
+#include <linux/debugfs.h>
+#include "dpns.h"
+#include "dma.h"
+
+/*
+ * Human-readable names of the NPU MIB counters, indexed by the counter
+ * number passed to NPU_MIB(). NULL entries are unnamed/reserved slots
+ * and are skipped by the debugfs dump in sf_dpns_mib_show().
+ */
+static const char * const sf21_dpns_mib_name[] = {
+	"pkt_rcv_drop_port0",
+	"pkt_rcv_drop_port1",
+	"pkt_rcv_drop_port2",
+	"pkt_rcv_drop_port3",
+	"pkt_rcv_drop_port4",
+	"pkt_rcv_drop_port5",
+	"pkt_rcv_drop_port6",
+	"pkt_rcv_drop_spl0",
+	"pkt_rcv_drop_spl1",
+	"pkt_rcv_drop_spl2",
+	"pkt_rcv_drop_spl3",
+	"pkt_rcv_drop_spl4",
+	"pkt_rcv_drop_spl5",
+	"pkt_rcv_drop_spl6",
+	"pkt_rcv_trans_cnt0",
+	"pkt_rcv_trans_cnt1",
+	"pkt_rcv_trans_cnt2",
+	"pkt_rcv_trans_cnt3",
+	"pkt_rcv_trans_cnt4",
+	"pkt_rcv_trans_cnt5",
+	"pkt_rcv_trans_cnt6",
+	"pkt_rcv_total0",
+	"pkt_rcv_total1",
+	"pkt_rcv_total2",
+	"pkt_rcv_total3",
+	"pkt_rcv_total4",
+	"pkt_rcv_total5",
+	"pkt_rcv_total6",
+	"pkt_rcv",
+	"udp",
+	"tcp",
+	"ipv4",
+	"ipv6",
+	"icmpv4",
+	"icmpv6",
+	"other_protocol",
+	"ipv4_sip_eq_dip",
+	"ipv4_icmp_frag",
+	"ipv4_icmp_ping_too_big",
+	"ipv4_udp_sp_eq_dp",
+	"ipv4_tcp_flagchk_err",
+	"ipv4_tcp_sq_eq_dp",
+	"ipv4_tcp_frag_off1",
+	"ipv4_tcp_syn_err",
+	"ipv4_tcp_xmas",
+	"ipv4_tcp_null",
+	"ipv4_tcp_too_short",
+	"ipv4_icmp4_redirect",
+	"ipv4_icmp_smurf",
+	"ipv6_sip_eq_dip",
+	"ipv6_icmp_frag",
+	"ipv6_icmp_ping_too_big",
+	"ipv6_udp_sp_eq_dp",
+	"ipv6_tcp_flagchk_err",
+	"ipv6_tcp_sq_eq_dp",
+	"ipv6_tcp_frag_off1",
+	"ipv6_tcp_syn_err",
+	"ipv6_tcp_xmas",
+	"ipv6_tcp_null",
+	"ipv6_tcp_too_short",
+	"ipv6_icmp4_redirect",
+	"ipv6_icmp_smurf",
+	"ipv4in6_pls",
+	"frame_ismc_pls",
+	NULL,
+	NULL,
+	NULL,
+	NULL,
+	"arp_reply_err_fwd",
+	"arp_req_err_fwd",
+	"pkt_len_less_l2hd_err_fwd",
+	"pkt_len_less_60B_err_fwd",
+	"smac_is_mc_err_fwd",
+	"smac_is_bc_err_fwd",
+	"smac_eq_dmac_err_fwd",
+	"smac_eq_zero_err_fwd",
+	"dmac_eq_zero_err_fwd",
+	"dribble_err_fwd",
+	"runt_err_fwd",
+	"giant_frame_err_fwd",
+	"watchdog_err_fwd",
+	"gmii_err_fwd",
+	"dos_err_fwd",
+	"ttl_err_fwd",
+	"payload_chksum_err_fwd",
+	"ip_version_err_fwd",
+	"ip_hd_chksum_err_fwd",
+	"crc_err_fwd",
+	"pkt_len_err_fwd",
+	"arp_reply_err_up",
+	"arp_req_err_up",
+	"pkt_len_less_l2hd_err_up",
+	"pkt_len_less_60B_err_up",
+	"smac_is_mc_err_up",
+	"smac_is_bc_err_up",
+	"smac_eq_dmac_err_up",
+	"smac_eq_zero_err_up",
+	"dmac_eq_zero_err_up",
+	"dribble_err_up",
+	"runt_err_up",
+	"giant_frame_err_up",
+	"watchdog_err_up",
+	"gmii_err_up",
+	"dos_err_up",
+	"ttl_err_up",
+	"payload_chksum_err_up",
+	"ip_version_err_up",
+	"ip_hd_chksum_err_up",
+	"crc_err_up",
+	"pkt_len_err_up",
+	"arp_reply_err_drop",
+	"arp_req_err_drop",
+	"pkt_len_less_l2hd_err_drop",
+	"pkt_len_less_60B_err_drop",
+	"smac_is_mc_err_drop",
+	"smac_is_bc_err_drop",
+	"smac_eq_dmac_err_drop",
+	"smac_eq_zero_err_drop",
+	"dmac_eq_zero_err_drop",
+	"dribble_err_drop",
+	"runt_err_drop",
+	"giant_frame_err_drop",
+	"watchdog_err_drop",
+	"gmii_err_drop",
+	"dos_err_drop",
+	"ttl_err_drop",
+	"payload_chksum_err_drop",
+	"ip_version_err_drop",
+	"ip_hd_chksum_err_drop",
+	"crc_err_drop",
+	"pkt_len_err_drop",
+	"ivlan_vid_input_mf",
+	"ivlan_vid_pass_mf",
+	"ivlan_vid_port_based_srch",
+	"ivlan_vid_xlt_srch",
+	"ivlan_vid_vfp_srch",
+	"ivlan_vid_port_based_resp",
+	"ivlan_vid_xlt_resp",
+	"ivlan_vid_vfp_resp",
+	"ivlan_vid_port_based_hit",
+	"ivlan_vid_xlt_hit",
+	"ivlan_vid_vfp_hit",
+	"ivlan_vid_output_mf",
+	"ivlan_vid_port_based_pass",
+	"ivlan_vid_cp_drop",
+	"ivlan_vid_cp_up",
+	"ivlan_lkp_input_mf",
+	"ivlan_lkp_pass_mf",
+	"ivlan_lkp_srch",
+	"ivlan_lkp_resp",
+	"ivlan_lkp_hit",
+	"ivlan_lkp_output_mf",
+	"ivlan_lkp_cp_drop",
+	"ivlan_lkp_cp_up",
+	"l2_input_mf",
+	"l2_pass_mf",
+	"l2_flood_spl_srch_cnt",
+	"l2_da_srch",
+	"l2_sa_srch",
+	"l2_flood_spl_resp_cnt",
+	"l2_da_resp",
+	"l2_sa_resp",
+	"l2_flood_spl_cnt",
+	"l2_da_hit",
+	"l2_sa_hit",
+	"l2_output_mf",
+	"l2_cp_drop",
+	"l2_cp_up",
+	"l2_cp_fwd",
+	"l2_cp_up_fwd",
+	"nat_input_mf",
+	"nat_pass_mf",
+	"nat_srch",
+	"nat_resp",
+	"nat_hit",
+	"nat_output_mf",
+	"nat_v4_search",
+	"nat_dnat",
+	"nat_v4_hit",
+	"nat_dnat_hit",
+	"l3_input_mf",
+	"l3_pass_mf",
+	"l3_uc_srch",
+	"l3_mcsg_srch",
+	"l3_uc_resp",
+	"l3_mcsg_resp",
+	"l3_uc_hit",
+	"l3_mcsg_hit",
+	"l3_output_mf",
+	"l3_v6_mf",
+	"l3_mc",
+	"l3_v6_srch",
+	"l3_mc_srch",
+	"l3_v6_hit",
+	"l3_mc_hit",
+	"iacl_input_mf",
+	"iacl_pass_mf",
+	"iacl_srch",
+	"iacl_resp",
+	"iacl_hit",
+	"iacl_output_mf",
+	"iacl_v6",
+	"iacl_v6_srch",
+	"iacl_v6_hit",
+	"tmu_port0_phy_tran",
+	"tmu_port1_phy_tran",
+	"tmu_port2_phy_tran",
+	"tmu_port3_phy_tran",
+	"tmu_port4_phy_tran",
+	"tmu_port5_phy_tran",
+	"tmu_port6_phy_tran",
+	"tmu_port7_phy_tran",
+	"tmu_port8_phy_tran",
+	"tmu_port9_phy_tran",
+	"tmu_port10_phy_tran",
+	"tmu_port11_phy_tran",
+	"tmu_port12_phy_tran",
+	"tmu_port13_phy_tran",
+	"tmu_port14_phy_tran",
+	"tmu_port15_phy_tran",
+	"tmu_port16_phy_tran",
+	"tmu_port17_phy_tran",
+	"tmu_port18_phy_tran",
+	"tmu_port19_phy_tran",
+	"tmu_port20_phy_tran",
+	"tmu_port21_phy_tran",
+	"tmu_port22_phy_tran",
+	"tmu_port23_phy_tran",
+	"tmu_port24_phy_tran",
+	"tmu_port25_phy_tran",
+	"tmu_port26_phy_tran",
+	"tmu_port0_phy_drop_rclm",
+	"tmu_port1_phy_drop_rclm",
+	"tmu_port2_phy_drop_rclm",
+	"tmu_port3_phy_drop_rclm",
+	"tmu_port4_phy_drop_rclm",
+	"tmu_port5_phy_drop_rclm",
+	"tmu_port6_phy_drop_rclm",
+	"tmu_port7_phy_drop_rclm",
+	"tmu_port8_phy_drop_rclm",
+	"tmu_port9_phy_drop_rclm",
+	"tmu_port10_phy_drop_rclm",
+	"tmu_port11_phy_drop_rclm",
+	"tmu_port12_phy_drop_rclm",
+	"tmu_port13_phy_drop_rclm",
+	"tmu_port14_phy_drop_rclm",
+	"tmu_port15_phy_drop_rclm",
+	"tmu_port16_phy_drop_rclm",
+	"tmu_port17_phy_drop_rclm",
+	"tmu_port18_phy_drop_rclm",
+	"tmu_port19_phy_drop_rclm",
+	"tmu_port20_phy_drop_rclm",
+	"tmu_port21_phy_drop_rclm",
+	"tmu_port22_phy_drop_rclm",
+	"tmu_port23_phy_drop_rclm",
+	"tmu_port24_phy_drop_rclm",
+	"tmu_port25_phy_drop_rclm",
+	"tmu_port26_phy_drop_rclm",
+	"tmu_drop_bit_cnt",
+	"nat_cp_drop_cnt",
+	"nat_cp_up_cnt",
+	"nat_fwd_cnt",
+	"nat_cp_fwd_cnt",
+	"l3_cp_up_fwd_cnt",
+	"l3_cp_fwd_cnt",
+	"l3_cp_up_cnt",
+	"l3_drop_bit_cnt",
+	NULL,
+	NULL,
+	NULL,
+	NULL,
+	NULL,
+	NULL,
+	NULL,
+	NULL,
+	NULL,
+	NULL,
+	NULL,
+	NULL,
+	NULL,
+	NULL,
+	NULL,
+	NULL,
+	NULL,
+	NULL,
+	"arp_intf_input_mf",
+	"arp_intf_pass_mf",
+	"arp_intf_intf_srch",
+	"arp_intf_arp_srch",
+	"arp_intf_intf_resp",
+	"arp_intf_arp_resp",
+	"arp_intf_intf_hit",
+	"arp_intf_arp_hit",
+	"arp_intf_output_mf",
+	"arp_intf_v6_mf",
+	"arp_intf_mc",
+	"arp_intf_v6_srch",
+	"arp_intf_mc_srch",
+	"arp_intf_v6_hit",
+	"arp_intf_mc_hit",
+	"evlan_lkp_input_mf",
+	"evlan_lkp_pass_mf",
+	"evlan_lkp_port_tpid_srch",
+	"evlan_lkp_tag_mem_srch",
+	"evlan_lkp_vlan_srch",
+	"evlan_lkp_port_tpid_resp",
+	"evlan_lkp_tag_mem_resp",
+	"evlan_lkp_vlan_resp",
+	"evlan_lkp_port_tpid_hit",
+	"evlan_lkp_tag_mem_hit",
+	"evlan_lkp_vlan_hit",
+	"evlan_lkp_output_mf",
+	"evlan_lkp_cp_drop",
+	"evlan_lkp_cp_up",
+	"evlan_lkp_cp_fwd",
+	"evlan_act_input_mf",
+	"evlan_act_pass_mf",
+	"evlan_act_srch",
+	"evlan_xlt_srch_cnt",
+	"evlan_act_resp",
+	"evlan_xlt_resp_hit",
+	NULL,
+	"evlan_xlt_hit_cnt",
+	"evlan_act_output_mf",
+	"evlan_act_cp_drop",
+	"evlan_act_cp_cpu",
+	"eacl_input_mf",
+	"eacl_pass_mf",
+	"eacl_srch",
+	"eacl_resp",
+	"eacl_hit",
+	"eacl_output_mf",
+	"eacl_v6",
+	"eacl_v6_srch",
+	"eacl_v6_hit",
+	"md2port_0_data_sof",
+	"md2port_0_data_eof",
+	"md2port_1_data_sof",
+	"md2port_1_data_eof",
+	"md2port_2_data_sof",
+	"md2port_2_data_eof",
+	"md2port_3_data_sof",
+	"md2port_3_data_eof",
+	"md2port_4_data_sof",
+	"md2port_4_data_eof",
+	"md2port_5_data_sof",
+	"md2port_5_data_eof",
+	"md2port_6_data_sof",
+	"md2port_6_data_eof",
+	"pkt_separate_free_cnt",
+	"pkt_whold_free_cnt",
+	"se2md_result_cnt",
+	"md2se_key_cnt",
+	"mem2md_data_cnt",
+	"md2mem_rd_cnt",
+	"modify_drop_cnt",
+	"mipp_cnt[0]",
+	"mipp_cnt[1]",
+	"ipv6_hdr_add",
+	"ipv6_hdr_del",
+	"otpid_replace",
+	"itpid_replace",
+	"ppp_hdr_add",
+	"ppp_hdr_del",
+	"avlan_replace",
+	"avlan_add",
+	"avlan_del",
+	"ovlan_replace",
+	"ovlan_add",
+	"ovlan_del",
+	"ivlan_replace",
+	"ivlan_add",
+	"ivlan_del",
+};
+
+/*
+ * seq_file show callback for the debugfs "mib" file. Dumps every named
+ * MIB counter, then the per-port packet/byte receive counters.
+ */
+static int
+sf_dpns_mib_show(struct seq_file *m, void *private)
+{
+	struct dpns_priv *priv = m->private;
+	unsigned int idx;
+	u64 octets;
+	u32 pkts;
+
+	seq_puts(m, "General MIBs:\n");
+	for (idx = 0; idx < ARRAY_SIZE(sf21_dpns_mib_name); idx++) {
+		if (!sf21_dpns_mib_name[idx])
+			continue;
+
+		pkts = dpns_r32(priv, NPU_MIB(idx));
+		seq_printf(m, "name:%-30s packets:%11u\n",
+			   sf21_dpns_mib_name[idx], pkts);
+	}
+
+	seq_puts(m, "Port MIBs:\n");
+	for (idx = 0; idx < DPNS_MAX_PORT; idx++) {
+		pkts = dpns_r32(priv, NPU_MIB_PKT_RCV_PORT(idx));
+		/* NOTE(review): the 64-bit byte count appears to be latched
+		 * into DATA2/DATA3 by the packet-count read above - confirm
+		 * against the register documentation.
+		 */
+		octets = dpns_r32(priv, NPU_MIB_NCI_RD_DATA2) |
+			 (u64)dpns_r32(priv, NPU_MIB_NCI_RD_DATA3) << 32;
+		seq_printf(m,
+			   "name:pkt_rcv_port%-18u packets:%11u bytes:%20llu\n",
+			   idx, pkts, octets);
+	}
+
+	return 0;
+}
+
+static int sf_dpns_mib_open(struct inode *inode, struct file *file)
+{
+	/* Preallocate the seq_file buffer: 56 bytes per named-MIB line plus
+	 * its header, 83 bytes per port line plus its header.
+	 */
+	size_t size = 56 * (ARRAY_SIZE(sf21_dpns_mib_name) + 1) +
+		      83 * (DPNS_MAX_PORT + 1);
+
+	return single_open_size(file, sf_dpns_mib_show, inode->i_private, size);
+}
+
+/* debugfs "mib" file operations: plain single_open seq_file reader. */
+static const struct file_operations sf_dpns_mib_fops = {
+	.owner		= THIS_MODULE,
+	.open		= sf_dpns_mib_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+/* Create the per-device debugfs directory and its "mib" counter dump. */
+void sf_dpns_debugfs_init(struct dpns_priv *priv)
+{
+	struct dentry *dir;
+
+	dir = debugfs_create_dir(dev_name(priv->dev), NULL);
+	priv->debugfs = dir;
+	/* Root-readable only (0400). */
+	debugfs_create_file("mib", S_IRUSR, dir, priv, &sf_dpns_mib_fops);
+}

+ 50 - 0
target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/sf_dpns_se.c

@@ -0,0 +1,50 @@
+#include "dpns.h"
+#include <linux/iopoll.h>
+#include <linux/bitfield.h>
+#include "sf_dpns_se.h"
+
+/*
+ * Seed the SE iport table: mark the table valid in SE_CONFIG0, then write
+ * the same default entry word (0xa0000) into the first six slots through
+ * the indirect table-operation interface, polling BUSY after each write.
+ * Returns 0 or -ETIMEDOUT from the poll.
+ */
+static int dpns_populate_table(struct dpns_priv *priv)
+{
+	void __iomem *ioaddr = priv->ioaddr;
+	int ret, i;
+	u32 reg;
+
+	/* NOTE(review): the rmw mask is SE_IPSPL_ZERO_LIMIT (bit 19) but the
+	 * value set is SE_IPORT_TABLE_VALID (bit 8) - confirm the intended
+	 * mask/value pairing against dpns_rmw()'s semantics.
+	 */
+	dpns_rmw(priv, SE_CONFIG0, SE_IPSPL_ZERO_LIMIT,
+		 SE_IPORT_TABLE_VALID);
+	dpns_w32(priv, SE_TB_WRDATA(0), 0xa0000);
+	for (i = 0; i < 6; i++) {
+		reg = SE_TB_OP_WR | FIELD_PREP(SE_TB_OP_REQ_ADDR, i) |
+		      FIELD_PREP(SE_TB_OP_REQ_ID, SE_TB_IPORT);
+		dpns_w32(priv, SE_TB_OP, reg);
+		/* Wait up to 100us for the table engine to go idle. */
+		ret = readl_poll_timeout(ioaddr + SE_TB_OP, reg,
+					 !(reg & SE_TB_OP_BUSY), 0, 100);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * One-time init of the NPU search engine (SE): clear all lookup RAMs and
+ * TCAM blocks, divert ARP frames the NPU considers invalid to the host,
+ * then populate the iport table.
+ *
+ * Returns 0 on success or a negative errno (poll timeout or table-write
+ * failure).
+ */
+int dpns_se_init(struct dpns_priv *priv)
+{
+	int ret;
+	u32 reg;
+
+	dpns_w32(priv, SE_CLR_RAM_CTRL, SE_CLR_RAM_ALL);
+	/* Fix: the original wrote the register OFFSET (SE_TCAM_CLR,
+	 * 0x190004) as the value; the clear-request bits are
+	 * SE_TCAM_CLR_ALL.
+	 */
+	dpns_w32(priv, SE_TCAM_CLR, SE_TCAM_CLR_ALL);
+
+	/* Both clear registers are polled until they read back zero. */
+	ret = readl_poll_timeout(priv->ioaddr + SE_CLR_RAM_CTRL, reg, !reg, 0,
+				 1000);
+	if (ret)
+		return ret;
+	ret = readl_poll_timeout(priv->ioaddr + SE_TCAM_CLR, reg, !reg, 0,
+				 1000);
+	if (ret)
+		return ret;
+
+	/* Upload ARP packets which NPU considers invalid to host. */
+	dpns_rmw(priv, PKT_ERR_STG_CFG2, ARP_REQ_ERR_OP | ARP_REPLY_ERR_OP,
+		 ARP_REQ_ERR_UP | ARP_REPLY_ERR_UP);
+
+	return dpns_populate_table(priv);
+}

+ 77 - 0
target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/sf_dpns_se.h

@@ -0,0 +1,77 @@
+#ifndef __SF_DPNS_SE_H__
+#define __SF_DPNS_SE_H__
+#include "dpns.h"
+
+#define SE_INT_STATUS			0x180000
+#define  SE_INT_EVACT_SCH_OVF		BIT(14)
+#define  SE_INT_L2_MF_SPL_OVF		BIT(13)
+#define  SE_INT_L2_MP_SCH_OVF		BIT(12)
+#define  SE_INT_MODIFY_OVF		BIT(11)
+#define  SE_INT_EVXLT_LKP_OVF		BIT(10)
+#define  SE_INT_EVLAN_LKP_OVF		BIT(9)
+#define  SE_INT_EVSCH_OVF		BIT(8)
+#define  SE_INT_MACSCH_OVF		BIT(7)
+#define  SE_INT_MSPLS_OVF		BIT(6)
+#define  SE_INT_MACSPL_LKP_OVF		BIT(5)
+#define  SE_INT_L2_LKP_BUF_OVF		BIT(4)
+#define  SE_INT_L2_LKP_SCH_OVF		BIT(3)
+#define  SE_INT_IVXLT_OVF		BIT(2)
+#define  SE_INT_IVLKP_OVF		BIT(1)
+#define  SE_INT_IVPSPL_LKP_OVF		BIT(0)
+
+#define SE_CLR_RAM_CTRL			0x180004
+#define  SE_CLR_RAM_ALL			GENMASK(20, 0)
+
+#define SE_CONFIG0			0x180008
+#define  SE_L2_VID_ZERO_MODE		BIT(27)
+#define  SE_IPSPL_DIS_STEP		BIT(26)
+#define  SE_IPSPL_CMPT_LEN		GENMASK(25, 20)
+#define  SE_IPSPL_ZERO_LIMIT		BIT(19)
+#define  SE_IPSPL_CNT_MODE		GENMASK(18, 17)
+#define  SE_IVLKP_CFG_DISABLE		BIT(16)
+#define  SE_IVLKP_CFG_ENTR_MINUS1	GENMASK(15, 10)
+#define  SE_PORTBV_TABLE_VALID		BIT(9)
+#define  SE_IPORT_TABLE_VALID		BIT(8)
+#define  SE_IVXLT_CFG_DISABLE		BIT(7)
+#define  SE_IVXLT_CFG_ENTR_MINUS1	GENMASK(6, 1)
+#define  SE_IPSPL_MODE			BIT(0)
+
+#define SE_CONFIG1			0x18000c
+#define  SE_UNUC_SPL_CNT_MODE		BIT(30)
+#define  SE_L2_HASH_POLY_SEL(x)		GENMASK((x) * 3 + 2, (x) * 3)
+
+#define SE_CONFIG2			0x180010
+#define  SE_EVACT_TABLE_VALID		BIT(31)
+#define  SE_EVXLT_CFG_DIS		BIT(30)
+#define  SE_EVXLT_CFG_ENTR_MINUS1	GENMASK(29, 24)
+#define  SE_L2_MFSPL_ZERO_LIMIT		BIT(23)
+#define  SE_L2_MFSPL_CNT_MODE		GENMASK(22, 21)
+#define  SE_MFSPL_MODE			BIT(20)
+#define  SE_MACSPL_ZERO_LIMIT		BIT(19)
+#define  SE_MACSPL_CNT_MODE		GENMASK(18, 17)
+#define  SE_PTPID_TABLE_VALID		BIT(16)
+#define  SE_OTPID_TABLE_VALID		BIT(15)
+#define  SE_EVLKP_CFG_DIS_TB		BIT(14)
+#define  SE_EVLKP_CFG_ENTR_MINUS1	GENMASK(13, 8)
+#define  SE_INTF_TABLE_VALID		BIT(7)
+#define  SE_MAC_TABLE_VALID		BIT(6)
+#define  SE_MACSPL_MODE			BIT(5)
+#define  SE_L2_AGE_CLR_AFTER_RD		BIT(4)
+#define  SE_L2_SEG_NUM_MINUS1		GENMASK(3, 0)
+
+#define SE_TB_OP			0x18003c
+#define  SE_TB_OP_BUSY			BIT(31)
+#define  SE_TB_OP_WR			BIT(24)
+#define  SE_TB_OP_REQ_ID		GENMASK(21, 16)
+#define   SE_TB_IPORT			1
+#define  SE_TB_OP_REQ_ADDR		GENMASK(15, 0)
+
+#define SE_TB_WRDATA(x)			(0x180040 + 4 * (x))
+#define SE_TB_RDDATA(x)			(0x180080 + 4 * (x))
+
+#define SE_TCAM_CLR			0x190004
+#define  SE_TCAM_CLR_ALL		GENMASK(5, 0)
+#define  SE_TCAM_CLR_ACL_SPL		BIT(5)
+#define  SE_TCAM_CLR_BLK(x)		BIT(x)
+
+#endif

+ 247 - 0
target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/sf_dpns_tmu.c

@@ -0,0 +1,247 @@
+#include <linux/of_device.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include "dpns.h"
+#include "sf_dpns_tmu.h"
+
+/* Read @reg and extract the field selected by @mask/@shift. */
+static u32 tmu_rm32(struct dpns_priv *priv, u32 reg, u32 mask, u32 shift)
+{
+	return (dpns_r32(priv, reg) & mask) >> shift;
+}
+
+/* Read-modify-write: replace the @mask field of @reg with @val << @shift. */
+static void tmu_rmw32(struct dpns_priv *priv, u32 reg, u32 mask, u32 shift, u32 val)
+{
+	u32 cur = dpns_r32(priv, reg);
+
+	cur &= ~mask;
+	cur |= (val << shift) & mask;
+	dpns_w32(priv, reg, cur);
+}
+
+/* 1 if @port is a valid TMU port index (0..TMU_MAX_PORT_CNT-1), else 0. */
+static int is_valid_port_idx(struct dpns_priv *priv, u32 port)
+{
+	return port < TMU_MAX_PORT_CNT;
+}
+
+/* 1 if @q is a valid per-port queue index, else 0. */
+static int is_valid_queue_idx(u32 q)
+{
+	return q < QUE_MAX_NUM_PER_PORT;
+}
+
+/* 1 if @sched is a valid per-port scheduler index, else 0. */
+static int is_valid_sched_idx(struct dpns_priv *priv, u32 sched)
+{
+	return sched < QUE_SCH_NUM_PER_PORT;
+}
+
+/* 1 if @shaper is a valid per-port shaper index, else 0. */
+static int is_valid_shaper_idx(struct dpns_priv *priv, u32 shaper)
+{
+	return shaper < QUE_SHAPER_NUM_PER_PORT;
+}
+
+/* Write @val to offset @reg inside the register window of TMU port @port.
+ * Returns -EINVAL for an out-of-range port.
+ */
+static int tmu_port_writel(struct dpns_priv *priv, u32 port, u32 reg, u32 val)
+{
+	if (!is_valid_port_idx(priv, port))
+		return -EINVAL;
+
+	dpns_w32(priv, TMU_PORT_BASE(port) + reg, val);
+	return 0;
+}
+
+/* Read the @mask/@shift field of port register @reg into *@val.
+ * Returns -EINVAL for an out-of-range port.
+ */
+static int tmu_port_rm32(struct dpns_priv *priv, u32 port, u32 reg, u32 mask, u32 shift, u32 *val)
+{
+	if (!is_valid_port_idx(priv, port))
+		return -EINVAL;
+
+	*val = tmu_rm32(priv, TMU_PORT_BASE(port) + reg, mask, shift);
+	return 0;
+}
+
+/* Read-modify-write the @mask/@shift field of port register @reg.
+ * Returns -EINVAL for an out-of-range port.
+ */
+static int tmu_port_rmw32(struct dpns_priv *priv, u32 port, u32 reg, u32 mask, u32 shift, u32 val)
+{
+	if (!is_valid_port_idx(priv, port))
+		return -EINVAL;
+
+	tmu_rmw32(priv, TMU_PORT_BASE(port) + reg, mask, shift, val);
+
+	return 0;
+}
+
+/* Write @val to register @reg of @queue within @port; -EINVAL if either
+ * index is out of range.
+ */
+static int tmu_queue_writel(struct dpns_priv *priv, u32 port, u32 queue, u32 reg, u32 val)
+{
+	if (!is_valid_queue_idx(queue))
+		return -EINVAL;
+
+	return tmu_port_writel(priv, port, TMU_QUEUE_BASE(queue) + reg, val);
+}
+
+/* Write @val to register @reg of scheduler @sched within @port; -EINVAL if
+ * either index is out of range.
+ */
+static int tmu_sched_writel(struct dpns_priv *priv, u32 port, u32 sched, u32 reg, u32 val)
+{
+	if (!is_valid_sched_idx(priv, sched))
+		return -EINVAL;
+
+	return tmu_port_writel(priv, port, TMU_SCHED_BASE(sched) + reg, val);
+}
+
+/* Write @val to register @reg of shaper @shaper within @port; -EINVAL if
+ * either index is out of range.
+ */
+static int tmu_shaper_writel(struct dpns_priv *priv, u32 port, u32 shaper, u32 reg, u32 val)
+{
+	if (!is_valid_shaper_idx(priv, shaper))
+		return -EINVAL;
+
+	return tmu_port_writel(priv, port, TMU_SHAPER_BASE(shaper) + reg, val);
+}
+
+/* Read-modify-write a field of shaper register @reg; -EINVAL if either
+ * index is out of range.
+ */
+static int tmu_shaper_rmw32(struct dpns_priv *priv, u32 port, u32 shaper, u32 reg, u32 mask, u32 shift, u32 val)
+{
+	if (!is_valid_shaper_idx(priv, shaper))
+		return -EINVAL;
+
+	return tmu_port_rmw32(priv, port, TMU_SHAPER_BASE(shaper) + reg, mask, shift, val);
+}
+
+/* Returns the read-only TMU_TDQ_ALLOW_CFG flag for @port: nonzero means
+ * TDQ_CTRL may be written. A failed register read is treated as "not
+ * configurable".
+ */
+static int tdq_ctrl_is_configurable(struct dpns_priv *priv, u32 port)
+{
+	u32 allow = 0;
+
+	if (tmu_port_rm32(priv, port, TMU_TDQ_CTRL, TMU_TDQ_ALLOW_CFG,
+			  TMU_TDQ_ALLOW_CFG_SHIFT, &allow))
+		return 0;
+
+	return allow;
+}
+
+/*
+ * Reset all queues of @port to defaults. Per the field layout in
+ * sf_dpns_tmu.h, CFG0 = 0x00011f00 sets TMU_QUEUE_MAX = 0x11f,
+ * TMU_QUEUE_MIN = 0 and TMU_DROP_TYPE = 0 (mix tail drop); CFG3 caps the
+ * queue at 0x5ee buffer cells. WRED probabilities are cleared.
+ */
+static void tmu_port_queue_cfg(struct dpns_priv *priv, u32 port)
+{
+	int comp;
+
+	for (comp = 0; comp < QUE_MAX_NUM_PER_PORT; comp++) {
+		tmu_queue_writel(priv, port, comp, TMU_PORT_QUEUE_CFG0, 0x00011f00);
+
+		tmu_queue_writel(priv, port, comp, TMU_PORT_QUEUE_CFG1, 0x00000000);
+		tmu_queue_writel(priv, port, comp, TMU_PORT_QUEUE_CFG2, 0x00000000);
+		/* NOTE(review): STS0-STS2 are documented as read-only in
+		 * sf_dpns_tmu.h, yet they are written here - confirm whether
+		 * these writes are intentional (e.g. counter clear) or no-ops.
+		 */
+		tmu_queue_writel(priv, port, comp, TMU_PORT_QUEUE_STS0, 0x00000000);
+		tmu_queue_writel(priv, port, comp, TMU_PORT_QUEUE_STS1, 0x00000000);
+		tmu_queue_writel(priv, port, comp, TMU_PORT_QUEUE_STS2, 0x00000000);
+		tmu_queue_writel(priv, port, comp, TMU_PORT_QUEUE_CFG3, 0x000005ee);
+	}
+}
+
+/*
+ * Reset both schedulers of @port: zero the algorithm selection (priority
+ * queueing) and all queue weights, then program the queue-to-scheduler
+ * allocation tables. Allocation nibbles presumably use 8 to mean
+ * "unattached" (only values 0-7 are valid queue slots) - TODO confirm.
+ */
+static void tmu_port_sched_cfg(struct dpns_priv *priv, u32 port)
+{
+	int comp;
+	for (comp = 0; comp < QUE_SCH_NUM_PER_PORT; comp++) {
+		tmu_sched_writel(priv, port, comp, TMU_SCH_CTRL,      0x00000000);
+		tmu_sched_writel(priv, port, comp, TMU_SCH_Q0_WEIGHT, 0x00000000);
+		tmu_sched_writel(priv, port, comp, TMU_SCH_Q1_WEIGHT, 0x00000000);
+		tmu_sched_writel(priv, port, comp, TMU_SCH_Q2_WEIGHT, 0x00000000);
+		tmu_sched_writel(priv, port, comp, TMU_SCH_Q3_WEIGHT, 0x00000000);
+		tmu_sched_writel(priv, port, comp, TMU_SCH_Q4_WEIGHT, 0x00000000);
+		tmu_sched_writel(priv, port, comp, TMU_SCH_Q5_WEIGHT, 0x00000000);
+		tmu_sched_writel(priv, port, comp, TMU_SCH_Q6_WEIGHT, 0x00000000);
+		tmu_sched_writel(priv, port, comp, TMU_SCH_Q7_WEIGHT, 0x00000000);
+
+		switch (comp) {
+		case 0:
+			/* Scheduler 0 takes queues 0-3. */
+			tmu_sched_writel(priv, port, comp, TMU_SCH_QUEUE_ALLOC0, 0x03020100);
+			tmu_sched_writel(priv, port, comp, TMU_SCH_QUEUE_ALLOC1, 0x08080808);
+			break;
+
+		case 1:
+			/* Scheduler 1 takes queues 4-7 (first slot 0). */
+			tmu_sched_writel(priv, port, comp, TMU_SCH_QUEUE_ALLOC0, 0x06050400);
+			tmu_sched_writel(priv, port, comp, TMU_SCH_QUEUE_ALLOC1, 0x08080807);
+			break;
+
+		default:
+			break;
+		}
+
+		/* Schedule by packet length (TMU_SCH_BIT_RATE_PKT_LEN). */
+		tmu_sched_writel(priv, port, comp, TMU_SCH_BIT_RATE, 0x00000000);
+
+		if (comp == 0)
+			tmu_sched_writel(priv, port, comp, TMU_SCH0_POS, 0x00000000);
+	}
+}
+
+/*
+ * Reset all shapers of @port: disabled, zero weight, default min/max
+ * credit, and TMU_SHP_POS set to the shaper's own index. The raw credit
+ * values (0x0003ff00 / 0x00000400) are hardware defaults - meaning not
+ * derivable from this file; see TMU_SHP_*_CREDIT field layouts.
+ */
+static void tmu_port_shaper_cfg(struct dpns_priv *priv, u32 port)
+{
+	int comp;
+	for (comp = 0; comp < QUE_SHAPER_NUM_PER_PORT; comp++) {
+		tmu_shaper_writel(priv, port, comp, TMU_SHP_CTRL,       0x00000000);
+		tmu_shaper_writel(priv, port, comp, TMU_SHP_WEIGHT,     0x00000000);
+		tmu_shaper_writel(priv, port, comp, TMU_SHP_CTRL2,      0x00000000);
+		tmu_shaper_writel(priv, port, comp, TMU_SHP_MIN_CREDIT, 0x0003ff00);
+		tmu_shaper_writel(priv, port, comp, TMU_SHP_MAX_CREDIT, 0x00000400);
+		tmu_shaper_rmw32(priv, port, comp, TMU_SHP_CTRL2, TMU_SHP_POS, TMU_SHP_POS_SHIFT, comp);
+	}
+}
+
+/* Reset one TMU port: queues, schedulers, shapers, then dequeue config. */
+static void _tmu_reset(struct dpns_priv *priv, u32 port)
+{
+	tmu_port_queue_cfg(priv, port);
+	tmu_port_sched_cfg(priv, port);
+	tmu_port_shaper_cfg(priv, port);
+
+	/* The shaper rate limit does not account for the preamble (8 bytes),
+	 * IFG (12 bytes) or FCS (4 bytes), so add 24 bytes of per-packet
+	 * overhead here.
+	 */
+	tmu_port_writel(priv, port, TMU_TDQ_IFG, 0x00000018);
+
+	if (tdq_ctrl_is_configurable(priv, port))
+		tmu_port_writel(priv, port, TMU_TDQ_CTRL, 0x0000002f);
+}
+
+/* Global TMU reset: program the top-level control and link-list-manager
+ * FIFO thresholds, then reset every port. Always returns 0.
+ */
+static int tmu_reset(struct dpns_priv *priv)
+{
+	u32 port;
+
+	dpns_w32(priv, TMU_CTRL, 0x00000006);
+	dpns_w32(priv, TMU_LLM_FIFO_CTRL0, 0x07fe07ff);
+	dpns_w32(priv, TMU_LLM_FIFO_CTRL1, 0x00280024);
+
+	for (port = 0; port < TMU_MAX_PORT_CNT; port++)
+		_tmu_reset(priv, port);
+
+	return 0;
+}
+
+/* TMU bring-up entry point: currently just a full TMU reset. */
+int dpns_tmu_init(struct dpns_priv *priv)
+{
+	return tmu_reset(priv);
+}

+ 315 - 0
target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/sf_dpns_tmu.h

@@ -0,0 +1,315 @@
+#ifndef __SF_TMU_H__
+#define __SF_TMU_H__
+#include <linux/bitfield.h>
+// npu clk is 400MHz in mpw, will change to 600MHz in fullmask
+// TODO: should use mpw define to diff
+#define LIF_SHP_CLKDIV_DEF      (3)
+#define LIF_WEIGHT_MAX          (0x7ff)
+#define TMU_SHP_CLKDIV_DEF      (6)
+#define TMU_SHP_CLKDIV_MAX      (15)
+#define TMU_WEIGHT_MAX			(256)
+#define TMU_SHP_INT_WGHT_MAX    ((TMU_SHP_WEIGHT_INT_MASK) >> TMU_SHP_WEIGHT_INT_SHIFT)
+#define TMU_SHP_FRAC_WGHT_MAX   ((TMU_SHP_WEIGHT_FRAC_MASK) >> TMU_SHP_WEIGHT_FRAC_SHIFT)
+
+#define TMU_VERSION_INFO		0x148000
+#define TMU_ID_SHIFT			0
+#define TMU_ID				GENMASK(15, 0)
+#define TMU_VERSION_SHIFT		16
+#define TMU_VERSION			GENMASK(23, 16)
+#define TMU_REVISION_SHIFT		24
+#define TMU_REVISION			GENMASK(31, 24)
+
+#define TMU_CTRL			0x148004
+#define TMU_MF_IN_CNT_EN_SHIFT		1
+#define TMU_MF_IN_CNT_EN		BIT(1)
+#define TMU_MD_RDY_EN_SHIFT		2
+#define TMU_MD_RDY_EN			BIT(2)
+#define TMU_DEBUG_SEL_SHIFT		3
+#define TMU_DEBUG_SEL			GENMASK(8, 3)
+
+#define TMU_LLM_FIFO_CTRL0		0x148008
+#define TMU_LLM_FIFO_FULL_ASSERT_SHIFT	0
+#define TMU_LLM_FIFO_FULL_ASSERT	GENMASK(11, 0)
+#define TMU_LLM_FIFO_FULL_NEGATE_SHIFT	16
+#define TMU_LLM_FIFO_FULL_NEGATE	GENMASK(27, 16)
+
+#define TMU_LLM_FIFO_CTRL1		0x14800c
+#define TMU_LLM_FIFO_EMPTY_ASSERT_SHIFT	0
+#define TMU_LLM_FIFO_EMPTY_ASSERT	GENMASK(11, 0)
+#define TMU_LLM_FIFO_EMPTY_NEGATE_SHIFT	16
+#define TMU_LLM_FIFO_EMPTY_NEGATE	GENMASK(27, 16)
+
+#define TMU_RD_CLR_EN          0x2800c0
+#define TMU_BUF_THR0           0x2800d8
+
+/* 36 ports in registers */
+#define TMU_PORT0			0x0000
+
+#define TMU_PORT_SZ			0x2000
+#define TMU_PORT_CNT			36
+#define TMU_PORT_CNT_V1			10
+
+/* 8 queues for each port */
+#define TMU_PORT_QUEUE0			0x100000
+
+#define TMU_PORT_QUEUE_SZ		0x20
+#define TMU_PORT_QUEUE_CNT		8
+
+#define TMU_PORT_QUEUE_CFG0		0x00
+/* 0x00: mix tail drop
+ * 0x01: tail drop (default)
+ * 0x02: WRED
+ * 0x03: buf count tail drop
+ */
+#define TMU_DROP_TYPE_SHIFT		0
+#define TMU_DROP_TYPE			GENMASK(1, 0)
+#define TMU_QUEUE_MAX_SHIFT		8
+#define TMU_QUEUE_MAX			GENMASK(18, 8)
+#define TMU_QUEUE_MIN_SHIFT		20
+#define TMU_QUEUE_MIN			GENMASK(30, 20) // related to WRED
+
+#define TMU_QUEUE_MIX_TAIL_DROP		0x00
+#define TMU_QUEUE_TAIL_DROP		0x01
+#define TMU_QUEUE_WRED			0x02
+#define TMU_QUEUE_BUF_CNT_TAIL_DROP	0x03
+
+/* drop probability of each stage in WRED */
+#define TMU_PORT_QUEUE_CFG1		0x04
+#define TMU_WRED_HW_PROB_STG0_SHIFT	0
+#define TMU_WRED_HW_PROB_STG0		GENMASK(4, 0)
+#define TMU_WRED_HW_PROB_STG1_SHIFT	5
+#define TMU_WRED_HW_PROB_STG1		GENMASK(9, 5)
+#define TMU_WRED_HW_PROB_STG2_SHIFT	10
+#define TMU_WRED_HW_PROB_STG2		GENMASK(14, 10)
+#define TMU_WRED_HW_PROB_STG3_SHIFT	15
+#define TMU_WRED_HW_PROB_STG3		GENMASK(19, 15)
+#define TMU_WRED_HW_PROB_STG4_SHIFT	20
+#define TMU_WRED_HW_PROB_STG4		GENMASK(24, 20)
+#define TMU_WRED_HW_PROB_STG5_SHIFT	25
+#define TMU_WRED_HW_PROB_STG5		GENMASK(29, 25)
+
+#define TMU_PORT_QUEUE_CFG2		0x08
+#define TMU_WRED_HW_PROB_STG6_SHIFT	0
+#define TMU_WRED_HW_PROB_STG6		GENMASK(4, 0)
+#define TMU_WRED_HW_PROB_STG7_SHIFT	5
+#define TMU_WRED_HW_PROB_STG7		GENMASK(9, 5)
+
+#define TMU_WRED_PROB_CNT		8
+
+/* RO */
+#define TMU_PORT_QUEUE_STS0		0x0c
+#define TMU_QUEUE_HEAD_PTR_SHIFT	0
+#define TMU_QUEUE_HEAD_PTR		GENMASK(10, 0)
+#define TMU_QUEUE_TAIL_PTR_SHIFT	16
+#define TMU_QUEUE_TAIL_PTR		GENMASK(26, 16)
+
+/* RO */
+#define TMU_PORT_QUEUE_STS1		0x10
+#define TMU_QUEUE_PKT_CNT_SHIFT		0
+#define TMU_QUEUE_PKT_CNT		GENMASK(11, 0)
+
+/* RO */
+#define TMU_PORT_QUEUE_STS2		0x14
+#define TMU_QUEUE_BUF_CNT_SHIFT		0
+#define TMU_QUEUE_BUF_CNT		GENMASK(11, 0)
+
+/* max buffer cell of queue */
+#define TMU_PORT_QUEUE_CFG3		0x18
+#define TMU_QUEUE_BUF_MAX_SHIFT		0
+#define TMU_QUEUE_BUF_MAX		GENMASK(11, 0)
+
+/* 2 schedulers (dequeuing) for each port */
+#define TMU_SCH0			0x101000
+#define TMU_SCH1			0x101040
+
+#define TMU_SCH_SZ			0x40
+#define TMU_SCH_CNT			2
+
+#define TMU_SCH_CTRL			0x00
+#define TMU_SCH_ALGO_SHIFT		0
+#define TMU_SCH_ALGO			GENMASK(6, 0)
+
+/* TMU_SCH_ALGO */
+#define TMU_SCH_PQ			0x00
+#define TMU_SCH_WFQ			0x01
+#define TMU_SCH_DWRR			0x02
+#define TMU_SCH_RR			0x03
+#define TMU_SCH_WRR			0x04
+
+#define TMU_SCH_Q0_WEIGHT		0x10
+#define TMU_SCH_Q1_WEIGHT		0x14
+#define TMU_SCH_Q2_WEIGHT		0x18
+#define TMU_SCH_Q3_WEIGHT		0x1c
+#define TMU_SCH_Q4_WEIGHT		0x20
+#define TMU_SCH_Q5_WEIGHT		0x24
+#define TMU_SCH_Q6_WEIGHT		0x28
+#define TMU_SCH_Q7_WEIGHT		0x2c
+#define TMU_SCH_Q_WEIGHT_SZ		4
+
+#define TMU_SCH_Q_WEIGHT_CNT		8
+
+/* TMU_SCH_Qn_WEIGHT */
+#define TMU_SCH_QUEUE_WEIGHT_SHIFT	0
+#define TMU_SCH_QUEUE_WEIGHT		GENMASK(31, 0)
+
+/* port queue and scheduler selection */
+#define TMU_SCH_QUEUE_ALLOC0		0x30
+#define TMU_SCH_Q0_ALLOC_SHIFT		0
+#define TMU_SCH_Q0_ALLOC		GENMASK(3, 0)
+#define TMU_SCH_Q1_ALLOC_SHIFT		8
+#define TMU_SCH_Q1_ALLOC		GENMASK(11, 8)
+#define TMU_SCH_Q2_ALLOC_SHIFT		16
+#define TMU_SCH_Q2_ALLOC		GENMASK(19, 16)
+#define TMU_SCH_Q3_ALLOC_SHIFT		24
+#define TMU_SCH_Q3_ALLOC		GENMASK(27, 24)
+
+#define TMU_SCH_QUEUE_ALLOC1		0x34
+#define TMU_SCH_Q4_ALLOC_SHIFT		0
+#define TMU_SCH_Q4_ALLOC		GENMASK(3, 0)
+#define TMU_SCH_Q5_ALLOC_SHIFT		8
+#define TMU_SCH_Q5_ALLOC		GENMASK(11, 8)
+#define TMU_SCH_Q6_ALLOC_SHIFT		16
+#define TMU_SCH_Q6_ALLOC		GENMASK(19, 16)
+#define TMU_SCH_Q7_ALLOC_SHIFT		24
+#define TMU_SCH_Q7_ALLOC		GENMASK(27, 24)
+
+#define TMU_SCH_Q_ALLOC_CNT		8
+
+// schedule by pkt_len or pkt_cnt
+#define TMU_SCH_BIT_RATE		0x38
+#define TMU_SCH_BIT_RATE_SHIFT		0
+#define TMU_SCH_BIT_RATE_MASK		GENMASK(31, 0)
+#define TMU_SCH_BIT_RATE_PKT_LEN	0x00
+#define TMU_SCH_BIT_RATE_PKT_CNT	0x01
+
+// RW
+// SCH0 Only, to select how to connect to SCH1
+#define TMU_SCH0_POS			0x3c
+#define TMU_SCH0_POS_SHIFT		0
+#define TMU_SCH0_POS_MASK		GENMASK(3, 0)
+
+/* 5 shapers for each port */
+#define TMU_SHP0			0x101080
+
+#define TMU_SHP_SZ			0x0020
+#define TMU_SHP_CNT			6
+
+#define TMU_SHP_CTRL			0x00
+#define TMU_SHP_EN_SHIFT		0
+#define TMU_SHP_EN			BIT(0)
+#define TMU_SHP_CLK_DIV_SHIFT		1
+#define TMU_SHP_CLK_DIV			GENMASK(31, 1)
+
+/* byte size of per token (credit) */
+#define TMU_SHP_WEIGHT			0x04
+#define TMU_SHP_WEIGHT_FRAC_SHIFT	0
+#define TMU_SHP_WEIGHT_FRAC_MASK	GENMASK(11, 0)
+#define TMU_SHP_WEIGHT_INT_SHIFT        12
+#define TMU_SHP_WEIGHT_INT_MASK         GENMASK(19, 12)
+
+#define TMU_SHP_MAX_CREDIT		0x08
+#define TMU_SHP_MAX_CREDIT_SHIFT	10
+#define TMU_SHP_MAX_CREDIT_MASK		GENMASK(31, 10)
+
+// (fraction part num) = (register fraction part) / (2 ^ 12)
+#define TMU_SHP_FRAC_WEIGHT_2DBL(reg)   (((double)(reg)) / (1 << 12))
+#define TMU_SHP_DBL_2FRAC_WEIGHT(dbl)   (((double)(dbl)) * (1 << 12))
+
+#define TMU_SHP_CTRL2			0x0c
+#define TMU_SHP_BIT_RATE_SHIFT		0
+#define TMU_SHP_BIT_RATE		BIT(0)
+#define TMU_SHP_POS_SHIFT		1
+#define TMU_SHP_POS			GENMASK(5, 1)	// RW
+#define TMU_SHP_MODE_SHIFT		6
+#define TMU_SHP_MODE			BIT(6)
+
+/* TMU_SHP_BIT_RATE */
+#define TMU_SHP_SCHED_PKT_LEN		0
+#define TMU_SHP_SCHED_PKT_CNT		1
+
+/* TMU_SHP_MODE */
+#define TMU_SHP_MODE_KEEP_CREDIT	0
+#define TMU_SHP_MODE_CLEAR_CREDIT	1
+
+#define TMU_SHP_MIN_CREDIT		0x10
+#define TMU_SHP_MIN_CREDIT_SHIFT	0
+#define TMU_SHP_MIN_CREDIT_MASK		GENMASK(21, 0)
+
+/* RO */
+#define TMU_SHP_STATUS			0x14
+#define TMU_SHP_CURR_STATUS_SHIFT	0	// shaper is working or not
+#define TMU_SHP_CURR_STATUS		BIT(0)
+#define TMU_SHP_CREDIT_CNTR_SHIFT	1
+#define TMU_SHP_CREDIT_CNTR		GENMASK(23, 1)
+
+/* dequeue stage configs */
+#define TMU_TDQ				0x101140
+
+/* effective only when TMU_SCH_BIT_RATE is PKT_LEN mode.
+ * ifg value can be used to adjust packet length of scheduler.
+ */
+#define TMU_TDQ_IFG			(TMU_TDQ + 0x00)
+#define TMU_TDQ_IIF_CFG_SHIFT		0
+#define TMU_TDQ_IIF_CFG			GENMASK(7, 0)
+
+#define TMU_TDQ_CTRL			(TMU_TDQ + 0x04)
+#define TMU_SHP_CLK_CNT_EN_SHIFT	0
+#define TMU_SHP_CLK_CNT_EN		BIT(0)
+#define TMU_TDQ_HW_EN_SHIFT		1
+#define TMU_TDQ_HW_EN			BIT(1)
+#define TMU_SCH0_EN_SHIFT		2
+#define TMU_SCH0_EN			BIT(2)
+#define TMU_SCH1_EN_SHIFT		3
+#define TMU_SCH1_EN			BIT(3)
+#define TMU_TDQ_ALLOW_CFG_SHIFT		4
+#define TMU_TDQ_ALLOW_CFG		BIT(4)	// RO, 1 = configurable
+#define TMU_PKT_LEFT_IGNORE_SHIFT	5
+#define TMU_PKT_LEFT_IGNORE		BIT(5)
+
+#define TMU_PORT_BASE(port)		(TMU_PORT0 + TMU_PORT_SZ * (port))
+#define TMU_QUEUE_BASE(q)		(TMU_PORT_QUEUE0 + TMU_PORT_QUEUE_SZ * (q))
+#define TMU_SCHED_BASE(sch)		(TMU_SCH0 + TMU_SCH_SZ * (sch))
+#define TMU_SHAPER_BASE(shp)		(TMU_SHP0 + TMU_SHP_SZ * (shp))
+
+
+
+#define TMU_MAX_PORT_CNT 10
+#define QUE_MAX_NUM_PER_PORT 8
+#define QUE_SHAPER_NUM_PER_PORT 6
+#define QUE_SCH_NUM_PER_PORT 2
+
+/* Queue drop policies; values match the TMU_DROP_TYPE field encoding. */
+enum TMU_QUEUE_TYPE {
+	TMU_Q_MIX_TAIL_DROP = 0,
+	TMU_Q_TAIL_DROP,
+	TMU_Q_WRED,
+	TMU_Q_BUF_CNT_TAIL_DROP,
+	NUM_TMU_QUEUE_TYPES,
+};
+
+/* Scheduler algorithms; values match the TMU_SCH_ALGO field encoding.
+ * (NUM_TMU_SCEHD_ALGS: existing typo kept - renaming would break users.)
+ */
+enum TMU_SCHED_ALG {
+	TMU_SCHED_PQ = 0,
+	TMU_SCHED_WFQ,
+	TMU_SCHED_DWRR,
+	TMU_SCHED_RR,
+	TMU_SCHED_WRR,
+	NUM_TMU_SCEHD_ALGS,
+};
+
+/* Scheduling accounting unit: by packet length or by packet count. */
+enum TMU_BITRATE_MODE {
+	TMU_BITRATE_PKTLEN = 0,
+	TMU_BITRATE_PKTCNT,
+	NUM_TMU_BITRATE_MODES,
+};
+
+/* Per-queue weight register offsets, indexed by queue number (0-7). */
+static const u8 sched_q_weight_regs[] = {
+	TMU_SCH_Q0_WEIGHT,
+	TMU_SCH_Q1_WEIGHT,
+	TMU_SCH_Q2_WEIGHT,
+	TMU_SCH_Q3_WEIGHT,
+	TMU_SCH_Q4_WEIGHT,
+	TMU_SCH_Q5_WEIGHT,
+	TMU_SCH_Q6_WEIGHT,
+	TMU_SCH_Q7_WEIGHT,
+};
+
+#endif /* __SF_TMU_H__ */

+ 1618 - 0
target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/sfxgmac-dma.c

@@ -0,0 +1,1618 @@
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/if_ether.h>
+#include <linux/of_platform.h>
+#include <linux/seq_file.h>
+#include <linux/tcp.h>
+#include <net/page_pool/helpers.h>
+
+#include "sfxgmac-ext.h"
+#include "dma.h"
+#include "eth.h"
+
+/* RX descriptors are consumed in pairs: a normal descriptor immediately
+ * followed by a context descriptor, read together as one unit.
+ */
+struct xgmac_dma_desc_rx {
+	struct xgmac_dma_desc norm;
+	struct xgmac_dma_desc ctxt;
+};
+
+/* Program the low 32 bits of the TX descriptor ring base for @queue. */
+static void xgmac_dma_set_tx_head_ptr(struct xgmac_dma_priv *priv,
+				      dma_addr_t addr, u32 queue)
+{
+	reg_write(priv, XGMAC_DMA_CH_TxDESC_LADDR(queue), lower_32_bits(addr));
+}
+
+/* Program the low 32 bits of the RX descriptor ring base for @queue. */
+static void xgmac_dma_set_rx_head_ptr(struct xgmac_dma_priv *priv,
+				      dma_addr_t addr, u32 queue)
+{
+	reg_write(priv, XGMAC_DMA_CH_RxDESC_LADDR(queue), lower_32_bits(addr));
+}
+
+/* Advance the TX tail pointer; writing it kicks the DMA to fetch new descs. */
+static void xgmac_dma_set_tx_tail_ptr(struct xgmac_dma_priv *priv,
+				      dma_addr_t addr, u32 queue)
+{
+	reg_write(priv, XGMAC_DMA_CH_TxDESC_TAIL_LPTR(queue), lower_32_bits(addr));
+}
+
+/* Advance the RX tail pointer, returning refilled descriptors to the DMA. */
+static void xgmac_dma_set_rx_tail_ptr(struct xgmac_dma_priv *priv,
+				      dma_addr_t addr, u32 queue)
+{
+	reg_write(priv, XGMAC_DMA_CH_RxDESC_TAIL_LPTR(queue), lower_32_bits(addr));
+}
+
+/* Point a TX descriptor's buffer 1 at @addr (low 32 bits); buffer 2 cleared. */
+static void xgmac_dma_set_tx_desc_addr(struct xgmac_dma_desc *p,
+				       dma_addr_t addr)
+{
+	p->des0 = cpu_to_le32(lower_32_bits(addr));
+	p->des1 = 0;
+}
+
+/* Point an RX descriptor at @addr (low 32 bits) and clear the other words. */
+static void xgmac_dma_set_rx_desc_addr(struct xgmac_dma_desc *p,
+				       dma_addr_t addr)
+{
+	p->des0 = cpu_to_le32(lower_32_bits(addr));
+	p->des1 = 0;
+	p->des2 = 0;
+}
+
+/* Hand the RX descriptor to the DMA; optionally request an IRQ on completion. */
+static void xgmac_dma_set_rx_owner(struct xgmac_dma_desc *p, bool int_en)
+{
+	u32 val = XGMAC_RDES3_OWN;
+
+	if (int_en)
+		val |= XGMAC_RDES3_IOC;
+	p->des3 = cpu_to_le32(val);
+}
+
+/* Fully initialize an RX descriptor: set buffer address, then give ownership
+ * to the DMA.
+ */
+static void xgmac_dma_init_rx_desc(struct xgmac_dma_desc *p,
+				   dma_addr_t addr, bool int_en)
+{
+	xgmac_dma_set_rx_desc_addr(p, addr);
+	xgmac_dma_set_rx_owner(p, int_en);
+}
+
+/*
+ * Derive skb->ip_summed and skb->hash from the hardware RX descriptor words.
+ * CHECKSUM_UNNECESSARY is set only for non-fragmented, non-tunnelled packets
+ * with a recognised L3 and L4 type, and only when the netdev has RXCSUM on.
+ */
+static __always_inline void
+xgmac_dma_rx_coe_hash(const struct net_device *dev, struct sk_buff *skb,
+		      u32 rdes0, u32 rdes_ctx1)
+{
+	bool rxcoe, is_l3, is_l4, not_tunnel, not_frag;
+	u32 hash;
+	/* RX COE is only available if:
+	 * 1. It's not an IP fragment
+	 * 2. It's not a tunnel packet (4in6, PPPoE, etc.)
+	 * 3. Its L4 protocol is known (TCP, UDP, or ICMP)
+	 *
+	 * If all of the above are true, mark the skb as
+	 * CHECKSUM_UNNECESSARY
+	 *
+	 * Note: bit-wise ops are used to avoid branches
+	 */
+	not_frag = FIELD_GET(XGMAC_RDES0_IP_FRAG, rdes0) == FRAG_NONE;
+	not_tunnel = FIELD_GET(XGMAC_RDES1_TNP, rdes_ctx1) != 1;
+	is_l3 = (FIELD_GET(XGMAC_RDES0_L3_TYPE, rdes0) == L3_TYPE_IPV4) |
+		(FIELD_GET(XGMAC_RDES0_L3_TYPE, rdes0) == L3_TYPE_IPV6);
+	is_l4 = FIELD_GET(XGMAC_RDES0_L4_TYPE, rdes0) != L4_TYPE_UNKNOWN;
+
+	rxcoe = !!(dev->features & NETIF_F_RXCSUM) & not_frag & not_tunnel & is_l3 & is_l4;
+
+	skb->ip_summed = rxcoe ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
+
+	/* Fill in skb->hash; is_l4 marks the hash as an L4 (4-tuple) hash */
+	hash = FIELD_GET(XGMAC_RDES1_RXHASH, rdes_ctx1);
+	__skb_set_hash(skb, hash, false, is_l4);
+}
+
+/* Free TX descriptors in the ring; one slot is always kept empty so that a
+ * full ring can be told apart from an empty one.
+ */
+static u32 xgmac_dma_tx_avail(struct xgmac_txq *txq)
+{
+	if (txq->dirty_tx > txq->cur_tx)
+		return txq->dirty_tx - txq->cur_tx - 1;
+	else
+		return DMA_TX_SIZE - txq->cur_tx + txq->dirty_tx - 1;
+}
+
+/*
+ * Re-arm consumed RX descriptors with fresh page-pool buffers and advance the
+ * hardware tail pointer. Descriptors are handled in pairs (normal + context),
+ * hence the stride of 2; the context descriptor needs no buffer.
+ */
+static void xgmac_dma_rx_refill(struct xgmac_dma_priv *priv,
+				struct xgmac_rxq *rxq)
+{
+	u32 entry = rxq->dirty_rx;
+	u16 channel = rxq->idx;
+	s32 dirty;
+
+	if (rxq->dirty_rx <= rxq->cur_rx)
+		dirty = rxq->cur_rx - rxq->dirty_rx;
+	else
+		dirty = DMA_RX_SIZE - rxq->dirty_rx + rxq->cur_rx;
+
+	for (; dirty > 0; dirty -= 2) {
+		struct xgmac_dma_rx_buffer *buf = &rxq->buf_pool[entry];
+		struct xgmac_dma_desc *p = &rxq->dma_rx[entry];
+
+		if (likely(buf->page == NULL)) {
+			buf->page = page_pool_dev_alloc_frag(rxq->page_pool,
+							     &buf->offset,
+							     priv->rx_alloc_size);
+			if (unlikely(!buf->page))
+				break;
+		}
+
+		/* BUF_PAD leaves headroom in front of the packet data */
+		xgmac_dma_init_rx_desc(p, page_pool_get_dma_addr(buf->page) + buf->offset + BUF_PAD, false);
+		/* No buffer space required by context descs */
+		xgmac_dma_init_rx_desc(p + 1, 0, false);
+
+		entry = (entry + 2) % DMA_RX_SIZE;
+	}
+
+	rxq->dirty_rx = entry;
+	rxq->rx_tail_addr = rxq->dirty_rx * sizeof(struct xgmac_dma_desc) +
+			    rxq->dma_rx_phy;
+
+	xgmac_dma_set_rx_tail_ptr(priv, rxq->rx_tail_addr, channel);
+}
+
+/*
+ * NAPI RX processing: walk completed descriptor pairs (normal + context),
+ * build skbs from page-pool buffers, hand them to the GRO stack, then refill
+ * the ring. Returns the number of descriptor pairs processed (<= budget).
+ *
+ * Fix: the source MAC assembly shifted a 32-bit FIELD_GET() result left by
+ * 32, which is undefined behavior in C and drops the high 16 MAC bits on
+ * common compilers; cast to u64 before the shift.
+ */
+static int xgmac_dma_poll_rx(struct xgmac_rxq *rxq, int budget)
+{
+	struct xgmac_dma_priv *priv = container_of(rxq, struct xgmac_dma_priv,
+						   rxq[rxq->idx]);
+	unsigned int next_entry = rxq->cur_rx;
+	int count = 0;
+
+	for (; count < budget; count++) {
+		u32 len, rdes0, rdes2, rdes3, rdes_ctx0, rdes_ctx1, rdes_ctx2, rdes_ctx3, sta_index, rpt_index;
+		struct xgmac_dma_rx_buffer *buf;
+		register struct xgmac_dma_desc_rx rx;
+		struct net_device *netdev;
+		struct sk_buff *skb;
+		unsigned int entry;
+		u8 id, up_reason, vlan_pri, no_frag;
+		u16 ovid, sport, eth_type, dscp, pkt_type, tnp;
+		u64 smac;
+
+		entry = next_entry;
+		buf = &rxq->buf_pool[entry];
+
+		rx = __READ_ONCE(*(struct xgmac_dma_desc_rx *)&rxq->dma_rx[next_entry]);
+		/* check if owned by the DMA otherwise go ahead */
+		if (unlikely(le32_to_cpu(rx.ctxt.des3) & XGMAC_RDES3_OWN))
+			break;
+
+		rxq->cur_rx = (entry + 2) % DMA_RX_SIZE;
+		next_entry = rxq->cur_rx;
+
+		rdes3 = le32_to_cpu(rx.norm.des3);
+		/* Only whole packets (last-descriptor set) are delivered */
+		if (unlikely(!(rdes3 & XGMAC_RDES3_LD)))
+			continue;
+
+		if (unlikely(rdes3 & XGMAC_RDES3_ES)) {
+			pr_debug_ratelimited("error type: 0x%lx\n",
+					FIELD_GET(XGMAC_RDES3_ET, rdes3));
+			continue;
+		}
+
+		rdes0 = le32_to_cpu(rx.norm.des0);
+		/* get ivport: maps the packet to its ingress net_device */
+		id = FIELD_GET(XGMAC_RDES0_IVPORT, rdes0);
+		netdev = priv->ndevs[id];
+		if (unlikely(!netdev))
+			continue;
+
+		/* When memory is tight, the buf->addr may be empty */
+		if (unlikely(!buf->page))
+			break;
+
+		len = FIELD_GET(XGMAC_RDES3_PL, rdes3);
+		dma_sync_single_for_cpu(priv->dev, page_pool_get_dma_addr(buf->page) + buf->offset + BUF_PAD, len, DMA_FROM_DEVICE);
+		prefetch(page_address(buf->page) + buf->offset + BUF_PAD);
+		skb = napi_build_skb(page_address(buf->page) + buf->offset, priv->rx_alloc_size);
+		if (unlikely(!skb))
+			break;
+
+		buf->page = NULL;
+		skb_mark_for_recycle(skb);
+		skb_reserve(skb, BUF_PAD);
+		__skb_put(skb, len);
+
+		rdes2 = le32_to_cpu(rx.norm.des2);
+		ovid = FIELD_GET(XGMAC_RDES2_OVID, rdes2);
+		no_frag = FIELD_GET(XGMAC_RDES2_DFRAG, rdes2);
+		vlan_pri = FIELD_GET(XGMAC_RDES3_TCI_PRI, rdes3);
+		sta_index = FIELD_GET(XGMAC_RDES0_STA_INDEX, rdes0);
+		rpt_index = FIELD_GET(XGMAC_RDES0_RPT_INDEX, rdes0);
+
+		/* get the context descriptor */
+		rdes_ctx0 = le32_to_cpu(rx.ctxt.des0);
+		rdes_ctx1 = le32_to_cpu(rx.ctxt.des1);
+		rdes_ctx2 = le32_to_cpu(rx.ctxt.des2);
+		rdes_ctx3 = le32_to_cpu(rx.ctxt.des3);
+		sport = FIELD_GET(XGMAC_RDES0_SPORT, rdes_ctx0);
+		eth_type = FIELD_GET(XGMAC_RDES0_ETH_TYPE, rdes_ctx0);
+		dscp = FIELD_GET(XGMAC_RDES1_DSCP, rdes_ctx1);
+		tnp = FIELD_GET(XGMAC_RDES1_TNP, rdes_ctx1);
+		up_reason = FIELD_GET(XGMAC_RDES1_UP_REASON, rdes_ctx1);
+		/* Cast before shifting: a u32 << 32 is undefined behavior and
+		 * would discard MAC bits 32-47.
+		 */
+		smac = rdes_ctx2 | (u64)FIELD_GET(XGMAC_RDES3_SMAC_32_47, rdes_ctx3) << 32;
+		pkt_type = FIELD_GET(XGMAC_RDES3_PKT_TYPE, rdes_ctx3);
+		pr_debug_ratelimited("%s: up_reason:%02x sta_index:%u rpt_index:%u "
+				"ovid:%u vlan_pri:%u no_frag:%u sport:%u eth_type:0x%x "
+				"dscp:%u tnp:%u pkt_type:%u smac:%llx\n",
+				netdev->name, up_reason, sta_index, rpt_index,
+				ovid, vlan_pri, no_frag, sport, eth_type,
+				dscp, tnp, pkt_type, smac);
+
+		xgmac_dma_rx_coe_hash(netdev, skb, rdes0, rdes_ctx1);
+
+		skb_record_rx_queue(skb, rxq->idx);
+		skb->protocol = eth_type_trans(skb, netdev);
+		napi_gro_receive(&rxq->napi, skb);
+	}
+
+	xgmac_dma_rx_refill(priv, rxq);
+
+	return count;
+}
+
+/*
+ * NAPI TX completion: reclaim finished descriptors, unmap their buffers,
+ * free skbs, update per-port BQL counters, and re-wake stopped queues once
+ * a quarter of the ring is free again. Returns descriptors reclaimed.
+ */
+static int xgmac_dma_poll_tx(struct xgmac_txq *txq, int budget)
+{
+	struct xgmac_dma_priv *priv = container_of(txq, struct xgmac_dma_priv,
+						   txq[txq->idx]);
+	u32 bytes_compl[DPNS_HOST_PORT] = {}, pkts_compl[DPNS_HOST_PORT] = {};
+	u32 entry, count = 0, i;
+
+	spin_lock_bh(&txq->lock);
+
+	entry = txq->dirty_tx;
+	while (entry != txq->cur_tx && count < budget) {
+		struct xgmac_dma_desc *p = &txq->dma_tx[entry];
+		struct sk_buff *skb = txq->tx_skbuff[entry];
+		u32 tdes3 = le32_to_cpu(READ_ONCE(p->des3));
+
+		/* Check if the descriptor is owned by the DMA */
+		if (unlikely(tdes3 & XGMAC_TDES3_OWN))
+			break;
+
+		/* Check if the descriptor is a context descriptor */
+		if (unlikely(tdes3 & XGMAC_TDES3_CTXT))
+			goto next;
+
+		count++;
+		/* Make sure descriptor fields are read after reading
+		 * the own bit.
+		 */
+		dma_rmb();
+
+		if (likely(txq->tx_skbuff_dma[entry].buf)) {
+			if (txq->tx_skbuff_dma[entry].map_as_page)
+				dma_unmap_page(priv->dev,
+					       txq->tx_skbuff_dma[entry].buf,
+					       txq->tx_skbuff_dma[entry].len,
+					       DMA_TO_DEVICE);
+			else
+				dma_unmap_single(priv->dev,
+						 txq->tx_skbuff_dma[entry].buf,
+						 txq->tx_skbuff_dma[entry].len,
+						 DMA_TO_DEVICE);
+			txq->tx_skbuff_dma[entry].buf = 0;
+			txq->tx_skbuff_dma[entry].len = 0;
+			txq->tx_skbuff_dma[entry].map_as_page = false;
+		}
+
+		txq->tx_skbuff_dma[entry].last_segment = false;
+
+		if (likely(skb)) {
+			u8 id = XGMAC_SKB_CB(skb)->id;
+
+			/* BQL accounting only applies to fast-mode packets */
+			if (XGMAC_SKB_CB(skb)->fastmode) {
+				pkts_compl[id]++;
+				bytes_compl[id] += skb->len;
+			}
+			napi_consume_skb(skb, budget);
+			txq->tx_skbuff[entry] = NULL;
+		}
+
+next:
+		entry = (entry + 1) % DMA_TX_SIZE;
+	}
+	txq->dirty_tx = entry;
+
+	for (i = 0; i < DPNS_HOST_PORT; i++) {
+		struct net_device *dev = priv->ndevs[i];
+		struct netdev_queue *queue;
+
+		if (!dev)
+			continue;
+
+		queue = netdev_get_tx_queue(dev, txq->idx);
+		netdev_tx_completed_queue(queue, pkts_compl[i], bytes_compl[i]);
+
+		if (unlikely(netif_tx_queue_stopped(queue) &&
+			     xgmac_dma_tx_avail(txq) > DMA_TX_SIZE / 4))
+			netif_tx_wake_queue(queue);
+	}
+
+	spin_unlock_bh(&txq->lock);
+
+	return count;
+}
+
+/* RX NAPI handler: poll the ring, and re-enable the channel IRQ once the
+ * budget is not exhausted (interrupt was disabled in the IRQ handler).
+ */
+static int xgmac_dma_napi_rx(struct napi_struct *napi, int budget)
+{
+	struct xgmac_rxq *rxq = container_of(napi, struct xgmac_rxq, napi);
+	int work_done;
+
+	work_done = xgmac_dma_poll_rx(rxq, budget);
+	if (work_done < budget && napi_complete_done(napi, work_done))
+		enable_irq(rxq->irq);
+
+	return work_done;
+}
+
+/* TX NAPI handler: reclaim completions, then re-enable the channel IRQ when
+ * done (interrupt was disabled in the IRQ handler).
+ */
+static int xgmac_dma_napi_tx(struct napi_struct *napi, int budget)
+{
+	struct xgmac_txq *txq = container_of(napi, struct xgmac_txq, napi);
+	int work_done = xgmac_dma_poll_tx(txq, budget);
+
+	if (work_done < budget && napi_complete_done(napi, work_done))
+		enable_irq(txq->irq);
+
+	return work_done;
+}
+
+/*
+ * Shared "misc" interrupt: handles MTL queue events (RX overflow) and DMA
+ * abnormal events (CDE/DDE/FBE/RBU) for every channel. Normal TX/RX
+ * completion bits are masked out here; they have dedicated per-queue IRQs.
+ */
+static irqreturn_t xgmac_dma_irq_misc(int irq, void *dev_id)
+{
+	struct xgmac_dma_priv *priv = dev_id;
+	u32 mtl_status = reg_read(priv, XGMAC_MTL_INT_STATUS);
+	u32 dma_status = reg_read(priv, XGMAC_DMA_INT_STATUS);
+	u32 i;
+
+	//dev_info(priv->dev, "irq %d mtl %#08x dma %#08x\n",
+	//		irq, mtl_status, dma_status);
+
+	for (i = 0; i < DMA_CH_MAX; i++) {
+		if (BIT(i) & mtl_status) {
+			u32 status = reg_read(priv, XGMAC_MTL_QINT_STATUS(i));
+
+			if (status & XGMAC_RXOVFIS)
+				pr_debug_ratelimited("RX queue %u overflow\n", i);
+
+			/* Clear interrupts */
+			reg_write(priv, XGMAC_MTL_QINT_STATUS(i), status);
+		}
+
+		if (BIT(i) & dma_status) {
+			u32 status = reg_read(priv, XGMAC_DMA_CH_STATUS(i));
+			/* TI/TBU/RI are serviced by the per-queue handlers */
+			status &= ~(XGMAC_TI | XGMAC_TBU | XGMAC_RI);
+
+			/* ABNORMAL interrupts */
+			if (unlikely(status & XGMAC_AIS)) {
+				if (status & XGMAC_CDE)
+					dev_info(priv->dev, "Trigger queue %u CDE\n", i);
+
+				if (status & XGMAC_DDE)
+					dev_info(priv->dev, "Trigger queue %u DDE\n", i);
+
+				if (status & XGMAC_FBE) {
+					dev_info(priv->dev, "Trigger queue %u FBE\n", i);
+					/* TODO: restart TX */
+				}
+
+				if (status & XGMAC_RBU) {
+					pr_debug_ratelimited("Trigger queue %u RBU\n", i);
+				}
+			}
+
+			reg_write(priv, XGMAC_DMA_CH_STATUS(i), status);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* Per-queue TX interrupt: acknowledge TI/TBU, mask the line, and schedule
+ * the TX NAPI poller, which re-enables the IRQ when finished.
+ */
+static irqreturn_t xgmac_dma_irq_tx(int irq, void *dev_id)
+{
+	struct xgmac_txq *txq = dev_id;
+	struct xgmac_dma_priv *priv = container_of(txq, struct xgmac_dma_priv,
+						   txq[txq->idx]);
+	u16 channel = txq->idx;
+	u32 status = reg_read(priv, XGMAC_DMA_CH_STATUS(channel)) &
+		(XGMAC_TBU | XGMAC_TI);
+
+	if (unlikely(!status))
+		return IRQ_NONE;
+
+	/* Clear interrupts */
+	reg_write(priv, XGMAC_DMA_CH_STATUS(channel), status);
+
+	/* TX NORMAL interrupts */
+	if (likely(napi_schedule_prep(&txq->napi))) {
+		/* Disable TX interrupt */
+		disable_irq_nosync(irq);
+
+		/* Turn on polling */
+		__napi_schedule_irqoff(&txq->napi);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* Per-queue RX interrupt: acknowledge RI, mask the line, and schedule the
+ * RX NAPI poller, which re-enables the IRQ when finished.
+ */
+static irqreturn_t xgmac_dma_irq_rx(int irq, void *dev_id)
+{
+	struct xgmac_rxq *rxq = dev_id;
+	struct xgmac_dma_priv *priv = container_of(rxq, struct xgmac_dma_priv,
+						   rxq[rxq->idx]);
+	u16 channel = rxq->idx;
+	u32 status = reg_read(priv, XGMAC_DMA_CH_STATUS(channel)) &
+		(XGMAC_RI);
+
+	if (unlikely(!status))
+		return IRQ_NONE;
+
+	/* Clear interrupts */
+	reg_write(priv, XGMAC_DMA_CH_STATUS(channel), status);
+
+	/* RX NORMAL interrupts */
+	if (likely(napi_schedule_prep(&rxq->napi))) {
+		/* Disable RX interrupt */
+		disable_irq_nosync(irq);
+
+		/* Turn on polling */
+		__napi_schedule_irqoff(&rxq->napi);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Trigger a DMA software reset and wait (up to 1s) for the hardware to
+ * clear the SWR bit. Returns 0 on success, -ETIMEDOUT otherwise.
+ */
+static int xgmac_dma_soft_reset(struct xgmac_dma_priv *priv)
+{
+	unsigned long timeout = jiffies + HZ;
+
+	reg_write(priv, XGMAC_DMA_MODE, XGMAC_SWR);
+	do {
+		if (!(reg_read(priv, XGMAC_DMA_MODE) & XGMAC_SWR))
+			return 0;
+
+		cond_resched();
+	} while (time_after(timeout, jiffies));
+
+	dev_err(priv->dev, "DMA reset timed out\n");
+	return -ETIMEDOUT;
+}
+
+/*
+ * One-time DMA/MTL hardware setup: soft reset, bus-mode tuning, static RX
+ * queue-to-channel mapping, and per-channel PBL/queue-size configuration.
+ * Returns 0 on success or the soft-reset error code.
+ */
+static int xgmac_dma_init(struct xgmac_dma_priv *priv)
+{
+	int ret;
+	u32 i;
+
+	/* DMA SW reset */
+	ret = xgmac_dma_soft_reset(priv);
+	if (ret)
+		return ret;
+
+	/* DMA Configuration */
+	/* Exclude per-channel interrupts from sbd_intr_o */
+	reg_write(priv, XGMAC_DMA_MODE, FIELD_PREP(XGMAC_INTM, 1));
+	reg_rmw(priv, XGMAC_DMA_SYSBUS_MODE,
+		XGMAC_RD_OSR_LMT | XGMAC_WR_OSR_LMT,
+		FIELD_PREP(XGMAC_RD_OSR_LMT, 31) | XGMAC_EN_LPI |
+		FIELD_PREP(XGMAC_WR_OSR_LMT, 31) | XGMAC_UNDEF);
+
+	reg_write(priv, XGMAC_TX_EDMA_CTRL, 1);
+	reg_write(priv, XGMAC_RX_EDMA_CTRL, 1);
+
+	/* enable rx_queue 0 1 2 3 */
+	regmap_write(priv->ethsys, ETHSYS_RX_QUEUE_ENABLE, 0xAA);
+
+	/* Use static RX Queue to DMA mapping
+	 * queue 0 to channel 0
+	 * queue 1 to channel 1
+	 * queue 2 to channel 2
+	 * queue 3 to chennel 3
+	 * queue 4 to channel 4
+	*/
+	reg_write(priv, XGMAC_MTL_RXQ_DMA_MAP0, 0x03020100);
+	reg_write(priv, XGMAC_MTL_RXQ_DMA_MAP1, 0x4);
+
+	/* DMA Channel Configuration
+	 * TXQs share 8KB, RXQs share 16KB
+	 *
+	 * Configured queue size = 256B * (1 + (TQS or RQS field))
+	 *
+	 * The maximum limit of PBL is queue size / datawidth / 2
+	 *
+	 * TxPBL must be limited to 32 for Tx COE to work on 1500 MTU, see the
+	 * comment in xgmac_dma_xmit() for detail.
+	 */
+	for (i = 0; i < DMA_CH_MAX; i++) {
+		/* set RxPBL to 16 beats for ddr schedule with 128bit (16*8) */
+		reg_rmw(priv, XGMAC_DMA_CH_RX_CONTROL(i), XGMAC_RxPBL,
+			FIELD_PREP(XGMAC_RxPBL, 16));
+		/* set TxPBL to 16 beats for ddr schedule with 128bit (16*8) */
+		reg_rmw(priv, XGMAC_DMA_CH_TX_CONTROL(i), XGMAC_TxPBL,
+			FIELD_PREP(XGMAC_TxPBL, 16) | XGMAC_TSE | XGMAC_OSP);
+
+		/* Enable TX queue, store-and-forward mode
+		 * each queue size 2k bytes
+		 */
+		reg_rmw(priv, XGMAC_MTL_TXQ_OPMODE(i), XGMAC_TQS | XGMAC_TXQEN,
+			FIELD_PREP(XGMAC_TQS, 0x7) | XGMAC_TSF |
+				FIELD_PREP(XGMAC_TXQEN, 0x2));
+
+		/* Enable RX queue
+		 * each queue size 4k bytes
+		 */
+		reg_write(priv, XGMAC_MTL_RXQ_OPMODE(i),
+			  FIELD_PREP(XGMAC_RQS, 0xf));
+	}
+
+	return 0;
+}
+
+/* Return RX buffer @i's page to the page pool, if one is attached. */
+static void xgmac_dma_free_rx_buffer(struct xgmac_dma_priv *priv, struct xgmac_rxq *rxq, u32 i)
+{
+	struct xgmac_dma_rx_buffer *buf = &rxq->buf_pool[i];
+
+	if (buf->page) {
+		page_pool_put_full_page(rxq->page_pool, buf->page, false);
+		buf->page = NULL;
+	}
+}
+
+/* Allocate a page-pool fragment for RX slot @i and arm descriptor @p with
+ * its DMA address (plus BUF_PAD headroom). Returns 0 or -ENOMEM.
+ */
+static int xgmac_dma_init_rx_buffers(struct xgmac_dma_priv *priv, struct xgmac_rxq *rxq, u32 i,
+				     struct xgmac_dma_desc *p, u16 rx_alloc_size)
+{
+	struct xgmac_dma_rx_buffer *buf = &rxq->buf_pool[i];
+
+	buf->page = page_pool_dev_alloc_frag(rxq->page_pool, &buf->offset, rx_alloc_size);
+	if (!buf->page)
+		return -ENOMEM;
+
+	xgmac_dma_init_rx_desc(p, page_pool_get_dma_addr(buf->page) + buf->offset + BUF_PAD, false);
+
+	return 0;
+}
+
+/*
+ * Populate every RX ring with buffers, arming descriptors in pairs (even
+ * index = normal descriptor with a buffer, odd index = context descriptor
+ * with none). On allocation failure, unwind every buffer set up so far.
+ *
+ * Fix: the original unwind loop used "while (--i >= 0)" on an unsigned
+ * counter, which is always true — when i reached 0 it wrapped to UINT_MAX
+ * and freed far past the buffer pool. Count down with a wrap-safe test.
+ */
+static int xgmac_dma_init_rx_rings(struct xgmac_dma_priv *priv)
+{
+	u32 queue, i;
+	int ret;
+
+	for (queue = 0; queue < DMA_CH_MAX; queue++) {
+		struct xgmac_rxq *rxq = &priv->rxq[queue];
+
+		for (i = 0; i < DMA_RX_SIZE; i += 2) {
+			struct xgmac_dma_desc *p = &rxq->dma_rx[i];
+
+			ret = xgmac_dma_init_rx_buffers(priv, rxq, i, p, priv->rx_alloc_size);
+			if (ret)
+				goto err_init_rx_buffers;
+			/* No buffer space required by context descs */
+			xgmac_dma_init_rx_desc(p + 1, 0, false);
+		}
+
+		rxq->cur_rx = 0;
+		rxq->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
+	}
+
+	return 0;
+
+err_init_rx_buffers:
+	/* Free slots [0, i) of the failing queue, then all slots of every
+	 * fully-initialized queue before it. Freeing an odd (context) slot
+	 * is harmless: its page pointer is NULL.
+	 */
+	for (;;) {
+		while (i > 0)
+			xgmac_dma_free_rx_buffer(priv, &priv->rxq[queue], --i);
+
+		if (queue == 0)
+			break;
+
+		i = DMA_RX_SIZE;
+		queue--;
+	}
+
+	return ret;
+}
+
+/* Zero every TX descriptor and its bookkeeping state, and reset the ring
+ * cursors for all channels.
+ */
+static void xgmac_dma_init_tx_rings(struct xgmac_dma_priv *priv)
+{
+	u32 queue, i;
+
+	for (queue = 0; queue < DMA_CH_MAX; queue++) {
+		struct xgmac_txq *txq = &priv->txq[queue];
+
+		for (i = 0; i < DMA_TX_SIZE; i++) {
+			struct xgmac_dma_desc *p = &txq->dma_tx[i];
+
+			memset(p, 0, sizeof(*p));
+
+			txq->tx_skbuff_dma[i].buf = 0;
+			txq->tx_skbuff_dma[i].map_as_page = false;
+			txq->tx_skbuff_dma[i].len = 0;
+			txq->tx_skbuff_dma[i].last_segment = false;
+			txq->tx_skbuff[i] = NULL;
+		}
+
+		txq->dirty_tx = 0;
+		txq->cur_tx = 0;
+	}
+}
+
+/* Unmap TX slot @i's DMA buffer (page or single mapping) and free its skb,
+ * clearing the bookkeeping so the slot can be reused.
+ */
+static void xgmac_dma_free_tx_buffer(struct xgmac_txq *txq, u32 i)
+{
+	struct xgmac_dma_priv *priv = container_of(txq, struct xgmac_dma_priv,
+						   txq[txq->idx]);
+
+	if (txq->tx_skbuff_dma[i].buf) {
+		if (txq->tx_skbuff_dma[i].map_as_page)
+			dma_unmap_page(priv->dev,
+				       txq->tx_skbuff_dma[i].buf,
+				       txq->tx_skbuff_dma[i].len,
+				       DMA_TO_DEVICE);
+		else
+			dma_unmap_single(priv->dev,
+					 txq->tx_skbuff_dma[i].buf,
+					 txq->tx_skbuff_dma[i].len,
+					 DMA_TO_DEVICE);
+	}
+
+	if (txq->tx_skbuff[i]) {
+		dev_kfree_skb(txq->tx_skbuff[i]);
+		txq->tx_skbuff[i] = NULL;
+		txq->tx_skbuff_dma[i].buf = 0;
+		txq->tx_skbuff_dma[i].map_as_page = false;
+	}
+}
+
+/* Release every RX buffer of @rxq back to its page pool. */
+static void xgmac_dma_free_rx_skbufs(struct xgmac_dma_priv *priv, struct xgmac_rxq *rxq)
+{
+	u32 i;
+
+	for (i = 0; i < DMA_RX_SIZE; i++)
+		xgmac_dma_free_rx_buffer(priv, rxq, i);
+}
+
+/* Unmap and free every TX buffer/skb of @txq. */
+static void xgmac_dma_free_tx_skbufs(struct xgmac_txq *txq)
+{
+	u32 i;
+
+	for (i = 0; i < DMA_TX_SIZE; i++)
+		xgmac_dma_free_tx_buffer(txq, i);
+}
+
+/*
+ * Tear down all RX rings: release buffers, free the descriptor memory
+ * (SRAM gen_pool or coherent DMA depending on build config), then the
+ * buffer-pool array and page pool.
+ */
+static void xgmac_dma_free_rx_descs(struct xgmac_dma_priv *priv)
+{
+	u32 i;
+
+	/* Free RX queue resources */
+	for (i = 0; i < DMA_CH_MAX; i++) {
+		struct xgmac_rxq *rxq = &priv->rxq[i];
+
+		/* Release the DMA RX socket buffers */
+		xgmac_dma_free_rx_skbufs(priv, rxq);
+
+		/* Free DMA RX descs */
+#ifdef CONFIG_NET_SIFLOWER_ETH_USE_INTERNAL_SRAM
+		gen_pool_free(priv->genpool, (uintptr_t)rxq->dma_rx,
+			      DMA_RX_SIZE * sizeof(*rxq->dma_rx));
+#else
+		dma_free_coherent(priv->dev, DMA_RX_SIZE * sizeof(*rxq->dma_rx),
+				  (void *)rxq->dma_rx, rxq->dma_rx_phy);
+#endif
+
+		kfree(rxq->buf_pool);
+		if (rxq->page_pool)
+			page_pool_destroy(rxq->page_pool);
+	}
+}
+
+/*
+ * Tear down all TX rings. The is_busy flag is raised under the queue lock
+ * so a concurrent xgmac_dma_xmit() bails out instead of touching freed
+ * descriptor memory.
+ */
+static void xgmac_dma_free_tx_descs(struct xgmac_dma_priv *priv)
+{
+	u32 i;
+
+	/* Free TX queue resources */
+	for (i = 0; i < DMA_CH_MAX; i++) {
+		struct xgmac_txq *txq = &priv->txq[i];
+		spin_lock_bh(&txq->lock);
+		txq->is_busy = true;
+		spin_unlock_bh(&txq->lock);
+		/* Release the DMA TX socket buffers */
+		xgmac_dma_free_tx_skbufs(txq);
+
+		/* Free DMA TX descs */
+#ifdef CONFIG_NET_SIFLOWER_ETH_USE_INTERNAL_SRAM
+		gen_pool_free(priv->genpool, (uintptr_t)txq->dma_tx,
+			      DMA_TX_SIZE * sizeof(*txq->dma_tx));
+#else
+		dma_free_coherent(priv->dev, DMA_TX_SIZE * sizeof(*txq->dma_tx),
+				  txq->dma_tx, txq->dma_tx_phy);
+#endif
+		txq->dma_tx = NULL;
+		kfree(txq->tx_skbuff_dma);
+		txq->tx_skbuff_dma = NULL;
+		kfree(txq->tx_skbuff);
+		txq->tx_skbuff = NULL;
+
+		spin_lock_bh(&txq->lock);
+		txq->is_busy = false;
+		spin_unlock_bh(&txq->lock);
+	}
+}
+
+/*
+ * Allocate per-channel RX resources: a page pool, the buffer bookkeeping
+ * array, and the descriptor ring (SRAM gen_pool or coherent DMA depending
+ * on build config). On any failure everything allocated so far is freed.
+ */
+static int xgmac_dma_alloc_rx_descs(struct xgmac_dma_priv *priv)
+{
+	struct page_pool_params pp_params = {};
+	int ret = -ENOMEM;
+	u32 i;
+
+	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | PP_FLAG_PAGE_FRAG;
+	pp_params.pool_size = DMA_RX_SIZE;
+	pp_params.order = 0;
+	pp_params.max_len = PAGE_SIZE;
+	pp_params.nid = dev_to_node(priv->dev);
+	pp_params.dev = priv->dev;
+	pp_params.dma_dir = DMA_FROM_DEVICE;
+
+	/* RX queues buffers and DMA */
+	for (i = 0; i < DMA_CH_MAX; i++) {
+		struct xgmac_rxq *rxq = &priv->rxq[i];
+
+		rxq->page_pool = page_pool_create(&pp_params);
+		if (IS_ERR(rxq->page_pool)) {
+			ret = PTR_ERR(rxq->page_pool);
+			rxq->page_pool = NULL;
+			goto err_dma;
+		}
+
+		rxq->buf_pool = kcalloc(DMA_RX_SIZE, sizeof(*rxq->buf_pool),
+					GFP_KERNEL);
+		if (!rxq->buf_pool)
+			goto err_dma;
+
+#ifdef CONFIG_NET_SIFLOWER_ETH_USE_INTERNAL_SRAM
+		rxq->dma_rx = gen_pool_dma_alloc(priv->genpool, DMA_RX_SIZE *
+						 sizeof(*rxq->dma_rx),
+						 &rxq->dma_rx_phy);
+#else
+		rxq->dma_rx = dma_alloc_coherent(priv->dev, DMA_RX_SIZE *
+						 sizeof(*rxq->dma_rx),
+						 &rxq->dma_rx_phy, GFP_KERNEL);
+#endif
+		if (!rxq->dma_rx)
+			goto err_dma;
+	}
+
+	return 0;
+
+err_dma:
+	xgmac_dma_free_rx_descs(priv);
+	return ret;
+}
+
+/*
+ * Allocate per-channel TX resources: bookkeeping arrays and the descriptor
+ * ring (SRAM gen_pool or coherent DMA depending on build config). On any
+ * failure everything allocated so far is freed.
+ */
+static int xgmac_dma_alloc_tx_descs(struct xgmac_dma_priv *priv)
+{
+	int ret = -ENOMEM;
+	u32 i;
+
+	for (i = 0; i < DMA_CH_MAX; i++) {
+		struct xgmac_txq *txq = &priv->txq[i];
+
+		txq->tx_skbuff_dma = kcalloc(DMA_TX_SIZE,
+					     sizeof(*txq->tx_skbuff_dma),
+					     GFP_KERNEL);
+		if (!txq->tx_skbuff_dma)
+			goto err_dma;
+
+		txq->tx_skbuff = kcalloc(DMA_TX_SIZE,
+					 sizeof(struct sk_buff *),
+					 GFP_KERNEL);
+		if (!txq->tx_skbuff)
+			goto err_dma;
+
+#ifdef CONFIG_NET_SIFLOWER_ETH_USE_INTERNAL_SRAM
+		txq->dma_tx = gen_pool_dma_zalloc(priv->genpool, DMA_TX_SIZE *
+						  sizeof(*txq->dma_tx),
+						  &txq->dma_tx_phy);
+#else
+		txq->dma_tx = dma_alloc_coherent(priv->dev, DMA_TX_SIZE *
+						 sizeof(*txq->dma_tx),
+						 &txq->dma_tx_phy, GFP_KERNEL);
+#endif
+		if (!txq->dma_tx)
+			goto err_dma;
+	}
+
+	return 0;
+
+err_dma:
+	xgmac_dma_free_tx_descs(priv);
+	return ret;
+}
+
+/*
+ * Allocate and initialize all rings, then program the hardware (watchdog,
+ * buffer size, head/tail pointers, ring lengths), enable NAPI and per-channel
+ * interrupts, and start both DMA directions.
+ *
+ * NOTE(review): if xgmac_dma_init_rx_rings() or xgmac_dma_alloc_tx_descs()
+ * fails, the RX resources already allocated here are not released — verify
+ * whether the caller cleans up on error.
+ */
+static int xgmac_dma_enable(struct xgmac_dma_priv *priv)
+{
+	int ret;
+	u32 i;
+
+	ret = xgmac_dma_alloc_rx_descs(priv);
+	if (ret)
+		return ret;
+
+	ret = xgmac_dma_init_rx_rings(priv);
+	if (ret)
+		return ret;
+
+	ret = xgmac_dma_alloc_tx_descs(priv);
+	if (ret)
+		return ret;
+
+	xgmac_dma_init_tx_rings(priv);
+
+	for (i = 0; i < DMA_CH_MAX; i++) {
+		struct xgmac_rxq *rxq = &priv->rxq[i];
+		struct xgmac_txq *txq = &priv->txq[i];
+
+		/*
+		 * Initiate the WDT with packet count of 32 to enable IRQ
+		 * set watchdog timer 2048 * 100 * 2.5ns = 0.512ms
+		 * */
+		reg_write(priv, XGMAC_DMA_CH_Rx_WATCHDOG(i),
+			  XGMAC_PSEL | FIELD_PREP(XGMAC_RBCT, 32) |
+			  FIELD_PREP(XGMAC_RWTU, 3) | FIELD_PREP(XGMAC_RWT, 100));
+
+		/* Set RX buffer size */
+		reg_rmw(priv, XGMAC_DMA_CH_RX_CONTROL(i), XGMAC_RBSZ,
+			FIELD_PREP(XGMAC_RBSZ, priv->rx_buffer_size));
+
+		/* Set head pointer */
+		xgmac_dma_set_rx_head_ptr(priv, rxq->dma_rx_phy, i);
+		xgmac_dma_set_tx_head_ptr(priv, txq->dma_tx_phy, i);
+
+		/* Set tail pointer */
+		rxq->rx_tail_addr = DMA_RX_SIZE * sizeof(*rxq->dma_rx) +
+				    rxq->dma_rx_phy;
+		txq->tx_tail_addr = txq->dma_tx_phy;
+		xgmac_dma_set_rx_tail_ptr(priv, rxq->rx_tail_addr, i);
+		xgmac_dma_set_tx_tail_ptr(priv, txq->tx_tail_addr, i);
+
+		/* Set ring length */
+		reg_write(priv, XGMAC_DMA_CH_RxDESC_RING_LEN(i),
+			  ((DMA_RX_SIZE - 1) | (FIELD_PREP(XGMAC_OWRQ, 7))));
+		reg_write(priv, XGMAC_DMA_CH_TxDESC_RING_LEN(i),
+			  DMA_TX_SIZE - 1);
+
+		/* Enable NAPI poll */
+		napi_enable(&rxq->napi);
+		napi_enable(&txq->napi);
+
+		/* Enable interrupt */
+		reg_write(priv, XGMAC_DMA_CH_INT_EN(i),
+			  XGMAC_DMA_INT_DEFAULT_EN);
+
+		/* Enable MTL RX overflow interrupt */
+		reg_write(priv, XGMAC_MTL_QINTEN(i), XGMAC_RXOIE);
+
+		/* Start DMA */
+		reg_set(priv, XGMAC_DMA_CH_RX_CONTROL(i), XGMAC_RXST);
+		reg_set(priv, XGMAC_DMA_CH_TX_CONTROL(i), XGMAC_TXST);
+	}
+
+	return 0;
+}
+
+/*
+ * Reverse of xgmac_dma_enable(): mask interrupts, stop both DMA directions,
+ * quiesce NAPI, clear stale status bits, and free all ring resources.
+ */
+static void xgmac_dma_disable(struct xgmac_dma_priv *priv)
+{
+	u32 i;
+
+	for (i = 0; i < DMA_CH_MAX; i++) {
+		struct xgmac_rxq *rxq = &priv->rxq[i];
+		struct xgmac_txq *txq = &priv->txq[i];
+
+		/* Disable interrupts */
+		reg_write(priv, XGMAC_DMA_CH_INT_EN(i), 0);
+		reg_write(priv, XGMAC_MTL_QINTEN(i), 0);
+
+		/* Disable DMA transfer */
+		reg_clear(priv, XGMAC_DMA_CH_RX_CONTROL(i), XGMAC_RXST);
+		reg_clear(priv, XGMAC_DMA_CH_TX_CONTROL(i), XGMAC_TXST);
+
+		/* Disable NAPI poll */
+		napi_disable(&rxq->napi);
+		napi_disable(&txq->napi);
+
+		/* Clear all pending interrupts */
+		reg_write(priv, XGMAC_DMA_CH_STATUS(i), -1);
+		reg_write(priv, XGMAC_MTL_QINT_STATUS(i), -1);
+	}
+
+	/* Free resources */
+	xgmac_dma_free_rx_descs(priv);
+	xgmac_dma_free_tx_descs(priv);
+}
+
+/* Stop TX queue @queue on every registered netdev sharing this DMA.
+ * NOTE(review): the bound 6 is hard-coded while sibling loops iterate to
+ * DPNS_HOST_PORT — confirm they are meant to differ.
+ */
+static void xgmac_dma_stop_queue(struct xgmac_dma_priv *priv, int queue)
+{
+	int i;
+
+	for (i = 0; i < 6; i++) {
+		struct net_device *dev = priv->ndevs[i];
+
+		if (!dev)
+			continue;
+
+		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue));
+	}
+}
+
+/*
+ * Spread a TSO payload buffer of @pay_len bytes starting at @des across as
+ * many descriptors as needed, two TSO_MAX_BUFF_SIZE chunks per descriptor.
+ * IOC and LD are set only on the final descriptor of the last segment.
+ */
+static void xgmac_dma_tso_fill_desc(struct xgmac_txq *txq, dma_addr_t des,
+				    unsigned int pay_len, bool last_segment)
+{
+	struct xgmac_dma_desc *desc;
+	int tmp_len = pay_len;
+	u32 entry;
+
+	/* 1. put every 2 16K-1 buffers into one desc */
+	while (tmp_len > TSO_MAX_BUFF_SIZE) {
+		bool ld = (last_segment && tmp_len <= TSO_MAX_BUFF_SIZE * 2);
+		entry = txq->cur_tx;
+		entry = (entry + 1) % DMA_TX_SIZE;
+		txq->cur_tx = entry;
+		desc = &txq->dma_tx[entry];
+
+		desc->des0 = cpu_to_le32(des);
+		desc->des1 = cpu_to_le32(des + TSO_MAX_BUFF_SIZE);
+		desc->des2 = cpu_to_le32((ld ? XGMAC_TDES2_IOC : 0) | XGMAC_TDES2_B1L |
+					 FIELD_PREP(XGMAC_TDES2_B2L,
+						    min(tmp_len - TSO_MAX_BUFF_SIZE,
+						        TSO_MAX_BUFF_SIZE)));
+		desc->des3 = cpu_to_le32(XGMAC_TDES3_OWN |
+					 (ld ? XGMAC_TDES3_LD : 0));
+
+		tmp_len -= TSO_MAX_BUFF_SIZE * 2;
+		des += TSO_MAX_BUFF_SIZE * 2;
+	}
+	/* 2. put the last buffer, if exists */
+	if (tmp_len > 0) {
+		entry = txq->cur_tx;
+		entry = (entry + 1) % DMA_TX_SIZE;
+		txq->cur_tx = entry;
+		desc = &txq->dma_tx[entry];
+
+		desc->des0 = cpu_to_le32(des);
+		desc->des1 = cpu_to_le32(0);
+		desc->des2 = cpu_to_le32((last_segment ? XGMAC_TDES2_IOC : 0) |
+					 FIELD_PREP(XGMAC_TDES2_B1L, tmp_len));
+		desc->des3 = cpu_to_le32(XGMAC_TDES3_OWN |
+					 (last_segment ? XGMAC_TDES3_LD : 0));
+	}
+}
+
+/*
+ * Transmit a TSO (TCP segmentation offload) skb: emit a context descriptor
+ * carrying MSS and port routing, a first descriptor holding the protocol
+ * headers, then payload descriptors for the linear area and each fragment.
+ * The context descriptor's OWN bit is set last so the DMA sees a complete
+ * chain, and the tail pointer write kicks transmission.
+ *
+ * NOTE(review): the drop path frees the skb but does not unmap buffers
+ * already DMA-mapped for this packet, nor reclaim the descriptors written
+ * so far — verify against xgmac_dma_poll_tx()'s cleanup assumptions.
+ */
+static netdev_tx_t xgmac_dma_tso_xmit(struct sk_buff *skb,
+				      struct xgmac_dma_priv *priv)
+{
+	u16 queue = skb_get_queue_mapping(skb);
+	u16 channel = queue;
+	u32 size = (queue == DMA_CH_DISABLE) ? SZ_2K : SZ_1_5K;
+	struct xgmac_txq *txq = &priv->txq[queue];
+	struct xgmac_dma_desc *desc, *first, *ctxt;
+	struct xgmac_skb_cb *cb = XGMAC_SKB_CB(skb);
+	u8 nfrags = skb_shinfo(skb)->nr_frags;
+	struct net_device *dev = skb->dev;
+	u32 first_entry, entry, i, tdes0, pay_len;
+	u32 proto_hdr_len, hdr;
+	bool last_segment;
+	dma_addr_t des;
+	u16 mss;
+
+	hdr = tcp_hdrlen(skb);
+	proto_hdr_len = skb_transport_offset(skb) + hdr;
+	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
+
+	spin_lock(&txq->lock);
+	/* Desc availability based on threshold should be enough safe */
+	if (unlikely(xgmac_dma_tx_avail(txq) <
+		     max_t(u32, nfrags, (skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE) + 2)) {
+		xgmac_dma_stop_queue(priv, queue);
+		pr_debug_ratelimited("%s: Tx Ring full when queue awake\n",
+			__func__);
+		spin_unlock(&txq->lock);
+		return NETDEV_TX_BUSY;
+	}
+
+	mss = skb_shinfo(skb)->gso_size;
+	/* The header length + MSS + TxPBL must be less than Tx Queue size */
+	mss = min_t(u16, mss, size - 16 - proto_hdr_len - 1);
+
+	entry = txq->cur_tx;
+	desc = &txq->dma_tx[entry];
+	ctxt = desc;
+	/* Prepare TX context descriptor */
+	tdes0 = XGMAC_TDES0_FAST_MODE |
+		FIELD_PREP(XGMAC_TDES0_OVPORT, cb->id) |
+		FIELD_PREP(XGMAC_TDES0_IVPORT, DPNS_HOST_PORT);
+	ctxt->des0 = cpu_to_le32(tdes0);
+	ctxt->des1 = 0;
+	ctxt->des2 = cpu_to_le32(mss);
+
+	entry = (entry + 1) % DMA_TX_SIZE;
+	txq->cur_tx = first_entry = entry;
+	desc = &txq->dma_tx[entry];
+	first = desc;
+
+	/* first descriptor: fill Headers on Buf1 */
+	last_segment = (nfrags == 0);
+	des = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
+			     DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(priv->dev, des)))
+		goto drop;
+
+	txq->tx_skbuff_dma[first_entry].buf = des;
+	txq->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
+	first->des0 = cpu_to_le32(des);
+
+	/* Fill start of payload in buff2 of first descriptor */
+	first->des1 = cpu_to_le32(pay_len ? des + proto_hdr_len : 0);
+	if (pay_len > TSO_MAX_BUFF_SIZE) {
+		/* Need more descs if the buffer size > 16383 */
+		pay_len -= TSO_MAX_BUFF_SIZE;
+		first->des2 =
+			cpu_to_le32(FIELD_PREP(XGMAC_TDES2_B1L, proto_hdr_len) |
+				    XGMAC_TDES2_B2L);
+		des += proto_hdr_len + TSO_MAX_BUFF_SIZE;
+	} else {
+		first->des2 =
+			cpu_to_le32((last_segment ? XGMAC_TDES2_IOC : 0) |
+				    FIELD_PREP(XGMAC_TDES2_B1L, proto_hdr_len) |
+				    FIELD_PREP(XGMAC_TDES2_B2L, pay_len));
+		pay_len = 0;
+	}
+	first->des3 = cpu_to_le32(
+		XGMAC_TDES3_OWN | XGMAC_TDES3_FD |
+		(last_segment && !pay_len ? XGMAC_TDES3_LD : 0) |
+		FIELD_PREP(XGMAC_TDES3_THL, hdr / 4) | XGMAC_TDES3_TSE |
+		FIELD_PREP(XGMAC_TDES3_TPL, skb->len - proto_hdr_len));
+
+	/* Put the remaining headlen buffer */
+	xgmac_dma_tso_fill_desc(txq, des, pay_len, last_segment);
+	entry = txq->cur_tx;
+	/* Prepare fragments */
+	for (i = 0; i < nfrags; i++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		des = skb_frag_dma_map(priv->dev, frag, 0, skb_frag_size(frag),
+				       DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(priv->dev, des)))
+			goto drop;
+
+		last_segment = (i == nfrags - 1);
+
+		xgmac_dma_tso_fill_desc(txq, des, skb_frag_size(frag), last_segment);
+		entry = txq->cur_tx;
+		txq->tx_skbuff_dma[entry].buf = des;
+		txq->tx_skbuff_dma[entry].len = skb_frag_size(frag);
+		txq->tx_skbuff_dma[entry].map_as_page = true;
+	}
+	txq->tx_skbuff_dma[entry].last_segment = true;
+	/* Only the last descriptor gets to point to the skb. */
+	txq->tx_skbuff[entry] = skb;
+
+	/* We've used all descriptors we need for this skb, however,
+	 * advance cur_tx so that it references a fresh descriptor.
+	 * ndo_start_xmit will fill this descriptor the next time it's
+	 * called and xgmac_dma_poll_tx may clean up to this descriptor.
+	 */
+	entry = (entry + 1) % DMA_TX_SIZE;
+	txq->cur_tx = entry;
+
+	if (unlikely(xgmac_dma_tx_avail(txq) <= (MAX_SKB_FRAGS + 1)))
+		xgmac_dma_stop_queue(priv, queue);
+
+	skb_tx_timestamp(skb);
+	/* Release the context descriptor last; dma_wmb() orders all prior
+	 * descriptor writes before the tail-pointer kick below.
+	 */
+	ctxt->des3 = cpu_to_le32(XGMAC_TDES3_OWN | XGMAC_TDES3_CTXT |
+				 XGMAC_TDES3_TCMSSV | XGMAC_TDES3_PIDV);
+	dma_wmb();
+	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
+	txq->tx_tail_addr = txq->dma_tx_phy + txq->cur_tx * sizeof(*desc);
+	xgmac_dma_set_tx_tail_ptr(priv, txq->tx_tail_addr, channel);
+	spin_unlock(&txq->lock);
+	return NETDEV_TX_OK;
+drop:
+	dev->stats.tx_dropped++;
+	spin_unlock(&txq->lock);
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+
+netdev_tx_t xgmac_dma_xmit(struct sk_buff *skb, struct xgmac_dma_priv *priv)
+{
+	u16 queue = skb_get_queue_mapping(skb);
+	u16 channel = queue;
+	u32 size = (queue == DMA_CH_DISABLE) ? SZ_2K : SZ_1_5K;
+	struct xgmac_txq *txq = &priv->txq[queue];
+	u8 nfrags = skb_shinfo(skb)->nr_frags;
+	struct xgmac_dma_desc *desc, *first, *ctxt;
+	struct xgmac_skb_cb *cb = XGMAC_SKB_CB(skb);
+	struct net_device *dev = skb->dev;
+	u32 nopaged_len = skb_headlen(skb);
+	u32 first_entry, entry, i, tdes0, cic = 0;
+	bool last_segment;
+	dma_addr_t des;
+
+	if (skb_is_gso(skb) && skb_is_gso_tcp(skb))
+		return xgmac_dma_tso_xmit(skb, priv);
+
+	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+		/* Tx COE only works with packets that are LESS THAN the
+		 * following number of bytes in size:
+		 * TXQ SIZE in bytes – ((PBL + 5)*(DATAWIDTH in bytes))
+		 *
+		 * Thus for queue size of 2K, Tx COE of 1500 MTU works only if
+		 * PBL <= 32.
+		 */
+		const unsigned int txcoeovh = (16 + 5) * 8;
+
+		if (unlikely(skb->len >= size - txcoeovh)) {
+			if (unlikely(skb_checksum_help(skb)))
+				goto drop_kfree;
+		} else {
+			cic = XGMAC_TDES3_CIC;
+		}
+	}
+
+	spin_lock(&txq->lock);
+	if (txq->is_busy || (NULL == txq->dma_tx) || (NULL == txq->tx_skbuff_dma) || (NULL == txq->tx_skbuff)) {
+		spin_unlock(&txq->lock);
+		return NETDEV_TX_BUSY;
+	}
+
+	/* We need at least 2 + nfrags free TX descriptors to xmit a packet */
+	if (unlikely(xgmac_dma_tx_avail(txq) < nfrags + 2)) {
+		xgmac_dma_stop_queue(priv, queue);
+		pr_debug_ratelimited("%s: Tx Ring full when queue awake\n",
+			__func__);
+		spin_unlock(&txq->lock);
+		return NETDEV_TX_BUSY;
+	}
+
+	entry = txq->cur_tx;
+	desc = &txq->dma_tx[entry];
+	ctxt = desc;
+
+	/* Prepare TX context descriptor */
+	if (cb->fastmode)
+		tdes0 = XGMAC_TDES0_FAST_MODE |
+			FIELD_PREP(XGMAC_TDES0_OVPORT, cb->id) |
+			FIELD_PREP(XGMAC_TDES0_IVPORT, DPNS_HOST_PORT);
+	else
+		tdes0 = FIELD_PREP(XGMAC_TDES0_OVPORT, DPNS_HOST_PORT) |
+			FIELD_PREP(XGMAC_TDES0_IVPORT, cb->id);
+
+	ctxt->des0 = cpu_to_le32(tdes0);
+	ctxt->des1 = 0;
+	ctxt->des2 = 0;
+
+	entry = (entry + 1) % DMA_TX_SIZE;
+	first_entry = entry;
+	desc = &txq->dma_tx[entry];
+	first = desc;
+
+	for (i = 0; i < nfrags; i++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		u32 len = skb_frag_size(frag);
+
+		last_segment = (i == (nfrags - 1));
+		entry = (entry + 1) % DMA_TX_SIZE;
+		desc = &txq->dma_tx[entry];
+		des = skb_frag_dma_map(priv->dev, frag, 0, len, DMA_TO_DEVICE);
+		if (dma_mapping_error(priv->dev, des))
+			goto drop;
+
+		txq->tx_skbuff_dma[entry].buf = des;
+
+		xgmac_dma_set_tx_desc_addr(desc, des);
+
+		txq->tx_skbuff_dma[entry].map_as_page = true;
+		txq->tx_skbuff_dma[entry].len = len;
+		txq->tx_skbuff_dma[entry].last_segment = last_segment;
+		/* Prepare the descriptor and set the own bit too */
+		desc->des2 = cpu_to_le32(FIELD_PREP(XGMAC_TDES2_B1L, len) |
+					 (last_segment ? XGMAC_TDES2_IOC : 0));
+		desc->des3 = cpu_to_le32(XGMAC_TDES3_OWN | cic |
+					 (last_segment ? XGMAC_TDES3_LD : 0) |
+					 FIELD_PREP(XGMAC_TDES3_FL, skb->len));
+	}
+
+	/* Only the last descriptor gets to point to the skb. */
+	txq->tx_skbuff[entry] = skb;
+
+	/* We've used all descriptors we need for this skb, however,
+	 * advance cur_tx so that it references a fresh descriptor.
+	 * ndo_start_xmit will fill this descriptor the next time it's
+	 * called and xgmac_dma_poll_tx may clean up to this descriptor.
+	 */
+	entry = (entry + 1) % DMA_TX_SIZE;
+	txq->cur_tx = entry;
+
+	if (unlikely(xgmac_dma_tx_avail(txq) <= (MAX_SKB_FRAGS + 1)))
+		xgmac_dma_stop_queue(priv, queue);
+
+	skb_tx_timestamp(skb);
+
+	/* Ready to fill the first descriptor and set the OWN bit w/o any
+	 * problems because all the descriptors are actually ready to be
+	 * passed to the DMA engine.
+	 */
+	last_segment = (nfrags == 0);
+	des = dma_map_single(priv->dev, skb->data, nopaged_len, DMA_TO_DEVICE);
+	if (dma_mapping_error(priv->dev, des))
+		goto drop;
+
+	txq->tx_skbuff_dma[first_entry].buf = des;
+	txq->tx_skbuff_dma[first_entry].len = nopaged_len;
+	txq->tx_skbuff_dma[first_entry].last_segment = last_segment;
+	/* Prepare the first descriptor setting the OWN bit too */
+	xgmac_dma_set_tx_desc_addr(first, des);
+	first->des2 = cpu_to_le32(FIELD_PREP(XGMAC_TDES2_B1L, nopaged_len) |
+				  (last_segment ? XGMAC_TDES2_IOC : 0));
+	first->des3 = cpu_to_le32(XGMAC_TDES3_OWN | XGMAC_TDES3_FD | cic |
+				  (last_segment ? XGMAC_TDES3_LD : 0) |
+				  FIELD_PREP(XGMAC_TDES3_FL, skb->len));
+
+	ctxt->des3 = cpu_to_le32(XGMAC_TDES3_OWN | XGMAC_TDES3_CTXT | XGMAC_TDES3_PIDV);
+	/* The descriptor must be set before tail poiner update and then barrier
+	 * is needed to make sure that all is coherent before granting the
+	 * DMA engine.
+	 */
+	dma_wmb();
+
+	if (dev)
+		netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
+	txq->tx_tail_addr = txq->dma_tx_phy + txq->cur_tx * sizeof(*desc);
+	xgmac_dma_set_tx_tail_ptr(priv, txq->tx_tail_addr, channel);
+	spin_unlock(&txq->lock);
+	return NETDEV_TX_OK;
+
+drop:
+	if (dev)
+		dev->stats.tx_dropped++;
+	spin_unlock(&txq->lock);
+drop_kfree:
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+netdev_tx_t xgmac_dma_xmit_fast(struct sk_buff *skb, struct net_device *dev)
+{
+	struct gmac_common *priv = netdev_priv(dev);
+	struct xgmac_skb_cb *cb = XGMAC_SKB_CB(skb);
+	netdev_tx_t ret;
+
+	cb->id = priv->id;
+	cb->fastmode = true;
+
+	ret = xgmac_dma_xmit(skb, priv->dma);
+	if (unlikely(ret != NETDEV_TX_OK)) {
+		pr_debug_ratelimited("%s: Tx Ring full when queue awake\n",
+			__func__);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(xgmac_dma_xmit_fast);
+
+int xgmac_dma_open(struct xgmac_dma_priv *priv, struct net_device *dev, u8 id)
+{
+	if (id >= ARRAY_SIZE(priv->ndevs))
+		return -EINVAL;
+
+	if (priv->ndevs[id])
+		return -EBUSY;
+
+	priv->ndevs[id] = dev;
+
+	/* we run multiple netdevs on the same DMA ring so we only bring it
+	 * up once
+	 */
+	if (!refcount_read(&priv->refcnt)) {
+		int ret = xgmac_dma_enable(priv);
+		if (ret) {
+			priv->ndevs[id] = NULL;
+			return ret;
+		}
+
+		refcount_set(&priv->refcnt, 1);
+	} else {
+		refcount_inc(&priv->refcnt);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(xgmac_dma_open);
+
+int xgmac_dma_stop(struct xgmac_dma_priv *priv, struct net_device *dev, u8 id)
+{
+	if (id >= ARRAY_SIZE(priv->ndevs) || priv->ndevs[id] != dev)
+		return -EINVAL;
+
+	/* only shutdown DMA if this is the last user */
+	if (refcount_dec_and_test(&priv->refcnt))
+		xgmac_dma_disable(priv);
+
+	priv->ndevs[id] = NULL;
+
+	return 0;
+}
+EXPORT_SYMBOL(xgmac_dma_stop);
+
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_PAGE_POOL_STATS)
+static int xgmac_dma_stats_show(struct seq_file *m, void *v)
+{
+	struct xgmac_dma_priv *priv = m->private;
+	int i;
+
+	for (i = 0; i < DMA_CH_MAX; i++) {
+		struct page_pool_stats stats = {};
+
+		page_pool_get_stats(priv->rxq[i].page_pool, &stats);
+		seq_printf(m, "RX alloc statistics:\n"
+			"fast:\t%llu\n"
+			"slow:\t%llu\n"
+			"empty:\t%llu\n"
+			"refill:\t%llu\n"
+			"waive:\t%llu\n",
+			stats.alloc_stats.fast, stats.alloc_stats.slow,
+			stats.alloc_stats.empty, stats.alloc_stats.refill,
+			stats.alloc_stats.waive);
+
+		seq_printf(m, "RX recycle statistics:\n"
+			"cached:\t%llu\n"
+			"cache_full:\t%llu\n"
+			"ring:\t%llu\n"
+			"ring_full:\t%llu\n"
+			"released_refcnt:\t%llu\n",
+			stats.recycle_stats.cached, stats.recycle_stats.cache_full,
+			stats.recycle_stats.ring, stats.recycle_stats.ring_full,
+			stats.recycle_stats.released_refcnt);
+	}
+	return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(xgmac_dma_stats);
+#endif
+
+static int xgmac_dma_debug_show(struct seq_file *m, void *v)
+{
+	struct xgmac_dma_priv *priv = m->private;
+	int i, j;
+
+	for (i = 0; i < DMA_CH_MAX; i++) {
+		j = (i == DMA_CH_DISABLE) ? DMA_OVPORT_CH : i;
+		spin_lock_bh(&(priv->txq[i].lock));
+		seq_printf(m, "txq %d curr:%d dirty:%d\n",
+				i, priv->txq[i].cur_tx, priv->txq[i].dirty_tx);
+		seq_printf(m, "      MTL Opmode:0x%x debug:0x%x\n",
+				reg_read(priv, XGMAC_MTL_TXQ_OPMODE(j)),
+				reg_read(priv, XGMAC_MTL_TXQ_DEBUG(j)));
+		seq_printf(m, "rxq %d curr:%d dirty:%d\n",
+				i, priv->rxq[i].cur_rx, priv->rxq[i].dirty_rx);
+		seq_printf(m, "      MTL Opmode:0x%x debug:0x%x\n",
+				reg_read(priv, XGMAC_MTL_RXQ_OPMODE(j)),
+				reg_read(priv, XGMAC_MTL_RXQ_DEBUG(j)));
+		spin_unlock_bh(&(priv->txq[i].lock));
+
+		seq_printf(m, "DMA channel %d status:0x%x debug sts:0x%x\n", j,
+				reg_read(priv, XGMAC_DMA_CH_STATUS(j)),
+				reg_read(priv, XGMAC_DMA_CH_DEBUG_STATUS(j)));
+		seq_printf(m, "              TxDesc HAddr:0x%x TAddr:0x%x\n",
+				reg_read(priv, XGMAC_DMA_CH_TxDESC_LADDR(j)),
+				reg_read(priv, XGMAC_DMA_CH_TxDESC_TAIL_LPTR(j)));
+		seq_printf(m, "                     Cur desAddr:0x%x bufAddr:0x%x\n",
+				reg_read(priv, XGMAC_DMA_CH_CUR_TxDESC_LADDR(j)),
+				reg_read(priv, XGMAC_DMA_CH_CUR_TxBUFF_LADDR(j)));
+		seq_printf(m, "              RxDesc HAddr:0x%x TAddr:0x%x\n",
+				reg_read(priv, XGMAC_DMA_CH_RxDESC_LADDR(j)),
+				reg_read(priv, XGMAC_DMA_CH_RxDESC_TAIL_LPTR(j)));
+		seq_printf(m, "                     Cur desAddr:0x%x bufAddr:0x%x\n",
+				reg_read(priv, XGMAC_DMA_CH_CUR_RxDESC_LADDR(j)),
+				reg_read(priv, XGMAC_DMA_CH_CUR_RxBUFF_LADDR(j)));
+	}
+
+	seq_printf(m, "DMA debug sts0:0x%x sts1:0x%x sts3:0x%x\n",
+			reg_read(priv, XGMAC_DMA_DEBUG_STATUS(0)),
+			reg_read(priv, XGMAC_DMA_DEBUG_STATUS(1)),
+			reg_read(priv, XGMAC_DMA_DEBUG_STATUS(3)));
+	return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(xgmac_dma_debug);
+
+static int xgmac_dma_probe(struct platform_device *pdev)
+{
+	struct xgmac_dma_priv *priv;
+	const char *irq_name;
+	char buf[8];
+	int ret;
+	u32 i;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->dev = &pdev->dev;
+	priv->ioaddr = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(priv->ioaddr))
+		return PTR_ERR(priv->ioaddr);
+
+	/* Request all clocks at once */
+	priv->clks[DMA_CLK_AXI].id = "axi";
+	priv->clks[DMA_CLK_NPU].id = "npu";
+	priv->clks[DMA_CLK_CSR].id = "csr";
+	ret = devm_clk_bulk_get(&pdev->dev, DMA_NUM_CLKS, priv->clks);
+	if (ret)
+		return ret;
+
+#ifdef CONFIG_NET_SIFLOWER_ETH_USE_INTERNAL_SRAM
+	priv->genpool = of_gen_pool_get(pdev->dev.of_node, "iram", 0);
+	if (!priv->genpool)
+		return -ENODEV;
+#endif
+
+	priv->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+						       "ethsys");
+	if (IS_ERR(priv->ethsys))
+		return PTR_ERR(priv->ethsys);
+
+	/* lif/hif reset and release reset*/
+	ret = regmap_clear_bits(priv->ethsys, ETHSYS_RST, BIT(7) | BIT(8));
+	if (ret)
+		return ret;
+
+	ret = regmap_set_bits(priv->ethsys, ETHSYS_RST, BIT(7) | BIT(8));
+	if (ret)
+		return ret;
+
+	/* set the mapping mode 0
+	 * hash random use queue 0-3
+	 */
+	ret = regmap_write(priv->ethsys, ETHSYS_MRI_Q_EN, 0x003F003F);
+
+	/* we run multiple netdevs on the same DMA ring so we need a dummy
+	 * device for NAPI to work
+	 */
+	init_dummy_netdev(&priv->napi_dev);
+
+	/* DMA IRQ */
+	ret = platform_get_irq_byname(pdev, "sbd");
+	if (ret < 0)
+		return ret;
+
+	priv->irq = ret;
+	ret = devm_request_irq(&pdev->dev, ret, xgmac_dma_irq_misc, 0,
+			       "xgmac_dma_sbd", priv);
+	if (ret)
+		return ret;
+
+	irq_set_affinity_hint(priv->irq, cpumask_of(1));
+
+	/* TX IRQ */
+	for (i = 0; i < DMA_CH_MAX; i++) {
+		snprintf(buf, sizeof(buf), "tx%u", i);
+		ret = platform_get_irq_byname(pdev, buf);
+		if (ret < 0)
+			goto out_napi_del;
+
+		priv->txq[i].irq = ret;
+
+		irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+					  "xgmac_dma_txq%u", i);
+		if (!irq_name) {
+			ret = -ENOMEM;
+			goto out_napi_del;
+		}
+
+		ret = devm_request_irq(&pdev->dev, ret, xgmac_dma_irq_tx, 0,
+				       irq_name, &priv->txq[i]);
+		if (ret)
+			goto out_napi_del;
+
+		priv->txq[i].idx = i;
+		spin_lock_init(&priv->txq[i].lock);
+		netif_napi_add_tx_weight(&priv->napi_dev, &priv->txq[i].napi,
+				  xgmac_dma_napi_tx, NAPI_POLL_WEIGHT);
+		irq_set_affinity_hint(priv->txq[i].irq, cpumask_of(i % NR_CPUS));
+	}
+
+	/* RX IRQ */
+#ifdef CONFIG_NET_SIFLOWER_ETH_RX_THREAD
+	strscpy(priv->napi_dev.name, KBUILD_MODNAME, IFNAMSIZ);
+	priv->napi_dev.threaded = 1;
+#endif
+	for (i = 0; i < DMA_CH_MAX; i++) {
+		snprintf(buf, sizeof(buf), "rx%u", i);
+		ret = platform_get_irq_byname(pdev, buf);
+		if (ret < 0)
+			goto out_napi_del;
+
+		priv->rxq[i].irq = ret;
+
+		irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+					  "xgmac_dma_rxq%u", i);
+		if (!irq_name) {
+			ret = -ENOMEM;
+			goto out_napi_del;
+		}
+
+		ret = devm_request_irq(&pdev->dev, ret, xgmac_dma_irq_rx, 0,
+				       irq_name, &priv->rxq[i]);
+		if (ret)
+			goto out_napi_del;
+
+		priv->rxq[i].idx = i;
+		netif_napi_add_weight(&priv->napi_dev, &priv->rxq[i].napi,
+			       xgmac_dma_napi_rx, NAPI_POLL_WEIGHT);
+		irq_set_affinity_hint(priv->rxq[i].irq, cpumask_of(i % NR_CPUS));
+	}
+
+	priv->rx_alloc_size = BUF_SIZE_ALLOC(ETH_DATA_LEN);
+	priv->rx_buffer_size = BUF_SIZE_ALIGN(ETH_DATA_LEN);
+	platform_set_drvdata(pdev, priv);
+	ret = clk_bulk_prepare_enable(DMA_NUM_CLKS, priv->clks);
+	if (ret)
+		goto out_napi_del;
+
+	ret = xgmac_dma_init(priv);
+	if (ret)
+		goto out_clk_disable;
+
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_PAGE_POOL_STATS)
+	priv->dbgdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+	if (IS_ERR(priv->dbgdir)) {
+		ret = PTR_ERR(priv->dbgdir);
+		goto out_clk_disable;
+	}
+	debugfs_create_file("rx_stats", 0444, priv->dbgdir, priv, &xgmac_dma_stats_fops);
+	debugfs_create_file("debug", 0444, priv->dbgdir, priv, &xgmac_dma_debug_fops);
+#endif
+
+	return ret;
+out_clk_disable:
+	clk_bulk_disable_unprepare(DMA_NUM_CLKS, priv->clks);
+out_napi_del:
+	for (i = 0; i < DMA_CH_MAX; i++) {
+		irq_set_affinity_hint(priv->rxq[i].irq, NULL);
+		irq_set_affinity_hint(priv->txq[i].irq, NULL);
+		netif_napi_del(&priv->rxq[i].napi);
+		netif_napi_del(&priv->txq[i].napi);
+	}
+	return ret;
+}
+
+static void xgmac_dma_remove(struct platform_device *pdev)
+{
+	struct xgmac_dma_priv *priv = platform_get_drvdata(pdev);
+	int i;
+
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_PAGE_POOL_STATS)
+	debugfs_remove(priv->dbgdir);
+#endif
+	xgmac_dma_soft_reset(priv);
+	clk_bulk_disable_unprepare(DMA_NUM_CLKS, priv->clks);
+	for (i = 0; i < DMA_CH_MAX; i++) {
+		irq_set_affinity_hint(priv->rxq[i].irq, NULL);
+		irq_set_affinity_hint(priv->txq[i].irq, NULL);
+		netif_napi_del(&priv->rxq[i].napi);
+		netif_napi_del(&priv->txq[i].napi);
+	}
+}
+
+static const struct of_device_id xgmac_dma_match[] = {
+	{ .compatible = "siflower,sf21-xgmac-dma" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, xgmac_dma_match);
+
+static struct platform_driver xgmac_dma_driver = {
+	.probe	= xgmac_dma_probe,
+	.remove_new	= xgmac_dma_remove,
+	.driver	= {
+		.name		= "sfxgmac_dma",
+		.of_match_table	= xgmac_dma_match,
+	},
+};
+module_platform_driver(xgmac_dma_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Qingfang Deng <[email protected]>");
+MODULE_DESCRIPTION("Ethernet DMA driver for SF21A6826/SF21H8898 SoC");

+ 31 - 0
target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/sfxgmac-ext.h

@@ -0,0 +1,31 @@
+#ifndef __XGMAC_EXT_H_
+#define __XGMAC_EXT_H_
+
+#include <linux/clk.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/phylink.h>
+
+#define SF_GMAC_DUNMMY_ID 0xfa
+
+#define GMAC_COMMON_STRUCT 			\
+	void __iomem *ioaddr;			\
+	struct device *dev; 			\
+	struct clk *csr_clk;			\
+	struct xgmac_dma_priv *dma; 		\
+	struct regmap *ethsys; 			\
+	struct phylink *phylink; 		\
+	struct phylink_config phylink_config; 	\
+	u8 id; 					\
+	bool phy_supports_eee;			\
+	bool tx_lpi_enabled;			\
+	struct platform_device *pcs_dev;	\
+	void *dp_port;
+
+struct gmac_common {
+	GMAC_COMMON_STRUCT;
+};
+
+#endif /* __XGMAC_EXT_H_ */

+ 1324 - 0
target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/sfxgmac.c

@@ -0,0 +1,1324 @@
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/ethtool.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/crc32.h>
+
+#include "dma.h"
+#include "eth.h"
+#include "sfxgmac-ext.h"
+
+struct xgmac_mib_desc {
+	char name[ETH_GSTRING_LEN];
+	u16 offset;
+};
+#define MIB_DESC(n, o, s) { .name = (n), .offset = (o), }
+
+static const struct xgmac_mib_desc xgmac_mib[] = {
+	MIB_DESC("tx_bytes",	MMC_XGMAC_TX_OCTET_GB, 1),
+	MIB_DESC("tx_packets",	MMC_XGMAC_TX_PKT_GB, 1),
+	MIB_DESC("tx_broadcast_packets_good",	MMC_XGMAC_TX_BROAD_PKT_G, 1),
+	MIB_DESC("tx_multicast_packets_good",	MMC_XGMAC_TX_MULTI_PKT_G, 1),
+	MIB_DESC("tx_64_byte_packets",	MMC_XGMAC_TX_64OCT_GB, 1),
+	MIB_DESC("tx_65_to_127_byte_packets",	MMC_XGMAC_TX_65OCT_GB, 1),
+	MIB_DESC("tx_128_to_255_byte_packets",	MMC_XGMAC_TX_128OCT_GB, 1),
+	MIB_DESC("tx_256_to_511_byte_packets",	MMC_XGMAC_TX_256OCT_GB, 1),
+	MIB_DESC("tx_512_to_1023_byte_packets",	MMC_XGMAC_TX_512OCT_GB, 1),
+	MIB_DESC("tx_1024_to_max_byte_packets",	MMC_XGMAC_TX_1024OCT_GB, 1),
+	MIB_DESC("tx_unicast_packets",	MMC_XGMAC_TX_UNI_PKT_GB, 1),
+	MIB_DESC("tx_multicast_packets",	MMC_XGMAC_TX_MULTI_PKT_GB, 1),
+	MIB_DESC("tx_broadcast_packets",	MMC_XGMAC_TX_BROAD_PKT_GB, 1),
+	MIB_DESC("tx_underflow_errors",	MMC_XGMAC_TX_UNDER, 1),
+	MIB_DESC("tx_bytes_good",	MMC_XGMAC_TX_OCTET_G, 1),
+	MIB_DESC("tx_packets_good",	MMC_XGMAC_TX_PKT_G, 1),
+	MIB_DESC("tx_pause_frames",	MMC_XGMAC_TX_PAUSE, 1),
+	MIB_DESC("tx_vlan_packets_good",	MMC_XGMAC_TX_VLAN_PKT_G, 1),
+	MIB_DESC("tx_lpi_usec",	MMC_XGMAC_TX_LPI_USEC, 0),
+	MIB_DESC("tx_lpi_tran",	MMC_XGMAC_TX_LPI_TRAN, 0),
+	MIB_DESC("rx_packets",	MMC_XGMAC_RX_PKT_GB, 1),
+	MIB_DESC("rx_bytes",	MMC_XGMAC_RX_OCTET_GB, 1),
+	MIB_DESC("rx_bytes_good",	MMC_XGMAC_RX_OCTET_G, 1),
+	MIB_DESC("rx_broadcast_packets_good",	MMC_XGMAC_RX_BROAD_PKT_G, 1),
+	MIB_DESC("rx_multicast_packets_good",	MMC_XGMAC_RX_MULTI_PKT_G, 1),
+	MIB_DESC("rx_crc_errors",	MMC_XGMAC_RX_CRC_ERR, 1),
+	MIB_DESC("rx_crc_errors_small_packets",	MMC_XGMAC_RX_RUNT_ERR, 0),
+	MIB_DESC("rx_crc_errors_giant_packets",	MMC_XGMAC_RX_JABBER_ERR, 0),
+	MIB_DESC("rx_undersize_packets_good",	MMC_XGMAC_RX_UNDER, 0),
+	MIB_DESC("rx_oversize_packets_good",	MMC_XGMAC_RX_OVER, 0),
+	MIB_DESC("rx_64_byte_packets",	MMC_XGMAC_RX_64OCT_GB, 1),
+	MIB_DESC("rx_65_to_127_byte_packets",	MMC_XGMAC_RX_65OCT_GB, 1),
+	MIB_DESC("rx_128_to_255_byte_packets",	MMC_XGMAC_RX_128OCT_GB, 1),
+	MIB_DESC("rx_256_to_511_byte_packets",	MMC_XGMAC_RX_256OCT_GB, 1),
+	MIB_DESC("rx_512_to_1023_byte_packets",	MMC_XGMAC_RX_512OCT_GB, 1),
+	MIB_DESC("rx_1024_to_max_byte_packets",	MMC_XGMAC_RX_1024OCT_GB, 1),
+	MIB_DESC("rx_unicast_packets_good",	MMC_XGMAC_RX_UNI_PKT_G, 1),
+	MIB_DESC("rx_length_errors",	MMC_XGMAC_RX_LENGTH_ERR, 1),
+	MIB_DESC("rx_out_of_range_errors",	MMC_XGMAC_RX_RANGE, 1),
+	MIB_DESC("rx_pause_frames",	MMC_XGMAC_RX_PAUSE, 1),
+	MIB_DESC("rx_fifo_overflow_errors",	MMC_XGMAC_RX_FIFOOVER_PKT, 1),
+	MIB_DESC("rx_vlan_packets",	MMC_XGMAC_RX_VLAN_PKT_GB, 1),
+	MIB_DESC("rx_watchdog_errors",	MMC_XGMAC_RX_WATCHDOG_ERR, 0),
+	MIB_DESC("rx_lpi_usec",	MMC_XGMAC_RX_LPI_USEC, 0),
+	MIB_DESC("rx_lpi_tran",	MMC_XGMAC_RX_LPI_TRAN, 0),
+	MIB_DESC("rx_discard_packets",	MMC_XGMAC_RX_DISCARD_PKT_GB, 1),
+	MIB_DESC("rx_discard_bytes",	MMC_XGMAC_RX_DISCARD_OCT_GB, 1),
+	MIB_DESC("rx_alignment_errors",	MMC_XGMAC_RX_ALIGN_ERR_PKT, 0),
+	MIB_DESC("tx_single_collision_packets",	MMC_XGMAC_TX_SINGLE_COL_G, 0),
+	MIB_DESC("tx_multiple_collision_packets",	MMC_XGMAC_TX_MULTI_COL_G, 0),
+	MIB_DESC("tx_deferred_packets",	MMC_XGMAC_TX_DEFER, 0),
+	MIB_DESC("tx_late_collision_errors",	MMC_XGMAC_TX_LATE_COL, 0),
+	MIB_DESC("tx_excessive_collision_errors",	MMC_XGMAC_TX_EXCESSIVE_COL, 0),
+	MIB_DESC("tx_carrier_sense_errors",	MMC_XGMAC_TX_CARRIER, 0),
+	MIB_DESC("tx_excessive_deferral_errors",	MMC_XGMAC_TX_EXCESSIVE_DEFER, 0),
+	MIB_DESC("rx_ipv4_packets_good",	MMC_XGMAC_RX_IPV4_PKT_G, 1),
+	MIB_DESC("rx_ipv4_header_error_packets",	MMC_XGMAC_RX_IPV4_HDRERR_PKT, 1),
+	MIB_DESC("rx_ipv4_no_payload_packets",	MMC_XGMAC_RX_IPV4_NOPAY_PKT, 1),
+	MIB_DESC("rx_ipv4_fragment_packets",	MMC_XGMAC_RX_IPV4_FRAG_PKT, 1),
+	MIB_DESC("rx_ipv4_udp_sum_zero_packets",	MMC_XGMAC_RX_IPV4_UDSBL_PKT, 1),
+	MIB_DESC("rx_ipv6_packets_good",	MMC_XGMAC_RX_IPV6_PKT_G, 1),
+	MIB_DESC("rx_ipv6_header_error_packets",	MMC_XGMAC_RX_IPV6_HDRERR_PKT, 1),
+	MIB_DESC("rx_ipv6_no_payload_packets",	MMC_XGMAC_RX_IPV6_NOPAY_PKT, 1),
+	MIB_DESC("rx_udp_packets_good",	MMC_XGMAC_RX_UDP_PKT_G, 1),
+	MIB_DESC("rx_udp_sum_error_packets",	MMC_XGMAC_RX_UDP_ERR_PKT, 1),
+	MIB_DESC("rx_tcp_packets_good",	MMC_XGMAC_RX_TCP_PKT_G, 1),
+	MIB_DESC("rx_tcp_sum_error_packets",	MMC_XGMAC_RX_TCP_ERR_PKT, 1),
+	MIB_DESC("rx_icmp_packets_good",	MMC_XGMAC_RX_ICMP_PKT_G, 1),
+	MIB_DESC("rx_icmp_sum_error_packets",	MMC_XGMAC_RX_ICMP_ERR_PKT, 1),
+	MIB_DESC("rx_ipv4_bytes_good",	MMC_XGMAC_RX_IPV4_OCTET_G, 1),
+	MIB_DESC("rx_ipv4_header_error_bytes",	MMC_XGMAC_RX_IPV4_HDRERR_OCTET, 1),
+	MIB_DESC("rx_ipv4_no_payload_bytes",	MMC_XGMAC_RX_IPV4_NOPAY_OCTET, 1),
+	MIB_DESC("rx_ipv4_fragment_bytes",	MMC_XGMAC_RX_IPV4_FRAG_OCTET, 1),
+	MIB_DESC("rx_ipv4_udp_sum_zero_bytes",	MMC_XGMAC_RX_IPV4_UDSBL_OCTET, 1),
+	MIB_DESC("rx_ipv6_bytes_good",	MMC_XGMAC_RX_IPV6_OCTET_G, 1),
+	MIB_DESC("rx_ipv6_header_error_bytes",	MMC_XGMAC_RX_IPV6_HDRERR_OCTET, 1),
+	MIB_DESC("rx_ipv6_no_payload_bytes",	MMC_XGMAC_RX_IPV6_NOPAY_OCTET, 1),
+	MIB_DESC("rx_udp_bytes_good",	MMC_XGMAC_RX_UDP_OCTET_G, 1),
+	MIB_DESC("rx_udp_sum_error_bytes",	MMC_XGMAC_RX_UDP_ERR_OCTET, 1),
+	MIB_DESC("rx_tcp_bytes_good",	MMC_XGMAC_RX_TCP_OCTET_G, 1),
+	MIB_DESC("rx_tcp_sum_error_bytes",	MMC_XGMAC_RX_TCP_ERR_OCTET, 1),
+	MIB_DESC("rx_icmp_bytes_good",	MMC_XGMAC_RX_ICMP_OCTET_G, 1),
+	MIB_DESC("rx_icmp_sum_error_bytes",	MMC_XGMAC_RX_ICMP_ERR_OCTET, 1),
+};
+
+struct xgmac_priv {
+	GMAC_COMMON_STRUCT;
+	struct mii_bus *mii;
+	wait_queue_head_t mdio_wait;
+	spinlock_t stats_lock;
+	u32 mdio_ctrl;
+	int sbd_irq;
+	char irq_name[16];
+	u64 mib_cache[ARRAY_SIZE(xgmac_mib)];
+	struct phylink_pcs *pcs;
+};
+
+// Used by ndo_get_stats64, don't change this without changing xgmac_mib[]!
+enum {
+	STATS64_TX_PKT_GB = 1,
+	STATS64_TX_UNDER = 13,
+	STATS64_TX_OCTET_G = 14,
+	STATS64_TX_PKT_G = 15,
+	STATS64_RX_PKT_GB = 20,
+	STATS64_RX_OCTET_G = 22,
+	STATS64_RX_BROAD_PKT_G = 23,
+	STATS64_RX_MULTI_PKT_G = 24,
+	STATS64_RX_CRC_ERR = 25,
+	STATS64_RX_UNI_PKT_G = 36,
+	STATS64_RX_LENGTH_ERR = 37,
+	STATS64_RX_RANGE = 38,
+	STATS64_RX_FIFOOVER_PKT = 40,
+	STATS64_RX_ALIGN_ERR_PKT = 47,
+	STATS64_TX_SINGLE_COL_G = 48,
+	STATS64_TX_MULTI_COL_G = 49,
+	STATS64_TX_LATE_COL = 51,
+	STATS64_TX_EXCESSIVE_COL = 52,
+	STATS64_TX_CARRIER = 53,
+};
+
+// Sync MIB counters to software. Caller must hold stats_lock.
+static void xgmac_mib_sync(struct xgmac_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(xgmac_mib); i++)
+		priv->mib_cache[i] += reg_read(priv, xgmac_mib[i].offset);
+}
+
+static void xgmac_mib_irq_enable(struct xgmac_priv *priv)
+{
+	reg_write(priv, MMC_XGMAC_RX_INT_EN, ~0);
+	reg_write(priv, MMC_XGMAC_TX_INT_EN, ~0);
+	reg_write(priv, MMC_XGMAC_RX_IPC_INTR_MASK, 0);
+}
+
+static void xgmac_mib_irq_disable(struct xgmac_priv *priv)
+{
+	reg_write(priv, MMC_XGMAC_RX_INT_EN, 0);
+	reg_write(priv, MMC_XGMAC_TX_INT_EN, 0);
+	reg_write(priv, MMC_XGMAC_RX_IPC_INTR_MASK, ~0);
+}
+
+static void xgmac_mib_sync_begin(struct xgmac_priv *priv)
+{
+	xgmac_mib_irq_disable(priv);
+	spin_lock(&priv->stats_lock);
+}
+
+static void xgmac_mib_sync_end(struct xgmac_priv *priv)
+{
+	spin_unlock(&priv->stats_lock);
+	xgmac_mib_irq_enable(priv);
+}
+
+static int xgmac_mdio_wait(struct xgmac_priv *priv)
+{
+	unsigned long ret;
+	u32 val;
+
+	ret = wait_event_timeout(priv->mdio_wait,
+				 !((val = reg_read(priv, XGMAC_MDIO_DATA)) & MII_XGMAC_BUSY),
+				 HZ);
+	if (ret)
+		return FIELD_GET(MII_DATA_MASK, val);
+
+	return -ETIMEDOUT;
+}
+
+static int xgmac_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+	struct xgmac_priv *priv = bus->priv;
+	u32 reg;
+	int ret;
+
+	ret = xgmac_mdio_wait(priv);
+	if (ret < 0)
+		return ret;
+
+	reg_set(priv, XGMAC_MDIO_C22P, BIT(addr));
+
+	reg = FIELD_PREP(MII_XGMAC_PA, addr) | FIELD_PREP(MII_XGMAC_RA, regnum);
+	reg_write(priv, XGMAC_MDIO_ADDR, reg);
+
+	reg = MII_XGMAC_BUSY | MII_XGMAC_READ | priv->mdio_ctrl;
+	reg_write(priv, XGMAC_MDIO_DATA, reg);
+
+	return xgmac_mdio_wait(priv);
+}
+
+static int xgmac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
+{
+	struct xgmac_priv *priv = bus->priv;
+	u32 reg;
+	int ret;
+
+	ret = xgmac_mdio_wait(priv);
+	if (ret < 0)
+		return ret;
+
+	reg_set(priv, XGMAC_MDIO_C22P, BIT(addr));
+
+	reg = FIELD_PREP(MII_XGMAC_PA, addr) | FIELD_PREP(MII_XGMAC_RA, regnum);
+	reg_write(priv, XGMAC_MDIO_ADDR, reg);
+
+	reg = MII_XGMAC_BUSY | MII_XGMAC_WRITE | priv->mdio_ctrl |
+	      FIELD_PREP(MII_DATA_MASK, val);
+	reg_write(priv, XGMAC_MDIO_DATA, reg);
+
+	ret = xgmac_mdio_wait(priv);
+	return ret < 0 ? ret : 0;
+}
+
+static int xgmac_mdio_read_c45(struct mii_bus *bus, int addr, int devnum, int regnum)
+{
+	struct xgmac_priv *priv = bus->priv;
+	u32 reg;
+	int ret;
+
+	ret = xgmac_mdio_wait(priv);
+	if (ret < 0)
+		return ret;
+
+	reg_clear(priv, XGMAC_MDIO_C22P, BIT(addr));
+
+	reg = FIELD_PREP(MII_XGMAC_PA, addr) | FIELD_PREP(MII_XGMAC_DA, devnum) |
+	      FIELD_PREP(MII_XGMAC_RA, regnum);
+	reg_write(priv, XGMAC_MDIO_ADDR, reg);
+
+	reg = MII_XGMAC_BUSY | MII_XGMAC_READ | priv->mdio_ctrl;
+	reg_write(priv, XGMAC_MDIO_DATA, reg);
+
+	return xgmac_mdio_wait(priv);
+}
+
+static int xgmac_mdio_write_c45(struct mii_bus *bus, int addr, int devnum, int regnum, u16 val)
+{
+	struct xgmac_priv *priv = bus->priv;
+	u32 reg;
+	int ret;
+
+	ret = xgmac_mdio_wait(priv);
+	if (ret < 0)
+		return ret;
+
+	reg_clear(priv, XGMAC_MDIO_C22P, BIT(addr));
+
+	reg = FIELD_PREP(MII_XGMAC_PA, addr) | FIELD_PREP(MII_XGMAC_DA, devnum) |
+	      FIELD_PREP(MII_XGMAC_RA, regnum);
+	reg_write(priv, XGMAC_MDIO_ADDR, reg);
+
+	reg = MII_XGMAC_BUSY | MII_XGMAC_WRITE | priv->mdio_ctrl |
+	      FIELD_PREP(MII_DATA_MASK, val);
+	reg_write(priv, XGMAC_MDIO_DATA, reg);
+
+	ret = xgmac_mdio_wait(priv);
+	return ret < 0 ? ret : 0;
+}
+
+static int xgmac_mdio_init(struct xgmac_priv *priv)
+{
+	struct device_node *mdio_node = NULL, *np = priv->dev->of_node;
+	u32 csr_freq = clk_get_rate(priv->csr_clk);
+	int ret = -ENOMEM;
+	u32 freq;
+
+	mdio_node = of_get_child_by_name(np, "mdio");
+	if (!mdio_node)
+		return 0;
+
+	if (of_property_read_bool(mdio_node, "suppress-preamble"))
+		priv->mdio_ctrl |= MII_XGMAC_PSE;
+
+	if (of_property_read_u32(mdio_node, "clock-frequency", &freq))
+		freq = 2500000;
+
+	if (freq > DIV_ROUND_UP(csr_freq, 4))
+		dev_warn(priv->dev,
+			 "MDC frequency %uHz too high, reducing to %uHz\n",
+			 freq, DIV_ROUND_UP(csr_freq, 4));
+	else if (freq < csr_freq / 202)
+		dev_warn(priv->dev,
+			 "MDC frequency %uHz too low, increasing to %uHz\n",
+			 freq, csr_freq / 202);
+
+	if (freq >= DIV_ROUND_UP(csr_freq, 4))
+		priv->mdio_ctrl |= MII_XGMAC_CRS | FIELD_PREP(MII_XGMAC_CR, 0);
+	else if (freq >= DIV_ROUND_UP(csr_freq, 6))
+		priv->mdio_ctrl |= MII_XGMAC_CRS | FIELD_PREP(MII_XGMAC_CR, 1);
+	else if (freq >= DIV_ROUND_UP(csr_freq, 8))
+		priv->mdio_ctrl |= MII_XGMAC_CRS | FIELD_PREP(MII_XGMAC_CR, 2);
+	else if (freq >= DIV_ROUND_UP(csr_freq, 10))
+		priv->mdio_ctrl |= MII_XGMAC_CRS | FIELD_PREP(MII_XGMAC_CR, 3);
+	else if (freq >= DIV_ROUND_UP(csr_freq, 12))
+		priv->mdio_ctrl |= MII_XGMAC_CRS | FIELD_PREP(MII_XGMAC_CR, 4);
+	else if (freq >= DIV_ROUND_UP(csr_freq, 14))
+		priv->mdio_ctrl |= MII_XGMAC_CRS | FIELD_PREP(MII_XGMAC_CR, 5);
+	else if (freq >= DIV_ROUND_UP(csr_freq, 16))
+		priv->mdio_ctrl |= MII_XGMAC_CRS | FIELD_PREP(MII_XGMAC_CR, 6);
+	else if (freq >= DIV_ROUND_UP(csr_freq, 18))
+		priv->mdio_ctrl |= MII_XGMAC_CRS | FIELD_PREP(MII_XGMAC_CR, 7);
+	else if (freq >= DIV_ROUND_UP(csr_freq, 62))
+		priv->mdio_ctrl |= FIELD_PREP(MII_XGMAC_CR, 0);
+	else if (freq >= DIV_ROUND_UP(csr_freq, 102))
+		priv->mdio_ctrl |= FIELD_PREP(MII_XGMAC_CR, 1);
+	else if (freq >= DIV_ROUND_UP(csr_freq, 122))
+		priv->mdio_ctrl |= FIELD_PREP(MII_XGMAC_CR, 2);
+	else if (freq >= DIV_ROUND_UP(csr_freq, 142))
+		priv->mdio_ctrl |= FIELD_PREP(MII_XGMAC_CR, 3);
+	else if (freq >= DIV_ROUND_UP(csr_freq, 162))
+		priv->mdio_ctrl |= FIELD_PREP(MII_XGMAC_CR, 4);
+	else
+		priv->mdio_ctrl |= FIELD_PREP(MII_XGMAC_CR, 5);
+
+	priv->mii = devm_mdiobus_alloc(priv->dev);
+	if (!priv->mii)
+		goto cleanup;
+
+	priv->mii->name = "xgmac";
+	priv->mii->priv = priv;
+	priv->mii->read = xgmac_mdio_read;
+	priv->mii->write = xgmac_mdio_write;
+	priv->mii->read_c45 = xgmac_mdio_read_c45;
+	priv->mii->write_c45 = xgmac_mdio_write_c45;
+	snprintf(priv->mii->id, MII_BUS_ID_SIZE, "xgmac%u", priv->id);
+	init_waitqueue_head(&priv->mdio_wait);
+	reg_write(priv, XGMAC_MDIO_INT_EN, XGMAC_MDIO_INT_EN_SINGLE);
+
+	ret = devm_of_mdiobus_register(priv->dev, priv->mii, mdio_node);
+cleanup:
+	of_node_put(mdio_node);
+	return ret;
+}
+
+static irqreturn_t xgmac_irq(int irq, void *dev_id)
+{
+	struct xgmac_priv *priv = dev_id;
+	irqreturn_t ret = IRQ_NONE;
+	u32 status;
+
+	status = reg_read(priv, XGMAC_INT_STATUS);
+	if (status & XGMAC_MMCIS) {
+		if (spin_trylock(&priv->stats_lock)) {
+			xgmac_mib_sync(priv);
+			spin_unlock(&priv->stats_lock);
+		}
+		ret = IRQ_HANDLED;
+	}
+	if (status & XGMAC_SMI) {
+		reg_read(priv, XGMAC_MDIO_INT_STATUS);
+		wake_up(&priv->mdio_wait);
+		ret = IRQ_HANDLED;
+	}
+	/* LSI is set regardless of LSIE bit in XGMAC_INT_EN */
+	if (status & XGMAC_LSI && reg_read(priv, XGMAC_INT_EN) & XGMAC_LSI) {
+		phylink_mac_change(priv->phylink, status & XGMAC_RGMII_LS);
+		ret = IRQ_HANDLED;
+	}
+
+	return ret;
+}
+
+static void xgmac_write_mac_addr(struct xgmac_priv *priv, const u8 *addr,
+				 u32 reg)
+{
+	u32 val;
+
+	/* For MAC Addr registers we have to set the Address Enable (AE)
+	 * bit that has no effect on the High Reg 0 where the bit 31 (MO)
+	 * is RO.
+	 */
+	val = GMAC_HI_REG_AE | (addr[5] << 8) | addr[4];
+	reg_write(priv, XGMAC_ADDRx_HIGH(reg), val);
+	val = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+	reg_write(priv, XGMAC_ADDRx_LOW(reg), val);
+}
+
+static int xgmac_open(struct net_device *dev)
+{
+	struct xgmac_priv *priv = netdev_priv(dev);
+	int ret;
+
+	ret = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0);
+	if (ret)
+		return ret;
+
+	ret = xgmac_dma_open(priv->dma, dev, priv->id);
+	if (ret) {
+		phylink_disconnect_phy(priv->phylink);
+		return ret;
+	}
+
+	phylink_start(priv->phylink);
+	netif_tx_start_all_queues(dev);
+
+	return 0;
+}
+
+static int xgmac_stop(struct net_device *dev)
+{
+	struct xgmac_priv *priv = netdev_priv(dev);
+
+	phylink_stop(priv->phylink);
+	netif_tx_stop_all_queues(dev);
+	phylink_disconnect_phy(priv->phylink);
+
+	return xgmac_dma_stop(priv->dma, dev, priv->id);
+}
+
+static void xgmac_set_rx_mode(struct net_device *dev)
+{
+	struct xgmac_priv *priv = netdev_priv(dev);
+	unsigned int value = 0;
+	unsigned int perfect_addr_number = 4;
+
+	pr_debug("%s: # mcasts %d, # unicast %d\n", __func__,
+		 netdev_mc_count(dev), netdev_uc_count(dev));
+
+	if (dev->flags & IFF_PROMISC) {
+		value = XGMAC_FILTER_PR;
+	} else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
+		value = XGMAC_FILTER_PM;	/* pass all multi */
+	}
+
+	/* Handle multiple unicast addresses (perfect filtering) */
+	if (netdev_uc_count(dev) > perfect_addr_number)
+		/* Switch to promiscuous mode if more than unicast
+		 * addresses are requested than supported by hardware.
+		 */
+		value |= XGMAC_FILTER_PR;
+	else {
+		int reg = 1;
+		struct netdev_hw_addr *ha;
+
+		netdev_for_each_uc_addr(ha, dev) {
+			xgmac_write_mac_addr(priv, ha->addr, reg);
+			reg++;
+		}
+
+		while (reg <= perfect_addr_number) {
+			reg_write(priv, XGMAC_ADDRx_HIGH(reg), 0);
+			reg_write(priv, XGMAC_ADDRx_LOW(reg), 0);
+			reg++;
+		}
+	}
+
+#ifdef FRAME_FILTER_DEBUG
+	/* Enable Receive all mode (to debug filtering_fail errors) */
+	value |= XGMAC_FILTER_RA;
+#endif
+	reg_write(priv, XGMAC_PACKET_FILTER, value);
+}
+
+/* ndo_set_mac_address: validate/store the new address via eth_mac_addr(),
+ * then mirror it into perfect-filter slot 0 (the port's own address).
+ */
+static int xgmac_set_mac_address(struct net_device *dev, void *p)
+{
+	struct xgmac_priv *priv = netdev_priv(dev);
+	int ret;
+
+	ret = eth_mac_addr(dev, p);
+	if (ret)
+		return ret;
+
+	xgmac_write_mac_addr(priv, dev->dev_addr, 0);
+
+	return 0;
+}
+
+/* ioctl handler: forward MII ioctls to phylink/the attached PHY.
+ * NOTE(review): on v6.6 these ioctls reach the driver through
+ * .ndo_eth_ioctl — verify which ndo hook this is wired to.
+ */
+static int xgmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct xgmac_priv *priv = netdev_priv(dev);
+
+	return phylink_mii_ioctl(priv->phylink, ifr, cmd);
+}
+
+/* ndo_set_features: apply toggled offload features to MAC registers.
+ *
+ * Only features whose state actually changed (diff) are acted upon:
+ *  - NETIF_F_LOOPBACK  -> XGMAC_CONFIG_LM (internal MAC loopback)
+ *  - NETIF_F_RXFCS     -> clears ACS/CST so the FCS is NOT stripped
+ *  - NETIF_F_RXCSUM    -> XGMAC_CONFIG_IPC (HW Rx checksum)
+ *  - NETIF_F_RXALL     -> DCRCC + filter RA (deliver bad-CRC frames)
+ */
+static int xgmac_set_features(struct net_device *dev,
+			      netdev_features_t features)
+{
+	netdev_features_t diff = dev->features ^ features;
+	struct xgmac_priv *priv = netdev_priv(dev);
+	u32 ctrl = reg_read(priv, XGMAC_RX_CONFIG);
+
+	if (diff & NETIF_F_LOOPBACK) {
+		if (features & NETIF_F_LOOPBACK) {
+			netdev_info(dev, "MAC internal loopback enabled\n");
+			ctrl |= XGMAC_CONFIG_LM;
+		} else {
+			netdev_info(dev, "MAC internal loopback disabled\n");
+			ctrl &= ~XGMAC_CONFIG_LM;
+		}
+	}
+
+	if (diff & NETIF_F_RXFCS) {
+		if (features & NETIF_F_RXFCS) {
+			netdev_info(dev, "MAC FCS stripping disabled\n");
+			ctrl &= ~(XGMAC_CONFIG_ACS | XGMAC_CONFIG_CST);
+		} else {
+			netdev_info(dev, "MAC FCS stripping enabled\n");
+			ctrl |= XGMAC_CONFIG_ACS | XGMAC_CONFIG_CST;
+		}
+	}
+
+	if (diff & NETIF_F_RXCSUM) {
+		if (features & NETIF_F_RXCSUM) {
+			netdev_info(dev, "MAC Rx checksum offload enabled\n");
+			ctrl |= XGMAC_CONFIG_IPC;
+		} else {
+			netdev_info(dev, "MAC Rx checksum offload disabled\n");
+			ctrl &= ~XGMAC_CONFIG_IPC;
+		}
+	}
+
+	if (diff & NETIF_F_RXALL) {
+		u32 rff = reg_read(priv, XGMAC_PACKET_FILTER);
+
+		if (features & NETIF_F_RXALL) {
+			ctrl |= XGMAC_CONFIG_DCRCC;
+			rff |= XGMAC_FILTER_RA;
+		} else {
+			ctrl &= ~XGMAC_CONFIG_DCRCC;
+			rff &= ~XGMAC_FILTER_RA;
+		}
+		reg_write(priv, XGMAC_PACKET_FILTER, rff);
+	}
+	reg_write(priv, XGMAC_RX_CONFIG, ctrl);
+
+	return 0;
+}
+
+/* ndo_get_phys_port_id: one-byte port id = this MAC's hardware index */
+static int xgmac_get_phys_port_id(struct net_device *dev,
+					  struct netdev_phys_item_id *ppid)
+{
+	struct xgmac_priv *priv = netdev_priv(dev);
+
+	ppid->id[0] = priv->id;
+	ppid->id_len = 1;
+
+	return 0;
+}
+
+/* ndo_get_port_parent_id: all ports report the same one-byte parent id so
+ * the stack treats them as belonging to one switch device.
+ */
+static int xgmac_get_port_parent_id(struct net_device *dev,
+					  struct netdev_phys_item_id *ppid)
+{
+	ppid->id[0] = SF_GMAC_DUNMMY_ID;
+	ppid->id_len = 1;
+
+	return 0;
+}
+
+/* ndo_neigh_destroy: placeholder; intended to notify the DPNS offload
+ * engine when a neighbour entry dies (see TODO below). Currently a no-op.
+ */
+static void xgmac_neigh_destroy(struct net_device *dev,
+				      struct neighbour *n)
+{
+	// struct xgmac_priv *priv = netdev_priv(dev);
+
+	/** TODO: call dpns->ops->port_neigh_destroy(dp_port, n); */
+	return;
+}
+/* ndo_get_stats64: derive rtnl link stats from the hardware MMC counters.
+ *
+ * The BUILD_BUG_ONs pin the STATS64_* indices of xgmac_mib[] to the MMC
+ * register offsets they mirror, so the mib_cache reads below stay valid
+ * if someone reorders the table. xgmac_mib_sync_begin/_end bracket the
+ * cache access (presumably under priv->stats_lock — defined elsewhere).
+ */
+static void xgmac_get_stats64(struct net_device *dev,
+			      struct rtnl_link_stats64 *stats)
+{
+	struct xgmac_priv *priv = netdev_priv(dev);
+	const u64 *mib;
+
+	// Compile-time check in case someone changes the order
+	BUILD_BUG_ON(xgmac_mib[STATS64_TX_PKT_GB].offset != MMC_XGMAC_TX_PKT_GB);
+	BUILD_BUG_ON(xgmac_mib[STATS64_TX_UNDER].offset != MMC_XGMAC_TX_UNDER);
+	BUILD_BUG_ON(xgmac_mib[STATS64_TX_OCTET_G].offset != MMC_XGMAC_TX_OCTET_G);
+	BUILD_BUG_ON(xgmac_mib[STATS64_TX_PKT_G].offset != MMC_XGMAC_TX_PKT_G);
+	BUILD_BUG_ON(xgmac_mib[STATS64_RX_PKT_GB].offset != MMC_XGMAC_RX_PKT_GB);
+	BUILD_BUG_ON(xgmac_mib[STATS64_RX_OCTET_G].offset != MMC_XGMAC_RX_OCTET_G);
+	BUILD_BUG_ON(xgmac_mib[STATS64_RX_BROAD_PKT_G].offset != MMC_XGMAC_RX_BROAD_PKT_G);
+	BUILD_BUG_ON(xgmac_mib[STATS64_RX_MULTI_PKT_G].offset != MMC_XGMAC_RX_MULTI_PKT_G);
+	BUILD_BUG_ON(xgmac_mib[STATS64_RX_CRC_ERR].offset != MMC_XGMAC_RX_CRC_ERR);
+	BUILD_BUG_ON(xgmac_mib[STATS64_RX_UNI_PKT_G].offset != MMC_XGMAC_RX_UNI_PKT_G);
+	BUILD_BUG_ON(xgmac_mib[STATS64_RX_LENGTH_ERR].offset != MMC_XGMAC_RX_LENGTH_ERR);
+	BUILD_BUG_ON(xgmac_mib[STATS64_RX_RANGE].offset != MMC_XGMAC_RX_RANGE);
+	BUILD_BUG_ON(xgmac_mib[STATS64_RX_FIFOOVER_PKT].offset != MMC_XGMAC_RX_FIFOOVER_PKT);
+	BUILD_BUG_ON(xgmac_mib[STATS64_RX_ALIGN_ERR_PKT].offset != MMC_XGMAC_RX_ALIGN_ERR_PKT);
+	BUILD_BUG_ON(xgmac_mib[STATS64_TX_SINGLE_COL_G].offset != MMC_XGMAC_TX_SINGLE_COL_G);
+	BUILD_BUG_ON(xgmac_mib[STATS64_TX_MULTI_COL_G].offset != MMC_XGMAC_TX_MULTI_COL_G);
+	BUILD_BUG_ON(xgmac_mib[STATS64_TX_LATE_COL].offset != MMC_XGMAC_TX_LATE_COL);
+	BUILD_BUG_ON(xgmac_mib[STATS64_TX_EXCESSIVE_COL].offset != MMC_XGMAC_TX_EXCESSIVE_COL);
+	BUILD_BUG_ON(xgmac_mib[STATS64_TX_CARRIER].offset != MMC_XGMAC_TX_CARRIER);
+
+	xgmac_mib_sync_begin(priv);
+	xgmac_mib_sync(priv);
+	mib = priv->mib_cache;
+	/* "good" packets = unicast + multicast + broadcast received OK;
+	 * errors = (good+bad) totals minus the good counts.
+	 */
+	stats->rx_packets = mib[STATS64_RX_BROAD_PKT_G] +
+			    mib[STATS64_RX_MULTI_PKT_G] +
+			    mib[STATS64_RX_UNI_PKT_G];
+	stats->tx_packets = mib[STATS64_TX_PKT_G];
+	stats->rx_bytes = mib[STATS64_RX_OCTET_G];
+	stats->tx_bytes = mib[STATS64_TX_OCTET_G];
+	stats->rx_errors = mib[STATS64_RX_PKT_GB] - stats->rx_packets;
+	stats->tx_errors = mib[STATS64_TX_PKT_GB] - stats->tx_packets;
+	stats->multicast = mib[STATS64_RX_BROAD_PKT_G] +
+			   mib[STATS64_RX_MULTI_PKT_G];
+	stats->collisions = mib[STATS64_TX_SINGLE_COL_G] +
+			    mib[STATS64_TX_MULTI_COL_G];
+	stats->rx_length_errors = mib[STATS64_RX_LENGTH_ERR] +
+				  mib[STATS64_RX_RANGE];
+	stats->rx_over_errors = mib[STATS64_RX_FIFOOVER_PKT];
+	stats->rx_crc_errors = mib[STATS64_RX_CRC_ERR];
+	stats->rx_frame_errors = mib[STATS64_RX_ALIGN_ERR_PKT];
+	stats->tx_aborted_errors = mib[STATS64_TX_EXCESSIVE_COL];
+	stats->tx_carrier_errors = mib[STATS64_TX_CARRIER];
+	stats->tx_fifo_errors = mib[STATS64_TX_UNDER];
+	stats->tx_window_errors = mib[STATS64_TX_LATE_COL];
+	xgmac_mib_sync_end(priv);
+}
+
+/* ndo_change_mtu: program the giant-packet size limit and the Rx
+ * watchdog / Tx jabber cutoffs to match the new MTU.
+ * NOTE(review): dev->mtu is assigned directly; recent kernels prefer
+ * WRITE_ONCE(dev->mtu, new_mtu) — confirm against the target tree.
+ */
+static int xgmac_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct xgmac_priv *priv = netdev_priv(dev);
+	u32 step;
+
+	dev->mtu = new_mtu;
+
+	/* Configure GPSL field in XGMAC_RX_CONFIG. L2 header and FCS must
+	 * be taken into account. For a MTU of 1500 bytes, this field is set
+	 * to 1518. For VLAN tagged packets, the hardware adds 4 bytes
+	 * (single tagged) or 8 bytes (double tagged) to the programmed value.
+	 */
+	new_mtu += ETH_HLEN + ETH_FCS_LEN;
+	reg_rmw(priv, XGMAC_RX_CONFIG, XGMAC_CONFIG_GPSL,
+		FIELD_PREP(XGMAC_CONFIG_GPSL, new_mtu) | XGMAC_CONFIG_GPSLCE);
+
+	/* Configure Rx watchdog and Tx jabber threshold, with 1KB step.
+	 * Unlike the GPSL field, the hardware does not count the VLAN tag
+	 * overhead so it must be manually added.
+	 */
+	new_mtu = min(new_mtu + VLAN_HLEN * 2, MAX_FRAME_SIZE);
+	step = DIV_ROUND_UP((u32)new_mtu, SZ_1K);
+	// The step begins at 2KB
+	step = step < 2 ? 0 : step - 2;
+	reg_write(priv, XGMAC_WD_JB_TIMEOUT, XGMAC_PJE | XGMAC_PWE |
+		  FIELD_PREP(XGMAC_JTO, step) | FIELD_PREP(XGMAC_WTO, step));
+
+	return 0;
+}
+
+/* Net device operations.
+ *
+ * The MII ioctl handler is wired to .ndo_eth_ioctl: since v5.15 the
+ * kernel dispatches SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG through
+ * ndo_eth_ioctl, so hooking phylink_mii_ioctl() via .ndo_do_ioctl
+ * would leave those ioctls unreachable.
+ */
+static const struct net_device_ops xgmac_netdev_ops = {
+	.ndo_open		= xgmac_open,
+	.ndo_stop		= xgmac_stop,
+	.ndo_start_xmit		= xgmac_dma_xmit_fast,
+	.ndo_set_rx_mode	= xgmac_set_rx_mode,
+	.ndo_set_mac_address	= xgmac_set_mac_address,
+	.ndo_eth_ioctl		= xgmac_ioctl,
+	.ndo_set_features	= xgmac_set_features,
+	.ndo_get_phys_port_id	= xgmac_get_phys_port_id,
+	.ndo_get_port_parent_id	= xgmac_get_port_parent_id,
+	.ndo_neigh_destroy	= xgmac_neigh_destroy,
+	.ndo_get_stats64	= xgmac_get_stats64,
+	.ndo_change_mtu		= xgmac_change_mtu,
+};
+
+/* phylink validate: restrict supported/advertised link modes to what the
+ * MAC can do — 10/100 (half+full), 1000/2500 (full only, T/X/KX), plus
+ * autoneg and both pause flavours.
+ */
+static void xgmac_validate(struct phylink_config *config,
+			   unsigned long *supported,
+			   struct phylink_link_state *state)
+{
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = {};
+
+	phylink_set(mac_supported, 10baseT_Half);
+	phylink_set(mac_supported, 10baseT_Full);
+	phylink_set(mac_supported, 100baseT_Half);
+	phylink_set(mac_supported, 100baseT_Full);
+	phylink_set(mac_supported, 1000baseT_Full);
+	phylink_set(mac_supported, 1000baseX_Full);
+	phylink_set(mac_supported, 1000baseKX_Full);
+	phylink_set(mac_supported, 2500baseT_Full);
+	phylink_set(mac_supported, 2500baseX_Full);
+
+	phylink_set(mac_supported, Autoneg);
+	phylink_set(mac_supported, Pause);
+	phylink_set(mac_supported, Asym_Pause);
+	phylink_set_port_modes(mac_supported);
+
+	/* Intersect the caller's masks with the MAC's capabilities */
+	linkmode_and(supported, supported, mac_supported);
+	linkmode_and(state->advertising, state->advertising, mac_supported);
+}
+
+static struct xgmac_priv *sfxgmac_phylink_to_port(struct phylink_config *config)
+{
+	return container_of(config, struct xgmac_priv, phylink_config);
+}
+
+static struct phylink_pcs *xgmac_mac_selct_pcs(struct phylink_config *config,
+					    phy_interface_t interface)
+{
+	struct xgmac_priv *priv = sfxgmac_phylink_to_port(config);
+
+	return priv->pcs;
+}
+
+/* Enable Tx LPI (EEE) entry only when both the PHY supports EEE and the
+ * user has enabled it; otherwise clear all LPI-transmit controls.
+ */
+static void xgmac_toggle_tx_lpi(struct xgmac_priv *priv)
+{
+	if (priv->phy_supports_eee && priv->tx_lpi_enabled) {
+		reg_set(priv, XGMAC_LPI_CTRL,
+			XGMAC_LPIATE | XGMAC_LPITXA | XGMAC_LPITXEN);
+	} else {
+		reg_clear(priv, XGMAC_LPI_CTRL,
+			  XGMAC_LPIATE | XGMAC_LPITXA | XGMAC_LPITXEN);
+	}
+}
+
+/* phylink mac_config: only the link-status interrupt needs adjusting
+ * here; speed/duplex/pause are programmed in mac_link_up.
+ * NOTE(review): LSI is enabled for RGMII + in-band autoneg, which is an
+ * unusual combination — confirm the intended condition.
+ */
+static void xgmac_mac_config(struct phylink_config *config, unsigned int mode,
+			     const struct phylink_link_state *state)
+{
+	struct xgmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+	/* Enable link change interrupt for RGMII */
+	if (phy_interface_mode_is_rgmii(state->interface) &&
+	    phylink_autoneg_inband(mode))
+		reg_set(priv, XGMAC_INT_EN, XGMAC_LSI);
+	else
+		reg_clear(priv, XGMAC_INT_EN, XGMAC_LSI);
+}
+
+/* Hard-reset this MAC block and re-apply the core register setup.
+ *
+ * MACs 0-4 toggle their per-ID reset bit in ETHSYS_RST; MAC5 has a
+ * dedicated reset bit and must have its PHY interface forced to RGMII
+ * while held in reset. After reset, Tx stays gated (ETHSYS_TX_DIS and
+ * TE cleared) until the next link-up. The register sequence mirrors the
+ * one in xgmac_probe().
+ */
+static void xgmac_mac_reset(struct xgmac_priv *priv)
+{
+	struct net_device *ndev = priv_to_netdev(priv);
+	int ret;
+
+	if (priv->id < 5) {
+		ret = regmap_clear_bits(priv->ethsys, ETHSYS_RST, BIT(priv->id));
+		if (ret)
+			return;
+
+		ret = regmap_set_bits(priv->ethsys, ETHSYS_RST, BIT(priv->id));
+		if (ret)
+			return;
+	}else {
+		ret = regmap_clear_bits(priv->ethsys, ETHSYS_RST, ETHSYS_RST_MAC5);
+		if (ret)
+			return;
+
+		/* set mac5 phy mode to rgmii, should set under mac reset */
+		ret = regmap_write(priv->ethsys, ETHSYS_MAC5_CTRL,
+				FIELD_PREP(MAC5_PHY_INTF_SEL, 1));
+		if (ret)
+			return;
+
+		ret = regmap_set_bits(priv->ethsys, ETHSYS_RST, ETHSYS_RST_MAC5);
+		if (ret)
+			return;
+	}
+
+	/* Keep Tx disabled until mac_link_up re-enables it */
+	regmap_set_bits(priv->ethsys, ETHSYS_TX_DIS, BIT(priv->id));
+	reg_clear(priv, XGMAC_TX_CONFIG, XGMAC_CONFIG_TE);
+	reg_clear(priv, XGMAC_INT_EN, XGMAC_LSI);
+	xgmac_write_mac_addr(priv, ndev->dev_addr, 0);
+	reg_set(priv, MMC_XGMAC_CONTROL,
+		MMC_XGMAC_CONTROL_RESET | MMC_XGMAC_CONTROL_RSTONRD);
+	reg_write(priv, XGMAC_TX_CONFIG, XGMAC_CORE_INIT_TX);
+	reg_write(priv, XGMAC_RX_CONFIG, XGMAC_CORE_INIT_RX);
+	reg_write(priv, XGMAC_WD_JB_TIMEOUT, XGMAC_PJE | XGMAC_PWE);
+	reg_write(priv, XGMAC_VLAN_TAG, XGMAC_VLAN_EDVLP);
+	reg_write(priv, XGMAC_LPI_TIMER_CTRL,
+		  FIELD_PREP(XGMAC_LPI_LST, XGMAC_LPI_LST_DEFAULT) |
+		  FIELD_PREP(XGMAC_LPI_TWT, XGMAC_LPI_TWT_DEFAULT));
+	reg_write(priv, XGMAC_LPI_AUTO_EN, XGMAC_LPI_AUTO_EN_DEFAULT);
+	xgmac_mib_irq_enable(priv);
+	/* LPI 1us tic counter: CSR clock MHz - 1 */
+	reg_write(priv, XGMAC_LPI_1US,
+		  clk_get_rate(priv->csr_clk) / 1000000 - 1);
+}
+
+/* Wait up to ~20 ms for the MTL Tx queue flush (FTQ, self-clearing) to
+ * complete. If it never clears, fall back to a full MAC reset to recover
+ * the hardware, and log the event with a proper severity (the original
+ * used a bare printk without a KERN_ level).
+ */
+static void wait_queue_empty(struct xgmac_priv *priv,
+		phy_interface_t interface)
+{
+	unsigned long timeout = jiffies + HZ/50;
+
+	do {
+		/* FTQ self-clears once the queue flush has finished */
+		if (!(reg_read(priv, XGMAC_MTL_TXQ_OPMODE(0)) & XGMAC_FTQ))
+			return;
+
+		cond_resched();
+	} while (time_after(timeout, jiffies));
+
+	xgmac_mac_reset(priv);
+	netdev_warn(priv_to_netdev(priv), "wait queue empty timed out\n");
+}
+
+/* phylink mac_link_down: gate Tx (ETHSYS + TE), flush the MTL Tx queue,
+ * wait for the flush to complete, then drop the LPI PHY-link-status bit.
+ */
+static void xgmac_mac_link_down(struct phylink_config *config,
+				unsigned int mode, phy_interface_t interface)
+{
+	struct xgmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+	regmap_set_bits(priv->ethsys, ETHSYS_TX_DIS, BIT(priv->id));
+	reg_clear(priv, XGMAC_TX_CONFIG, XGMAC_CONFIG_TE);
+	reg_set(priv, XGMAC_MTL_TXQ_OPMODE(0), XGMAC_FTQ);
+	wait_queue_empty(priv, interface);
+	reg_clear(priv, XGMAC_LPI_CTRL, XGMAC_PLS);
+}
+
+/* phylink mac_link_up: program the resolved speed/duplex/pause, then
+ * enable Tx/Rx and ungate Tx in ETHSYS. EEE capability is re-probed on
+ * the attached PHY each time the link comes up, then Tx LPI is toggled
+ * accordingly. Unknown speeds bail out without enabling the MAC.
+ */
+static void xgmac_mac_link_up(struct phylink_config *config,
+			      struct phy_device *phy, unsigned int mode,
+			      phy_interface_t interface, int speed, int duplex,
+			      bool tx_pause, bool rx_pause) {
+	struct xgmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+	u32 txc, rxfc, txfc;
+
+	txc = reg_read(priv, XGMAC_TX_CONFIG);
+	txc |= XGMAC_CONFIG_TE;
+	txc &= ~XGMAC_CONFIG_SS_MASK;
+
+	switch (speed) {
+	case SPEED_2500:
+		txc |= XGMAC_CONFIG_SS_2500_GMII;
+		break;
+	case SPEED_1000:
+		txc |= XGMAC_CONFIG_SS_1000_GMII;
+		break;
+	case SPEED_100:
+		txc |= XGMAC_CONFIG_SS_100_MII;
+		break;
+	case SPEED_10:
+		txc |= XGMAC_CONFIG_SS_10_MII;
+		break;
+	default:
+		return;
+	}
+
+	if (duplex == DUPLEX_FULL)
+		reg_clear(priv, XGMAC_MAC_EXT_CONFIG, XGMAC_HD);
+	else
+		reg_set(priv, XGMAC_MAC_EXT_CONFIG, XGMAC_HD);
+
+	/* Rx flow control: honour pause frames addressed to us (UP) */
+	rxfc = XGMAC_UP;
+	if (rx_pause)
+		rxfc |= XGMAC_RFE;
+
+	reg_write(priv, XGMAC_RX_FLOW_CTRL, rxfc);
+
+	/* Tx flow control: pause time only meaningful in full duplex */
+	txfc = 0;
+	if (tx_pause) {
+		txfc |= XGMAC_TFE;
+		if (duplex == DUPLEX_FULL)
+			txfc |= FIELD_PREP(XGMAC_PT, 0x400);
+	}
+	reg_write(priv, XGMAC_Qx_TX_FLOW_CTRL(0), txfc);
+
+	reg_write(priv, XGMAC_TX_CONFIG, txc);
+	reg_set(priv, XGMAC_RX_CONFIG, XGMAC_CONFIG_RE);
+	reg_set(priv, XGMAC_LPI_CTRL, XGMAC_PLS);
+	regmap_clear_bits(priv->ethsys, ETHSYS_TX_DIS, BIT(priv->id));
+
+	if (phy)
+		priv->phy_supports_eee = !phy_init_eee(phy, true);
+	else
+		priv->phy_supports_eee = false;
+
+	xgmac_toggle_tx_lpi(priv);
+}
+
+/* phylink MAC callbacks for this driver */
+static const struct phylink_mac_ops xgmac_phylink_mac_ops = {
+	.validate	= xgmac_validate,
+	.mac_select_pcs = xgmac_mac_selct_pcs,
+	.mac_config	= xgmac_mac_config,
+	.mac_link_down	= xgmac_mac_link_down,
+	.mac_link_up	= xgmac_mac_link_up,
+};
+
+/* ethtool get_wol: delegate the Wake-on-LAN query to phylink/PHY.
+ * phylink_ethtool_get_wol() returns void, so the original
+ * "return <void expression>" (an ISO C constraint violation) is dropped.
+ */
+static void xgmac_ethtool_get_wol(struct net_device *dev,
+				 struct ethtool_wolinfo *wol)
+{
+	struct xgmac_priv *priv = netdev_priv(dev);
+
+	phylink_ethtool_get_wol(priv->phylink, wol);
+}
+
+/* ethtool set_wol: delegate to phylink/PHY */
+static int xgmac_ethtool_set_wol(struct net_device *dev,
+				struct ethtool_wolinfo *wol)
+{
+	struct xgmac_priv *priv = netdev_priv(dev);
+
+	return phylink_ethtool_set_wol(priv->phylink, wol);
+}
+
+/* ethtool nway_reset: restart autonegotiation via phylink */
+static int xgmac_ethtool_nway_reset(struct net_device *dev)
+{
+	struct xgmac_priv *priv = netdev_priv(dev);
+
+	return phylink_ethtool_nway_reset(priv->phylink);
+}
+
+/* ethtool get_pauseparam: delegate to phylink */
+static void xgmac_ethtool_get_pauseparam(struct net_device *dev,
+					 struct ethtool_pauseparam *pause)
+{
+	struct xgmac_priv *priv = netdev_priv(dev);
+
+	phylink_ethtool_get_pauseparam(priv->phylink, pause);
+}
+
+/* ethtool set_pauseparam: delegate to phylink */
+static int xgmac_ethtool_set_pauseparam(struct net_device *dev,
+					struct ethtool_pauseparam *pause)
+{
+	struct xgmac_priv *priv = netdev_priv(dev);
+
+	return phylink_ethtool_set_pauseparam(priv->phylink, pause);
+}
+
+
+/* ethtool get_strings: emit one ETH_GSTRING_LEN-sized name per MIB
+ * counter, in xgmac_mib[] order (must match get_ethtool_stats).
+ */
+static void xgmac_ethtool_get_strings(struct net_device *dev, u32 stringset,
+				      u8 *data)
+{
+	int i;
+
+	if (stringset != ETH_SS_STATS)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(xgmac_mib); i++) {
+		memcpy(data, xgmac_mib[i].name, ETH_GSTRING_LEN);
+		data += ETH_GSTRING_LEN;
+	}
+}
+
+/* ethtool get_ethtool_stats: refresh the MIB cache from hardware and
+ * copy it out under the sync bracket.
+ */
+static void xgmac_ethtool_get_stats(struct net_device *dev,
+				    struct ethtool_stats *stats, u64 *data)
+{
+	struct xgmac_priv *priv = netdev_priv(dev);
+
+	xgmac_mib_sync_begin(priv);
+	xgmac_mib_sync(priv);
+	memcpy(data, priv->mib_cache, sizeof(priv->mib_cache));
+	xgmac_mib_sync_end(priv);
+}
+
+/* ethtool reset: ETH_RESET_MGMT zeroes the hardware MMC counters and the
+ * software cache; the handled flag is cleared to report completion.
+ */
+static int xgmac_ethtool_reset(struct net_device *dev, u32 *flags)
+{
+	struct xgmac_priv *priv = netdev_priv(dev);
+
+	if (*flags & ETH_RESET_MGMT) {
+		xgmac_mib_sync_begin(priv);
+		reg_set(priv, MMC_XGMAC_CONTROL, MMC_XGMAC_CONTROL_RESET);
+		memset(priv->mib_cache, 0, sizeof(priv->mib_cache));
+		xgmac_mib_sync_end(priv);
+		*flags &= ~ETH_RESET_MGMT;
+	}
+
+	return 0;
+}
+
+/* ethtool get_sset_count: number of MIB counters exposed as stats */
+static int xgmac_ethtool_get_sset_count(struct net_device *dev, int stringset)
+{
+	switch (stringset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(xgmac_mib);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+/* ethtool get_link_ksettings: delegate to phylink */
+static int xgmac_ethtool_get_link_ksettings(struct net_device *dev,
+					    struct ethtool_link_ksettings *cmd)
+{
+	struct xgmac_priv *priv = netdev_priv(dev);
+
+	return phylink_ethtool_ksettings_get(priv->phylink, cmd);
+}
+
+/* ethtool get_eee: PHY-side EEE state from phylink, plus the MAC-side Tx
+ * LPI enable flag and the hardware auto-entry timer.
+ */
+static int xgmac_ethtool_get_eee(struct net_device *dev, struct ethtool_eee *e)
+{
+	struct xgmac_priv *priv = netdev_priv(dev);
+	int ret;
+
+	ret = phylink_ethtool_get_eee(priv->phylink, e);
+	if (ret)
+		return ret;
+
+	e->tx_lpi_enabled = priv->tx_lpi_enabled;
+	e->tx_lpi_timer = reg_read(priv, XGMAC_LPI_AUTO_EN);
+
+	return 0;
+}
+
+/* ethtool set_eee: validate the LPI timer range, push PHY-side settings
+ * through phylink, then apply the MAC-side Tx LPI enable and timer.
+ */
+static int xgmac_ethtool_set_eee(struct net_device *dev, struct ethtool_eee *e)
+{
+	struct xgmac_priv *priv = netdev_priv(dev);
+	int ret;
+
+	if (e->tx_lpi_timer > XGMAC_LPI_AUTO_EN_MAX)
+		return -EINVAL;
+
+	ret = phylink_ethtool_set_eee(priv->phylink, e);
+	if (ret)
+		return ret;
+
+	priv->tx_lpi_enabled = e->tx_lpi_enabled;
+	xgmac_toggle_tx_lpi(priv);
+
+	reg_write(priv, XGMAC_LPI_AUTO_EN, e->tx_lpi_timer);
+
+	return 0;
+}
+
+/* ethtool set_link_ksettings: delegate to phylink */
+static int xgmac_ethtool_set_link_ksettings(struct net_device *dev,
+					    const struct ethtool_link_ksettings *cmd)
+{
+	struct xgmac_priv *priv = netdev_priv(dev);
+
+	return phylink_ethtool_ksettings_set(priv->phylink, cmd);
+}
+
+/* ethtool operations; mostly thin phylink wrappers plus MIB stats */
+static const struct ethtool_ops xgmac_ethtool_ops = {
+	.get_wol		= xgmac_ethtool_get_wol,
+	.set_wol		= xgmac_ethtool_set_wol,
+	.nway_reset		= xgmac_ethtool_nway_reset,
+	.get_link		= ethtool_op_get_link,
+	.get_pauseparam		= xgmac_ethtool_get_pauseparam,
+	.set_pauseparam		= xgmac_ethtool_set_pauseparam,
+	.get_strings		= xgmac_ethtool_get_strings,
+	.get_ethtool_stats	= xgmac_ethtool_get_stats,
+	.reset			= xgmac_ethtool_reset,
+	.get_sset_count		= xgmac_ethtool_get_sset_count,
+	.get_eee		= xgmac_ethtool_get_eee,
+	.set_eee		= xgmac_ethtool_set_eee,
+	.get_link_ksettings	= xgmac_ethtool_get_link_ksettings,
+	.set_link_ksettings	= xgmac_ethtool_set_link_ksettings,
+};
+
+/* Program the MAC5 RGMII internal Rx/Tx delays from the optional DT
+ * properties (picoseconds, rounded to MAC5_DELAY_STEP units; default
+ * MAC5_DELAY_DEFAULT). A delay of 0 steps leaves that direction's delay
+ * disabled; more than 256 steps is rejected. The phy_mode parameter is
+ * currently unused (the caller has already checked for RGMII).
+ */
+static int xgmac_rgmii_delay(struct xgmac_priv *priv, phy_interface_t phy_mode)
+{
+	u32 reg = 0, rxd = MAC5_DELAY_DEFAULT, txd = MAC5_DELAY_DEFAULT;
+
+	of_property_read_u32(priv->dev->of_node, "rx-internal-delay-ps", &rxd);
+	of_property_read_u32(priv->dev->of_node, "tx-internal-delay-ps", &txd);
+
+	rxd = DIV_ROUND_CLOSEST(rxd, MAC5_DELAY_STEP);
+	txd = DIV_ROUND_CLOSEST(txd, MAC5_DELAY_STEP);
+
+	if (rxd > 256 || txd > 256)
+		return -EINVAL;
+
+	/* Hardware field encodes (steps - 1) with a separate enable bit */
+	if (rxd)
+		reg |= FIELD_PREP(MAC5_RX_DELAY, rxd - 1) | MAC5_RX_DELAY_EN;
+
+	if (txd)
+		reg |= FIELD_PREP(MAC5_TX_DELAY, txd - 1) | MAC5_TX_DELAY_EN;
+
+	return regmap_update_bits(priv->ethsys, ETHSYS_MAC(5),
+				  MAC5_DELAY_MASK, reg);
+}
+
+/* Parse the DT phy-mode and set up phylink for this port.
+ *
+ * RGMII ports get their pad delays programmed and the "rgmii" clock
+ * enabled; serdes modes attach the external XPCS referenced by the
+ * "pcs-handle" phandle. On success priv->phylink (and, for serdes
+ * modes, priv->pcs / priv->pcs_dev) are initialised.
+ *
+ * Fixes vs. original: removes a dead "if (ret) return ret;" after
+ * devm_clk_get_enabled() (ret was known-zero there), and releases the
+ * XPCS reference if phylink_create() fails (it was leaked before).
+ */
+static int xgmac_phy_setup(struct xgmac_priv *priv)
+{
+	struct device *dev = priv->dev;
+	struct device_node *np = dev->of_node;
+	phy_interface_t phy_mode;
+	struct phylink *phylink;
+	struct platform_device *pcs_dev = NULL;
+	struct phylink_pcs *pcs = NULL;
+	int ret;
+
+	ret = of_get_phy_mode(np, &phy_mode);
+	if (ret)
+		return ret;
+
+	if (phy_interface_mode_is_rgmii(phy_mode)) {
+		ret = xgmac_rgmii_delay(priv, phy_mode);
+		if (ret)
+			return ret;
+
+		priv->csr_clk = devm_clk_get_enabled(dev, "rgmii");
+		if (IS_ERR(priv->csr_clk))
+			return PTR_ERR(priv->csr_clk);
+	} else {
+		/* Serdes modes (SGMII/QSGMII/base-X) go through the XPCS */
+		struct of_phandle_args pcs_args;
+
+		ret = of_parse_phandle_with_fixed_args(np, "pcs-handle", 1, 0,
+						       &pcs_args);
+		if (ret)
+			return ret;
+
+		pcs_dev = of_find_device_by_node(pcs_args.np);
+		of_node_put(pcs_args.np);
+		if (!pcs_dev)
+			return -ENODEV;
+
+		pcs = xpcs_port_get(pcs_dev, pcs_args.args[0]);
+		if (IS_ERR(pcs))
+			return PTR_ERR(pcs);
+
+		priv->pcs_dev = pcs_dev;
+	}
+
+	priv->phylink_config.dev = &priv_to_netdev(priv)->dev;
+	priv->phylink_config.type = PHYLINK_NETDEV;
+
+	__set_bit(phy_mode, priv->phylink_config.supported_interfaces);
+
+	priv->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+						MAC_10 | MAC_100 | MAC_1000FD |
+						MAC_2500FD;
+
+	phylink = phylink_create(&priv->phylink_config, dev->fwnode, phy_mode,
+				 &xgmac_phylink_mac_ops);
+	if (IS_ERR(phylink)) {
+		/* Drop the XPCS reference taken above */
+		if (pcs_dev) {
+			xpcs_port_put(pcs_dev);
+			priv->pcs_dev = NULL;
+		}
+		return PTR_ERR(phylink);
+	}
+
+	if (pcs)
+		priv->pcs = pcs;
+
+	priv->phylink = phylink;
+
+	return 0;
+}
+
+/* Probe one XGMAC port: locate the shared DMA engine, map registers,
+ * reset the MAC block, initialise core registers, MDIO and phylink,
+ * then register the netdev.
+ *
+ * Fixes vs. original: the device reference obtained through
+ * of_find_device_by_node() on the DMA platform device was never
+ * released on any path; the drvdata is now read immediately and the
+ * reference dropped before anything can fail. The IRQ number also gets
+ * its own local instead of being smuggled through "ret".
+ */
+static int xgmac_probe(struct platform_device *pdev)
+{
+	/* Set once by whichever port probes first (see below) */
+	static bool mac_disable_tx_set;
+	struct platform_device *dma_pdev;
+	struct device_node *dma_node;
+	struct net_device *ndev;
+	struct xgmac_priv *priv;
+	struct resource *r;
+	void *dma_priv;
+	int irq;
+	u32 ver;
+	int ret;
+
+	/* This MAC is served by a shared DMA engine referenced via "dmas" */
+	dma_node = of_parse_phandle(pdev->dev.of_node, "dmas", 0);
+	if (!dma_node)
+		return -ENODEV;
+
+	dma_pdev = of_find_device_by_node(dma_node);
+	of_node_put(dma_node);
+	if (!dma_pdev)
+		return -ENODEV;
+
+	/* The DMA driver publishes its state via drvdata once probed.
+	 * Read it and drop the device reference right away so no error
+	 * path below can leak it; NULL means the DMA driver has not
+	 * probed yet.
+	 */
+	dma_priv = platform_get_drvdata(dma_pdev);
+	put_device(&dma_pdev->dev);
+	if (!dma_priv)
+		return -EPROBE_DEFER;
+
+	ndev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(*priv), DMA_CH_MAX,
+				       DMA_CH_MAX);
+	if (!ndev)
+		return -ENOMEM;
+
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+	platform_set_drvdata(pdev, ndev);
+	priv = netdev_priv(ndev);
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->ioaddr = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(priv->ioaddr))
+		return PTR_ERR(priv->ioaddr);
+
+	priv->csr_clk = devm_clk_get_enabled(&pdev->dev, "csr");
+	if (IS_ERR(priv->csr_clk))
+		return PTR_ERR(priv->csr_clk);
+
+	/* MAC index is derived from the register block's base address */
+	priv->id = offset_to_id(r->start);
+	priv->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+						       "ethsys");
+	if (IS_ERR(priv->ethsys))
+		return PTR_ERR(priv->ethsys);
+
+	if (priv->id < 5) {
+		/* mac reset and release reset */
+		ret = regmap_clear_bits(priv->ethsys, ETHSYS_RST, BIT(priv->id));
+		if (ret)
+			return ret;
+
+		ret = regmap_set_bits(priv->ethsys, ETHSYS_RST, BIT(priv->id));
+		if (ret)
+			return ret;
+
+	} else {
+		ret = regmap_clear_bits(priv->ethsys, ETHSYS_RST, ETHSYS_RST_MAC5);
+		if (ret)
+			return ret;
+
+		/* set mac5 phy mode to rgmii, should set under mac reset */
+		ret = regmap_write(priv->ethsys, ETHSYS_MAC5_CTRL,
+				FIELD_PREP(MAC5_PHY_INTF_SEL, 1));
+		if (ret)
+			return ret;
+
+		ret = regmap_set_bits(priv->ethsys, ETHSYS_RST, ETHSYS_RST_MAC5);
+		if (ret)
+			return ret;
+	}
+
+	if (!mac_disable_tx_set) {
+		/* Disable all MAC Tx, once and only once */
+		mac_disable_tx_set = true;
+		ret = regmap_write(priv->ethsys, ETHSYS_TX_DIS, 0xff);
+		if (ret)
+			return ret;
+	}
+
+	ver = reg_read(priv, XGMAC_VERSION);
+	if (FIELD_GET(XGMAC_VERSION_ID_MASK, ver) != XGMAC_VERSION_ID)
+		return -ENODEV;
+
+	spin_lock_init(&priv->stats_lock);
+	priv->dev = &pdev->dev;
+	priv->dma = dma_priv;
+
+	ndev->base_addr = r->start;
+	ndev->netdev_ops = &xgmac_netdev_ops;
+	ndev->ethtool_ops = &xgmac_ethtool_ops;
+	ndev->features = NETIF_F_RXHASH | NETIF_F_RXCSUM | NETIF_F_GRO |
+			 NETIF_F_SG | NETIF_F_LLTX | NETIF_F_HW_TC |
+			 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO |
+			 NETIF_F_TSO6;
+	ndev->hw_features = (ndev->features & ~NETIF_F_RXHASH) |
+			    NETIF_F_LOOPBACK | NETIF_F_RXFCS | NETIF_F_RXALL |
+			    NETIF_F_HW_L2FW_DOFFLOAD;
+	ndev->vlan_features = ndev->features;
+	ndev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
+	ndev->max_mtu = MAX_FRAME_SIZE - ETH_HLEN - ETH_FCS_LEN;
+
+	/* read-clear interrupt status before registering */
+	reg_read(priv, XGMAC_MDIO_INT_STATUS);
+	reg_read(priv, XGMAC_INT_STATUS);
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	reg_clear(priv, XGMAC_INT_EN, XGMAC_LSI);
+	snprintf(priv->irq_name, sizeof(priv->irq_name), "xgmac%u_sbd",
+		 priv->id);
+	ret = devm_request_irq(&pdev->dev, irq, xgmac_irq, 0, priv->irq_name,
+			       priv);
+	if (ret)
+		return ret;
+
+	ret = xgmac_mdio_init(priv);
+	if (ret)
+		return ret;
+
+	ret = of_get_ethdev_address(pdev->dev.of_node, ndev);
+	if (ret == -EPROBE_DEFER)
+		return ret;
+
+	if (ret) {
+		eth_hw_addr_random(ndev);
+		dev_warn(&pdev->dev, "generated random MAC address %pM\n",
+			 ndev->dev_addr);
+	}
+
+	ret = xgmac_phy_setup(priv);
+	if (ret)
+		return ret;
+
+	/* Core register init, same sequence as xgmac_mac_reset() */
+	xgmac_write_mac_addr(priv, ndev->dev_addr, 0);
+	reg_set(priv, MMC_XGMAC_CONTROL,
+		MMC_XGMAC_CONTROL_RESET | MMC_XGMAC_CONTROL_RSTONRD);
+	reg_write(priv, XGMAC_TX_CONFIG, XGMAC_CORE_INIT_TX);
+	reg_write(priv, XGMAC_RX_CONFIG, XGMAC_CORE_INIT_RX);
+	reg_write(priv, XGMAC_WD_JB_TIMEOUT, XGMAC_PJE | XGMAC_PWE);
+	reg_write(priv, XGMAC_VLAN_TAG, XGMAC_VLAN_EDVLP);
+	reg_write(priv, XGMAC_LPI_TIMER_CTRL,
+		  FIELD_PREP(XGMAC_LPI_LST, XGMAC_LPI_LST_DEFAULT) |
+		  FIELD_PREP(XGMAC_LPI_TWT, XGMAC_LPI_TWT_DEFAULT));
+	reg_write(priv, XGMAC_LPI_AUTO_EN, XGMAC_LPI_AUTO_EN_DEFAULT);
+	xgmac_mib_irq_enable(priv);
+	reg_write(priv, XGMAC_LPI_1US,
+		  clk_get_rate(priv->csr_clk) / 1000000 - 1);
+
+	ret = register_netdev(ndev);
+	if (ret)
+		goto phy_cleanup;
+
+	return 0;
+phy_cleanup:
+	phylink_destroy(priv->phylink);
+	if (priv->pcs_dev)
+		xpcs_port_put(priv->pcs_dev);
+	return ret;
+}
+
+/* Driver remove: unregister the netdev, destroy phylink, release the
+ * XPCS reference (if any) and put this MAC back into reset.
+ * NOTE(review): clears BIT(priv->id) unconditionally — for MAC5 the
+ * reset bit is ETHSYS_RST_MAC5 elsewhere; confirm this is intentional.
+ */
+static void xgmac_remove(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct xgmac_priv *priv = netdev_priv(dev);
+
+	unregister_netdev(dev);
+	phylink_destroy(priv->phylink);
+	if (priv->pcs_dev)
+		xpcs_port_put(priv->pcs_dev);
+	regmap_clear_bits(priv->ethsys, ETHSYS_RST, BIT(priv->id));
+}
+
+/* Device-tree match table and platform driver registration */
+static const struct of_device_id xgmac_match[] = {
+	{ .compatible = "siflower,sf21-xgmac" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, xgmac_match);
+
+static struct platform_driver xgmac_driver = {
+	.probe	= xgmac_probe,
+	.remove_new	= xgmac_remove,
+	.driver	= {
+		.name		= "sfxgmac",
+		.of_match_table	= xgmac_match,
+	},
+};
+module_platform_driver(xgmac_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Qingfang Deng <[email protected]>")
+MODULE_DESCRIPTION("Ethernet XGMAC driver for SF21A6826/SF21H8898 SoC");

+ 621 - 0
target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/sfxpcs.c

@@ -0,0 +1,621 @@
+#define pr_fmt(fmt)	"xpcs: " fmt
+
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/phylink.h>
+#include <linux/of_platform.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <linux/regmap.h>
+#include <asm-generic/bug.h>
+
+#include "sfxpcs.h"
+#include "eth.h"
+
+/* Memory-mapped MDIO address layout used by xpcs_read/xpcs_write:
+ * bits 20:16 select the MMD device, bits 15:0 the register within it.
+ */
+#define DEV_MASK    GENMASK(20, 16)
+#define REG_MASK    GENMASK(15, 0)
+
+/* Per-port QSGMII MMDs: port 0 uses MDIO_MMD_VEND2, ports 1-3 these */
+#define DW_PCS_PORTS		4
+#define DW_QSGMII_MMD1          0x1a
+#define DW_QSGMII_MMD2          0x1b
+#define DW_QSGMII_MMD3          0x1c
+#define MDIO_CTRL1			MII_BMCR
+#define MDIO_CTRL1_RESET    BMCR_RESET
+
+/* NOTE(review): compat shim for the removed PDE_DATA() helper; it BUGs
+ * if ever called, so any code path using it is presumably dead — confirm.
+ */
+static inline void *PDE_DATA(const struct inode *inode) {BUG(); return NULL;};
+
+/* Indices into xpcs_priv.clks for the bulk clock API */
+enum {
+	XPCS_CLK_REF,
+	XPCS_CLK_EEE,
+	XPCS_CLK_CSR,
+	XPCS_NUM_CLKS
+};
+
+/* One PCS instance handed out to phylink; index identifies the port
+ * within the XPCS block.
+ */
+struct xpcs_port {
+	struct phylink_pcs pcs;
+	unsigned int index;
+};
+
+/* Driver state for one XPCS block (up to DW_PCS_PORTS QSGMII ports) */
+struct xpcs_priv {
+	void __iomem *ioaddr;
+	struct regmap *ethsys;
+	struct clk_bulk_data clks[XPCS_NUM_CLKS];
+	u8 power_save_count;
+	u8 port_count;
+	u8 id;
+	struct xpcs_port ports[DW_PCS_PORTS];
+};
+
+/* Map a QSGMII port index to the MMD device address holding its
+ * per-port registers: port 0 lives in the vendor MMD, ports 1-3 in the
+ * dedicated QSGMII MMDs. An out-of-range index is a programming error.
+ */
+static int xpcs_qsgmii_port_to_devad(unsigned int port)
+{
+	static const int port_mmd[DW_PCS_PORTS] = {
+		MDIO_MMD_VEND2,
+		DW_QSGMII_MMD1,
+		DW_QSGMII_MMD2,
+		DW_QSGMII_MMD3,
+	};
+
+	if (port >= ARRAY_SIZE(port_mmd)) {
+		BUG();
+		return -EINVAL;
+	}
+
+	return port_mmd[port];
+}
+
+/* Read a 16-bit PCS register through the memory-mapped MDIO window.
+ * The (devad, reg) pair is packed per DEV_MASK/REG_MASK and shifted
+ * left by 2 to form the byte offset.
+ */
+static u16 xpcs_read(struct xpcs_priv *priv, int devad, int reg)
+{
+	ulong r;
+
+	r = FIELD_PREP(REG_MASK, reg) | FIELD_PREP(DEV_MASK, devad);
+	r <<= 2;
+
+	return readw_relaxed(priv->ioaddr + r);
+}
+
+/* Write a 16-bit PCS register; same address packing as xpcs_read() */
+static void xpcs_write(struct xpcs_priv *priv, int devad, int reg, u16 val)
+{
+	ulong r;
+
+	r = FIELD_PREP(REG_MASK, reg) | FIELD_PREP(DEV_MASK, devad);
+	r <<= 2;
+
+	writew_relaxed(val, priv->ioaddr + r);
+}
+
+/* Read-modify-write helper: clear the given bits */
+static inline void xpcs_clear(struct xpcs_priv *priv, int devad, int reg, u16 clear)
+{
+	xpcs_write(priv, devad, reg, xpcs_read(priv, devad, reg) & ~clear);
+}
+
+/* Read-modify-write helper: set the given bits */
+static inline void xpcs_set(struct xpcs_priv *priv, int devad, int reg, u16 set)
+{
+	xpcs_write(priv, devad, reg, xpcs_read(priv, devad, reg) | set);
+}
+
+/* Poll MDIO_CTRL1 until the self-clearing RESET bit drops.
+ * Bounded busy-wait (100k reads); returns 0 on success, -ETIMEDOUT
+ * otherwise. cpu_relax() is inserted so the tight MMIO polling loop
+ * does not starve the sibling hardware thread.
+ */
+static int xpcs_poll_reset(struct xpcs_priv *priv, int devad)
+{
+	int timeout = 100000;
+
+	while (xpcs_read(priv, devad, MDIO_CTRL1) & MDIO_CTRL1_RESET) {
+		if (!--timeout) {
+			pr_err("Timed out waiting for reset\n");
+			return -ETIMEDOUT;
+		}
+		cpu_relax();
+	}
+
+	return 0;
+}
+
+/* Poll the power-sequencer state field (PSEQ_STATE in DW_VR_MII_DIG_STS)
+ * until it reaches the requested value, with 100us steps and a ~10ms
+ * overall budget. Returns 0 on success, -ETIMEDOUT otherwise.
+ */
+static int xpcs_poll_pg(struct xpcs_priv *priv, int devad, u16 val)
+{
+	u32 timeout = 0;
+
+	while (FIELD_GET(PSEQ_STATE,
+				xpcs_read(priv, devad, DW_VR_MII_DIG_STS)) != val) {
+		if (timeout >= 100) {
+			pr_err("Timed out waiting for power state\n");
+			return -ETIMEDOUT;
+		}
+		timeout++;
+		udelay(100);
+	}
+	return 0;
+}
+
+/* Power the serdes down and wait for the power sequencer to settle */
+static int xpcs_serdes_power_down(struct xpcs_priv *priv)
+{
+	xpcs_write(priv, MDIO_MMD_VEND2, MII_BMCR, BMCR_PDOWN);
+	return xpcs_poll_pg(priv, MDIO_MMD_VEND2, PSEQ_STATE_DOWN);
+}
+
+/* Power the serdes back up (clears BMCR entirely) and wait for "good" */
+static int xpcs_serdes_power_up(struct xpcs_priv *priv)
+{
+	/* When powered down, this register cannot be read.
+	 * speed/duplex/AN will be configured in pcs_config/pcs_link_up.
+	 */
+	xpcs_write(priv, MDIO_MMD_VEND2, MII_BMCR, 0);
+	return xpcs_poll_pg(priv, MDIO_MMD_VEND2, PSEQ_STATE_GOOD);
+}
+
+/* Read AN result for 1000Base-X/2500Base-X.
+ *
+ * Decodes the local/partner advertisement registers into lp_advertising,
+ * resolves duplex (full only — AN failure drops the link) and the pause
+ * flags via the standard linkmode helpers. fd_bit is the ethtool link
+ * mode bit for the full-duplex mode being resolved.
+ */
+static void xpcs_8023z_resolve_link(struct xpcs_priv *priv,
+				    struct phylink_link_state *state,
+				    int fd_bit)
+{
+	bool tx_pause, rx_pause;
+	u16 adv, lpa;
+
+	adv = xpcs_read(priv, MDIO_MMD_VEND2, MII_ADVERTISE);
+	lpa = xpcs_read(priv, MDIO_MMD_VEND2, MII_LPA);
+
+	mii_lpa_mod_linkmode_x(state->lp_advertising, lpa, fd_bit);
+
+	if (linkmode_test_bit(fd_bit, state->advertising) &&
+	    linkmode_test_bit(fd_bit, state->lp_advertising)) {
+		state->duplex = DUPLEX_FULL;
+	} else {
+		/* negotiation failure */
+		state->link = false;
+	}
+
+	linkmode_resolve_pause(state->advertising, state->lp_advertising,
+			       &tx_pause, &rx_pause);
+
+	if (tx_pause)
+		state->pause |= MLO_PAUSE_TX;
+	if (rx_pause)
+		state->pause |= MLO_PAUSE_RX;
+}
+
+/* phylink pcs_get_state: report link, AN-complete, speed, duplex and
+ * pause for this port.
+ *
+ * SGMII/QSGMII resolve speed/duplex from the per-port AN interrupt
+ * status register; the base-X modes have fixed speed and resolve
+ * duplex/pause from the 802.3z AN registers.
+ */
+static void xpcs_get_state(struct phylink_pcs *pcs,
+			   struct phylink_link_state *state)
+{
+	struct xpcs_port *port;
+	struct xpcs_priv *priv;
+	u16 intrsts, bmsr;
+	int mmd;
+
+	port = container_of(pcs, struct xpcs_port, pcs);
+	priv = container_of(port, struct xpcs_priv, ports[port->index]);
+	bmsr = xpcs_read(priv, MDIO_MMD_VEND2, MII_BMSR);
+
+	state->link = !!(bmsr & BMSR_LSTATUS);
+	state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
+	if (!state->link)
+		return;
+
+	switch (state->interface) {
+	case PHY_INTERFACE_MODE_SGMII:
+	case PHY_INTERFACE_MODE_QSGMII:
+		mmd = xpcs_qsgmii_port_to_devad(port->index);
+		/* For SGMII/QSGMII, link speed and duplex can be read from
+		 * DW_VR_MII_AN_INTR_STS */
+		intrsts = xpcs_read(priv, mmd, DW_VR_MII_AN_INTR_STS);
+
+		state->link = !!(intrsts & DW_VR_MII_C37_ANSGM_SP_LNKSTS);
+		if (!state->link)
+			break;
+
+		switch (FIELD_GET(DW_VR_MII_AN_STS_C37_ANSGM_SP, intrsts)) {
+		case DW_VR_MII_C37_ANSGM_SP_10:
+			state->speed = SPEED_10;
+			break;
+		case DW_VR_MII_C37_ANSGM_SP_100:
+			state->speed = SPEED_100;
+			break;
+		case DW_VR_MII_C37_ANSGM_SP_1000:
+			state->speed = SPEED_1000;
+			break;
+		}
+
+		state->duplex = (intrsts & DW_VR_MII_AN_STS_C37_ANSGM_FD) ?
+				DUPLEX_FULL : DUPLEX_HALF;
+		break;
+	case PHY_INTERFACE_MODE_1000BASEX:
+		state->speed = SPEED_1000;
+		xpcs_8023z_resolve_link(priv, state,
+					ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
+		break;
+	case PHY_INTERFACE_MODE_2500BASEX:
+		state->speed = SPEED_2500;
+		xpcs_8023z_resolve_link(priv, state,
+					ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
+		break;
+	default:
+		break;
+	}
+}
+
+/* Configure the XPCS for QSGMII operation (idempotent: returns early if
+ * the PCS mode field already says QSGMII). Sets the PLL/lane/misc
+ * registers for the QSGMII rate, disables the 2.5G and CL37 timer
+ * override bits, then power-cycles the serdes to latch the new config.
+ */
+static void xpcs_qsgmii_init(struct xpcs_priv *priv)
+{
+	u16 reg;
+
+	reg = xpcs_read(priv, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL);
+	/* Already configured for QSGMII? skip. */
+	if (FIELD_GET(DW_VR_MII_PCS_MODE_MASK, reg) == DW_VR_MII_PCS_MODE_C37_QSGMII)
+		return;
+
+	reg = FIELD_PREP(DW_VR_MII_PCS_MODE_MASK, DW_VR_MII_PCS_MODE_C37_QSGMII);
+	xpcs_write(priv, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL, reg);
+
+	xpcs_write(priv, MDIO_MMD_VEND2, DW_VR_MII_MP_6G_MPLL_CTRL1, 0x28);
+
+	reg = xpcs_read(priv, MDIO_MMD_VEND2, DW_VR_MII_MP_6G_MPLL_CTRL0);
+	reg &= ~LANE_10BIT_SEL;
+	xpcs_write(priv, MDIO_MMD_VEND2, DW_VR_MII_MP_6G_MPLL_CTRL0, reg);
+
+	xpcs_write(priv, MDIO_MMD_VEND2, DW_VR_MII_MP_6G_MISC_CTRL1, 0x0);
+
+	reg = xpcs_read(priv, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1);
+	reg &= ~DW_VR_MII_DIG_CTRL1_2G5_EN;
+	reg &= ~DW_VR_MII_DIG_CTRL1_CL37_TMR_OVR_RIDE;
+	xpcs_write(priv, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1, reg);
+
+	/* Power-cycle the serdes so the new configuration takes effect */
+	xpcs_serdes_power_down(priv);
+	xpcs_serdes_power_up(priv);
+}
+
+/* PLL/lane/misc setup shared by the 1000Base-X and SGMII modes (10-bit
+ * lane, 2.5G and CL37 timer override disabled), ending with a serdes
+ * power cycle. The caller programs the PCS mode field first.
+ */
+static void xpcs_1000basex_sgmii_common_init(struct xpcs_priv *priv)
+{
+	u16 reg;
+
+	xpcs_write(priv, MDIO_MMD_VEND2, DW_VR_MII_MP_6G_MPLL_CTRL1, 0x28);
+
+	reg = xpcs_read(priv, MDIO_MMD_VEND2, DW_VR_MII_MP_6G_MPLL_CTRL0);
+	reg |= LANE_10BIT_SEL;
+	xpcs_write(priv, MDIO_MMD_VEND2, DW_VR_MII_MP_6G_MPLL_CTRL0, reg);
+
+	xpcs_write(priv, MDIO_MMD_VEND2, DW_VR_MII_MP_6G_MISC_CTRL1, 0xa);
+
+	reg = xpcs_read(priv, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1);
+	reg &= ~DW_VR_MII_DIG_CTRL1_2G5_EN;
+	reg &= ~DW_VR_MII_DIG_CTRL1_CL37_TMR_OVR_RIDE;
+	xpcs_write(priv, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1, reg);
+
+	xpcs_serdes_power_down(priv);
+	xpcs_serdes_power_up(priv);
+}
+
+/* Configure the PCS for 1000BASE-X: select the C37 1000BASE-X PCS mode,
+ * then run the common 1G serdes setup.
+ */
+static void xpcs_1000basex_init(struct xpcs_priv *priv)
+{
+	u16 reg;
+
+	reg = FIELD_PREP(DW_VR_MII_PCS_MODE_MASK, DW_VR_MII_PCS_MODE_C37_1000BASEX);
+	xpcs_write(priv, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL, reg);
+
+	xpcs_1000basex_sgmii_common_init(priv);
+}
+
+/* Configure the PCS for SGMII: select the C37 SGMII PCS mode, then run
+ * the common 1G serdes setup.
+ */
+static void xpcs_sgmii_init(struct xpcs_priv *priv)
+{
+	u16 reg;
+
+	reg = FIELD_PREP(DW_VR_MII_PCS_MODE_MASK, DW_VR_MII_PCS_MODE_C37_SGMII);
+	xpcs_write(priv, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL, reg);
+
+	xpcs_1000basex_sgmii_common_init(priv);
+}
+
+/* Configure the PCS for 2500BASE-X.
+ *
+ * 2.5G is run as an overclocked 1000BASE-X: the PCS mode is set to C37
+ * 1000BASE-X, the MPLL is reprogrammed (0x32), RX alignment is enabled,
+ * the CL37 link timer is overridden (2.5G bit times are 2.5x shorter),
+ * and the 2.5G enable bit is set.  Ends with a serdes power cycle.
+ */
+static void xpcs_2500basex_init(struct xpcs_priv *priv)
+{
+	u16 reg;
+
+	reg = FIELD_PREP(DW_VR_MII_PCS_MODE_MASK, DW_VR_MII_PCS_MODE_C37_1000BASEX);
+	xpcs_write(priv, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL, reg);
+
+	xpcs_write(priv, MDIO_MMD_VEND2, DW_VR_MII_MP_6G_MPLL_CTRL1, 0x32);
+
+	/* Select the 10-bit lane data width */
+	reg = xpcs_read(priv, MDIO_MMD_VEND2, DW_VR_MII_MP_6G_MPLL_CTRL0);
+	reg |= LANE_10BIT_SEL;
+	xpcs_write(priv, MDIO_MMD_VEND2, DW_VR_MII_MP_6G_MPLL_CTRL0, reg);
+
+	reg = xpcs_read(priv, MDIO_MMD_VEND2, DW_VR_MII_MP_6G_RXGENCTRL0);
+	reg |= RX_ALIGN_EN_0;
+	xpcs_write(priv, MDIO_MMD_VEND2, DW_VR_MII_MP_6G_RXGENCTRL0, reg);
+
+	xpcs_write(priv, MDIO_MMD_VEND2, DW_VR_MII_MP_6G_MISC_CTRL1, 0x5);
+
+	/* Shorter link timer for the 2.5x symbol rate */
+	xpcs_write(priv, MDIO_MMD_VEND2, DW_VR_MII_LINK_TIMER_CTRL,
+		   DW_VR_MII_LINK_TIMER_2500BASEX);
+
+	reg = xpcs_read(priv, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1);
+	reg |= DW_VR_MII_DIG_CTRL1_2G5_EN;
+	reg |= DW_VR_MII_DIG_CTRL1_CL37_TMR_OVR_RIDE;
+	xpcs_write(priv, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1, reg);
+
+	/* Power-cycle the serdes to apply the new configuration */
+	xpcs_serdes_power_down(priv);
+	xpcs_serdes_power_up(priv);
+}
+
+/* phylink pcs_config callback.
+ *
+ * Validates the port/interface combination (ports 1-3 exist only in
+ * QSGMII), disables AN, runs the per-interface init sequence, then
+ * re-enables interrupts/AN as appropriate for the negotiation mode.
+ * Returns 0 on success or -EINVAL for unsupported combinations.
+ */
+static int xpcs_config(struct phylink_pcs *pcs, unsigned int mode,
+		       phy_interface_t interface,
+		       const unsigned long *advertising,
+		       bool permit_pause_to_mac)
+{
+	struct xpcs_port *port;
+	struct xpcs_priv *priv;
+	u16 val;
+	int mmd;
+
+	port = container_of(pcs, struct xpcs_port, pcs);
+	priv = container_of(port, struct xpcs_priv, ports[port->index]);
+
+	/* Port 1,2,3 only exist in QSGMII mode */
+	if (port->index && interface != PHY_INTERFACE_MODE_QSGMII)
+		return -EINVAL;
+
+	/* Disable AN while reconfiguring */
+	mmd = xpcs_qsgmii_port_to_devad(port->index);
+	xpcs_clear(priv, mmd, MII_BMCR, BMCR_ANENABLE);
+
+	switch (interface) {
+	case PHY_INTERFACE_MODE_QSGMII:
+		xpcs_qsgmii_init(priv);
+		break;
+	case PHY_INTERFACE_MODE_SGMII:
+		xpcs_sgmii_init(priv);
+		break;
+	case PHY_INTERFACE_MODE_2500BASEX:
+		xpcs_2500basex_init(priv);
+		break;
+	case PHY_INTERFACE_MODE_1000BASEX:
+		xpcs_1000basex_init(priv);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Enable interrupt for in-band status */
+	val = xpcs_read(priv, mmd, DW_VR_MII_AN_CTRL);
+	if (phylink_autoneg_inband(mode))
+		val |= DW_VR_MII_AN_INTR_EN;
+	else
+		val &= ~DW_VR_MII_AN_INTR_EN;
+	xpcs_write(priv, mmd, DW_VR_MII_AN_CTRL, val);
+
+	if (interface != PHY_INTERFACE_MODE_2500BASEX) {
+		val = xpcs_read(priv, mmd, DW_VR_MII_DIG_CTRL1);
+		/* Enable speed auto switch for SGMII/QSGMII */
+		val |= DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
+		xpcs_write(priv, mmd, DW_VR_MII_DIG_CTRL1, val);
+	}
+
+	/* Configure AN ADV for 802.3z modes.  This writes MDIO_MMD_VEND2
+	 * directly rather than the per-port mmd; 802.3z modes are only
+	 * valid on port 0, where the two are presumably the same device
+	 * address — TODO confirm.
+	 */
+	if (phy_interface_mode_is_8023z(interface)) {
+		int fd_bit;
+		u16 adv;
+
+		fd_bit = interface == PHY_INTERFACE_MODE_1000BASEX ?
+			 ETHTOOL_LINK_MODE_1000baseX_Full_BIT :
+			 ETHTOOL_LINK_MODE_2500baseX_Full_BIT;
+		adv = linkmode_adv_to_mii_adv_x(advertising, fd_bit);
+
+		xpcs_write(priv, MDIO_MMD_VEND2, MII_ADVERTISE, adv);
+	}
+
+	/* Enable AN.  NOTE(review): this is a full BMCR write, not a
+	 * read-modify-write, so it clears the speed/duplex bits too;
+	 * they are reprogrammed later by xpcs_link_up() — confirm that
+	 * ordering always holds.
+	 */
+	if (interface != PHY_INTERFACE_MODE_2500BASEX)
+		xpcs_write(priv, mmd, MII_BMCR, BMCR_ANENABLE);
+
+	return 0;
+}
+
+/* phylink pcs_an_restart callback: restart clause-37 auto-negotiation
+ * by setting BMCR_ANRESTART in the port's BMCR.
+ */
+static void xpcs_an_restart(struct phylink_pcs *pcs)
+{
+	struct xpcs_port *port;
+	struct xpcs_priv *priv;
+	int mmd;
+
+	port = container_of(pcs, struct xpcs_port, pcs);
+	priv = container_of(port, struct xpcs_priv, ports[port->index]);
+
+	mmd = xpcs_qsgmii_port_to_devad(port->index);
+	xpcs_set(priv, mmd, MII_BMCR, BMCR_ANRESTART);
+}
+
+/* phylink pcs_link_up callback: force speed/duplex in BMCR.
+ *
+ * Skipped for SGMII/QSGMII in-band modes, where speed/duplex come from
+ * auto-negotiation.  For 802.3z modes the speed is pinned to the
+ * BMCR_SPEED1000 encoding (SPEED_2500 maps to the same bits below).
+ */
+static void xpcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
+			 phy_interface_t interface, int speed, int duplex)
+{
+	struct xpcs_port *port;
+	struct xpcs_priv *priv;
+	u16 bmcr;
+	int mmd;
+
+	/* Skip speed and duplex configuration for SGMII/QSGMII in-band */
+	if (phylink_autoneg_inband(mode) &&
+	    !phy_interface_mode_is_8023z(interface))
+		return;
+
+	/* 1000/2500 BaseX should only use the max speed */
+	if (phy_interface_mode_is_8023z(interface))
+		speed = SPEED_1000;
+
+	port = container_of(pcs, struct xpcs_port, pcs);
+	priv = container_of(port, struct xpcs_priv, ports[port->index]);
+
+	mmd = xpcs_qsgmii_port_to_devad(port->index);
+	bmcr = xpcs_read(priv, mmd, MII_BMCR);
+	bmcr &= ~(BMCR_SPEED1000 | BMCR_SPEED100 | BMCR_SPEED10);
+
+	switch (speed) {
+	case SPEED_2500:
+	case SPEED_1000:
+		bmcr |= BMCR_SPEED1000;
+		break;
+	case SPEED_100:
+		bmcr |= BMCR_SPEED100;
+		break;
+	case SPEED_10:
+		bmcr |= BMCR_SPEED10;
+		break;
+	}
+	if (duplex == DUPLEX_FULL)
+		bmcr |= BMCR_FULLDPLX;
+	else
+		bmcr &= ~BMCR_FULLDPLX;
+
+	xpcs_write(priv, mmd, MII_BMCR, bmcr);
+}
+
+/* phylink PCS operations, shared by all ports of the device. */
+static const struct phylink_pcs_ops xpcs_phylink_ops = {
+	.pcs_get_state	= xpcs_get_state,
+	.pcs_config	= xpcs_config,
+	.pcs_an_restart	= xpcs_an_restart,
+	.pcs_link_up	= xpcs_link_up,
+};
+
+/* Hand out the phylink_pcs for @port of the XPCS platform device.
+ * Returns ERR_PTR(-EINVAL) for an out-of-range port.
+ *
+ * NOTE(review): the usage counters are bumped without locking —
+ * presumably callers (the MAC driver probe paths) are serialized;
+ * confirm.
+ */
+struct phylink_pcs *xpcs_port_get(struct platform_device *pdev,
+				  unsigned int port)
+{
+	struct xpcs_priv *priv = platform_get_drvdata(pdev);
+
+	if (port >= DW_PCS_PORTS)
+		return ERR_PTR(-EINVAL);
+
+	priv->port_count++;
+	priv->power_save_count++;
+	return &priv->ports[port].pcs;
+}
+EXPORT_SYMBOL(xpcs_port_get);
+
+
+
+/* Release a reference taken by xpcs_port_get().  Decrements the usage
+ * counters; like the get path, no locking is done here.
+ */
+void xpcs_port_put(struct platform_device *pdev)
+{
+	struct xpcs_priv *priv = platform_get_drvdata(pdev);
+
+	priv->port_count--;
+	priv->power_save_count--;
+}
+EXPORT_SYMBOL(xpcs_port_put);
+
+/* Interrupt handler for in-band AN completion.
+ *
+ * Scans every port, acks any pending C37 AN-complete interrupt by
+ * clearing the status register, derives link state from BMSR and the
+ * in-band SGMII link-status bit, and notifies phylink.
+ *
+ * NOTE(review): BMSR is always read from MDIO_MMD_VEND2 while the
+ * interrupt status is read from the per-port mmd — confirm this is
+ * intended for ports 1-3 in QSGMII mode.
+ */
+static irqreturn_t xpcs_irq(int irq, void *dev_id)
+{
+	struct xpcs_priv *priv = dev_id;
+	irqreturn_t ret = IRQ_NONE;
+	int i;
+
+	for (i = 0; i < DW_PCS_PORTS; i++) {
+		int mmd = xpcs_qsgmii_port_to_devad(i);
+		u16 intrsts = xpcs_read(priv, mmd, DW_VR_MII_AN_INTR_STS);
+		bool up;
+
+		if (!(intrsts & DW_VR_MII_C37_ANCMPLT_INTR))
+			continue;
+
+		/* Ack by clearing the whole status register */
+		xpcs_write(priv, mmd, DW_VR_MII_AN_INTR_STS, 0);
+		up = xpcs_read(priv, MDIO_MMD_VEND2, MII_BMSR) & BMSR_LSTATUS;
+		up |= intrsts & DW_VR_MII_C37_ANSGM_SP_LNKSTS;
+		phylink_pcs_change(&priv->ports[i].pcs, up);
+		ret = IRQ_HANDLED;
+	}
+
+	return ret;
+}
+
+/* Probe: map registers, look up the ethsys syscon, enable clocks,
+ * release the XPCS block reset, soft-reset/configure the PCS and
+ * install the IRQ handler used for in-band AN notifications.
+ *
+ * Fix vs. original: failures after clk_bulk_prepare_enable() (and
+ * after asserting the ethsys reset release bit) returned directly,
+ * leaving the clocks enabled and the block out of reset with no owner.
+ * Unwind both on every later error path, mirroring xpcs_remove().
+ */
+static int xpcs_probe(struct platform_device *pdev)
+{
+	struct xpcs_priv *priv;
+	struct resource *r;
+	int ret, i;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, priv);
+	for (i = 0; i < DW_PCS_PORTS; i++) {
+		priv->ports[i].index = i;
+		priv->ports[i].pcs.ops = &xpcs_phylink_ops;
+	}
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->ioaddr = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(priv->ioaddr))
+		return PTR_ERR(priv->ioaddr);
+
+	/* Two XPCS instances are distinguished by bit 24 of the MMIO base */
+	priv->id = !!(r->start & BIT(24));
+	priv->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+						       "ethsys");
+	if (IS_ERR(priv->ethsys))
+		return PTR_ERR(priv->ethsys);
+
+	priv->clks[XPCS_CLK_REF].id = "ref";
+	priv->clks[XPCS_CLK_EEE].id = "eee";
+	priv->clks[XPCS_CLK_CSR].id = "csr";
+	ret = devm_clk_bulk_get(&pdev->dev, XPCS_NUM_CLKS, priv->clks);
+	if (ret)
+		return ret;
+
+	ret = clk_bulk_prepare_enable(XPCS_NUM_CLKS, priv->clks);
+	if (ret)
+		return ret;
+
+	ret = regmap_set_bits(priv->ethsys, ETHSYS_RST, BIT(5 + priv->id));
+	if (ret)
+		goto err_disable_clks;
+
+	ret = regmap_write(priv->ethsys,
+			   ETHSYS_QSG_CTRL + priv->id * sizeof(u32), 0x601);
+	if (ret)
+		goto err_clear_rst;
+
+	/* set ethtsuclk to 100MHz */
+	ret = clk_set_rate(priv->clks[XPCS_CLK_EEE].clk, 100000000);
+	if (ret)
+		goto err_clear_rst;
+
+	/* Soft reset the PCS */
+	xpcs_write(priv, MDIO_MMD_VEND2, MII_BMCR, BMCR_RESET);
+	ret = xpcs_poll_reset(priv, MDIO_MMD_VEND2);
+	if (ret)
+		goto err_clear_rst;
+
+	/* Enable EEE */
+	xpcs_set(priv, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL0,
+		 DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN);
+
+	/* Start from the power up state */
+	ret = xpcs_serdes_power_up(priv);
+	if (ret)
+		goto err_clear_rst;
+
+	ret = platform_get_irq(pdev, 0);
+	if (ret < 0)
+		goto err_clear_rst;
+
+	ret = devm_request_irq(&pdev->dev, ret, xpcs_irq, 0, KBUILD_MODNAME, priv);
+	if (ret)
+		goto err_clear_rst;
+
+	return 0;
+
+err_clear_rst:
+	regmap_clear_bits(priv->ethsys, ETHSYS_RST, BIT(5 + priv->id));
+err_disable_clks:
+	clk_bulk_disable_unprepare(XPCS_NUM_CLKS, priv->clks);
+	return ret;
+}
+
+/* Remove: disable the bulk clocks and re-assert the ethsys reset bit
+ * for this instance.  The IRQ and memory are devm-managed.
+ */
+static void xpcs_remove(struct platform_device *pdev)
+{
+	struct xpcs_priv *priv = platform_get_drvdata(pdev);
+
+	clk_bulk_disable_unprepare(XPCS_NUM_CLKS, priv->clks);
+	regmap_clear_bits(priv->ethsys, ETHSYS_RST, BIT(5 + priv->id));
+}
+
+/* Device-tree match table and platform driver registration. */
+static const struct of_device_id xpcs_match[] = {
+	{ .compatible = "siflower,sf21-xpcs" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, xpcs_match);
+
+static struct platform_driver xpcs_driver = {
+	.probe	= xpcs_probe,
+	.remove_new	= xpcs_remove,
+	.driver	= {
+		.name		= "sfxpcs",
+		.of_match_table	= xpcs_match,
+	},
+};
+module_platform_driver(xpcs_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Qingfang Deng <[email protected]>");
+MODULE_DESCRIPTION("XPCS driver for SF21A6826/SF21H8898 SoC");

+ 251 - 0
target/linux/siflower/files-6.6/drivers/net/ethernet/siflower/sfxpcs.h

@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define SYNOPSYS_XPCS_ID		0x7996ced0
+#define SYNOPSYS_XPCS_MASK		0xffffffff
+
+/* Vendor regs access */
+#define DW_VENDOR			BIT(15)
+
+/* VR_XS_PCS */
+#define DW_USXGMII_RST			BIT(10)
+#define DW_USXGMII_EN			BIT(9)
+#define DW_VR_XS_PCS_DIG_STS		0x0010
+#define DW_RXFIFO_ERR			GENMASK(6, 5)
+
+/* SR_MII */
+#define DW_USXGMII_FULL			BIT(8)
+#define DW_USXGMII_SS_MASK		(BIT(13) | BIT(6) | BIT(5))
+#define DW_USXGMII_10000		(BIT(13) | BIT(6))
+#define DW_USXGMII_5000			(BIT(13) | BIT(5))
+#define DW_USXGMII_2500			(BIT(5))
+#define DW_USXGMII_1000			(BIT(6))
+#define DW_USXGMII_100			(BIT(13))
+#define DW_USXGMII_10			(0)
+
+/* SR_AN */
+#define DW_SR_AN_ADV1			0x10
+#define DW_SR_AN_ADV2			0x11
+#define DW_SR_AN_ADV3			0x12
+#define DW_SR_AN_LP_ABL1		0x13
+#define DW_SR_AN_LP_ABL2		0x14
+#define DW_SR_AN_LP_ABL3		0x15
+
+/* Clause 73 Defines */
+/* AN_LP_ABL1 */
+#define DW_C73_PAUSE			BIT(10)
+#define DW_C73_ASYM_PAUSE		BIT(11)
+#define DW_C73_AN_ADV_SF		0x1
+/* AN_LP_ABL2 */
+#define DW_C73_1000KX			BIT(5)
+#define DW_C73_10000KX4			BIT(6)
+#define DW_C73_10000KR			BIT(7)
+/* AN_LP_ABL3 */
+#define DW_C73_2500KX			BIT(0)
+#define DW_C73_5000KR			BIT(1)
+
+/* Clause 37 Defines */
+/* VR MII MMD registers offsets */
+#define DW_VR_MII_MMD_CTRL		0x0000
+#define DW_VR_MII_DIG_CTRL1		0x8000
+#define DW_VR_MII_AN_CTRL		0x8001
+#define DW_VR_MII_AN_INTR_STS		0x8002
+#define DW_VR_MII_DBG_CTRL		0x8005
+#define DW_VR_MII_LINK_TIMER_CTRL	0x800a
+#define DW_VR_MII_DIG_STS       0x8010
+#define DW_VR_MII_MP_6G_RXGENCTRL0  0x8058
+#define DW_VR_MII_MP_6G_MPLL_CTRL0  0x8078
+#define DW_VR_MII_MP_6G_MPLL_CTRL1  0x8079
+#define DW_VR_MII_MP_6G_MISC_CTRL1  0x809a
+/* Enable 2.5G Mode */
+#define DW_VR_MII_DIG_CTRL1_2G5_EN	BIT(2)
+/* EEE Mode Control Register */
+#define DW_VR_MII_EEE_MCTRL0		0x8006
+#define DW_VR_MII_EEE_MCTRL1		0x800b
+#define DW_VR_MII_DIG_CTRL2		0x80e1
+
+/* VR_MII_DIG_CTRL1 */
+#define DW_VR_MII_DIG_CTRL1_EN_25G_MODE		BIT(2)
+#define DW_VR_MII_DIG_CTRL1_CL37_TMR_OVR_RIDE	BIT(3)
+#define DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW		BIT(9)
+
+/* VR_MII_DIG_CTRL2 */
+#define DW_VR_MII_DIG_CTRL2_TX_POL_INV		BIT(4)
+#define DW_VR_MII_DIG_CTRL2_RX_POL_INV		BIT(0)
+
+/* VR_MII_AN_CTRL */
+#define DW_VR_MII_AN_CTRL_TX_CONFIG_SHIFT	3
+#define DW_VR_MII_TX_CONFIG_MASK		BIT(3)
+#define DW_VR_MII_TX_CONFIG_PHY_SIDE_SGMII	0x1
+#define DW_VR_MII_TX_CONFIG_MAC_SIDE_SGMII	0x0
+#define DW_VR_MII_AN_CTRL_PCS_MODE_SHIFT	1
+#define DW_VR_MII_PCS_MODE_MASK			GENMASK(2, 1)
+#define DW_VR_MII_PCS_MODE_C37_1000BASEX	0x0
+#define DW_VR_MII_PCS_MODE_C37_SGMII		0x2
+#define DW_VR_MII_PCS_MODE_C37_QSGMII		0x3
+#define DW_VR_MII_AN_INTR_EN			BIT(0)
+
+/* VR_MII_AN_INTR_STS */
+#define DW_VR_MII_AN_STS_C37_ANSGM_FD		BIT(1)
+#define DW_VR_MII_AN_STS_C37_ANSGM_SP_SHIFT	2
+#define DW_VR_MII_AN_STS_C37_ANSGM_SP		GENMASK(3, 2)
+#define DW_VR_MII_C37_ANSGM_SP_10		0x0
+#define DW_VR_MII_C37_ANSGM_SP_100		0x1
+#define DW_VR_MII_C37_ANSGM_SP_1000		0x2
+#define DW_VR_MII_C37_ANSGM_SP_LNKSTS		BIT(4)
+#define DW_VR_MII_C37_ANCMPLT_INTR		BIT(0)
+
+/* VR_MII_LINK_TIMER_CTRL */
+#define DW_VR_MII_LINK_TIMER_2500BASEX		0x2faf
+
+/* VR_MII_DIG_STS */
+#define PSEQ_STATE              GENMASK(4, 2)
+#define PSEQ_STATE_GOOD             4
+#define PSEQ_STATE_DOWN             6
+
+/* VR_MII_MP_6G_MPLL_CTRL0 */
+#define LANE_10BIT_SEL              BIT(1)
+
+/* VR_MII_MP_6G_RXGENCTRL0 */
+#define RX_ALIGN_EN_0               BIT(4)
+
+/* SR MII MMD Control defines */
+#define AN_CL37_EN			BIT(12)	/* Enable Clause 37 auto-nego */
+#define SGMII_SPEED_SS13		BIT(13)	/* SGMII speed along with SS6 */
+#define SGMII_SPEED_SS6			BIT(6)	/* SGMII speed along with SS13 */
+
+/* VR MII EEE Control 0 defines */
+#define DW_VR_MII_EEE_LTX_EN			BIT(0)  /* LPI Tx Enable */
+#define DW_VR_MII_EEE_LRX_EN			BIT(1)  /* LPI Rx Enable */
+#define DW_VR_MII_EEE_TX_QUIET_EN		BIT(2)  /* Tx Quiet Enable */
+#define DW_VR_MII_EEE_RX_QUIET_EN		BIT(3)  /* Rx Quiet Enable */
+#define DW_VR_MII_EEE_TX_EN_CTRL		BIT(4)  /* Tx Control Enable */
+#define DW_VR_MII_EEE_RX_EN_CTRL		BIT(7)  /* Rx Control Enable */
+
+#define DW_VR_MII_EEE_MULT_FACT_100NS_SHIFT	8
+#define DW_VR_MII_EEE_MULT_FACT_100NS		GENMASK(11, 8)
+
+/* VR MII EEE Control 1 defines */
+#define DW_VR_MII_EEE_TRN_LPI		BIT(0)	/* Transparent Mode Enable */
+
+/* Additional MMDs for QSGMII */
+#define DW_QSGMII_MMD1			0x1a
+#define DW_QSGMII_MMD2			0x1b
+#define DW_QSGMII_MMD3			0x1c
+
+/* PMA MMD registers */
+#define XS_PMA_MMD_BaseAddress 0x8020
+#define VR_XS_PMA_RX_LSTS (XS_PMA_MMD_BaseAddress + 0x0)
+#define VR_XS_PMA_RX_LSTS_RegisterResetValue 0x0
+#define VR_XS_PMA_MP_12G_16G_25G_TX_GENCTRL0 (XS_PMA_MMD_BaseAddress + 0x10)
+#define VR_XS_PMA_MP_12G_16G_25G_TX_GENCTRL0_RegisterResetValue 0x1000
+#define VR_XS_PMA_MP_12G_16G_25G_TX_GENCTRL1 (XS_PMA_MMD_BaseAddress + 0x11)
+#define VR_XS_PMA_MP_12G_16G_25G_TX_GENCTRL1_RegisterResetValue 0x1510
+#define VR_XS_PMA_MP_12G_16G_TX_GENCTRL2 (XS_PMA_MMD_BaseAddress + 0x12)
+#define VR_XS_PMA_MP_12G_16G_TX_GENCTRL2_RegisterResetValue 0x300
+#define VR_XS_PMA_MP_12G_16G_25G_TX_BOOST_CTRL (XS_PMA_MMD_BaseAddress + 0x13)
+#define VR_XS_PMA_MP_12G_16G_25G_TX_BOOST_CTRL_RegisterResetValue 0xf
+#define VR_XS_PMA_MP_12G_16G_25G_TX_RATE_CTRL (XS_PMA_MMD_BaseAddress + 0x14)
+#define VR_XS_PMA_MP_12G_16G_25G_TX_RATE_CTRL_RegisterResetValue 0x0
+#define VR_XS_PMA_MP_12G_16G_25G_TX_POWER_STATE_CTRL (XS_PMA_MMD_BaseAddress + 0x15)
+#define VR_XS_PMA_MP_12G_16G_25G_TX_POWER_STATE_CTRL_RegisterResetValue 0x0
+#define VR_XS_PMA_MP_12G_16G_25G_TX_EQ_CTRL0 (XS_PMA_MMD_BaseAddress + 0x16)
+#define VR_XS_PMA_MP_12G_16G_25G_TX_EQ_CTRL0_RegisterResetValue 0x2800
+#define VR_XS_PMA_MP_12G_16G_25G_TX_EQ_CTRL1 (XS_PMA_MMD_BaseAddress + 0x17)
+#define VR_XS_PMA_MP_12G_16G_25G_TX_EQ_CTRL1_RegisterResetValue 0x0
+#define VR_XS_PMA_MP_16G_25G_TX_GENCTRL3 (XS_PMA_MMD_BaseAddress + 0x1c)
+#define VR_XS_PMA_MP_16G_25G_TX_GENCTRL3_RegisterResetValue 0x0
+#define VR_XS_PMA_MP_16G_25G_TX_GENCTRL4 (XS_PMA_MMD_BaseAddress + 0x1d)
+#define VR_XS_PMA_MP_16G_25G_TX_GENCTRL4_RegisterResetValue 0x0
+#define VR_XS_PMA_MP_16G_25G_TX_MISC_CTRL0 (XS_PMA_MMD_BaseAddress + 0x1e)
+#define VR_XS_PMA_MP_16G_25G_TX_MISC_CTRL0_RegisterResetValue 0x0
+#define VR_XS_PMA_MP_12G_16G_25G_TX_STS (XS_PMA_MMD_BaseAddress + 0x20)
+#define VR_XS_PMA_MP_12G_16G_25G_TX_STS_RegisterResetValue 0x0
+#define VR_XS_PMA_MP_12G_16G_25G_RX_GENCTRL0 (XS_PMA_MMD_BaseAddress + 0x30)
+#define VR_XS_PMA_MP_12G_16G_25G_RX_GENCTRL0_RegisterResetValue 0x101
+#define VR_XS_PMA_MP_12G_16G_25G_RX_GENCTRL1 (XS_PMA_MMD_BaseAddress + 0x31)
+#define VR_XS_PMA_MP_12G_16G_25G_RX_GENCTRL1_RegisterResetValue 0x1100
+#define VR_XS_PMA_MP_12G_16G_RX_GENCTRL2 (XS_PMA_MMD_BaseAddress + 0x32)
+#define VR_XS_PMA_MP_12G_16G_RX_GENCTRL2_RegisterResetValue 0x300
+#define VR_XS_PMA_MP_12G_16G_RX_GENCTRL3 (XS_PMA_MMD_BaseAddress + 0x33)
+#define VR_XS_PMA_MP_12G_16G_RX_GENCTRL3_RegisterResetValue 0x1
+#define VR_XS_PMA_MP_12G_16G_25G_RX_RATE_CTRL (XS_PMA_MMD_BaseAddress + 0x34)
+#define VR_XS_PMA_MP_12G_16G_25G_RX_RATE_CTRL_RegisterResetValue 0x0
+#define VR_XS_PMA_MP_12G_16G_25G_RX_POWER_STATE_CTRL (XS_PMA_MMD_BaseAddress + 0x35)
+#define VR_XS_PMA_MP_12G_16G_25G_RX_POWER_STATE_CTRL_RegisterResetValue 0x0
+#define VR_XS_PMA_MP_12G_16G_25G_RX_CDR_CTRL (XS_PMA_MMD_BaseAddress + 0x36)
+#define VR_XS_PMA_MP_12G_16G_25G_RX_CDR_CTRL_RegisterResetValue 0x0
+#define VR_XS_PMA_MP_12G_16G_25G_RX_ATTN_CTRL (XS_PMA_MMD_BaseAddress + 0x37)
+#define VR_XS_PMA_MP_12G_16G_25G_RX_ATTN_CTRL_RegisterResetValue 0x0
+#define VR_XS_PMA_MP_16G_25G_RX_EQ_CTRL0 (XS_PMA_MMD_BaseAddress + 0x38)
+#define VR_XS_PMA_MP_16G_25G_RX_EQ_CTRL0_RegisterResetValue 0x5550
+#define VR_XS_PMA_MP_12G_16G_25G_RX_EQ_CTRL4 (XS_PMA_MMD_BaseAddress + 0x3c)
+#define VR_XS_PMA_MP_12G_16G_25G_RX_EQ_CTRL4_RegisterResetValue 0x11
+#define VR_XS_PMA_MP_16G_25G_RX_EQ_CTRL5 (XS_PMA_MMD_BaseAddress + 0x3d)
+#define VR_XS_PMA_MP_16G_25G_RX_EQ_CTRL5_RegisterResetValue 0x30
+#define VR_XS_PMA_MP_12G_16G_25G_DFE_TAP_CTRL0 (XS_PMA_MMD_BaseAddress + 0x3e)
+#define VR_XS_PMA_MP_12G_16G_25G_DFE_TAP_CTRL0_RegisterResetValue 0x0
+#define VR_XS_PMA_MP_12G_16G_25G_RX_STS (XS_PMA_MMD_BaseAddress + 0x40)
+#define VR_XS_PMA_MP_12G_16G_25G_RX_STS_RegisterResetValue 0x0
+#define VR_XS_PMA_MP_16G_25G_RX_PPM_STS0 (XS_PMA_MMD_BaseAddress + 0x41)
+#define VR_XS_PMA_MP_16G_25G_RX_PPM_STS0_RegisterResetValue 0x0
+#define VR_XS_PMA_MP_16G_RX_CDR_CTRL1 (XS_PMA_MMD_BaseAddress + 0x44)
+#define VR_XS_PMA_MP_16G_RX_CDR_CTRL1_RegisterResetValue 0x111
+#define VR_XS_PMA_MP_16G_25G_RX_PPM_CTRL0 (XS_PMA_MMD_BaseAddress + 0x45)
+#define VR_XS_PMA_MP_16G_25G_RX_PPM_CTRL0_RegisterResetValue 0x12
+#define VR_XS_PMA_MP_16G_25G_RX_GENCTRL4 (XS_PMA_MMD_BaseAddress + 0x48)
+#define VR_XS_PMA_MP_16G_25G_RX_GENCTRL4_RegisterResetValue 0x0
+#define VR_XS_PMA_MP_16G_25G_RX_MISC_CTRL0 (XS_PMA_MMD_BaseAddress + 0x49)
+#define VR_XS_PMA_MP_16G_25G_RX_MISC_CTRL0_RegisterResetValue 0x12
+#define VR_XS_PMA_MP_16G_25G_RX_IQ_CTRL0 (XS_PMA_MMD_BaseAddress + 0x4b)
+#define VR_XS_PMA_MP_16G_25G_RX_IQ_CTRL0_RegisterResetValue 0x0
+#define VR_XS_PMA_MP_12G_16G_25G_MPLL_CMN_CTRL (XS_PMA_MMD_BaseAddress + 0x50)
+#define VR_XS_PMA_MP_12G_16G_25G_MPLL_CMN_CTRL_RegisterResetValue 0x1
+#define VR_XS_PMA_MP_12G_16G_MPLLA_CTRL0 (XS_PMA_MMD_BaseAddress + 0x51)
+#define VR_XS_PMA_MP_12G_16G_MPLLA_CTRL0_RegisterResetValue 0x21
+#define VR_XS_PMA_MP_16G_MPLLA_CTRL1 (XS_PMA_MMD_BaseAddress + 0x52)
+#define VR_XS_PMA_MP_16G_MPLLA_CTRL1_RegisterResetValue 0x0
+#define VR_XS_PMA_MP_12G_16G_MPLLA_CTRL2 (XS_PMA_MMD_BaseAddress + 0x53)
+#define VR_XS_PMA_MP_12G_16G_MPLLA_CTRL2_RegisterResetValue 0x600
+#define VR_XS_PMA_MP_12G_16G_MPLLB_CTRL0 (XS_PMA_MMD_BaseAddress + 0x54)
+#define VR_XS_PMA_MP_12G_16G_MPLLB_CTRL0_RegisterResetValue 0x8000
+#define VR_XS_PMA_MP_16G_MPLLB_CTRL1 (XS_PMA_MMD_BaseAddress + 0x55)
+#define VR_XS_PMA_MP_16G_MPLLB_CTRL1_RegisterResetValue 0x0
+#define VR_XS_PMA_MP_12G_16G_MPLLB_CTRL2 (XS_PMA_MMD_BaseAddress + 0x56)
+#define VR_XS_PMA_MP_12G_16G_MPLLB_CTRL2_RegisterResetValue 0x0
+#define VR_XS_PMA_MP_16G_MPLLA_CTRL3 (XS_PMA_MMD_BaseAddress + 0x57)
+#define VR_XS_PMA_MP_16G_MPLLA_CTRL3_RegisterResetValue 0xa016
+#define VR_XS_PMA_MP_16G_MPLLB_CTRL3 (XS_PMA_MMD_BaseAddress + 0x58)
+#define VR_XS_PMA_MP_16G_MPLLB_CTRL3_RegisterResetValue 0x0
+#define VR_XS_PMA_MP_16G_MPLLA_CTRL4 (XS_PMA_MMD_BaseAddress + 0x59)
+#define VR_XS_PMA_MP_16G_MPLLA_CTRL4_RegisterResetValue 0x0
+#define VR_XS_PMA_MP_16G_MPLLA_CTRL5 (XS_PMA_MMD_BaseAddress + 0x5a)
+#define VR_XS_PMA_MP_16G_MPLLA_CTRL5_RegisterResetValue 0x0
+#define VR_XS_PMA_MP_16G_MPLLB_CTRL4 (XS_PMA_MMD_BaseAddress + 0x5b)
+#define VR_XS_PMA_MP_16G_MPLLB_CTRL4_RegisterResetValue 0x0
+#define VR_XS_PMA_MP_16G_MPLLB_CTRL5 (XS_PMA_MMD_BaseAddress + 0x5c)
+#define VR_XS_PMA_MP_16G_MPLLB_CTRL5_RegisterResetValue 0x0
+#define VR_XS_PMA_MP_12G_16G_25G_MISC_CTRL0 (XS_PMA_MMD_BaseAddress + 0x70)
+#define VR_XS_PMA_MP_12G_16G_25G_MISC_CTRL0_RegisterResetValue 0x5100
+#define VR_XS_PMA_MP_12G_16G_25G_REF_CLK_CTRL (XS_PMA_MMD_BaseAddress + 0x71)
+#define VR_XS_PMA_MP_12G_16G_25G_REF_CLK_CTRL_RegisterResetValue 0x71
+#define VR_XS_PMA_MP_12G_16G_25G_REF_CLK_CTRL_REF_RPT_CLK_EN BIT(8)
+#define VR_XS_PMA_MP_12G_16G_25G_VCO_CAL_LD0 (XS_PMA_MMD_BaseAddress + 0x72)
+#define VR_XS_PMA_MP_12G_16G_25G_VCO_CAL_LD0_RegisterResetValue 0x549
+#define VR_XS_PMA_MP_16G_25G_VCO_CAL_REF0 (XS_PMA_MMD_BaseAddress + 0x76)
+#define VR_XS_PMA_MP_16G_25G_VCO_CAL_REF0_RegisterResetValue 0x29
+#define VR_XS_PMA_MP_12G_16G_25G_MISC_STS (XS_PMA_MMD_BaseAddress + 0x78)
+#define VR_XS_PMA_MP_12G_16G_25G_MISC_STS_RegisterResetValue 0x200
+#define VR_XS_PMA_MP_12G_16G_25G_MISC_CTRL1 (XS_PMA_MMD_BaseAddress + 0x79)
+#define VR_XS_PMA_MP_12G_16G_25G_MISC_CTRL1_RegisterResetValue 0xffff
+#define VR_XS_PMA_MP_12G_16G_25G_EEE_CTRL (XS_PMA_MMD_BaseAddress + 0x7a)
+#define VR_XS_PMA_MP_12G_16G_25G_EEE_CTRL_RegisterResetValue 0x4f
+#define VR_XS_PMA_MP_12G_16G_25G_SRAM (XS_PMA_MMD_BaseAddress + 0x7b)
+#define VR_XS_PMA_MP_12G_16G_25G_SRAM_INIT_DN BIT(0)
+#define VR_XS_PMA_MP_12G_16G_25G_SRAM_EXT_LD_DN BIT(1)
+#define VR_XS_PMA_MP_16G_25G_MISC_CTRL2 (XS_PMA_MMD_BaseAddress + 0x7c)
+#define VR_XS_PMA_MP_16G_25G_MISC_CTRL2_RegisterResetValue 0x1
+#define VR_XS_PMA_SNPS_CR_CTRL (XS_PMA_MMD_BaseAddress + 0x80)
+#define VR_XS_PMA_SNPS_CR_CTRL_START_BUSY BIT(0)
+#define VR_XS_PMA_SNPS_CR_CTRL_WR_RDN	BIT(1)
+#define VR_XS_PMA_SNPS_CR_ADDR (XS_PMA_MMD_BaseAddress + 0x81)
+#define VR_XS_PMA_SNPS_CR_DATA (XS_PMA_MMD_BaseAddress + 0x82)

+ 193 - 0
target/linux/siflower/files-6.6/drivers/net/ethernet/stmicro/stmmac/dwmac-sf19a2890.c

@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Siflower SF19A2890 GMAC glue layer
+ * SF19A2890 GMAC is a DWMAC 3.73a with a custom HNAT engine
+ * between its MAC and DMA engine.
+ *
+ * Copyright (C) 2024 Chuanhong Guo <[email protected]>
+ */
+
+#include <linux/of_net.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/nvmem-consumer.h>
+
+#include "stmmac.h"
+#include "stmmac_platform.h"
+
+struct sf19a2890_gmac_priv {
+	struct device *dev;
+	void __iomem *gmac_cfg;
+	struct clk *gmac_byp_ref_clk;
+};
+
+#define REG_MISC		0x0
+#define  MISC_PHY_INTF_SEL	GENMASK(2, 0)
+#define   PHY_IF_GMII_MII	0
+#define   PHY_IF_RGMII		1
+#define   PHY_IF_RMII		4
+#define  MISC_PTP_AUX_TS_TRIG	BIT(3)
+#define  MISC_SBD_FLOWCTRL	BIT(4)
+#define  CLK_RMII_OEN		BIT(5)
+
+#define REG_CLK_TX_DELAY	0x4
+#define REG_CLK_RX_PHY_DELAY	0x8
+#define REG_CLK_RX_PHY_DELAY_EN	0xc
+
+/* Siflower stores RGMII delay as a 4-byte hex string in MTD. */
+#define SFGMAC_DELAY_STR_LEN	4
+/* Read the board's RGMII delay from the "rgmii-delay" nvmem cell and
+ * program the TX/RX clock-delay registers.
+ *
+ * The cell holds a 4-character hex string; high byte = TX delay, low
+ * byte = RX delay.  Returns 0 on success, -EINVAL on a short cell, or
+ * the nvmem/parse error code.
+ *
+ * Fix vs. original: the nvmem payload is not guaranteed to be
+ * NUL-terminated, but it was passed straight to kstrtou16(), which
+ * scans until NUL and could read past the cell buffer.  Copy the
+ * fixed-length digits into a terminated stack buffer first.
+ */
+static int sfgmac_set_delay_from_nvmem(struct sf19a2890_gmac_priv *priv)
+{
+	struct device_node *np = priv->dev->of_node;
+	char buf[SFGMAC_DELAY_STR_LEN + 1];
+	int ret = 0;
+	struct nvmem_cell *cell;
+	const void *data;
+	size_t retlen;
+	u16 gmac_delay;
+	u8 delay_tx, delay_rx;
+
+	cell = of_nvmem_cell_get(np, "rgmii-delay");
+	if (IS_ERR(cell))
+		return PTR_ERR(cell);
+
+	data = nvmem_cell_read(cell, &retlen);
+	nvmem_cell_put(cell);
+
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+
+	if (retlen < SFGMAC_DELAY_STR_LEN) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	/* Bound and NUL-terminate before parsing */
+	memcpy(buf, data, SFGMAC_DELAY_STR_LEN);
+	buf[SFGMAC_DELAY_STR_LEN] = '\0';
+
+	ret = kstrtou16(buf, 16, &gmac_delay);
+	if (ret == 0) {
+		delay_tx = (gmac_delay >> 8) & 0xff;	/* high byte: TX */
+		delay_rx = gmac_delay & 0xff;		/* low byte: RX */
+		writel(delay_tx, priv->gmac_cfg + REG_CLK_TX_DELAY);
+		writel(delay_rx, priv->gmac_cfg + REG_CLK_RX_PHY_DELAY);
+		if (delay_rx)
+			writel(1, priv->gmac_cfg + REG_CLK_RX_PHY_DELAY_EN);
+	}
+
+exit:
+	kfree(data);
+
+	return ret;
+}
+
+/* Program the MISC register's PHY interface select field from the
+ * device tree "phy-mode" property.  Returns 0 on success, the
+ * of_get_phy_mode() error if the property is missing/invalid, or
+ * -EOPNOTSUPP for interface modes this glue cannot select.
+ *
+ * Fix vs. original: the return value of of_get_phy_mode() was ignored,
+ * so a missing/invalid "phy-mode" left phy_iface uninitialized before
+ * the switch below.
+ */
+static int sfgmac_setup_phy_interface(struct sf19a2890_gmac_priv *priv)
+{
+	phy_interface_t phy_iface;
+	int mode;
+	int ret;
+	u32 reg;
+
+	ret = of_get_phy_mode(priv->dev->of_node, &phy_iface);
+	if (ret)
+		return ret;
+
+	switch (phy_iface) {
+	case PHY_INTERFACE_MODE_MII:
+	case PHY_INTERFACE_MODE_GMII:
+		mode = PHY_IF_GMII_MII;
+		break;
+	case PHY_INTERFACE_MODE_RMII:
+		mode = PHY_IF_RMII;
+		break;
+	case PHY_INTERFACE_MODE_RGMII:
+	case PHY_INTERFACE_MODE_RGMII_ID:
+	case PHY_INTERFACE_MODE_RGMII_RXID:
+	case PHY_INTERFACE_MODE_RGMII_TXID:
+		mode = PHY_IF_RGMII;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	/* Read-modify-write only the interface select field */
+	reg = readl(priv->gmac_cfg + REG_MISC);
+	reg &= ~MISC_PHY_INTF_SEL;
+	reg |= FIELD_PREP(MISC_PHY_INTF_SEL, mode);
+	writel(reg, priv->gmac_cfg + REG_MISC);
+	return 0;
+}
+
+/* Probe the SF19A2890 GMAC glue: enable the bypass reference clock,
+ * map the glue config registers, apply the optional RGMII delay from
+ * nvmem, select the PHY interface, then hand off to the generic
+ * stmmac platform probe.
+ */
+static int sf19a2890_gmac_probe(struct platform_device *pdev)
+{
+	struct plat_stmmacenet_data *plat_dat;
+	struct sf19a2890_gmac_priv *priv;
+	struct stmmac_resources stmmac_res;
+	int ret;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->dev = &pdev->dev;
+
+	priv->gmac_byp_ref_clk = devm_clk_get_enabled(&pdev->dev, "gmac_byp_ref");
+	if (IS_ERR(priv->gmac_byp_ref_clk))
+		return PTR_ERR(priv->gmac_byp_ref_clk);
+
+	priv->gmac_cfg = devm_platform_ioremap_resource(pdev, 1);
+	if (IS_ERR(priv->gmac_cfg)) {
+		dev_err(&pdev->dev, "failed to map regs for gmac config.\n");
+		return PTR_ERR(priv->gmac_cfg);
+	}
+
+	/* The RGMII delay is optional: only defer on -EPROBE_DEFER;
+	 * any other failure is deliberately treated as best-effort. */
+	ret = sfgmac_set_delay_from_nvmem(priv);
+	if (ret == -EPROBE_DEFER)
+		return -EPROBE_DEFER;
+
+	ret = sfgmac_setup_phy_interface(priv);
+	if (ret)
+		return ret;
+
+	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+	if (ret)
+		return ret;
+
+	plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+	if (IS_ERR(plat_dat)) {
+		dev_err(&pdev->dev, "dt configuration failed\n");
+		return PTR_ERR(plat_dat);
+	}
+
+	plat_dat->bsp_priv = priv;
+	/* This DWMAC has PCSSEL set, but it's not SGMII capable, and doesn't
+	 * return anything in PCS registers under RGMII mode.
+	 * Set this flag to bypass reading pcs regs stmmac_ethtool_get_link_ksettings.
+	 * No idea if it's correct or not.
+	 */
+	plat_dat->flags |= STMMAC_FLAG_HAS_INTEGRATED_PCS;
+
+	ret = stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
+	if (ret)
+		goto err_remove_config_dt;
+
+	return 0;
+
+err_remove_config_dt:
+	if (pdev->dev.of_node)
+		stmmac_remove_config_dt(pdev, plat_dat);
+
+	return ret;
+}
+
+/* Device-tree match table and platform driver registration; removal
+ * is handled by the generic stmmac platform helper.
+ */
+static const struct of_device_id dwmac_sf19a2890_match[] = {
+	{ .compatible = "siflower,sf19a2890-gmac"},
+	{ }
+};
+MODULE_DEVICE_TABLE(of, dwmac_sf19a2890_match);
+
+static struct platform_driver sf19a2890_gmac_driver = {
+	.probe  = sf19a2890_gmac_probe,
+	.remove_new = stmmac_pltfr_remove,
+	.driver = {
+		.name           = "sf19a2890-gmac",
+		.pm		= &stmmac_pltfr_pm_ops,
+		.of_match_table = dwmac_sf19a2890_match,
+	},
+};
+module_platform_driver(sf19a2890_gmac_driver);

+ 877 - 0
target/linux/siflower/files-6.6/drivers/net/phy/siflower.c

@@ -0,0 +1,877 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * drivers/net/phy/siflower.c
+ *
+ * Driver for Siflower PHYs
+ *
+ * Copyright (c) 2023 Siflower, Inc.
+ *
+ * Support : Siflower Phys:
+ *		Giga phys: p1211f, p1240
+ */
+#include <linux/bitops.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+
+/* for wol feature */
+#include <linux/netdevice.h>
+
+/* WOL Enable Flag:
+ * disable by default to enable system WOL feature of phy
+ * please define this phy to 1 otherwise, define it to 0.
+ */
+#define SIFLOWER_PHY_WOL_FEATURE_ENABLE                         0
+#define SIFLOWER_PHY_WOL_PASSWD_ENABLE                          0
+
+#define SIFLOWER_PHY_MODE_SET_ENABLE                            0
+#define SIFLOWER_PHY_RXC_DELAY_SET_ENABLE                       0
+#define SIFLOWER_PHY_RXC_DELAY_VAL                              0x40
+#define SIFLOWER_PHY_TXC_DELAY_VAL                              0x40
+#define SIFLOWER_PHY_CLK_OUT_125M_ENABLE                        1
+
+#define SFPHY_GLB_DISABLE                                      0
+#define SFPHY_GLB_ENABLE                                       1
+#define SFPHY_LINK_DOWN                                        0
+#define SFPHY_LINK_UP                                          1
+/* Mask used for ID comparisons */
+#define SIFLOWER_PHY_ID_MASK                                    0xffffffff
+
+/* SF1211F PHY IDs */
+#define SF1211F_PHY_ID                                          0xADB40412
+/* SF1240 PHY IDs */
+#define SF1240_PHY_ID                                          0xADB40411
+
+/* SF1211F PHY LED */
+#define SF1211F_EXTREG_LED0                                     0x1E33   // 0
+#define SF1211F_EXTREG_LED1                                     0x1E34   // 00101111
+#define SF1211F_EXTREG_LED2                                     0x1E35   // 0x40
+/* SF1240 PHY BX LED */
+#define SF1240_EXTREG_LEDCTRL                                  0x0621
+#define SF1240_EXTREG_LED0_1                                   0x0700
+#define SF1240_EXTREG_LED0_2                                   0x0701
+#define SF1240_EXTREG_LED1_1                                   0x0702
+#define SF1240_EXTREG_LED1_2                                   0x0703
+#define SF1240_EXTREG_LED2_1                                   0x0706
+#define SF1240_EXTREG_LED2_2                                   0x0707
+#define SF1240_EXTREG_LED3_1                                   0x0708
+#define SF1240_EXTREG_LED3_2                                   0x0709
+#define SF1240_EXTREG_LED4_1                                   0x070C
+#define SF1240_EXTREG_LED4_2                                   0x070D
+#define SF1240_EXTREG_LED5_1                                   0x070E
+#define SF1240_EXTREG_LED5_2                                   0x070F
+#define SF1240_EXTREG_LED6_1                                   0x0712
+#define SF1240_EXTREG_LED6_2                                   0x0713
+#define SF1240_EXTREG_LED7_1                                   0x0714
+#define SF1240_EXTREG_LED7_2                                   0x0715
+
+/* PHY MODE OPSREG*/
+#define SF1211F_EXTREG_GET_PORT_PHY_MODE                        0x062B
+#define SF1211F_EXTREG_PHY_MODE_MASK                            0x0070
+/* Magic Packet MAC address registers */
+#define SIFLOWER_MAGIC_PACKET_MAC_ADDR                          0x0229
+/* Magic Packet MAC Passwd registers */
+#define SIFLOWER_MAGIC_PACKET_PASSWD_ADDR                       0x022F
+#define SIFLOWER_PHY_WOL_PULSE_MODE_SET                         0x062a
+
+/* Magic Packet MAC Passwd Val*/
+#define SIFLOWER_MAGIC_PACKET_PASSWD1                            0x11
+#define SIFLOWER_MAGIC_PACKET_PASSWD2                            0x22
+#define SIFLOWER_MAGIC_PACKET_PASSWD3                            0x33
+#define SIFLOWER_MAGIC_PACKET_PASSWD4                            0x44
+#define SIFLOWER_MAGIC_PACKET_PASSWD5                            0x55
+#define SIFLOWER_MAGIC_PACKET_PASSWD6                            0x66
+
+/* Siflower wol config register */
+#define SIFLOWER_WOL_CFG_REG0                                   0x0220
+#define SIFLOWER_WOL_CFG_REG1                                   0x0221
+#define SIFLOWER_WOL_CFG_REG2                                   0x0222
+#define SIFLOWER_WOL_STA_REG                                    0x0223
+/* 8 PHY MODE */
+#define SF1211F_EXTREG_PHY_MODE_UTP_TO_RGMII                    0x00
+#define SF1211F_EXTREG_PHY_MODE_FIBER_TO_RGMII                  0x10
+#define SF1211F_EXTREG_PHY_MODE_UTP_OR_FIBER_TO_RGMII           0x20
+#define SF1211F_EXTREG_PHY_MODE_UTP_TO_SGMII                    0x30
+#define SF1211F_EXTREG_PHY_MODE_SGMII_PHY_TO_RGMII_MAC          0x40
+#define SF1211F_EXTREG_PHY_MODE_SGMII_MAC_TO_RGMII_PHY          0x50
+#define SF1211F_EXTREG_PHY_MODE_UTP_TO_FIBER_AUTO               0x60
+#define SF1211F_EXTREG_PHY_MODE_UTP_TO_FIBER_FORCE              0x70
+
+/* PHY EXTRW OPSREG */
+#define SF1211F_EXTREG_ADDR                                     0x0E
+#define SF1211F_EXTREG_DATA                                     0x0D
+/* PHY PAGE SPACE */
+#define SFPHY_REG_UTP_SPACE                                    0
+#define SFPHY_REG_FIBER_SPACE                                  1
+
+/* PHY PAGE SELECT */
+#define SF1211F_EXTREG_PHY_MODE_PAGE_SELECT                     0x0016
+#define SFPHY_REG_UTP_SPACE_SETADDR                            0x0000
+#define SFPHY_REG_FIBER_SPACE_SETADDR                          0x0100
+//utp
+#define UTP_REG_PAUSE_CAP                                      0x0400    /* Can pause                   */
+#define UTP_REG_PAUSE_ASYM                                     0x0800    /* Can pause asymetrically     */
+//fiber
+#define FIBER_REG_PAUSE_CAP                                    0x0080    /* Can pause                   */
+#define FIBER_REG_PAUSE_ASYM                                   0x0100    /* Can pause asymetrically     */
+
+/* specific status register */
+#define SIFLOWER_SPEC_REG                                       0x0011
+
+/* Interrupt Enable Register */
+#define SIFLOWER_INTR_REG                                       0x0017
+/* WOL TYPE */
+#define SIFLOWER_WOL_TYPE                                       BIT(0)
+/* WOL Pulse Width */
+#define SIFLOWER_WOL_WIDTH1                                     BIT(1)
+#define SIFLOWER_WOL_WIDTH2                                     BIT(2)
+/* WOL dest addr check enable */
+#define SIFLOWER_WOL_SECURE_CHECK                               BIT(5)
+/* WOL crc check enable */
+#define SIFLOWER_WOL_CRC_CHECK                                  BIT(4)
+/* WOL dest addr check enable */
+#define SIFLOWER_WOL_DESTADDR_CHECK                             BIT(5)
+/* WOL Event Interrupt Enable */
+#define SIFLOWER_WOL_INTR_EN                                    BIT(2)
+/* WOL Enable */
+#define SIFLOWER_WOL_EN                                         BIT(7)
+
+#define SIFLOWER_WOL_RESTARTANEG                                BIT(9)
+/* GET PHY MODE */
+#define SFPHY_MODE_CURR                                        sfphy_get_port_type(phydev)
+
+/* Media type the PHY is currently wired for (derived from the mode
+ * register, see sfphy_get_port_type()).
+ */
+enum siflower_port_type_e
+{
+	SFPHY_PORT_TYPE_UTP,
+	SFPHY_PORT_TYPE_FIBER,
+	SFPHY_PORT_TYPE_COMBO,
+	SFPHY_PORT_TYPE_EXT
+};
+/* Shape of the WOL interrupt signal on the INT pin. */
+enum siflower_wol_type_e
+{
+	SFPHY_WOL_TYPE_LEVEL,
+	SFPHY_WOL_TYPE_PULSE,
+	SFPHY_WOL_TYPE_EXT
+};
+
+/* Pulse width used when the WOL type is pulse. */
+enum siflower_wol_width_e
+{
+	SFPHY_WOL_WIDTH_84MS,
+	SFPHY_WOL_WIDTH_168MS,
+	SFPHY_WOL_WIDTH_336MS,
+	SFPHY_WOL_WIDTH_672MS,
+	SFPHY_WOL_WIDTH_EXT
+};
+
+/* Aggregated WOL settings consumed by siflower_wol_en_cfg(). */
+typedef struct siflower_wol_cfg_s
+{
+	int wolen;
+	int type;
+	int width;
+	int secure;
+	int checkcrc;
+	int checkdst;
+}siflower_wol_cfg_t;
+
+/* Read an extended (indirect) PHY register.
+ *
+ * Under the MDIO bus lock: save the current EXTREG address and page
+ * selection, force the UTP page, latch @regnum into the address
+ * register, read the data register, then restore the saved page and
+ * address so concurrent users see unchanged state.
+ *
+ * Returns the register value, or a negative errno on MDIO failure.
+ */
+static int sf1211f_phy_ext_read(struct phy_device *phydev, u32 regnum)
+{
+	int ret, val, oldpage = 0, oldval = 0;
+
+	phy_lock_mdio_bus(phydev);
+
+	ret = __phy_read(phydev, SF1211F_EXTREG_ADDR);
+	if (ret < 0)
+		goto err_handle;
+	oldval = ret;
+
+	/* Force change to utp page */
+	ret = __phy_read(phydev, SF1211F_EXTREG_PHY_MODE_PAGE_SELECT);//get old page
+	if (ret < 0)
+		goto err_handle;
+	oldpage = ret;
+
+	ret = __phy_write(phydev, SF1211F_EXTREG_PHY_MODE_PAGE_SELECT, SFPHY_REG_UTP_SPACE_SETADDR);
+	if (ret < 0)
+		goto err_handle;
+
+	/* Default utp ext rw */
+	ret = __phy_write(phydev, SF1211F_EXTREG_ADDR, regnum);
+	if (ret < 0)
+		goto err_handle;
+
+	ret = __phy_read(phydev, SF1211F_EXTREG_DATA);
+	if (ret < 0)
+		goto err_handle;
+	val = ret;
+
+	/* Recover to old page */
+	ret = __phy_write(phydev, SF1211F_EXTREG_PHY_MODE_PAGE_SELECT, oldpage);
+	if (ret < 0)
+		goto err_handle;
+
+	ret = __phy_write(phydev, SF1211F_EXTREG_ADDR, oldval);
+	if (ret < 0)
+		goto err_handle;
+	ret = val;
+
+err_handle:
+	phy_unlock_mdio_bus(phydev);
+	return ret;
+}
+
+/* Write @val to an extended (indirect) PHY register.
+ *
+ * Mirror image of sf1211f_phy_ext_read(): save address/page, force the
+ * UTP page, latch @regnum, write the data register, restore state.
+ *
+ * Returns 0 (the last successful __phy_write result) or a negative errno.
+ */
+static int sf1211f_phy_ext_write(struct phy_device *phydev, u32 regnum, u16 val)
+{
+	int ret, oldpage = 0, oldval = 0;
+
+	phy_lock_mdio_bus(phydev);
+
+	ret = __phy_read(phydev, SF1211F_EXTREG_ADDR);
+	if (ret < 0)
+		goto err_handle;
+	oldval = ret;
+
+	/* Force change to utp page */
+	ret = __phy_read(phydev, SF1211F_EXTREG_PHY_MODE_PAGE_SELECT); //get old page
+	if (ret < 0)
+		goto err_handle;
+	oldpage = ret;
+
+	ret = __phy_write(phydev, SF1211F_EXTREG_PHY_MODE_PAGE_SELECT, SFPHY_REG_UTP_SPACE_SETADDR);
+	if (ret < 0)
+		goto err_handle;
+
+	/* Default utp ext rw */
+	ret = __phy_write(phydev, SF1211F_EXTREG_ADDR, regnum);
+	if (ret < 0)
+		goto err_handle;
+
+	ret = __phy_write(phydev, SF1211F_EXTREG_DATA, val);
+	if (ret < 0)
+		goto err_handle;
+
+	/* Recover to old page */
+	ret = __phy_write(phydev, SF1211F_EXTREG_PHY_MODE_PAGE_SELECT, oldpage);
+	if (ret < 0)
+		goto err_handle;
+
+	ret = __phy_write(phydev, SF1211F_EXTREG_ADDR, oldval);
+	if (ret < 0)
+		goto err_handle;
+
+err_handle:
+	phy_unlock_mdio_bus(phydev);
+	return ret;
+
+}
+
+/* Switch the PHY register window to the UTP or fiber page. */
+static int siflower_phy_select_reg_page(struct phy_device *phydev, int space)
+{
+	u16 page_val = (space == SFPHY_REG_UTP_SPACE) ?
+			SFPHY_REG_UTP_SPACE_SETADDR : SFPHY_REG_FIBER_SPACE_SETADDR;
+
+	return phy_write(phydev, SF1211F_EXTREG_PHY_MODE_PAGE_SELECT, page_val);
+}
+
+/* Return the raw page-select register value (or a negative errno). */
+static int siflower_phy_get_reg_page(struct phy_device *phydev)
+{
+	return phy_read(phydev, SF1211F_EXTREG_PHY_MODE_PAGE_SELECT);
+}
+
+/* Chip-agnostic wrappers: currently both PHY models use the SF1211F
+ * indirect access sequence.
+ */
+static int siflower_phy_ext_read(struct phy_device *phydev, u32 regnum)
+{
+	return sf1211f_phy_ext_read(phydev, regnum);
+}
+
+
+static int siflower_phy_ext_write(struct phy_device *phydev, u32 regnum, u16 val)
+{
+	return sf1211f_phy_ext_write(phydev, regnum, val);
+}
+
+/* Read standard register @regnum from page @page (0 = UTP, 1 = fiber),
+ * restoring the previously selected page and EXTREG address afterwards.
+ * Returns the register value or a negative errno.
+ */
+static int sfphy_page_read(struct phy_device *phydev, int page, u32 regnum)
+{
+	int ret, val, oldpage = 0, oldval = 0;
+
+	phy_lock_mdio_bus(phydev);
+
+	ret = __phy_read(phydev, SF1211F_EXTREG_ADDR);
+	if (ret < 0)
+		goto err_handle;
+	oldval = ret;
+
+	ret = __phy_read(phydev, SF1211F_EXTREG_PHY_MODE_PAGE_SELECT);
+	if (ret < 0)
+		goto err_handle;
+	oldpage = ret;
+
+	//Select page
+	ret = __phy_write(phydev, SF1211F_EXTREG_PHY_MODE_PAGE_SELECT, (page << 8));
+	if (ret < 0)
+		goto err_handle;
+
+	ret = __phy_read(phydev, regnum);
+	if (ret < 0)
+		goto err_handle;
+	val = ret;
+
+	/* Recover to old page */
+	ret = __phy_write(phydev, SF1211F_EXTREG_PHY_MODE_PAGE_SELECT, oldpage);
+	if (ret < 0)
+		goto err_handle;
+
+	ret = __phy_write(phydev, SF1211F_EXTREG_ADDR, oldval);
+	if (ret < 0)
+		goto err_handle;
+	ret = val;
+
+err_handle:
+	phy_unlock_mdio_bus(phydev);
+	return ret;
+}
+
+/* Write @value to standard register @regnum in page @page (0 = UTP,
+ * 1 = fiber), restoring the previously selected page and EXTREG
+ * address afterwards.  Returns 0 or a negative errno.
+ */
+static int sfphy_page_write(struct phy_device *phydev, int page, u32 regnum, u16 value)
+{
+	int ret, saved_page = 0, saved_ext_addr = 0;
+
+	phy_lock_mdio_bus(phydev);
+
+	/* Remember the extended-register address currently latched. */
+	ret = __phy_read(phydev, SF1211F_EXTREG_ADDR);
+	if (ret < 0)
+		goto out_unlock;
+	saved_ext_addr = ret;
+
+	/* Remember which page is currently selected. */
+	ret = __phy_read(phydev, SF1211F_EXTREG_PHY_MODE_PAGE_SELECT);
+	if (ret < 0)
+		goto out_unlock;
+	saved_page = ret;
+
+	/* Select the requested page and perform the write. */
+	ret = __phy_write(phydev, SF1211F_EXTREG_PHY_MODE_PAGE_SELECT, (page << 8));
+	if (ret < 0)
+		goto out_unlock;
+
+	ret = __phy_write(phydev, regnum, value);
+	if (ret < 0)
+		goto out_unlock;
+
+	/* Restore the original page and extended-register address. */
+	ret = __phy_write(phydev, SF1211F_EXTREG_PHY_MODE_PAGE_SELECT, saved_page);
+	if (ret < 0)
+		goto out_unlock;
+
+	ret = __phy_write(phydev, SF1211F_EXTREG_ADDR, saved_ext_addr);
+
+out_unlock:
+	phy_unlock_mdio_bus(phydev);
+	return ret;
+}
+
+/* Map the PHY mode register onto a siflower_port_type_e value.
+ * Returns the port type, or a negative errno if the mode register
+ * could not be read.
+ */
+static int sfphy_get_port_type(struct phy_device *phydev)
+{
+	int mode_reg;
+
+	mode_reg = siflower_phy_ext_read(phydev, SF1211F_EXTREG_GET_PORT_PHY_MODE);
+	if (mode_reg < 0)
+		return mode_reg;
+
+	switch (mode_reg & SF1211F_EXTREG_PHY_MODE_MASK) {
+	case SF1211F_EXTREG_PHY_MODE_UTP_TO_RGMII:
+	case SF1211F_EXTREG_PHY_MODE_UTP_TO_SGMII:
+		return SFPHY_PORT_TYPE_UTP;
+	case SF1211F_EXTREG_PHY_MODE_FIBER_TO_RGMII:
+	case SF1211F_EXTREG_PHY_MODE_SGMII_PHY_TO_RGMII_MAC:
+	case SF1211F_EXTREG_PHY_MODE_SGMII_MAC_TO_RGMII_PHY:
+		return SFPHY_PORT_TYPE_FIBER;
+	default:
+		return SFPHY_PORT_TYPE_COMBO;
+	}
+}
+
+/* Enable autonegotiation in the fiber-page BMCR. */
+static int sfphy_restart_aneg(struct phy_device *phydev)
+{
+	int bmcr;
+
+	bmcr = sfphy_page_read(phydev, SFPHY_REG_FIBER_SPACE, MII_BMCR);
+	if (bmcr < 0)
+		return bmcr;
+
+	bmcr |= BMCR_ANENABLE;
+	return sfphy_page_write(phydev, SFPHY_REG_FIBER_SPACE, MII_BMCR, bmcr);
+}
+
+/* Configure autonegotiation on whichever media pages are active.
+ *
+ * Fix vs. original: every early-return path (including the forced-speed
+ * fiber path) used to leave the temporarily selected page active and
+ * never restore the caller's page; the saved page is now restored on
+ * all exits.  A negative port-type probe is also propagated instead of
+ * being silently treated as "no media".
+ */
+int sf1211f_config_aneg(struct phy_device *phydev)
+{
+	int ret, phymode, oldpage;
+
+	phymode = SFPHY_MODE_CURR;
+	if (phymode < 0)
+		return phymode;
+
+	if (phymode == SFPHY_PORT_TYPE_UTP || phymode == SFPHY_PORT_TYPE_COMBO) {
+		oldpage = siflower_phy_get_reg_page(phydev);
+		if (oldpage < 0)
+			return oldpage;
+		ret = siflower_phy_select_reg_page(phydev, SFPHY_REG_UTP_SPACE);
+		if (ret < 0)
+			return ret;
+		ret = genphy_config_aneg(phydev);
+		if (ret < 0) {
+			/* Restore the page even when aneg setup failed. */
+			siflower_phy_select_reg_page(phydev, oldpage);
+			return ret;
+		}
+		ret = siflower_phy_select_reg_page(phydev, oldpage);
+		if (ret < 0)
+			return ret;
+	}
+
+	if (phymode == SFPHY_PORT_TYPE_FIBER || phymode == SFPHY_PORT_TYPE_COMBO) {
+		oldpage = siflower_phy_get_reg_page(phydev);
+		if (oldpage < 0)
+			return oldpage;
+		ret = siflower_phy_select_reg_page(phydev, SFPHY_REG_FIBER_SPACE);
+		if (ret < 0)
+			return ret;
+		if (phydev->autoneg != AUTONEG_ENABLE)
+			ret = genphy_setup_forced(phydev);
+		else
+			ret = sfphy_restart_aneg(phydev);
+		if (ret < 0) {
+			siflower_phy_select_reg_page(phydev, oldpage);
+			return ret;
+		}
+		ret = siflower_phy_select_reg_page(phydev, oldpage);
+		if (ret < 0)
+			return ret;
+	}
+	return 0;
+}
+
+/* Report autonegotiation completion.  On the fiber page the BMSR link
+ * bit is used directly; it is latched-low, so it is read twice to get
+ * the current state.  Otherwise defer to the generic helper.
+ */
+int sf1211f_aneg_done(struct phy_device *phydev)
+{
+	int val;
+
+	val = phy_read(phydev, SF1211F_EXTREG_PHY_MODE_PAGE_SELECT);
+	if (val == SFPHY_REG_FIBER_SPACE_SETADDR) {
+		phy_read(phydev, MII_BMSR);
+		val = phy_read(phydev, MII_BMSR);
+		return (val < 0) ? val : (val & BMSR_LSTATUS);
+	}
+
+	return genphy_aneg_done(phydev);
+}
+
+#if (SIFLOWER_PHY_WOL_FEATURE_ENABLE)
+/* Report WOL capability and state: only magic-packet wake is supported;
+ * the enable bit lives in WOL config register 1.
+ */
+static void siflower_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
+{
+	int reg;
+
+	wol->supported = WAKE_MAGIC;
+	wol->wolopts = 0;
+
+	reg = siflower_phy_ext_read(phydev, SIFLOWER_WOL_CFG_REG1);
+	if (reg < 0)
+		return;
+
+	if (reg & SIFLOWER_WOL_EN)
+		wol->wolopts |= WAKE_MAGIC;
+}
+
+/* Apply a WOL configuration: read-modify-write config registers 0 and 1
+ * according to @wol_cfg (enable, level/pulse type, pulse width, and the
+ * secure/CRC/dest-address check bits).  Returns 0 or a negative errno.
+ */
+static int siflower_wol_en_cfg(struct phy_device *phydev, siflower_wol_cfg_t wol_cfg)
+{
+	int ret, val0,val1;
+
+	val0 = siflower_phy_ext_read(phydev, SIFLOWER_WOL_CFG_REG0);
+	if (val0 < 0)
+		return val0;
+	val1 = siflower_phy_ext_read(phydev, SIFLOWER_WOL_CFG_REG1);
+	if (val1 < 0)
+		return val1;
+	if (wol_cfg.wolen) {
+		val1 |= SIFLOWER_WOL_EN;
+		if (wol_cfg.type == SFPHY_WOL_TYPE_LEVEL) {
+			val0 |= SIFLOWER_WOL_TYPE;
+		} else if (wol_cfg.type == SFPHY_WOL_TYPE_PULSE) {
+			ret = siflower_phy_ext_write(phydev, SIFLOWER_PHY_WOL_PULSE_MODE_SET, 0x04);//set int pin pulse
+			if (ret < 0)
+				return ret;
+			val0 &= ~SIFLOWER_WOL_TYPE;
+			/* Pulse width is encoded in the two WIDTH bits. */
+			if (wol_cfg.width == SFPHY_WOL_WIDTH_84MS) {
+				val0 &= ~SIFLOWER_WOL_WIDTH1;
+				val0 &= ~SIFLOWER_WOL_WIDTH2;
+			} else if (wol_cfg.width == SFPHY_WOL_WIDTH_168MS) {
+				val0 |= SIFLOWER_WOL_WIDTH1;
+				val0 &= ~SIFLOWER_WOL_WIDTH2;
+			} else if (wol_cfg.width == SFPHY_WOL_WIDTH_336MS) {
+				val0 &= ~SIFLOWER_WOL_WIDTH1;
+				val0 |= SIFLOWER_WOL_WIDTH2;
+			} else if (wol_cfg.width == SFPHY_WOL_WIDTH_672MS) {
+				val0 |= SIFLOWER_WOL_WIDTH1;
+				val0 |= SIFLOWER_WOL_WIDTH2;
+			}
+		}
+		if (wol_cfg.secure == SFPHY_GLB_ENABLE)
+			val1 |= SIFLOWER_WOL_SECURE_CHECK;
+		else
+			val1 &= ~SIFLOWER_WOL_SECURE_CHECK;
+		if (wol_cfg.checkcrc == SFPHY_GLB_ENABLE)
+			val0 |= SIFLOWER_WOL_CRC_CHECK;
+		else
+			val0 &= ~SIFLOWER_WOL_CRC_CHECK;
+		if (wol_cfg.checkdst == SFPHY_GLB_ENABLE)
+			val0 |= SIFLOWER_WOL_DESTADDR_CHECK;
+		else
+			val0 &= ~SIFLOWER_WOL_DESTADDR_CHECK;
+	} else {
+		val1 &= ~SIFLOWER_WOL_EN;
+	}
+
+	ret = siflower_phy_ext_write(phydev, SIFLOWER_WOL_CFG_REG0, val0);
+	if (ret < 0)
+		return ret;
+	ret = siflower_phy_ext_write(phydev, SIFLOWER_WOL_CFG_REG1, val1);
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
+/* Enable or disable magic-packet wake-on-LAN.
+ *
+ * On enable: unmask the WOL interrupt on the active media page(s),
+ * program a pulse-mode WOL configuration and store the interface MAC
+ * address (and optionally a password) used to match the magic packet.
+ * On disable: clear the WOL enable bit.  Finally, in UTP-to-SGMII mode,
+ * restart autonegotiation so the change takes effect.
+ *
+ * Fix vs. original: the final mode test compared 'val' - uninitialized
+ * on the disable path and holding a stale interrupt-register value on
+ * the enable path - against a PHY-mode constant.  The mode register is
+ * now read explicitly.  Interrupt-register reads are also error-checked.
+ */
+static int siflower_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
+{
+	int ret, val, i, phymode;
+	siflower_wol_cfg_t wol_cfg;
+
+	phymode = SFPHY_MODE_CURR;
+	if (phymode < 0)
+		return phymode;
+	memset(&wol_cfg, 0, sizeof(siflower_wol_cfg_t));
+
+	if (wol->wolopts & WAKE_MAGIC) {
+		if (phymode == SFPHY_PORT_TYPE_UTP || phymode == SFPHY_PORT_TYPE_COMBO) {
+			/* Enable the WOL interrupt on the UTP page */
+			val = sfphy_page_read(phydev, SFPHY_REG_UTP_SPACE, SIFLOWER_INTR_REG);
+			if (val < 0)
+				return val;
+			val |= SIFLOWER_WOL_INTR_EN;
+			ret = sfphy_page_write(phydev, SFPHY_REG_UTP_SPACE, SIFLOWER_INTR_REG, val);
+			if (ret < 0)
+				return ret;
+		}
+		if (phymode == SFPHY_PORT_TYPE_FIBER || phymode == SFPHY_PORT_TYPE_COMBO) {
+			/* Enable the WOL interrupt on the fiber page */
+			val = sfphy_page_read(phydev, SFPHY_REG_FIBER_SPACE, SIFLOWER_INTR_REG);
+			if (val < 0)
+				return val;
+			val |= SIFLOWER_WOL_INTR_EN;
+			ret = sfphy_page_write(phydev, SFPHY_REG_FIBER_SPACE, SIFLOWER_INTR_REG, val);
+			if (ret < 0)
+				return ret;
+		}
+		/* Pulse WOL, widest pulse, CRC and destination checks on. */
+		wol_cfg.wolen = SFPHY_GLB_ENABLE;
+		wol_cfg.type  = SFPHY_WOL_TYPE_PULSE;
+		wol_cfg.width = SFPHY_WOL_WIDTH_672MS;
+		wol_cfg.checkdst = SFPHY_GLB_ENABLE;
+		wol_cfg.checkcrc = SFPHY_GLB_ENABLE;
+		ret = siflower_wol_en_cfg(phydev, wol_cfg);
+		if (ret < 0)
+			return ret;
+
+		/* Store the device address for the magic packet (registers
+		 * run downwards from SIFLOWER_MAGIC_PACKET_MAC_ADDR).
+		 */
+		for (i = 0; i < 6; i++) {
+			ret = siflower_phy_ext_write(phydev,
+				SIFLOWER_MAGIC_PACKET_MAC_ADDR - i,
+				phydev->attached_dev->dev_addr[i]);
+			if (ret < 0)
+				return ret;
+		}
+#if SIFLOWER_PHY_WOL_PASSWD_ENABLE
+		/* Set passwd for the magic packet */
+		ret = siflower_phy_ext_write(phydev, SIFLOWER_MAGIC_PACKET_PASSWD_ADDR, SIFLOWER_MAGIC_PACKET_PASSWD1);
+		if (ret < 0)
+			return ret;
+		ret = siflower_phy_ext_write(phydev, SIFLOWER_MAGIC_PACKET_PASSWD_ADDR - 1, SIFLOWER_MAGIC_PACKET_PASSWD2);
+		if (ret < 0)
+			return ret;
+		ret = siflower_phy_ext_write(phydev, SIFLOWER_MAGIC_PACKET_PASSWD_ADDR - 2, SIFLOWER_MAGIC_PACKET_PASSWD3);
+		if (ret < 0)
+			return ret;
+		ret = siflower_phy_ext_write(phydev, SIFLOWER_MAGIC_PACKET_PASSWD_ADDR - 3, SIFLOWER_MAGIC_PACKET_PASSWD4);
+		if (ret < 0)
+			return ret;
+		ret = siflower_phy_ext_write(phydev, SIFLOWER_MAGIC_PACKET_PASSWD_ADDR - 4, SIFLOWER_MAGIC_PACKET_PASSWD5);
+		if (ret < 0)
+			return ret;
+		ret = siflower_phy_ext_write(phydev, SIFLOWER_MAGIC_PACKET_PASSWD_ADDR - 5, SIFLOWER_MAGIC_PACKET_PASSWD6);
+		if (ret < 0)
+			return ret;
+#endif
+	} else {
+		wol_cfg.wolen = SFPHY_GLB_DISABLE;
+		wol_cfg.type  = SFPHY_WOL_TYPE_EXT;
+		wol_cfg.width = SFPHY_WOL_WIDTH_EXT;
+		wol_cfg.checkdst = SFPHY_GLB_DISABLE;
+		wol_cfg.checkcrc = SFPHY_GLB_DISABLE;
+		ret = siflower_wol_en_cfg(phydev, wol_cfg);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* In UTP-to-SGMII mode, restart aneg so the new setting is applied. */
+	ret = siflower_phy_ext_read(phydev, SF1211F_EXTREG_GET_PORT_PHY_MODE);
+	if (ret < 0)
+		return ret;
+	if ((ret & SF1211F_EXTREG_PHY_MODE_MASK) == SF1211F_EXTREG_PHY_MODE_UTP_TO_SGMII) {
+		val = sfphy_page_read(phydev, SFPHY_REG_UTP_SPACE, MII_BMCR);
+		if (val < 0)
+			return val;
+		val |= SIFLOWER_WOL_RESTARTANEG;
+		ret = sfphy_page_write(phydev, SFPHY_REG_UTP_SPACE, MII_BMCR, val);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+#endif
+/* Configure RGMII RX/TX clock delays for the current interface mode and
+ * make sure the RX clock output is running in UTP-to-RGMII mode.
+ *
+ * Fix vs. original: the mode register read was masked *before* the
+ * negative-errno check, so MDIO errors could never be detected; the
+ * status-register read was also unchecked.
+ */
+static int sf1211f_rxc_txc_init(struct phy_device *phydev)
+{
+	int ret, mode;
+
+	ret = siflower_phy_ext_read(phydev, SF1211F_EXTREG_GET_PORT_PHY_MODE);
+	if (ret < 0)
+		return ret;
+	mode = ret & SF1211F_EXTREG_PHY_MODE_MASK;
+
+	/* These modes have no RGMII side: nothing to configure. */
+	if (mode == SF1211F_EXTREG_PHY_MODE_UTP_TO_SGMII ||
+	    mode == SF1211F_EXTREG_PHY_MODE_UTP_TO_FIBER_AUTO ||
+	    mode == SF1211F_EXTREG_PHY_MODE_UTP_TO_FIBER_FORCE)
+		return 0;
+
+	/* Init rxc and enable rxc (UTP-to-RGMII only). */
+	if (mode == SF1211F_EXTREG_PHY_MODE_UTP_TO_RGMII) {
+		ret = phy_read(phydev, SIFLOWER_SPEC_REG);
+		if (ret < 0)
+			return ret;
+		if ((ret & 0x4) == 0x0) {
+			ret = siflower_phy_ext_write(phydev, 0x1E0C, 0x17);
+			if (ret < 0)
+				return ret;
+			ret = siflower_phy_ext_write(phydev, 0x1E58, 0x00);
+			if (ret < 0)
+				return ret;
+		}
+	}
+
+	/* Apply internal clock delays per the RGMII delay variant. */
+	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID ||
+	    phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) {
+		ret = siflower_phy_ext_write(phydev, 0x0282, SIFLOWER_PHY_RXC_DELAY_VAL);
+		if (ret < 0)
+			return ret;
+	}
+	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID ||
+	    phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) {
+		ret = siflower_phy_ext_write(phydev, 0x0281, SIFLOWER_PHY_TXC_DELAY_VAL);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+/* Apply vendor link-quality tuning writes for 100M and 1000M UTP.
+ * Register/value pairs come from the vendor; their individual meaning
+ * is undocumented here.
+ */
+static int sf1211f_config_opt(struct phy_device *phydev)
+{
+	static const struct {
+		u16 reg;
+		u16 val;
+	} opt_tbl[] = {
+		/* 100M utp optimise */
+		{ 0x0149, 0x84 },
+		{ 0x014A, 0x86 },
+		{ 0x023C, 0x81 },
+		/* 1000M utp optimise */
+		{ 0x0184, 0x85 },
+		{ 0x0185, 0x86 },
+		{ 0x0186, 0x85 },
+		{ 0x0187, 0x86 },
+	};
+	int i, ret = 0;
+
+	for (i = 0; i < (int)(sizeof(opt_tbl) / sizeof(opt_tbl[0])); i++) {
+		ret = siflower_phy_ext_write(phydev, opt_tbl[i].reg, opt_tbl[i].val);
+		if (ret < 0)
+			return ret;
+	}
+	return ret;
+}
+#if SIFLOWER_PHY_CLK_OUT_125M_ENABLE
+/* Enable the PHY clock output (125 MHz per the enclosing config flag;
+ * NOTE(review): register 0x0272 / value 0x09 semantics taken from
+ * vendor code - confirm against the datasheet).
+ */
+static int sf1211f_clkout_init(struct phy_device *phydev)
+{
+	int ret;
+
+	ret = siflower_phy_ext_write(phydev, 0x0272 , 0x09);
+
+	return ret;
+}
+#endif
+
+#if SIFLOWER_PHY_MODE_SET_ENABLE
+//set mode
+/* Request a PHY operating mode via extended register 0xC417 and poll
+ * 0xC415/0xC413 until the switch completes.
+ *
+ * NOTE(review): 'num' is shared between both polling loops, so the
+ * second loop only gets the iterations left over from the first -
+ * confirm whether a combined 10 * 10ms budget is intended.  The
+ * ext-read results in the loop conditions are not error-checked.
+ */
+static int phy_mode_set(struct phy_device *phydev, u16 phyMode)
+{
+	int ret, num = 0;
+
+	ret = siflower_phy_ext_read(phydev, 0xC417);
+	if (ret < 0)
+		return ret;
+
+	/* Keep the high nibble, set the request bit plus the new mode. */
+	ret = (ret & 0xF0) | (0x8 | phyMode);
+
+	ret = siflower_phy_ext_write(phydev, 0xC417, ret);
+	if (ret < 0)
+		return ret;
+
+	while ((siflower_phy_ext_read(phydev, 0xC415) & 0x07) != phyMode) {
+		msleep(10);
+		if(++num == 5) {
+			printk("Phy Mode Set Time Out!\r\n");
+			break;
+		}
+	}
+
+	while (siflower_phy_ext_read(phydev, 0xC413) != 0) {
+		msleep(10);
+		if(++num == 10) {
+			printk("Phy Mode Set Time Out!\r\n");
+			break;
+		}
+	}
+
+	return 0;
+}
+#endif
+
+/* Probe-time setup for the SF1240: read the standard ability registers
+ * and force 1000BASE-T full duplex into the supported/advertised sets.
+ */
+int sf1240_config_init(struct phy_device *phydev)
+{
+	int ret;
+
+	ret = genphy_read_abilities(phydev);
+	if (ret < 0)
+		return ret;
+
+	linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+			 phydev->supported, ESTATUS_1000_TFULL);
+	linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+			 phydev->advertising, ESTATUS_1000_TFULL);
+	return 0;
+}
+
+/* Probe-time setup for the SF1211F: optionally force the PHY mode,
+ * select the register page matching the active media, read abilities,
+ * set up RGMII clock delays and tuning writes, and optionally enable
+ * the clock output and magic-packet WOL.
+ */
+int sf1211f_config_init(struct phy_device *phydev)
+{
+	int ret, phymode;
+
+#if SIFLOWER_PHY_WOL_FEATURE_ENABLE
+	struct ethtool_wolinfo wol;
+#endif
+
+#if SIFLOWER_PHY_MODE_SET_ENABLE
+	ret = phy_mode_set(phydev, 0x0);
+	if (ret < 0)
+		return ret;
+#endif
+	phymode = SFPHY_MODE_CURR;
+
+	if (phymode == SFPHY_PORT_TYPE_UTP || phymode == SFPHY_PORT_TYPE_COMBO) {
+		siflower_phy_select_reg_page(phydev, SFPHY_REG_UTP_SPACE);
+		ret = genphy_read_abilities(phydev);
+		if (ret < 0)
+			return ret;
+	} else {
+		/* Fiber page: BMSR does not report 1000FD, force it on. */
+		siflower_phy_select_reg_page(phydev, SFPHY_REG_FIBER_SPACE);
+		ret = genphy_read_abilities(phydev);
+		if (ret < 0)
+			return ret;
+
+		linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+				phydev->supported, ESTATUS_1000_TFULL);
+		linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+				phydev->advertising, ESTATUS_1000_TFULL);
+	}
+
+	ret = sf1211f_rxc_txc_init(phydev);
+	if (ret < 0)
+		return ret;
+
+	ret = sf1211f_config_opt(phydev);
+	if (ret < 0)
+		return ret;
+
+#if SIFLOWER_PHY_CLK_OUT_125M_ENABLE
+	ret = sf1211f_clkout_init(phydev);
+	if (ret < 0)
+		return ret;
+#endif
+
+#if SIFLOWER_PHY_WOL_FEATURE_ENABLE
+	wol.wolopts = 0;
+	wol.supported = WAKE_MAGIC;
+	wol.wolopts |= WAKE_MAGIC;
+	siflower_set_wol(phydev, &wol);
+#endif
+
+	return 0;
+}
+
+/* phylib driver entries for the two supported PHY models.  MMD access
+ * is explicitly unsupported on both parts.
+ */
+static struct phy_driver sf_phy_drivers[] = {
+	{
+		.phy_id             = SF1211F_PHY_ID,
+		.phy_id_mask        = SIFLOWER_PHY_ID_MASK,
+		.name               = "SF1211F Gigabit Ethernet",
+		.features           = PHY_GBIT_FEATURES,
+		.flags              = PHY_POLL,
+		.config_init        = sf1211f_config_init,
+		.config_aneg        = sf1211f_config_aneg,
+		.aneg_done          = sf1211f_aneg_done,
+		.write_mmd          = genphy_write_mmd_unsupported,
+		.read_mmd           = genphy_read_mmd_unsupported,
+		.suspend            = genphy_suspend,
+		.resume             = genphy_resume,
+#if SIFLOWER_PHY_WOL_FEATURE_ENABLE
+		.get_wol            = &siflower_get_wol,
+		.set_wol            = &siflower_set_wol,
+#endif
+	},
+
+	{
+		.phy_id             = SF1240_PHY_ID,
+		.phy_id_mask        = SIFLOWER_PHY_ID_MASK,
+		.name               = "SF1240 Gigabit Ethernet",
+		.features           = PHY_GBIT_FEATURES,
+		.flags              = PHY_POLL,
+		.config_init        = sf1240_config_init,
+		.config_aneg        = genphy_config_aneg,
+		.write_mmd          = genphy_write_mmd_unsupported,
+		.read_mmd           = genphy_read_mmd_unsupported,
+		.suspend            = genphy_suspend,
+		.resume             = genphy_resume,
+	},
+};
+
+/* for linux 4.x */
+module_phy_driver(sf_phy_drivers);
+
+/* MDIO device table for module autoloading. */
+static struct mdio_device_id __maybe_unused siflower_phy_tbl[] = {
+	{ SF1211F_PHY_ID, SIFLOWER_PHY_ID_MASK },
+	{ SF1240_PHY_ID, SIFLOWER_PHY_ID_MASK },
+	{},
+};
+
+MODULE_DEVICE_TABLE(mdio, siflower_phy_tbl);
+
+MODULE_DESCRIPTION("Siflower PHY driver");
+MODULE_LICENSE("GPL");

+ 361 - 0
target/linux/siflower/files-6.6/drivers/pci/controller/dwc/pcie-sf21.c

@@ -0,0 +1,361 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Siflower SF21A6826/SF21H8898 PCIE driver
+ *
+ * Author: Chuanhong Guo <[email protected]>
+ */
+
+#include <linux/phy/phy.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+#include <linux/types.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+#define SF_PCIE_MAX_TIMEOUT	10000
+
+#define ELBI_REG0		0x0
+#define  APP_LTSSM_ENABLE	BIT(23)
+
+#define to_sf_pcie(x)	dev_get_drvdata((x)->dev)
+
+#define SYSM_PCIE_SET		0x0
+#define  PCIE_DEVTYPE_EP	0
+#define  PCIE_DEVTYPE_RC	4
+
+#define SYSM_PCIE_INIT		0x4
+#define SYSM_PCIE_CLK_EN	0x9c
+
+enum sf_pcie_regfield_ids {
+	DEVICE_TYPE,
+	PERST_N_OUT,
+	PERST_N,
+	BUTTON_RSTN,
+	POWER_UP_RSTN,
+	ACLK_M_EN,
+	ACLK_S_EN,
+	ACLK_C_EN,
+	HCLK_EN,
+	SYSM_REGFIELD_MAX,
+};
+
+/* Syscon field layout for PCIE controller 0. */
+static const struct reg_field pcie0_sysm_regs[SYSM_REGFIELD_MAX] = {
+	[DEVICE_TYPE] = REG_FIELD(SYSM_PCIE_SET, 0, 3),
+	[PERST_N_OUT] = REG_FIELD(SYSM_PCIE_INIT, 15, 15),
+	[PERST_N] = REG_FIELD(SYSM_PCIE_INIT, 5, 5),
+	[BUTTON_RSTN] = REG_FIELD(SYSM_PCIE_INIT, 4, 4),
+	[POWER_UP_RSTN] = REG_FIELD(SYSM_PCIE_INIT, 3, 3),
+	[ACLK_M_EN] = REG_FIELD(SYSM_PCIE_CLK_EN, 3, 3),
+	[ACLK_S_EN] = REG_FIELD(SYSM_PCIE_CLK_EN, 2, 2),
+	[ACLK_C_EN] = REG_FIELD(SYSM_PCIE_CLK_EN, 1, 1),
+	[HCLK_EN] = REG_FIELD(SYSM_PCIE_CLK_EN, 0, 0),
+};
+
+/* Same fields for PCIE controller 1, shifted within the shared regs. */
+static const struct reg_field pcie1_sysm_regs[SYSM_REGFIELD_MAX] = {
+	[DEVICE_TYPE] = REG_FIELD(SYSM_PCIE_SET, 4, 7),
+	[PERST_N_OUT] = REG_FIELD(SYSM_PCIE_INIT, 16, 16),
+	[PERST_N] = REG_FIELD(SYSM_PCIE_INIT, 8, 8),
+	[BUTTON_RSTN] = REG_FIELD(SYSM_PCIE_INIT, 7, 7),
+	[POWER_UP_RSTN] = REG_FIELD(SYSM_PCIE_INIT, 6, 6),
+	[ACLK_M_EN] = REG_FIELD(SYSM_PCIE_CLK_EN, 8, 8),
+	[ACLK_S_EN] = REG_FIELD(SYSM_PCIE_CLK_EN, 7, 7),
+	[ACLK_C_EN] = REG_FIELD(SYSM_PCIE_CLK_EN, 6, 6),
+	[HCLK_EN] = REG_FIELD(SYSM_PCIE_CLK_EN, 5, 5),
+};
+
+/* Per-controller driver state; embeds the DesignWare core state. */
+struct sf_pcie {
+	struct dw_pcie			pci;
+	void __iomem			*elbi;		/* app/ELBI registers */
+	struct clk			*csr_clk;
+	struct clk			*ref_clk;
+	struct phy			*phy;
+	struct regmap			*pciesys;	/* shared sysm syscon */
+	struct regmap_field		*pciesys_reg[SYSM_REGFIELD_MAX];
+	struct gpio_desc		*reset_gpio;	/* PERST# to the slot */
+};
+
+/* Set bit 22 in DBI register 0x708 so lanes can exit RX electrical
+ * idle (NOTE(review): register semantics taken from vendor code -
+ * confirm against the DWC databook), read back to post the write,
+ * then let the core settle.
+ */
+static void sf_pcie_enable_part_lanes_rxei_exit(struct sf_pcie *sf_pcie)
+{
+	u32 reg;
+
+	reg = readl(sf_pcie->pci.dbi_base + 0x708);
+	writel(reg | (1U << 22), sf_pcie->pci.dbi_base + 0x708);
+	readl(sf_pcie->pci.dbi_base + 0x708);	/* post the write */
+	msleep(20);
+}
+
+/* Set DIRECT_SPEED_CHANGE (bit 17) in GEN2_CTRL_OFF (0x80c) so the
+ * LTSSM retrains above Gen1 after initial link-up; read back to post
+ * the write, then let the core settle.
+ */
+static void sf_pcie_enable_speed_change(struct sf_pcie *sf_pcie)
+{
+	u32 reg;
+
+	reg = readl(sf_pcie->pci.dbi_base + 0x80c);
+	writel(reg | (1U << 17), sf_pcie->pci.dbi_base + 0x80c);
+	readl(sf_pcie->pci.dbi_base + 0x80c);	/* post the write */
+	msleep(20);
+}
+
+/* Enable the controller clocks: the two CCF clocks first, then the
+ * four sysm clock gates.
+ *
+ * Fix vs. original: if enabling ref_clk failed, csr_clk was left
+ * prepared/enabled; it is now released on that path.
+ */
+static int sf_pcie_clk_enable(struct sf_pcie *sf_pcie)
+{
+	int ret;
+
+	ret = clk_prepare_enable(sf_pcie->csr_clk);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(sf_pcie->ref_clk);
+	if (ret) {
+		clk_disable_unprepare(sf_pcie->csr_clk);
+		return ret;
+	}
+
+	regmap_field_write(sf_pcie->pciesys_reg[ACLK_M_EN], 1);
+	regmap_field_write(sf_pcie->pciesys_reg[ACLK_S_EN], 1);
+	regmap_field_write(sf_pcie->pciesys_reg[ACLK_C_EN], 1);
+	regmap_field_write(sf_pcie->pciesys_reg[HCLK_EN], 1);
+	return 0;
+}
+
+/* Gate the four sysm clock enables, then drop the two CCF clocks. */
+static void sf_pcie_clk_disable(struct sf_pcie *sf_pcie)
+{
+	regmap_field_write(sf_pcie->pciesys_reg[ACLK_M_EN], 0);
+	regmap_field_write(sf_pcie->pciesys_reg[ACLK_S_EN], 0);
+	regmap_field_write(sf_pcie->pciesys_reg[ACLK_C_EN], 0);
+	regmap_field_write(sf_pcie->pciesys_reg[HCLK_EN], 0);
+	clk_disable_unprepare(sf_pcie->csr_clk);
+	clk_disable_unprepare(sf_pcie->ref_clk);
+}
+
+/* Initialize and power on the PCIe PHY. */
+static int sf_pcie_phy_enable(struct sf_pcie *pcie)
+{
+	int err = phy_init(pcie->phy);
+
+	if (err)
+		return err;
+
+	return phy_power_on(pcie->phy);
+}
+
+/* Power off and tear down the PCIe PHY. */
+static int sf_pcie_phy_disable(struct sf_pcie *pcie)
+{
+	int err = phy_power_off(pcie->phy);
+
+	if (err)
+		return err;
+
+	return phy_exit(pcie->phy);
+}
+
+/* Toggle APP_LTSSM_ENABLE in the ELBI register to start/stop link
+ * training.
+ */
+static void sf_pcie_ltssm_set_en(struct sf_pcie *pcie, bool enable)
+{
+	u32 reg = readl(pcie->elbi + ELBI_REG0);
+
+	if (enable)
+		reg |= APP_LTSSM_ENABLE;
+	else
+		reg &= ~APP_LTSSM_ENABLE;
+
+	writel(reg, pcie->elbi + ELBI_REG0);
+}
+
+/* Assert (value 0) or release (value 1) the three controller resets
+ * together; the sysm fields are active-low.
+ */
+static void sf_pcie_set_reset(struct sf_pcie *pcie, bool assert)
+{
+	unsigned int deassert = assert ? 0 : 1;
+
+	regmap_field_write(pcie->pciesys_reg[PERST_N], deassert);
+	regmap_field_write(pcie->pciesys_reg[BUTTON_RSTN], deassert);
+	regmap_field_write(pcie->pciesys_reg[POWER_UP_RSTN], deassert);
+}
+
+/*
+ * The bus interconnect subtracts address offset from the request
+ * before sending it to PCIE slave port. Since DT puts config space
+ * at the beginning, we can obtain the address offset from there and
+ * subtract it.
+ */
+static u64 sf_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)
+{
+	return cpu_addr - pci->pp.cfg0_base;
+}
+
+/* Bring the controller up: clocks on, resets asserted, RC mode
+ * selected, endpoint held in reset via GPIO while the PHY powers up,
+ * then resets released and DBI unlocked for the lane workaround.
+ *
+ * Fix vs. original: failures after sf_pcie_clk_enable() used to leak
+ * the enabled clocks; they are now gated again on the error path.
+ */
+static int sf_pcie_host_init(struct dw_pcie_rp *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct sf_pcie *sf_pcie = to_sf_pcie(pci);
+	int ret;
+
+	ret = sf_pcie_clk_enable(sf_pcie);
+	if (ret)
+		return dev_err_probe(sf_pcie->pci.dev, ret,
+				     "failed to enable pcie clocks.\n");
+
+	sf_pcie_set_reset(sf_pcie, true);
+
+	ret = regmap_field_write(sf_pcie->pciesys_reg[DEVICE_TYPE], PCIE_DEVTYPE_RC);
+	if (ret)
+		goto err_clk;
+
+	/* Hold the downstream device in reset while the PHY powers up. */
+	gpiod_set_value_cansleep(sf_pcie->reset_gpio, 1);
+
+	ret = sf_pcie_phy_enable(sf_pcie);
+	if (ret)
+		goto err_clk;
+
+	/* TODO: release power-down */
+	msleep(100);
+
+	sf_pcie_set_reset(sf_pcie, false);
+
+	dw_pcie_dbi_ro_wr_en(pci);
+	sf_pcie_enable_part_lanes_rxei_exit(sf_pcie);
+
+	gpiod_set_value_cansleep(sf_pcie->reset_gpio, 0);
+	return 0;
+
+err_clk:
+	sf_pcie_clk_disable(sf_pcie);
+	return ret;
+}
+
+/* Tear down the link: assert controller resets, power off the PHY,
+ * assert the endpoint reset GPIO and gate the clocks.
+ * (Made static for consistency: it is only referenced through
+ * sf_pcie_host_ops in this file, like its siblings.)
+ */
+static void sf_pcie_host_deinit(struct dw_pcie_rp *pp)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct sf_pcie *sf_pcie = to_sf_pcie(pci);
+
+	sf_pcie_set_reset(sf_pcie, true);
+
+	sf_pcie_phy_disable(sf_pcie);
+
+	gpiod_set_value_cansleep(sf_pcie->reset_gpio, 1);
+
+	sf_pcie_clk_disable(sf_pcie);
+}
+
+/* DWC host callbacks: SoC-specific bring-up/tear-down. */
+static const struct dw_pcie_host_ops sf_pcie_host_ops = {
+	.host_init = sf_pcie_host_init,
+	.host_deinit = sf_pcie_host_deinit,
+};
+
+/* Arm the speed-change bit, then enable the LTSSM to start training. */
+static int sf_pcie_start_link(struct dw_pcie *pci)
+{
+	struct sf_pcie *pcie = to_sf_pcie(pci);
+	/*
+	 * before link up with GEN1, we should config the field
+	 * DIRECTION_SPEED_CHANGE of GEN2_CTRL_OFF register to insure
+	 * the LTSSM to initiate a speed change to Gen2 or Gen3 after
+	 * the link is initialized at Gen1 speed.
+	 */
+	sf_pcie_enable_speed_change(pcie);
+
+	sf_pcie_ltssm_set_en(pcie, true);
+	return 0;
+}
+
+/* Halt the LTSSM to bring the link down. */
+static void sf_pcie_stop_link(struct dw_pcie *pci)
+{
+	sf_pcie_ltssm_set_en(to_sf_pcie(pci), false);
+}
+
+/* DWC core callbacks for address fixup and link control. */
+static const struct dw_pcie_ops dw_pcie_ops = {
+	.cpu_addr_fixup = sf_pcie_cpu_addr_fixup,
+	.start_link = sf_pcie_start_link,
+	.stop_link = sf_pcie_stop_link,
+};
+
+/* Gather all devm-managed resources (clocks, reset GPIO, sysm syscon,
+ * PHY, ELBI registers), pick the per-controller sysm field layout from
+ * the "siflower,ctlr-idx" property (default 0), then hand over to the
+ * DWC host core.
+ */
+static int sf_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct sf_pcie *sf_pcie;
+	u32 ctlr_id;
+	int ret;
+
+	sf_pcie = devm_kzalloc(dev, sizeof(*sf_pcie), GFP_KERNEL);
+	if (!sf_pcie)
+		return -ENOMEM;
+
+	sf_pcie->pci.dev = dev;
+	sf_pcie->pci.ops = &dw_pcie_ops;
+	sf_pcie->pci.pp.ops = &sf_pcie_host_ops;
+
+	platform_set_drvdata(pdev, sf_pcie);
+
+	sf_pcie->csr_clk = devm_clk_get(dev, "csr");
+	if (IS_ERR(sf_pcie->csr_clk))
+		return PTR_ERR(sf_pcie->csr_clk);
+
+	sf_pcie->ref_clk = devm_clk_get(dev, "ref");
+	if (IS_ERR(sf_pcie->ref_clk))
+		return PTR_ERR(sf_pcie->ref_clk);
+
+	sf_pcie->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+	if (IS_ERR(sf_pcie->reset_gpio))
+		return dev_err_probe(dev, PTR_ERR(sf_pcie->reset_gpio),
+				     "unable to get reset gpio\n");
+
+	sf_pcie->pciesys = syscon_regmap_lookup_by_phandle(node,
+							   "siflower,pcie-sysm");
+	if (IS_ERR(sf_pcie->pciesys))
+		return PTR_ERR(sf_pcie->pciesys);
+
+	sf_pcie->phy = devm_phy_get(dev, NULL);
+	if (IS_ERR(sf_pcie->phy))
+		return PTR_ERR(sf_pcie->phy);
+
+	sf_pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
+	if (IS_ERR(sf_pcie->elbi))
+		return PTR_ERR(sf_pcie->elbi);
+
+	/* Controller index selects which sysm field layout applies. */
+	if (of_property_read_u32(node, "siflower,ctlr-idx", &ctlr_id))
+		ctlr_id = 0;
+
+	ret = devm_regmap_field_bulk_alloc(dev, sf_pcie->pciesys,
+					   sf_pcie->pciesys_reg,
+					   ctlr_id ? pcie1_sysm_regs : pcie0_sysm_regs,
+					   SYSM_REGFIELD_MAX);
+	if (ret)
+		return dev_err_probe(dev, ret,
+				     "failed to alloc regmap fields.\n");
+
+	ret = dw_pcie_host_init(&sf_pcie->pci.pp);
+	if (ret)
+		return dev_err_probe(dev, ret, "failed to initialize host\n");
+
+	return 0;
+}
+
+/* Driver removal: DWC host deinit invokes sf_pcie_host_deinit(). */
+static void sf_pcie_remove(struct platform_device *pdev)
+{
+	struct sf_pcie *pcie = platform_get_drvdata(pdev);
+
+	dw_pcie_host_deinit(&pcie->pci.pp);
+}
+
+/* DT match table for both SF21 SoC variants. */
+static const struct of_device_id sf_pcie_of_match[] = {
+	{ .compatible = "siflower,sf21-pcie", },
+	{},
+};
+/* Fix vs. original: without MODULE_DEVICE_TABLE the module cannot be
+ * autoloaded from the DT compatible (the sibling drivers in this
+ * series all export theirs).
+ */
+MODULE_DEVICE_TABLE(of, sf_pcie_of_match);
+
+static struct platform_driver sf_pcie_driver = {
+	.driver = {
+		.name	= "sf21-pcie",
+		.of_match_table = sf_pcie_of_match,
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+	},
+	.probe    = sf_pcie_probe,
+	.remove_new = sf_pcie_remove,
+};
+
+module_platform_driver(sf_pcie_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Chuanhong Guo <[email protected]>");
+MODULE_DESCRIPTION("PCIe Controller driver for SF21A6826/SF21H8898 SoC");

+ 23 - 0
target/linux/siflower/files-6.6/drivers/phy/siflower/Kconfig

@@ -0,0 +1,23 @@
+config PHY_SF19A2890_USB
+	tristate "SIFLOWER sf19a2890 USB2.0 PHY driver"
+	default n
+	select GENERIC_PHY
+	help
+	  Enable this to support the USB2.0 PHY on the SIFLOWER SF19A2890.
+
+config PHY_SF21_PCIE
+	tristate "Siflower SF21A6826/SF21H8898 PCIE PHY driver"
+	default n
+	depends on HAS_IOMEM
+	select GENERIC_PHY
+	help
+	  Enable this to support the PCIE PHY on the Siflower SF21A6826/SF21H8898 SoC.
+
+
+config PHY_SF21_USB
+	tristate "Siflower SF21A6826/SF21H8898 USB2.0 PHY driver"
+	default n
+	depends on HAS_IOMEM
+	select GENERIC_PHY
+	help
+	  Enable this to support the USB2.0 PHY on the Siflower SF21A6826/SF21H8898 SoC.

+ 3 - 0
target/linux/siflower/files-6.6/drivers/phy/siflower/Makefile

@@ -0,0 +1,3 @@
+obj-$(CONFIG_PHY_SF19A2890_USB) += phy-sf19a2890-usb.o
+obj-$(CONFIG_PHY_SF21_PCIE) += phy-sf21-pcie.o
+obj-$(CONFIG_PHY_SF21_USB) += phy-sf21-usb.o

+ 145 - 0
target/linux/siflower/files-6.6/drivers/phy/siflower/phy-sf19a2890-usb.c

@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#define USB_SLEEPM 0x4
+
+struct sf19a2890_usb_phy {
+	struct device *dev;
+	struct clk *phy_clk;
+	struct reset_control *usb_phy_rst;
+	struct reset_control *power_on_rst;
+	void __iomem *base;
+};
+
+/*
+ * Power-on sequence for the SF19A2890 USB2 PHY:
+ * enable the PHY clock, release both resets (PHY first, then the
+ * power-on reset), then take the PHY out of sleep via USB_SLEEPM.
+ * On any failure the steps already performed are rolled back in
+ * reverse order.
+ */
+static int sf19a2890_usb_phy_power_on(struct phy *phy)
+{
+	struct sf19a2890_usb_phy *p_phy = phy_get_drvdata(phy);
+	int ret;
+
+	ret = clk_prepare_enable(p_phy->phy_clk);
+	if (ret < 0) {
+		dev_err(p_phy->dev, "Failed to enable PHY clock: %d\n", ret);
+		return ret;
+	}
+
+	ret = reset_control_deassert(p_phy->usb_phy_rst);
+	if (ret)
+		goto err1;
+
+	ret = reset_control_deassert(p_phy->power_on_rst);
+	if (ret)
+		goto err2;
+
+	/* 1 = deassert SLEEPM, i.e. wake the PHY */
+	writel(1, p_phy->base + USB_SLEEPM);
+	return 0;
+err2:
+	reset_control_assert(p_phy->usb_phy_rst);
+err1:
+	clk_disable_unprepare(p_phy->phy_clk);
+	return ret;
+}
+
+/* Reverse of power_on: sleep the PHY, assert both resets, gate the clock. */
+static int sf19a2890_usb_phy_power_off(struct phy *phy)
+{
+	struct sf19a2890_usb_phy *p_phy = phy_get_drvdata(phy);
+
+	writel(0, p_phy->base + USB_SLEEPM);
+	reset_control_assert(p_phy->power_on_rst);
+	reset_control_assert(p_phy->usb_phy_rst);
+	clk_disable_unprepare(p_phy->phy_clk);
+	return 0;
+}
+
+static const struct phy_ops sf19a2890_usb_phy_ops = {
+	.power_on = sf19a2890_usb_phy_power_on,
+	.power_off = sf19a2890_usb_phy_power_off,
+	.owner = THIS_MODULE,
+};
+
+/*
+ * Probe: map registers, look up the PHY clock and the two reset lines,
+ * assert both resets so the PHY starts powered off, then register a
+ * single generic PHY with sf19a2890_usb_phy_ops.
+ */
+static int sf19a2890_usb_phy_probe(struct platform_device *pdev)
+{
+	struct sf19a2890_usb_phy *p_phy;
+	struct phy_provider *provider;
+	struct phy *phy;
+	int ret;
+
+	p_phy = devm_kzalloc(&pdev->dev, sizeof(*p_phy), GFP_KERNEL);
+	if (!p_phy)
+		return -ENOMEM;
+
+	p_phy->dev = &pdev->dev;
+
+	p_phy->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(p_phy->base))
+		return PTR_ERR(p_phy->base);
+
+	p_phy->phy_clk = devm_clk_get(p_phy->dev, NULL);
+	if (IS_ERR(p_phy->phy_clk))
+		return dev_err_probe(p_phy->dev, PTR_ERR(p_phy->phy_clk),
+				     "failed to get usb phy clock\n");
+
+	p_phy->power_on_rst =
+		devm_reset_control_get_exclusive(&pdev->dev, "power_on_rst");
+	if (IS_ERR(p_phy->power_on_rst))
+		return PTR_ERR(p_phy->power_on_rst);
+
+	/* Hold the PHY in reset until power_on() is called. */
+	ret = reset_control_assert(p_phy->power_on_rst);
+	if (ret)
+		return ret;
+
+	p_phy->usb_phy_rst =
+		devm_reset_control_get_exclusive(&pdev->dev, "usb_phy_rst");
+	if (IS_ERR(p_phy->usb_phy_rst))
+		return PTR_ERR(p_phy->usb_phy_rst);
+
+	ret = reset_control_assert(p_phy->usb_phy_rst);
+	if (ret)
+		return ret;
+
+	phy = devm_phy_create(p_phy->dev, NULL, &sf19a2890_usb_phy_ops);
+	if (IS_ERR(phy))
+		return dev_err_probe(p_phy->dev, PTR_ERR(phy),
+				     "Failed to create PHY\n");
+
+	phy_set_drvdata(phy, p_phy);
+
+	provider =
+		devm_of_phy_provider_register(p_phy->dev, of_phy_simple_xlate);
+	if (IS_ERR(provider))
+		return dev_err_probe(p_phy->dev, PTR_ERR(provider),
+				     "Failed to register PHY provider\n");
+
+	platform_set_drvdata(pdev, p_phy);
+	return 0;
+}
+
+static const struct of_device_id sf19a2890_usb_phy_of_match[] = {
+	{
+		.compatible = "siflower,sf19a2890-usb-phy",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, sf19a2890_usb_phy_of_match);
+
+static struct platform_driver sf19a2890_usb_phy_driver = {
+	.probe		= sf19a2890_usb_phy_probe,
+	.driver		= {
+		.name	= "sf19a2890-usb-phy",
+		.of_match_table = sf19a2890_usb_phy_of_match,
+	},
+};
+module_platform_driver(sf19a2890_usb_phy_driver);
+
+MODULE_AUTHOR("Ziying Wu <[email protected]>");
+MODULE_DESCRIPTION("Siflower SF19A2890 USB2.0 PHY driver");
+MODULE_LICENSE("GPL");

+ 335 - 0
target/linux/siflower/files-6.6/drivers/phy/siflower/phy-sf21-pcie.c

@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/mfd/syscon.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/regmap.h>
+
+#define SF_PCIE_PHY_NLANES	2
+
+#define TOPCRM_LVDS0_CFG	0xe8
+#define TOPCRM_LVDS1_CFG	0x120
+#define  LVDS_BIAS_EN		BIT(20)
+#define  LVDS_PULLDN		BIT(19)
+#define  LVDS_SCHMITT_EN	BIT(18)
+#define  LVDS_TX_CM		BIT(17)
+#define  LVDS_RXCM_EN		BIT(16)
+#define  LVDS_RTERM_VAL		GENMASK(15, 13)
+#define  LVDS_TXDRV		GENMASK(12, 9)
+#define  LVDS_IEN_N		BIT(8)
+#define  LVDS_IEN_P		BIT(7)
+#define  LVDS_OEN_N		BIT(6)
+#define  LVDS_OEN_P		BIT(5)
+#define  LVDS_RTERM_EN		BIT(4)
+#define  LVDS_RXEN		BIT(3)
+#define  LVDS_TXEN		BIT(2)
+#define  LVDS_VBIAS_SEL		BIT(0)
+
+#define PCIE_SYSM_SET		0x0
+#define  PCIE_LANE_MUX		GENMASK(9, 8)
+#define  PHY0_L0_PHY1_L1	(0 << 8)
+#define  PHY0_L0L1		(1 << 8)
+#define  PHY0_L1_PHY1_L0	(2 << 8)
+#define  PHY1_L0L1		(3 << 8)
+
+#define PCIE_SYSM_INIT		0x4
+#define  PCIE_L1_REPEAT_CLK_EN	BIT(10)
+#define  PCIE_L0_REPEAT_CLK_EN	BIT(9)
+#define  PCIE_L1_RSTN		BIT(2)
+#define  PCIE_L0_RSTN		BIT(1)
+#define  PCIE_PHY_RSTN		BIT(0)
+
+
+struct sf21_pcie_phy_inst {
+	struct phy *phy;
+	u8 idx;
+	u8 num_lanes;
+	u8 lvds_idx;
+};
+
+struct sf21_pcie_phy {
+	struct device *dev;
+	struct clk *refclk;
+	struct clk *csrclk;
+	struct regmap *pcie_regmap;
+	struct regmap *topcrm_regmap;
+	struct mutex lock;
+	int nlanes_enabled;
+	struct sf21_pcie_phy_inst insts[2];
+};
+
+static struct sf21_pcie_phy_inst *phy_to_instance(struct phy *phy)
+{
+	return phy_get_drvdata(phy);
+}
+
+static struct sf21_pcie_phy *
+instance_to_priv(struct sf21_pcie_phy_inst *inst)
+{
+	return container_of(inst, struct sf21_pcie_phy, insts[inst->idx]);
+}
+
+static int sf_pcie_phy_lvds_on(struct sf21_pcie_phy *priv, int idx)
+{
+	return regmap_set_bits(priv->topcrm_regmap,
+			       idx ? TOPCRM_LVDS1_CFG : TOPCRM_LVDS0_CFG,
+			       LVDS_TXEN | LVDS_BIAS_EN);
+}
+
+static int sf_pcie_phy_lvds_off(struct sf21_pcie_phy *priv, int idx)
+{
+	return regmap_clear_bits(priv->topcrm_regmap,
+				 idx ? TOPCRM_LVDS1_CFG : TOPCRM_LVDS0_CFG,
+				 LVDS_TXEN | LVDS_BIAS_EN);
+}
+
+static int sf_pcie_lane_on(struct sf21_pcie_phy *priv, int idx)
+{
+	return regmap_set_bits(priv->pcie_regmap, PCIE_SYSM_INIT,
+				 idx ? PCIE_L1_RSTN : PCIE_L0_RSTN);
+}
+
+static int sf_pcie_lane_off(struct sf21_pcie_phy *priv, int idx)
+{
+	return regmap_clear_bits(priv->pcie_regmap, PCIE_SYSM_INIT,
+			       idx ? PCIE_L1_RSTN : PCIE_L0_RSTN);
+}
+
+/*
+ * Power on one PHY instance.  Shared state (lane budget, refclk, global
+ * PHY reset and clock repeaters) is guarded by priv->lock; the first
+ * instance to power on brings up the common parts, later ones only
+ * claim lanes and enable their own LVDS/lane bits.
+ *
+ * NOTE(review): if a regmap write fails right after clk_prepare_enable()
+ * on the first-power-on path, the function returns with refclk still
+ * enabled, and nlanes_enabled is bumped before the lane enables are
+ * known to have succeeded — confirm whether these error paths matter
+ * in practice before restructuring.
+ */
+static int sf21_pcie_phy_power_on(struct phy *phy)
+{
+	struct sf21_pcie_phy_inst *inst = phy_to_instance(phy);
+	struct sf21_pcie_phy *priv = instance_to_priv(inst);
+	int ret;
+	mutex_lock(&priv->lock);
+	/* Refuse if the other instance already claimed the needed lanes. */
+	if (SF_PCIE_PHY_NLANES - priv->nlanes_enabled < inst->num_lanes) {
+		dev_err(priv->dev, "too many lanes requested for PHY %u\n",
+			inst->idx);
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/* Route both serdes lanes to this PHY, or split one lane each. */
+	if (inst->num_lanes == 2) {
+		regmap_update_bits(priv->pcie_regmap, PCIE_SYSM_SET,
+				   PCIE_LANE_MUX,
+				   inst->idx ? PHY1_L0L1 : PHY0_L0L1);
+	} else {
+		regmap_update_bits(priv->pcie_regmap, PCIE_SYSM_SET,
+				   PCIE_LANE_MUX, PHY0_L0_PHY1_L1);
+	}
+
+	/*
+	 * The PCIE clock goes like:
+	 *  internal refclk -- serdes0 -- serdes1 -- LVDS0
+	 *                                        \- LVDS1
+	 * Both clock repeaters must be enabled at PHY power-on,
+	 * otherwise there's no PCIE reference clock output.
+	 */
+
+	if (!priv->nlanes_enabled) {
+		ret = clk_prepare_enable(priv->refclk);
+		if (ret)
+			goto out;
+
+		ret = regmap_set_bits(priv->pcie_regmap, PCIE_SYSM_INIT, PCIE_PHY_RSTN);
+		if (ret)
+			goto out;
+
+		ret = regmap_set_bits(priv->pcie_regmap, PCIE_SYSM_INIT,
+				      PCIE_L0_REPEAT_CLK_EN |
+					      PCIE_L1_REPEAT_CLK_EN);
+		if (ret)
+			goto out;
+	}
+
+	priv->nlanes_enabled += inst->num_lanes;
+
+	ret = sf_pcie_phy_lvds_on(priv, inst->lvds_idx);
+	if (ret)
+		goto out;
+
+	/* Release this instance's lane reset(s); both lanes for x2 mode. */
+	ret = sf_pcie_lane_on(priv, inst->idx);
+	if (ret)
+		goto out;
+	if (inst->num_lanes == 2)
+		ret = sf_pcie_lane_on(priv, !inst->idx);
+out:
+	mutex_unlock(&priv->lock);
+	return ret;
+}
+
+/*
+ * Power off one PHY instance: reverse of power_on.  Lane resets and the
+ * instance's LVDS block go down first; when the last instance releases
+ * its lanes, the global PHY reset, clock repeaters and refclk are also
+ * shut down.  Serialized against power_on by priv->lock.
+ */
+static int sf21_pcie_phy_power_off(struct phy *phy)
+{
+	struct sf21_pcie_phy_inst *inst = phy_to_instance(phy);
+	struct sf21_pcie_phy *priv = instance_to_priv(inst);
+	int ret;
+	mutex_lock(&priv->lock);
+
+	if (inst->num_lanes == 2) {
+		ret = sf_pcie_lane_off(priv, !inst->idx);
+		if (ret)
+			goto out;
+	}
+
+	ret = sf_pcie_lane_off(priv, inst->idx);
+	if (ret)
+		goto out;
+
+	ret = sf_pcie_phy_lvds_off(priv, inst->lvds_idx);
+	if (ret)
+		goto out;
+	priv->nlanes_enabled -= inst->num_lanes;
+
+	/* Last user gone: tear down the shared clock/reset plumbing. */
+	if (!priv->nlanes_enabled) {
+		ret = regmap_clear_bits(priv->pcie_regmap, PCIE_SYSM_INIT, PCIE_PHY_RSTN);
+		if (ret)
+			goto out;
+
+		ret = regmap_clear_bits(priv->pcie_regmap, PCIE_SYSM_INIT,
+					PCIE_L0_REPEAT_CLK_EN |
+						PCIE_L1_REPEAT_CLK_EN);
+		if (ret)
+			goto out;
+		clk_disable_unprepare(priv->refclk);
+	}
+out:
+	mutex_unlock(&priv->lock);
+	return ret;
+}
+
+static const struct phy_ops sf21_pcie_phy_ops = {
+	.power_on = sf21_pcie_phy_power_on,
+	.power_off = sf21_pcie_phy_power_off,
+	.owner = THIS_MODULE,
+};
+
+/*
+ * Probe: acquire clocks and regmaps, put all lanes and the PHY into
+ * reset, then create one generic PHY per child node (at most two,
+ * indexed by the child's "reg" property).
+ *
+ * Fix vs. original: every early exit out of
+ * for_each_available_child_of_node() — including the final break —
+ * must drop the reference held on @child, otherwise the OF node
+ * refcount leaks.
+ */
+static int sf21_pcie_phy_probe(struct platform_device *pdev)
+{
+	struct sf21_pcie_phy *p_phy;
+	struct phy_provider *provider;
+	struct phy *phy;
+	struct device_node *child;
+	int num_insts = 0;
+	u32 reg_idx, num_lanes, lvds_idx;
+	int ret;
+
+	p_phy = devm_kzalloc(&pdev->dev, sizeof(*p_phy), GFP_KERNEL);
+	if (!p_phy)
+		return -ENOMEM;
+
+	p_phy->dev = &pdev->dev;
+	platform_set_drvdata(pdev, p_phy);
+
+	p_phy->refclk = devm_clk_get(p_phy->dev, "ref");
+	if (IS_ERR(p_phy->refclk))
+		return dev_err_probe(p_phy->dev, PTR_ERR(p_phy->refclk),
+				     "Failed to get phy reference clock.\n");
+
+	p_phy->csrclk = devm_clk_get_enabled(p_phy->dev, "csr");
+	if (IS_ERR(p_phy->csrclk))
+		return dev_err_probe(p_phy->dev, PTR_ERR(p_phy->csrclk),
+				     "Failed to get enabled phy csr clock.\n");
+
+	p_phy->pcie_regmap = syscon_node_to_regmap(pdev->dev.of_node);
+	if (IS_ERR(p_phy->pcie_regmap))
+		return dev_err_probe(p_phy->dev, PTR_ERR(p_phy->pcie_regmap),
+				     "Failed to get regmap.\n");
+
+	p_phy->topcrm_regmap = syscon_regmap_lookup_by_phandle(
+		pdev->dev.of_node, "siflower,topcrm");
+	if (IS_ERR(p_phy->topcrm_regmap))
+		return dev_err_probe(p_phy->dev, PTR_ERR(p_phy->topcrm_regmap),
+				     "Failed to get regmap for topcrm.\n");
+
+	p_phy->nlanes_enabled = 0;
+	mutex_init(&p_phy->lock);
+
+	/* Start with the PHY and both lanes held in reset. */
+	regmap_clear_bits(p_phy->pcie_regmap, PCIE_SYSM_INIT,
+			PCIE_L1_RSTN | PCIE_L0_RSTN | PCIE_PHY_RSTN);
+
+	for_each_available_child_of_node(pdev->dev.of_node, child) {
+		ret = of_property_read_u32(child, "reg", &reg_idx);
+		if (ret) {
+			of_node_put(child);
+			return dev_err_probe(
+				p_phy->dev, ret,
+				"failed to read reg of child node %d.\n",
+				num_insts);
+		}
+
+		if (reg_idx > 1) {
+			dev_err(p_phy->dev, "PHY reg should be 0 or 1.\n");
+			of_node_put(child);
+			return -EINVAL;
+		}
+
+		p_phy->insts[reg_idx].idx = reg_idx;
+
+		ret = of_property_read_u32(child, "siflower,num-lanes",
+					   &num_lanes);
+		if (ret) {
+			of_node_put(child);
+			return dev_err_probe(
+				p_phy->dev, ret,
+				"failed to read num-lanes of phy@%u.\n",
+				reg_idx);
+		}
+
+		if (num_lanes != 1 && num_lanes != 2) {
+			dev_err(p_phy->dev,
+				"One PHY can only request 1 or 2 serdes lanes.\n");
+			of_node_put(child);
+			return -EINVAL;
+		}
+
+		p_phy->insts[reg_idx].num_lanes = num_lanes;
+
+		/* LVDS provides PCIE reference clock and is a separated block. */
+		ret = of_property_read_u32(child, "siflower,lvds-idx",
+					   &lvds_idx);
+		if (ret)
+			p_phy->insts[reg_idx].lvds_idx = reg_idx;
+		else
+			p_phy->insts[reg_idx].lvds_idx = lvds_idx;
+
+		phy = devm_phy_create(p_phy->dev, child,
+				      &sf21_pcie_phy_ops);
+		if (IS_ERR(phy)) {
+			of_node_put(child);
+			return dev_err_probe(p_phy->dev, PTR_ERR(phy),
+					     "failed to register phy@%u.\n",
+					     reg_idx);
+		}
+
+		phy_set_drvdata(phy, &p_phy->insts[reg_idx]);
+		p_phy->insts[reg_idx].phy = phy;
+
+		num_insts++;
+		if (num_insts >= 2) {
+			of_node_put(child);
+			break;
+		}
+	}
+
+	provider = devm_of_phy_provider_register(p_phy->dev, of_phy_simple_xlate);
+	if (IS_ERR(provider))
+		return dev_err_probe(p_phy->dev, PTR_ERR(provider),
+				     "Failed to register PHY provider.\n");
+
+	return 0;
+}
+
+static const struct of_device_id sf21_pcie_phy_of_match[] = {
+	{ .compatible = "siflower,sf21-pcie-phy" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, sf21_pcie_phy_of_match);
+
+static struct platform_driver sf21_pcie_phy_driver = {
+	.probe		= sf21_pcie_phy_probe,
+	.driver		= {
+		.name	= "sf21-pcie-phy",
+		.of_match_table = sf21_pcie_phy_of_match,
+	},
+};
+module_platform_driver(sf21_pcie_phy_driver);
+
+MODULE_AUTHOR("Chuanhong Guo <[email protected]>");
+MODULE_DESCRIPTION("Siflower SF21A6826/SF21H8898 PCIE PHY driver");
+MODULE_LICENSE("GPL");

+ 115 - 0
target/linux/siflower/files-6.6/drivers/phy/siflower/phy-sf21-usb.c

@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#define SF21_USB_PHY_RESET_OFFSET		0xC
+#define  RST_PRST				BIT(0)
+#define  RST_PHY_POR				BIT(1)
+#define  RST_PHY_PORT				BIT(2)
+
+struct sf21_usb_phy {
+	struct device *dev;
+	struct clk *phy_clk;
+	struct clk *bus_clk;
+	void __iomem *base;
+};
+
+/*
+ * Power-on sequence: assert all three reset bits, enable the PHY
+ * clock, then release the resets in two stages (port/POR last) with
+ * the delays the sequence requires.
+ */
+static int sf21_usb_phy_power_on(struct phy *phy)
+{
+	struct sf21_usb_phy *p_phy = phy_get_drvdata(phy);
+	int ret;
+
+	/* Hold everything in reset while the clock comes up. */
+	writel(RST_PRST | RST_PHY_POR | RST_PHY_PORT,
+	       p_phy->base + SF21_USB_PHY_RESET_OFFSET);
+
+	ret = clk_prepare_enable(p_phy->phy_clk);
+	if (ret < 0) {
+		dev_err(p_phy->dev, "Failed to enable PHY clock: %d\n", ret);
+		return ret;
+	}
+
+	/* Release POR/port resets first, keep PRST, then release PRST. */
+	writel(RST_PRST, p_phy->base + SF21_USB_PHY_RESET_OFFSET);
+	usleep_range(50, 1000);
+	writel(0, p_phy->base + SF21_USB_PHY_RESET_OFFSET);
+	udelay(5);
+
+	return ret;
+}
+
+/* Reverse of power_on: put the PHY back in full reset, gate its clock. */
+static int sf21_usb_phy_power_off(struct phy *phy)
+{
+	struct sf21_usb_phy *p_phy = phy_get_drvdata(phy);
+
+	writel(RST_PRST | RST_PHY_POR | RST_PHY_PORT,
+	       p_phy->base + SF21_USB_PHY_RESET_OFFSET);
+	clk_disable_unprepare(p_phy->phy_clk);
+	return 0;
+}
+
+static const struct phy_ops sf21_usb_phy_ops = {
+	.power_on = sf21_usb_phy_power_on,
+	.power_off = sf21_usb_phy_power_off,
+	.owner = THIS_MODULE,
+};
+
+/*
+ * Probe: get the PHY clock, map the reset register block and register a
+ * single generic PHY.
+ *
+ * Fix vs. original: the devm_platform_ioremap_resource() return value
+ * was never checked; on mapping failure power_on() would have
+ * dereferenced an ERR_PTR via writel().
+ */
+static int sf21_usb_phy_probe(struct platform_device *pdev)
+{
+	struct sf21_usb_phy *p_phy;
+	struct phy_provider *provider;
+	struct phy *phy;
+
+	p_phy = devm_kzalloc(&pdev->dev, sizeof(*p_phy), GFP_KERNEL);
+	if (!p_phy)
+		return -ENOMEM;
+
+	p_phy->dev = &pdev->dev;
+	platform_set_drvdata(pdev, p_phy);
+
+	p_phy->phy_clk = devm_clk_get(p_phy->dev, "usb_phy_clk");
+	if (IS_ERR(p_phy->phy_clk))
+		return dev_err_probe(p_phy->dev, PTR_ERR(p_phy->phy_clk),
+				     "Failed to get usb_phy clock.\n");
+
+	p_phy->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(p_phy->base))
+		return PTR_ERR(p_phy->base);
+
+	phy = devm_phy_create(p_phy->dev, NULL, &sf21_usb_phy_ops);
+	if (IS_ERR(phy))
+		return dev_err_probe(p_phy->dev, PTR_ERR(phy),
+				     "Failed to create PHY.\n");
+
+	phy_set_drvdata(phy, p_phy);
+
+	provider = devm_of_phy_provider_register(p_phy->dev, of_phy_simple_xlate);
+	if (IS_ERR(provider))
+		return dev_err_probe(p_phy->dev, PTR_ERR(provider),
+				     "Failed to register PHY provider.\n");
+
+	return 0;
+}
+
+static const struct of_device_id sf21_usb_phy_of_match[] = {
+	{ .compatible = "siflower,sf21-usb-phy", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, sf21_usb_phy_of_match);
+
+static struct platform_driver sf21_usb_phy_driver = {
+	.probe		= sf21_usb_phy_probe,
+	.driver		= {
+		.name	= "sf21-usb-phy",
+		.of_match_table = sf21_usb_phy_of_match,
+	},
+};
+module_platform_driver(sf21_usb_phy_driver);
+
+MODULE_AUTHOR("Ziying Wu <[email protected]>");
+MODULE_DESCRIPTION("Siflower SF21A6826/SF21H8898 USB2.0 PHY driver");
+MODULE_LICENSE("GPL");

+ 515 - 0
target/linux/siflower/files-6.6/drivers/pinctrl/pinctrl-sf19a2890.c

@@ -0,0 +1,515 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Siflower SF19A2890 pinctrl.
+ *
+ * Based on:
+ * Driver for Broadcom BCM2835 GPIO unit (pinctrl + GPIO)
+ *
+ * Copyright (C) 2012 Chris Boot, Simon Arlott, Stephen Warren
+ */
+
+#include <linux/bitmap.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#define MODULE_NAME "sf19a2890-pinctrl"
+
+struct sf_pinctrl {
+	struct device *dev;
+	void __iomem *base;
+
+	struct pinctrl_dev *pctl_dev;
+	struct pinctrl_desc pctl_desc;
+	struct pinctrl_gpio_range gpio_range;
+};
+
+#define SF19A28_NUM_GPIOS	49
+
+#define SF19A28_REG_PC(pin)	((pin) * 0x8)
+#define  PC_OEN			BIT(7)
+#define  PC_ST			BIT(6)
+#define  PC_IE			BIT(5)
+#define  PC_PD			BIT(4)
+#define  PC_PU			BIT(3)
+#define  PC_DS			GENMASK(2, 0)
+
+#define DRIVE_MIN		6
+#define DRIVE_STEP		3
+#define DRIVE_MAX		(7 * DRIVE_STEP)
+
+#define SF19A28_REG_PMX(pin)	((pin) * 0x8 + 0x4)
+/*
+ * FUNC_SW:
+ *  0: Override pad output enable with PC_OEN
+ *  1: take OEN from GPIO or alternative function
+ * FMUX_SEL:
+ *  0: Alternative function mode
+ *  1: GPIO mode
+ */
+#define  PMX_FUNC_SW		BIT(3)
+#define  PMX_FMUX_SEL		BIT(2)
+#define  PMX_MODE		GENMASK(1, 0)
+
+static struct pinctrl_pin_desc sf19a2890_gpio_pins[] = {
+	PINCTRL_PIN(0, "JTAG_TDO"),
+	PINCTRL_PIN(1, "JTAG_TDI"),
+	PINCTRL_PIN(2, "JTAG_TMS"),
+	PINCTRL_PIN(3, "JTAG_TCK"),
+	PINCTRL_PIN(4, "JTAG_RST"),
+	PINCTRL_PIN(5, "SPI_TXD"),
+	PINCTRL_PIN(6, "SPI_RXD"),
+	PINCTRL_PIN(7, "SPI_CLK"),
+	PINCTRL_PIN(8, "SPI_CSN"),
+	PINCTRL_PIN(9, "UART_TX"),
+	PINCTRL_PIN(10, "UART_RX"),
+	PINCTRL_PIN(11, "I2C_DAT"),
+	PINCTRL_PIN(12, "I2C_CLK"),
+	PINCTRL_PIN(13, "RGMII_GTX_CLK"),
+	PINCTRL_PIN(14, "RGMII_TXCLK"),
+	PINCTRL_PIN(15, "RGMII_TXD0"),
+	PINCTRL_PIN(16, "RGMII_TXD1"),
+	PINCTRL_PIN(17, "RGMII_TXD2"),
+	PINCTRL_PIN(18, "RGMII_TXD3"),
+	PINCTRL_PIN(19, "RGMII_TXCTL"),
+	PINCTRL_PIN(20, "RGMII_RXCLK"),
+	PINCTRL_PIN(21, "RGMII_RXD0"),
+	PINCTRL_PIN(22, "RGMII_RXD1"),
+	PINCTRL_PIN(23, "RGMII_RXD2"),
+	PINCTRL_PIN(24, "RGMII_RXD3"),
+	PINCTRL_PIN(25, "RGMII_RXCTL"),
+	PINCTRL_PIN(26, "RGMII_COL"),
+	PINCTRL_PIN(27, "RGMII_CRS"),
+	PINCTRL_PIN(28, "RGMII_MDC"),
+	PINCTRL_PIN(29, "RGMII_MDIO"),
+	PINCTRL_PIN(30, "HB0_PA_EN"),
+	PINCTRL_PIN(31, "HB0_LNA_EN"),
+	PINCTRL_PIN(32, "HB0_SW_CTRL0"),
+	PINCTRL_PIN(33, "HB0_SW_CTRL1"),
+	PINCTRL_PIN(34, "HB1_PA_EN"),
+	PINCTRL_PIN(35, "HB1_LNA_EN"),
+	PINCTRL_PIN(36, "HB1_SW_CTRL0"),
+	PINCTRL_PIN(37, "HB1_SW_CTRL1"),
+	PINCTRL_PIN(38, "LB0_PA_EN"),
+	PINCTRL_PIN(39, "LB0_LNA_EN"),
+	PINCTRL_PIN(40, "LB0_SW_CTRL0"),
+	PINCTRL_PIN(41, "LB0_SW_CTRL1"),
+	PINCTRL_PIN(42, "LB1_PA_EN"),
+	PINCTRL_PIN(43, "LB1_LNA_EN"),
+	PINCTRL_PIN(44, "LB1_SW_CTRL0"),
+	PINCTRL_PIN(45, "LB1_SW_CTRL1"),
+	PINCTRL_PIN(46, "CLK_OUT"),
+	PINCTRL_PIN(47, "EXT_CLK_IN"),
+	PINCTRL_PIN(48, "DRVVBUS0"),
+};
+
+static const char * const sf19a2890_gpio_groups[] = {
+	"JTAG_TDO",
+	"JTAG_TDI",
+	"JTAG_TMS",
+	"JTAG_TCK",
+	"JTAG_RST",
+	"SPI_TXD",
+	"SPI_RXD",
+	"SPI_CLK",
+	"SPI_CSN",
+	"UART_TX",
+	"UART_RX",
+	"I2C_DAT",
+	"I2C_CLK",
+	"RGMII_GTX_CLK",
+	"RGMII_TXCLK",
+	"RGMII_TXD0",
+	"RGMII_TXD1",
+	"RGMII_TXD2",
+	"RGMII_TXD3",
+	"RGMII_TXCTL",
+	"RGMII_RXCLK",
+	"RGMII_RXD0",
+	"RGMII_RXD1",
+	"RGMII_RXD2",
+	"RGMII_RXD3",
+	"RGMII_RXCTL",
+	"RGMII_COL",
+	"RGMII_CRS",
+	"RGMII_MDC",
+	"RGMII_MDIO",
+	"HB0_PA_EN",
+	"HB0_LNA_EN",
+	"HB0_SW_CTRL0",
+	"HB0_SW_CTRL1",
+	"HB1_PA_EN",
+	"HB1_LNA_EN",
+	"HB1_SW_CTRL0",
+	"HB1_SW_CTRL1",
+	"LB0_PA_EN",
+	"LB0_LNA_EN",
+	"LB0_SW_CTRL0",
+	"LB0_SW_CTRL1",
+	"LB1_PA_EN",
+	"LB1_LNA_EN",
+	"LB1_SW_CTRL0",
+	"LB1_SW_CTRL1",
+	"CLK_OUT",
+	"EXT_CLK_IN",
+	"DRVVBUS0",
+};
+
+#define SF19A28_FUNC0		0
+#define SF19A28_FUNC1		1
+#define SF19A28_FUNC2		2
+#define SF19A28_FUNC3		3
+#define SF19A28_NUM_FUNCS	4
+
+static const char * const sf19a2890_functions[] = {
+	"func0", "func1", "func2", "func3"
+};
+
+static inline u32 sf_pinctrl_rd(struct sf_pinctrl *pc, ulong reg)
+{
+	return readl(pc->base + reg);
+}
+
+static inline void sf_pinctrl_wr(struct sf_pinctrl *pc, ulong reg, u32 val)
+{
+	writel(val, pc->base + reg);
+}
+
+static inline void sf_pinctrl_rmw(struct sf_pinctrl *pc, ulong reg, u32 clr,
+				  u32 set)
+{
+	u32 val;
+
+	val = sf_pinctrl_rd(pc, reg);
+	val &= ~clr;
+	val |= set;
+	sf_pinctrl_wr(pc, reg, val);
+}
+
+static int sf19a2890_pctl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+	return SF19A28_NUM_GPIOS;
+}
+
+static const char *sf19a2890_pctl_get_group_name(struct pinctrl_dev *pctldev,
+						 unsigned selector)
+{
+	return sf19a2890_gpio_groups[selector];
+}
+
+static int sf19a2890_pctl_get_group_pins(struct pinctrl_dev *pctldev,
+					 unsigned selector,
+					 const unsigned **pins,
+					 unsigned *num_pins)
+{
+	*pins = &sf19a2890_gpio_pins[selector].number;
+	*num_pins = 1;
+
+	return 0;
+}
+
+/*
+ * debugfs pin dump: decode the mux register (function / GPIO / forced
+ * output-enable) and the per-pin config register (direction, schmitt,
+ * input enable, pulls, drive strength).
+ *
+ * Fix vs. original: the seq_printf() calls used "%lu" for u32-typed
+ * expressions, which mismatches the varargs promotion on 64-bit
+ * builds; both now use "%u".
+ */
+static void sf19a2890_pctl_pin_dbg_show(struct pinctrl_dev *pctldev,
+					struct seq_file *s, unsigned offset)
+{
+	struct sf_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+	u32 conf = sf_pinctrl_rd(pc, SF19A28_REG_PC(offset));
+	u32 mux = sf_pinctrl_rd(pc, SF19A28_REG_PMX(offset));
+
+	if (!(mux & PMX_FUNC_SW))
+		seq_puts(s, "Forced OE");
+	else if (mux & PMX_FMUX_SEL)
+		seq_puts(s, "GPIO");
+	else
+		seq_printf(s, "Func%u", mux & PMX_MODE);
+	seq_puts(s, " |");
+
+	if (!(conf & PC_OEN) && !(mux & PMX_FUNC_SW))
+		seq_puts(s, " Output");
+	if ((conf & PC_ST))
+		seq_puts(s, " Schmitt_Trigger");
+	if ((conf & PC_IE))
+		seq_puts(s, " Input");
+	if ((conf & PC_PD))
+		seq_puts(s, " Pull_Down");
+	if ((conf & PC_PU))
+		seq_puts(s, " Pull_Up");
+
+	seq_printf(s, " Drive: %u mA",
+		   DRIVE_MIN + (conf & PC_DS) * DRIVE_STEP);
+}
+
+static const struct pinctrl_ops sf19a2890_pctl_ops = {
+	.get_groups_count = sf19a2890_pctl_get_groups_count,
+	.get_group_name = sf19a2890_pctl_get_group_name,
+	.get_group_pins = sf19a2890_pctl_get_group_pins,
+	.pin_dbg_show = sf19a2890_pctl_pin_dbg_show,
+	.dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+	.dt_free_map = pinconf_generic_dt_free_map,
+};
+
+static int sf19a2890_pmx_free(struct pinctrl_dev *pctldev, unsigned offset)
+{
+	struct sf_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+	sf_pinctrl_rmw(pc, SF19A28_REG_PC(offset), PC_IE, PC_OEN);
+	sf_pinctrl_rmw(pc, SF19A28_REG_PMX(offset), PMX_FUNC_SW, 0);
+	return 0;
+}
+
+static int sf19a2890_pmx_get_functions_count(struct pinctrl_dev *pctldev)
+{
+	return SF19A28_NUM_FUNCS;
+}
+
+static const char *sf19a2890_pmx_get_function_name(struct pinctrl_dev *pctldev,
+						   unsigned selector)
+{
+	return sf19a2890_functions[selector];
+}
+
+static int sf19a2890_pmx_get_function_groups(struct pinctrl_dev *pctldev,
+					     unsigned selector,
+					     const char *const **groups,
+					     unsigned *const num_groups)
+{
+	/* every pin can do every function */
+	*groups = sf19a2890_gpio_groups;
+	*num_groups = SF19A28_NUM_GPIOS;
+
+	return 0;
+}
+
+static int sf19a2890_pmx_set(struct pinctrl_dev *pctldev,
+			     unsigned func_selector, unsigned group_selector)
+{
+	struct sf_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+	unsigned pin = group_selector;
+
+	sf_pinctrl_wr(pc, SF19A28_REG_PMX(pin),
+		      PMX_FUNC_SW | FIELD_PREP(PMX_MODE, func_selector));
+	return 0;
+}
+
+static int sf19a2890_pmx_gpio_request_enable(struct pinctrl_dev *pctldev,
+					     struct pinctrl_gpio_range *range,
+					     unsigned offset)
+{
+	struct sf_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+	/* Set to GPIO mode & Let peripheral control OEN */
+	sf_pinctrl_wr(pc, SF19A28_REG_PMX(offset), PMX_FUNC_SW | PMX_FMUX_SEL);
+	/*
+	 * Set PC_IE regardless of whether GPIO is in input mode.
+	 * Otherwise GPIO driver can't read back its status in output mode.
+	 */
+	sf_pinctrl_rmw(pc, SF19A28_REG_PC(offset), 0, PC_IE);
+	return 0;
+}
+
+static void sf19a2890_pmx_gpio_disable_free(struct pinctrl_dev *pctldev,
+					    struct pinctrl_gpio_range *range,
+					    unsigned offset)
+{
+	sf19a2890_pmx_free(pctldev, offset);
+}
+
+static const struct pinmux_ops sf19a2890_pmx_ops = {
+	.free = sf19a2890_pmx_free,
+	.get_functions_count = sf19a2890_pmx_get_functions_count,
+	.get_function_name = sf19a2890_pmx_get_function_name,
+	.get_function_groups = sf19a2890_pmx_get_function_groups,
+	.set_mux = sf19a2890_pmx_set,
+	.gpio_request_enable = sf19a2890_pmx_gpio_request_enable,
+	.gpio_disable_free = sf19a2890_pmx_gpio_disable_free,
+};
+
+/*
+ * Generic pinconf read-back: translate the per-pin PC register bits
+ * into generic pin_config parameters.
+ *
+ * Fix vs. original: PC_ST is a plain schmitt-trigger *enable* bit, so
+ * report it under PIN_CONFIG_INPUT_SCHMITT_ENABLE — the same parameter
+ * sf19a2890_pinconf_set() accepts — instead of PIN_CONFIG_INPUT_SCHMITT
+ * (which is the threshold-value parameter in generic pinconf).
+ */
+static int sf19a2890_pinconf_get(struct pinctrl_dev *pctldev, unsigned pin,
+				 unsigned long *config)
+{
+	struct sf_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+	enum pin_config_param param = pinconf_to_config_param(*config);
+	u32 arg = 0;
+	u32 val = 0;
+
+	if (pin >= SF19A28_NUM_GPIOS)
+		return -EINVAL;
+
+	val = sf_pinctrl_rd(pc, SF19A28_REG_PC(pin));
+
+	switch (param) {
+	case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+		val &= PC_ST;
+		if (val)
+			arg = 1;
+		break;
+
+	case PIN_CONFIG_INPUT_ENABLE:
+		val &= PC_IE;
+		if (val)
+			arg = 1;
+		break;
+
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		val &= PC_PD;
+		if (val)
+			arg = 1;
+		break;
+
+	case PIN_CONFIG_BIAS_PULL_UP:
+		val &= PC_PU;
+		if (val)
+			arg = 1;
+		break;
+
+	case PIN_CONFIG_DRIVE_STRENGTH:
+		/* DS field encodes 6 mA + 3 mA per step. */
+		arg = DRIVE_MIN + (val & PC_DS) * DRIVE_STEP;
+		break;
+
+	default:
+		return -ENOTSUPP;
+	}
+
+	*config = pinconf_to_config_packed(param, arg);
+	return 0;
+}
+
+/*
+ * Generic pinconf write: fold each requested config into the cached PC
+ * register value and write it back after every applied entry.
+ *
+ * Fix vs. original: the per-pin register was read *before* the
+ * pin-range check, so an out-of-range pin caused a stray MMIO read
+ * beyond the pin-config register window.  Validate first.
+ */
+static int sf19a2890_pinconf_set(struct pinctrl_dev *pctldev, unsigned pin,
+				 unsigned long *configs, unsigned num_configs)
+{
+	struct sf_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+	enum pin_config_param param;
+	u32 arg, val;
+	int i;
+
+	if (pin >= SF19A28_NUM_GPIOS)
+		return -EINVAL;
+
+	val = sf_pinctrl_rd(pc, SF19A28_REG_PC(pin));
+
+	for (i = 0; i < num_configs; i++) {
+		param = pinconf_to_config_param(configs[i]);
+		arg = pinconf_to_config_argument(configs[i]);
+		switch (param) {
+		case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+			if (arg)
+				val |= PC_ST;
+			else
+				val &= ~PC_ST;
+			break;
+
+		case PIN_CONFIG_INPUT_ENABLE:
+			if (arg)
+				val |= PC_IE;
+			else
+				val &= ~PC_IE;
+			break;
+
+		case PIN_CONFIG_BIAS_PULL_DOWN:
+			/* Pulls are mutually exclusive: enabling one clears the other. */
+			if (arg) {
+				val |= PC_PD;
+				val &= ~PC_PU;
+			} else {
+				val &= ~PC_PD;
+			}
+			break;
+
+		case PIN_CONFIG_BIAS_PULL_UP:
+			if (arg) {
+				val |= PC_PU;
+				val &= ~PC_PD;
+			} else {
+				val &= ~PC_PU;
+			}
+			break;
+
+		case PIN_CONFIG_DRIVE_STRENGTH:
+			/* Clamp to the max; below DRIVE_MIN leaves the minimum (field 0). */
+			val &= ~PC_DS;
+			if (arg > DRIVE_MAX)
+				val |= PC_DS;
+			else if (arg > DRIVE_MIN)
+				val |= FIELD_PREP(PC_DS, (arg - DRIVE_MIN) /
+								 DRIVE_STEP);
+			break;
+		default:
+			break;
+		}
+		sf_pinctrl_wr(pc, SF19A28_REG_PC(pin), val);
+	}
+	return 0;
+}
+
+static const struct pinconf_ops sf19a2890_pinconf_ops = {
+	.is_generic = true,
+	.pin_config_get = sf19a2890_pinconf_get,
+	.pin_config_set = sf19a2890_pinconf_set,
+};
+
+static const struct pinctrl_desc sf19a2890_pinctrl_desc = {
+	.name = MODULE_NAME,
+	.pins = sf19a2890_gpio_pins,
+	.npins = SF19A28_NUM_GPIOS,
+	.pctlops = &sf19a2890_pctl_ops,
+	.pmxops = &sf19a2890_pmx_ops,
+	.confops = &sf19a2890_pinconf_ops,
+	.owner = THIS_MODULE,
+};
+
+static const struct pinctrl_gpio_range sf_pinctrl_gpio_range = {
+	.name = MODULE_NAME,
+	.npins = SF19A28_NUM_GPIOS,
+};
+
+static const struct of_device_id sf_pinctrl_match[] = {
+	{ .compatible = "siflower,sf19a2890-pinctrl" },
+	{}
+};
+
+/*
+ * Probe: map the pinctrl register block, copy the static descriptor
+ * into the per-device state and register the pin controller.
+ */
+static int sf_pinctrl_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct sf_pinctrl *pc;
+
+	pc = devm_kzalloc(dev, sizeof(*pc), GFP_KERNEL);
+	if (!pc)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, pc);
+	pc->dev = dev;
+
+	pc->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(pc->base))
+		return PTR_ERR(pc->base);
+
+	pc->pctl_desc = sf19a2890_pinctrl_desc;
+	pc->pctl_dev = devm_pinctrl_register(dev, &pc->pctl_desc, pc);
+	if (IS_ERR(pc->pctl_dev))
+		return PTR_ERR(pc->pctl_dev);
+
+	return 0;
+}
+
+static struct platform_driver sf_pinctrl_driver = {
+	.probe = sf_pinctrl_probe,
+	.driver = {
+		.name = MODULE_NAME,
+		.of_match_table = sf_pinctrl_match,
+		.suppress_bind_attrs = true,
+	},
+};
+module_platform_driver(sf_pinctrl_driver);
+
+MODULE_AUTHOR("Chuanhong Guo <[email protected]>");
+MODULE_DESCRIPTION("Siflower SF19A2890 pinctrl driver");
+MODULE_LICENSE("GPL");

+ 131 - 0
target/linux/siflower/files-6.6/drivers/reset/reset-sf19a2890-periph.c

@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/spinlock.h>
+
+struct reset_sf19a28_periph_data {
+	struct reset_controller_dev rcdev;
+	void __iomem *base;
+	spinlock_t lock;
+	u32 reset_masks[];
+};
+
+static inline struct reset_sf19a28_periph_data *
+to_reset_sf19a28_periph_data(struct reset_controller_dev *rcdev)
+{
+	return container_of(rcdev, struct reset_sf19a28_periph_data, rcdev);
+}
+
+/*
+ * Set or clear one reset line's mask in the single shared register.
+ * The read-modify-write is done under the spinlock because all reset
+ * lines live in the same 32-bit register.
+ */
+static int reset_sf19a28_periph_update(struct reset_controller_dev *rcdev,
+			       unsigned long id, bool assert)
+{
+	struct reset_sf19a28_periph_data *data = to_reset_sf19a28_periph_data(rcdev);
+	unsigned long flags;
+	u32 reg;
+
+	spin_lock_irqsave(&data->lock, flags);
+	reg = readl(data->base);
+	if (assert)
+		reg |= data->reset_masks[id];
+	else
+		reg &= ~data->reset_masks[id];
+	writel(reg, data->base);
+	spin_unlock_irqrestore(&data->lock, flags);
+	return 0;
+}
+
+static int reset_sf19a28_periph_assert(struct reset_controller_dev *rcdev,
+			       unsigned long id)
+{
+	return reset_sf19a28_periph_update(rcdev, id, true);
+}
+
+static int reset_sf19a28_periph_deassert(struct reset_controller_dev *rcdev,
+				 unsigned long id)
+{
+	return reset_sf19a28_periph_update(rcdev, id, false);
+}
+
+static int reset_sf19a28_periph_status(struct reset_controller_dev *rcdev,
+			       unsigned long id)
+{
+	struct reset_sf19a28_periph_data *data = to_reset_sf19a28_periph_data(rcdev);
+	u32 reg;
+
+	reg = readl(data->base);
+	return !!(reg & data->reset_masks[id]);
+}
+
+/* File-local ops table; "static" added so it does not leak into the
+ * kernel's global namespace (it is only referenced via data->rcdev.ops). */
+static const struct reset_control_ops reset_sf19a28_periph_ops = {
+	.assert		= reset_sf19a28_periph_assert,
+	.deassert	= reset_sf19a28_periph_deassert,
+	.status		= reset_sf19a28_periph_status,
+};
+
+static const struct of_device_id reset_sf19a28_periph_dt_ids[] = {
+	{ .compatible = "siflower,sf19a2890-periph-reset", },
+	{ /* sentinel */ },
+};
+
+/*
+ * Probe: determine the number of reset lines either from the
+ * "siflower,reset-masks" array (one u32 mask per line) or, if absent,
+ * from "siflower,num-resets" with default masks BIT(0)..BIT(n-1),
+ * then register the reset controller over a single 32-bit register.
+ */
+static int reset_sf19a28_periph_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct reset_sf19a28_periph_data *data;
+	void __iomem *base;
+	int nr_resets;
+	int ret, i;
+	u32 tmp;
+
+	nr_resets = of_property_count_u32_elems(node, "siflower,reset-masks");
+
+	/* No explicit masks: fall back to a simple count with BIT(i) masks. */
+	if (nr_resets < 1) {
+		ret = of_property_read_u32(node, "siflower,num-resets", &tmp);
+		if (ret < 0 || tmp < 1)
+			return -EINVAL;
+		nr_resets = tmp;
+	}
+
+	/* NOTE(review): ">= 32" also rejects exactly 32 lines even though a
+	 * u32 register can hold 32 single-bit masks — confirm intent. */
+	if (nr_resets >= 32) {
+		dev_err(dev, "too many resets.");
+		return -EINVAL;
+	}
+
+	data = devm_kzalloc(dev, struct_size(data, reset_masks, nr_resets), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	ret = of_property_read_u32_array(node, "siflower,reset-masks",
+					 data->reset_masks, nr_resets);
+	if (ret)
+		for (i = 0; i < nr_resets; i++)
+			data->reset_masks[i] = BIT(i);
+
+	base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	spin_lock_init(&data->lock);
+	data->base = base;
+	data->rcdev.owner = THIS_MODULE;
+	data->rcdev.nr_resets = nr_resets;
+	data->rcdev.ops = &reset_sf19a28_periph_ops;
+	data->rcdev.of_node = dev->of_node;
+
+	return devm_reset_controller_register(dev, &data->rcdev);
+}
+
+static struct platform_driver reset_sf19a28_periph_driver = {
+	.probe	= reset_sf19a28_periph_probe,
+	.driver = {
+		.name		= "reset-sf19a2890-periph",
+		.of_match_table	= reset_sf19a28_periph_dt_ids,
+	},
+};
+builtin_platform_driver(reset_sf19a28_periph_driver);

+ 142 - 0
target/linux/siflower/files-6.6/drivers/reset/reset-sf21.c

@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+#include <linux/mfd/syscon.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <dt-bindings/reset/siflower,sf21-reset.h>
+
+#define SF21_SOFT_RESET 0xC0
+
+struct sf21_reset_data {
+	struct reset_controller_dev rcdev;
+	struct regmap *regmap;
+};
+
+static inline int sf21_reset_shift(unsigned long id)
+{
+	switch (id) {
+	case SF21_RESET_GIC:
+	case SF21_RESET_AXI:
+	case SF21_RESET_AHB:
+	case SF21_RESET_APB:
+	case SF21_RESET_IRAM:
+		return id + 1;
+	case SF21_RESET_NPU:
+	case SF21_RESET_DDR_CTL:
+	case SF21_RESET_DDR_PHY:
+	case SF21_RESET_DDR_PWR_OK_IN:
+	case SF21_RESET_DDR_CTL_APB:
+	case SF21_RESET_DDR_PHY_APB:
+		return id + 2;
+	case SF21_RESET_USB:
+		return id + 8;
+	case SF21_RESET_PVT:
+	case SF21_RESET_SERDES_CSR:
+		return id + 11;
+	case SF21_RESET_CRYPT_CSR:
+	case SF21_RESET_CRYPT_APP:
+	case SF21_RESET_NPU2DDR_ASYNCBRIDGE:
+	case SF21_RESET_IROM:
+		return id + 14;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int sf21_reset_assert(struct reset_controller_dev *rcdev,
+				  unsigned long id)
+{
+	struct sf21_reset_data *rd;
+	int shift;
+	u32 mask;
+
+	rd = container_of(rcdev, struct sf21_reset_data, rcdev);
+
+	shift = sf21_reset_shift(id);
+	mask = BIT(shift);
+	return regmap_update_bits(rd->regmap, SF21_SOFT_RESET, mask, 0);
+}
+
+static int sf21_reset_deassert(struct reset_controller_dev *rcdev,
+				    unsigned long id)
+{
+	struct sf21_reset_data *rd;
+	int shift;
+	u32 mask;
+
+	rd = container_of(rcdev, struct sf21_reset_data, rcdev);
+
+	shift = sf21_reset_shift(id);
+	mask = BIT(shift);
+	return regmap_update_bits(rd->regmap, SF21_SOFT_RESET, mask, mask);
+}
+
+static int sf21_reset_status(struct reset_controller_dev *rcdev,
+				  unsigned long id)
+{
+	struct sf21_reset_data *rd;
+	int shift, ret;
+	u32 mask;
+	u32 reg;
+
+	rd = container_of(rcdev, struct sf21_reset_data, rcdev);
+	ret = regmap_read(rd->regmap, SF21_SOFT_RESET, &reg);
+	if (ret)
+		return ret;
+
+	shift = sf21_reset_shift(id);
+	mask = BIT(shift);
+	return !!(reg & mask);
+}
+
+static const struct reset_control_ops sf21_reset_ops = {
+	.assert = sf21_reset_assert,
+	.deassert = sf21_reset_deassert,
+	.status = sf21_reset_status,
+};
+
+static int sf21_reset_probe(struct platform_device *pdev)
+{
+	struct sf21_reset_data *rd;
+	struct device *dev = &pdev->dev;
+	struct device_node *np = pdev->dev.of_node;
+	struct device_node *node;
+
+	rd = devm_kzalloc(dev, sizeof(*rd), GFP_KERNEL);
+	if (!rd)
+		return -ENOMEM;
+
+	node = of_parse_phandle(np, "siflower,crm", 0);
+	rd->regmap = syscon_node_to_regmap(node);
+
+	if (IS_ERR(rd->regmap))
+		return PTR_ERR(rd->regmap);
+
+	rd->rcdev.owner = THIS_MODULE;
+	rd->rcdev.nr_resets = SF21_RESET_MAX + 1;
+	rd->rcdev.ops = &sf21_reset_ops;
+	rd->rcdev.of_node = np;
+
+	return devm_reset_controller_register(dev, &rd->rcdev);
+}
+
+static const struct of_device_id sf21_reset_dt_ids[] = {
+	{ .compatible = "siflower,sf21-reset" },
+	{},
+};
+
+static struct platform_driver sf21_reset_driver = {
+	.probe	= sf21_reset_probe,
+	.driver = {
+		.name		= "sf21-reset",
+		.of_match_table	= sf21_reset_dt_ids,
+	},
+};
+builtin_platform_driver(sf21_reset_driver);

+ 531 - 0
target/linux/siflower/files-6.6/drivers/spi/spi-sf21-qspi.c

@@ -0,0 +1,531 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * A driver for Siflower SF21A6826/SF21H8898 QSPI controller.
+ *
+ * Based on the AMBA PL022 driver:
+ * Copyright (C) 2008-2012 ST-Ericsson AB
+ * Copyright (C) 2006 STMicroelectronics Pvt. Ltd.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+
+#include <linux/spi/spi-mem.h>
+#include <linux/spi/spi.h>
+
+#define SF_SSP_FIFO_DEPTH		0x100
+
+#define SSP_CR0				0x000
+#define SSP_CR1				0x004
+#define SSP_DR				0x008
+#define SSP_SR				0x00C
+#define SSP_CPSR			0x010
+#define SSP_IMSC			0x014
+#define SSP_RIS				0x018
+#define SSP_MIS				0x01C
+#define SSP_ICR				0x020
+#define SSP_DMACR			0x024
+#define SSP_FIFO_LEVEL			0x028
+#define SSP_EXSPI_CMD0			0x02C
+#define SSP_EXSPI_CMD1			0x030
+#define SSP_EXSPI_CMD2			0x034
+
+/* SSP Control Register 0  - SSP_CR0 */
+#define SSP_CR0_EXSPI_FRAME	(0x3 << 4)
+#define SSP_CR0_SPO		(0x1 << 6)
+#define SSP_CR0_SPH		(0x1 << 7)
+#define SSP_CR0_BIT_MODE(x)	((x)-1)
+#define SSP_SCR_SHFT		8
+
+/* SSP Control Register 1  - SSP_CR1 */
+#define SSP_CR1_MASK_SSE	(0x1 << 1)
+
+/* SSP Status Register - SSP_SR */
+#define SSP_SR_MASK_TFE		(0x1 << 0) /* Transmit FIFO empty */
+#define SSP_SR_MASK_TNF		(0x1 << 1) /* Transmit FIFO not full */
+#define SSP_SR_MASK_RNE		(0x1 << 2) /* Receive FIFO not empty */
+#define SSP_SR_MASK_RFF		(0x1 << 3) /* Receive FIFO full */
+#define SSP_SR_MASK_BSY		(0x1 << 4) /* Busy Flag */
+
+/* SSP FIFO Threshold Register - SSP_FIFO_LEVEL */
+#define SSP_FIFO_LEVEL_RX	GENMASK(14, 8) /* Receive FIFO watermark */
+#define SSP_FIFO_LEVEL_TX	GENMASK(6, 0) /* Transmit FIFO watermark */
+#define DFLT_THRESH_RX		32
+#define DFLT_THRESH_TX		32
+
+/* SSP Raw Interrupt Status Register - SSP_RIS */
+#define SSP_RIS_MASK_RORRIS	(0x1 << 0) /* Receive Overrun */
+#define SSP_RIS_MASK_RTRIS	(0x1 << 1) /* Receive Timeout */
+#define SSP_RIS_MASK_RXRIS	(0x1 << 2) /* Receive FIFO Raw Interrupt status */
+#define SSP_RIS_MASK_TXRIS	(0x1 << 3) /* Transmit FIFO Raw Interrupt status */
+
+/* EXSPI command register 0 SSP_EXSPI_CMD0 */
+#define EXSPI_CMD0_CMD_COUNT	BIT(0)		/* cmd byte count; write this register last */
+#define EXSPI_CMD0_ADDR_COUNT	GENMASK(2, 1)	/* addr bytes */
+#define EXSPI_CMD0_EHC_COUNT	BIT(3)		/* Set 1 for 4-byte address mode */
+#define EXSPI_CMD0_TX_COUNT	GENMASK(14, 4)	/* TX data bytes */
+#define EXSPI_CMD0_VALID	BIT(15)		/* Set to 1 to trigger execution of the cmd */
+
+/* EXSPI command register 1 SSP_EXSPI_CMD1 */
+#define EXSPI_CMD1_DUMMY_COUNT	GENMASK(3, 0)	/* dummy bytes */
+#define EXSPI_CMD1_RX_COUNT	GENMASK(14, 4)	/* RX data bytes */
+
+/* EXSPI command register 2 SSP_EXSPI_CMD2 */
+/* Set 1 for 1-wire, 2 for 2-wire, 3 for 4-wire */
+#define EXSPI_CMD2_CMD_IO_MODE	GENMASK(1, 0)	/* cmd IO mode */
+#define EXSPI_CMD2_ADDR_IO_MODE	GENMASK(3, 2)	/* addr IO mode */
+#define EXSPI_CMD2_DATA_IO_MODE	GENMASK(5, 4)	/* data IO mode */
+
+/* SSP Clock Defaults */
+#define SSP_DEFAULT_CLKRATE 0x2
+#define SSP_DEFAULT_PRESCALE 0x40
+
+/* SSP Clock Parameter ranges */
+#define CPSDVR_MIN 0x02
+#define CPSDVR_MAX 0xFE
+#define SCR_MIN 0x00
+#define SCR_MAX 0xFF
+
+#define SF_READ_TIMEOUT		(10 * HZ)
+#define MAX_S_BUF			100
+
+struct sf_qspi {
+	void __iomem *base;
+	struct clk *clk, *apbclk;
+	struct device *dev;
+};
+
+struct ssp_clock_params {
+	u32 freq;
+	u8 cpsdvsr; /* value from 2 to 254 (even only!) */
+	u8 scr;	    /* value from 0 to 255 */
+};
+
+struct chip_data {
+	u32 freq;
+	u32 cr0;
+	u16 cpsr;
+};
+
+static void sf_qspi_flush_rxfifo(struct sf_qspi *s)
+{
+	while (readw(s->base + SSP_SR) & SSP_SR_MASK_RNE)
+		readw(s->base + SSP_DR);
+}
+
+static int sf_qspi_wait_not_busy(struct sf_qspi *s)
+{
+	unsigned long timeout = jiffies + SF_READ_TIMEOUT;
+
+	do {
+		if (!(readw(s->base + SSP_SR) & SSP_SR_MASK_BSY))
+			return 0;
+
+		cond_resched();
+	} while (time_after(timeout, jiffies));
+
+	dev_err(s->dev, "I/O timed out\n");
+	return -ETIMEDOUT;
+}
+
+static int sf_qspi_wait_rx_not_empty(struct sf_qspi *s)
+{
+	unsigned long timeout = jiffies + SF_READ_TIMEOUT;
+
+	do {
+		if (readw(s->base + SSP_SR) & SSP_SR_MASK_RNE)
+			return 0;
+
+		cond_resched();
+	} while (time_after(timeout, jiffies));
+
+	dev_err(s->dev, "read timed out\n");
+	return -ETIMEDOUT;
+}
+
+static int sf_qspi_wait_rxfifo(struct sf_qspi *s)
+{
+	unsigned long timeout = jiffies + SF_READ_TIMEOUT;
+
+	do {
+		if (readw(s->base + SSP_RIS) & SSP_RIS_MASK_RXRIS)
+			return 0;
+
+		cond_resched();
+	} while (time_after(timeout, jiffies));
+
+	dev_err(s->dev, "read timed out\n");
+	return -ETIMEDOUT;
+}
+
+static void sf_qspi_enable(struct sf_qspi *s)
+{
+	/* Enable the SPI hardware */
+	writew(SSP_CR1_MASK_SSE, s->base + SSP_CR1);
+}
+
+static void sf_qspi_disable(struct sf_qspi *s)
+{
+	/* Disable the SPI hardware */
+	writew(0, s->base + SSP_CR1);
+}
+
+static void sf_qspi_xmit(struct sf_qspi *s, unsigned int nbytes, const u8 *out)
+{
+	while (nbytes--)
+		writew(*out++, s->base + SSP_DR);
+}
+
+static int sf_qspi_rcv(struct sf_qspi *s, unsigned int nbytes, u8 *in)
+{
+	int ret, i;
+
+	while (nbytes >= DFLT_THRESH_RX) {
+		/* wait for RX FIFO to reach the threshold */
+		ret = sf_qspi_wait_rxfifo(s);
+		if (ret)
+			return ret;
+
+		for (i = 0; i < DFLT_THRESH_RX; i++)
+			*in++ = readw(s->base + SSP_DR);
+
+		nbytes -= DFLT_THRESH_RX;
+	}
+
+	/* read the remaining data */
+	while (nbytes) {
+		ret = sf_qspi_wait_rx_not_empty(s);
+		if (ret)
+			return ret;
+
+		*in++ = readw(s->base + SSP_DR);
+		nbytes--;
+	}
+
+	return 0;
+}
+
+static void sf_qspi_set_param(struct sf_qspi *s, const struct spi_mem_op *op)
+{
+	unsigned int tx_count = 0, rx_count = 0;
+	u8 cmd_io, addr_io, data_io;
+	u8 cmd_count, addr_count, ehc_count;
+
+	cmd_io = op->cmd.buswidth == 4 ? 3 : op->cmd.buswidth;
+	addr_io = op->addr.buswidth == 4 ? 3 : op->addr.buswidth;
+	data_io = op->data.buswidth == 4 ? 3 : op->data.buswidth;
+
+	if (op->data.nbytes) {
+		if (op->data.dir == SPI_MEM_DATA_IN)
+			rx_count = op->data.nbytes;
+		else
+			tx_count = op->data.nbytes;
+	}
+	if (op->addr.nbytes > 3) {
+		addr_count = 3;
+		ehc_count = 1;
+	} else {
+		addr_count = op->addr.nbytes;
+		ehc_count = 0;
+	}
+	cmd_count = op->cmd.nbytes;
+
+	writew(FIELD_PREP(EXSPI_CMD2_CMD_IO_MODE, cmd_io) |
+		       FIELD_PREP(EXSPI_CMD2_ADDR_IO_MODE, addr_io) |
+		       FIELD_PREP(EXSPI_CMD2_DATA_IO_MODE, data_io),
+	       s->base + SSP_EXSPI_CMD2);
+	writew(FIELD_PREP(EXSPI_CMD1_DUMMY_COUNT, op->dummy.nbytes) |
+		       FIELD_PREP(EXSPI_CMD1_RX_COUNT, rx_count),
+	       s->base + SSP_EXSPI_CMD1);
+	writew(EXSPI_CMD0_VALID |
+		       FIELD_PREP(EXSPI_CMD0_CMD_COUNT, op->cmd.nbytes) |
+		       FIELD_PREP(EXSPI_CMD0_ADDR_COUNT, addr_count) |
+		       FIELD_PREP(EXSPI_CMD0_EHC_COUNT, ehc_count) |
+		       FIELD_PREP(EXSPI_CMD0_TX_COUNT, tx_count),
+	       s->base + SSP_EXSPI_CMD0);
+}
+
+static int sf_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+	struct sf_qspi *s = spi_controller_get_devdata(mem->spi->master);
+	struct chip_data *chip = spi_get_ctldata(mem->spi);
+	unsigned int pops = 0;
+	int ret, i, op_len;
+	const u8 *tx_buf = NULL;
+	u8 *rx_buf = NULL, op_buf[MAX_S_BUF];
+
+	writew(chip->cr0, s->base + SSP_CR0);
+	writew(chip->cpsr, s->base + SSP_CPSR);
+
+	if (op->data.nbytes) {
+		if (op->data.dir == SPI_MEM_DATA_IN)
+			rx_buf = op->data.buf.in;
+		else
+			tx_buf = op->data.buf.out;
+	}
+	op_len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
+	sf_qspi_set_param(s, op);
+
+	op_buf[pops++] = op->cmd.opcode;
+	if (op->addr.nbytes) {
+		for (i = 0; i < op->addr.nbytes; i++)
+			op_buf[pops + i] = op->addr.val >>
+					   (8 * (op->addr.nbytes - i - 1));
+		pops += op->addr.nbytes;
+	}
+
+	sf_qspi_flush_rxfifo(s);
+	memset(op_buf + pops, 0xff, op->dummy.nbytes);
+	sf_qspi_xmit(s, op_len, op_buf);
+	if (tx_buf) {
+		sf_qspi_xmit(s, op->data.nbytes, tx_buf);
+	}
+	sf_qspi_enable(s);
+	if (rx_buf)
+		ret = sf_qspi_rcv(s, op->data.nbytes, rx_buf);
+	else
+		ret = sf_qspi_wait_not_busy(s);
+
+	sf_qspi_disable(s);
+
+	return ret;
+}
+
+static int sf_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+	u32 nbytes;
+
+	nbytes = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
+	if (nbytes >= SF_SSP_FIFO_DEPTH)
+		return -ENOTSUPP;
+
+	if (op->data.dir == SPI_MEM_DATA_IN)
+		op->data.nbytes =
+			min_t(unsigned int, op->data.nbytes, SF_SSP_FIFO_DEPTH);
+	else
+		op->data.nbytes = min_t(unsigned int, op->data.nbytes,
+					SF_SSP_FIFO_DEPTH - nbytes);
+
+	return 0;
+}
+
+static bool sf_qspi_supports_op(struct spi_mem *mem,
+				const struct spi_mem_op *op)
+{
+	if (!spi_mem_default_supports_op(mem, op))
+		return false;
+
+	/* dummy buswidth must be the same as addr */
+	if (op->addr.nbytes && op->dummy.nbytes &&
+	    op->addr.buswidth != op->dummy.buswidth)
+		return false;
+
+	return true;
+}
+
+static inline u32 spi_rate(u32 rate, u16 cpsdvsr, u16 scr)
+{
+	return rate / (cpsdvsr * (1 + scr));
+}
+
+static int calculate_effective_freq(struct sf_qspi *s, int freq,
+				    struct ssp_clock_params *clk_freq)
+{
+	/* Let's calculate the frequency parameters */
+	u16 cpsdvsr = CPSDVR_MIN;
+	u32 rate, rate_scaled, max_tclk, min_tclk, scr;
+	u32 best_freq = 0, best_cpsdvsr = 0, best_scr = 0, tmp, found = 0;
+
+	rate = clk_get_rate(s->clk);
+	/* cpsdvsr = 2 & scr = 0 */
+	max_tclk = spi_rate(rate, CPSDVR_MIN, SCR_MIN);
+	if (freq > max_tclk) {
+		dev_warn(
+			s->dev,
+			"Requested SPI frequency %d Hz is more than maximum: %d Hz\n",
+			freq, max_tclk);
+		clk_freq->freq = max_tclk;
+		clk_freq->cpsdvsr = CPSDVR_MIN;
+		clk_freq->scr = SCR_MIN;
+		return 0;
+	}
+
+	/* cpsdvsr = 254 & scr = 255 */
+	min_tclk = spi_rate(rate, CPSDVR_MAX, SCR_MAX);
+	if (freq < min_tclk) {
+		dev_err(s->dev,
+			"Requested SPI frequency %d Hz is less than minimum: %d Hz\n",
+			freq, min_tclk);
+		return -EINVAL;
+	}
+
+	/*
+	 * best_freq will give closest possible available rate (<= requested
+	 * freq) for all values of scr & cpsdvsr.
+	 */
+	while ((cpsdvsr <= CPSDVR_MAX) && !found) {
+		rate_scaled = rate / cpsdvsr;
+
+		if (rate_scaled < freq)
+			break;
+
+		scr = DIV_ROUND_UP(rate_scaled, freq) - 1;
+		if (scr > SCR_MAX)
+			continue;
+
+		tmp = spi_rate(rate, cpsdvsr, scr);
+
+		/*
+	 * If the exact rate is found, mark found so the loop stops.
+	 * If a closer (but lower) rate is found, record it and keep looking.
+		 */
+		if (tmp > best_freq) {
+			best_freq = tmp;
+			best_cpsdvsr = cpsdvsr;
+			best_scr = scr;
+
+			if (tmp == freq)
+				found = 1;
+		}
+
+		cpsdvsr += 2;
+	}
+
+	clk_freq->freq = best_freq;
+	clk_freq->cpsdvsr = (u8) (best_cpsdvsr & 0xFF);
+	clk_freq->scr = (u8) (best_scr & 0xFF);
+	dev_dbg(s->dev,
+		"SSP Target Frequency is: %u, Effective Frequency is %u\n",
+		freq, best_freq);
+	dev_dbg(s->dev, "SSP cpsdvsr = %d, scr = %d\n",
+		clk_freq->cpsdvsr, clk_freq->scr);
+
+	return 0;
+}
+
+static int sf_qspi_setup(struct spi_device *spi)
+{
+	struct sf_qspi *s = spi_controller_get_devdata(spi->controller);
+	struct ssp_clock_params clk_freq = { .cpsdvsr = 0, .scr = 0 };
+	struct chip_data *chip;
+	int ret = 0;
+	u16 cr0 = 0;
+
+	if (!spi->max_speed_hz)
+		return -EINVAL;
+
+	ret = calculate_effective_freq(s, spi->max_speed_hz, &clk_freq);
+	if (ret < 0)
+		return ret;
+
+	chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	cr0 = SSP_CR0_BIT_MODE(8);
+	cr0 |= clk_freq.scr << 8;
+	/* set SPI mode bits (CPOL/CPHA) */
+	cr0 &= ~(SSP_CR0_SPH | SSP_CR0_SPO);
+	if (spi->mode & SPI_CPHA)
+		cr0 |= SSP_CR0_SPH;
+	if (spi->mode & SPI_CPOL)
+		cr0 |= SSP_CR0_SPO;
+	cr0 |= SSP_CR0_EXSPI_FRAME;
+
+	chip->freq = clk_freq.freq;
+	chip->cr0 = cr0;
+	chip->cpsr = clk_freq.cpsdvsr;
+
+	spi_set_ctldata(spi, chip);
+	return 0;
+}
+
+static void sf_qspi_cleanup(struct spi_device *spi)
+{
+	struct chip_data *chip = spi_get_ctldata(spi);
+
+	spi_set_ctldata(spi, NULL);
+	kfree(chip);
+}
+
+static const struct spi_controller_mem_ops sf_qspi_mem_ops = {
+	.supports_op = sf_qspi_supports_op,
+	.adjust_op_size = sf_qspi_adjust_op_size,
+	.exec_op = sf_qspi_exec_op,
+};
+
+static int sf_qspi_probe(struct platform_device *pdev)
+{
+	struct spi_controller *master;
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct sf_qspi *s;
+	int ret;
+
+	master = devm_spi_alloc_master(&pdev->dev, sizeof(*s));
+	if (!master)
+		return -ENOMEM;
+	master->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL |
+			    SPI_TX_QUAD;
+	s = spi_controller_get_devdata(master);
+	s->dev = dev;
+	platform_set_drvdata(pdev, s);
+
+	s->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(s->base))
+		return dev_err_probe(dev, PTR_ERR(s->base),
+				     "failed to remap memory resources.\n");
+
+	s->clk = devm_clk_get_enabled(dev, "sspclk");
+	if (IS_ERR(s->clk))
+		return dev_err_probe(dev, PTR_ERR(s->clk),
+				     "failed to get and enable sspclk.\n");
+
+	s->apbclk = devm_clk_get_enabled(dev, "apb_pclk");
+	if (IS_ERR(s->apbclk))
+		return dev_err_probe(dev, PTR_ERR(s->apbclk),
+				     "failed to get and enable apb_pclk.\n");
+
+	master->cleanup = sf_qspi_cleanup;
+	master->setup = sf_qspi_setup;
+	master->use_gpio_descriptors = true;
+	master->mem_ops = &sf_qspi_mem_ops;
+	master->dev.of_node = np;
+
+	writew(FIELD_PREP(SSP_FIFO_LEVEL_RX, DFLT_THRESH_RX) |
+	       FIELD_PREP(SSP_FIFO_LEVEL_TX, DFLT_THRESH_TX),
+	       s->base + SSP_FIFO_LEVEL);
+
+	ret = devm_spi_register_controller(dev, master);
+	if (ret)
+		return dev_err_probe(dev, ret,
+				     "failed to register controller.\n");
+
+	return 0;
+}
+
+static const struct of_device_id sf_qspi_ids[] = {
+	{.compatible = "siflower,sf21-qspi"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, sf_qspi_ids);
+
+static struct platform_driver sf_qspi_driver = {
+	.driver = {
+		.name = "sf21_qspi",
+		.of_match_table = sf_qspi_ids,
+	},
+	.probe = sf_qspi_probe,
+};
+module_platform_driver(sf_qspi_driver);

+ 27 - 0
target/linux/siflower/files-6.6/include/dt-bindings/clock/siflower,sf19a2890-clk.h

@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+#ifndef __DT_BINDINGS_CLOCK_SIFLOWER_SF19A2890_CLK_H
+#define __DT_BINDINGS_CLOCK_SIFLOWER_SF19A2890_CLK_H
+#define CLK_PLL_CPU		0
+#define CLK_PLL_DDR		1
+#define CLK_PLL_CMN		2
+#define CLK_MUXDIV_BUS1		3
+#define CLK_MUXDIV_BUS2		4
+#define CLK_MUXDIV_BUS3		5
+#define CLK_MUXDIV_CPU		6
+#define CLK_MUXDIV_PBUS		7
+#define CLK_MUXDIV_MEM_PHY	8
+#define CLK_MUXDIV_UART		9
+#define CLK_MUXDIV_ETH_REF	10
+#define CLK_MUXDIV_ETH_BYP_REF	11
+#define CLK_MUXDIV_ETH_TSU	12
+#define CLK_MUXDIV_GMAC_BYP_REF	13
+#define CLK_MUXDIV_M6250_0	14
+#define CLK_MUXDIV_M6250_1	15
+#define CLK_MUXDIV_WLAN24_PLF	16
+#define CLK_MUXDIV_WLAN5_PLF	17
+#define CLK_MUXDIV_USBPHY_REF	18
+#define CLK_MUXDIV_TCLK		19
+#define CLK_MUXDIV_NPU_PE_CLK	20
+
+#define CLK_SF19A2890_MAX	21
+#endif /* __DT_BINDINGS_CLOCK_SIFLOWER_SF19A2890_CLK_H */

+ 43 - 0
target/linux/siflower/files-6.6/include/dt-bindings/clock/siflower,sf21-topcrm.h

@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+
+#define CLK_CMNPLL_VCO		0
+#define CLK_CMNPLL_POSTDIV	1
+
+#define CLK_DDRPLL_POSTDIV	2
+
+#define CLK_PCIEPLL_VCO		3
+#define CLK_PCIEPLL_FOUT0	4
+#define CLK_PCIEPLL_FOUT1	5
+#define CLK_PCIEPLL_FOUT2	6
+#define CLK_ETH_REF_P		CLK_PCIEPLL_FOUT2
+#define CLK_PCIEPLL_FOUT3	7
+
+#define CLK_CPU			8
+#define CLK_PIC			9
+#define CLK_AXI			10
+#define CLK_AHB			11
+#define CLK_APB			12
+#define CLK_UART		13
+#define CLK_IRAM		14
+#define CLK_NPU			15
+#define CLK_DDRPHY_REF		16
+#define CLK_DDR_BYPASS		17
+#define CLK_ETHTSU		18
+#define CLK_GMAC_BYP_REF	19
+
+#define CLK_USB			20
+#define CLK_USBPHY		21
+#define CLK_SERDES_CSR		22
+#define CLK_CRYPT_CSR		23
+#define CLK_CRYPT_APP		24
+#define CLK_IROM		25
+
+#define CLK_BOOT		26
+
+#define CLK_PVT			27
+#define CLK_PLL_TEST		28
+
+#define CLK_PCIE_REFN		29
+#define CLK_PCIE_REFP		30
+
+#define CLK_MAX			31

+ 65 - 0
target/linux/siflower/files-6.6/include/dt-bindings/pinctrl/siflower,sf21-iomux.h

@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __DT_BINDINGS_SF21_IOMUX_H__
+#define __DT_BINDINGS_SF21_IOMUX_H__
+
+#define SW_DS		0xf		/* Drive strength */
+#define SW_ST		(1 << 4)	/* Schmitt enable */
+#define SW_PD		(1 << 5)	/* Pull-down enable */
+#define SW_PU		(1 << 6)	/* Pull-up enable */
+#define SW_OEN		(1 << 7)	/* Output disable [sic] */
+#define SW_IE		(1 << 8)	/* Input enable */
+#define MODE_BIT0	(1 << 9)	/* Function mode LSB */
+#define MODE_BIT1	(1 << 10)	/* Function mode MSB */
+#define FMUX_SEL	(1 << 11)	/* GPIO mode enable */
+#define FUNC_SW_SEL	(1 << 12)	/* Function mode enable */
+
+#define FUNC_MODE_MASK	0x1f80
+#define FUNC_MODE0	(FUNC_SW_SEL | SW_IE)
+#define FUNC_MODE1	(FUNC_MODE0 | MODE_BIT0)
+#define FUNC_MODE2	(FUNC_MODE0 | MODE_BIT1)
+#define FUNC_MODE3	(FUNC_MODE0 | MODE_BIT0 | MODE_BIT1)
+#define GPIO_MODE	(FUNC_MODE0 | FMUX_SEL)
+
+#define EXT_CLK_IN	0x00
+#define CLK_OUT		0x04
+#define SPI0_TXD	0x08
+#define SPI0_RXD	0x0c
+#define SPI0_CLK	0x10
+#define SPI0_CSN	0x14
+#define SPI0_HOLD	0x18
+#define SPI0_WP		0x1c
+#define JTAG_TDO	0x20
+#define JTAG_TDI	0x24
+#define JTAG_TMS	0x28
+#define JTAG_TCK	0x2c
+#define JTAG_RST	0x30
+#define UART1_TX	0x34
+#define UART1_RX	0x38
+#define I2C0_DAT	0x3c
+#define I2C0_CLK	0x40
+#define I2C1_DAT	0x44
+#define I2C1_CLK	0x48
+#define PWM0		0x4c
+#define PWM1		0x50
+#define RGMII_GTX_CLK	0x54
+#define RGMII_TXCLK	0x58
+#define RGMII_TXD0	0x5c
+#define RGMII_TXD1	0x60
+#define RGMII_TXD2	0x64
+#define RGMII_TXD3	0x68
+#define RGMII_TXCTL	0x6c
+#define RGMII_RXCLK	0x70
+#define RGMII_RXD0	0x74
+#define RGMII_RXD1	0x78
+#define RGMII_RXD2	0x7c
+#define RGMII_RXD3	0x80
+#define RGMII_RXCTL	0x84
+#define QSGMII_MDIO	0x88
+#define QSGMII_MDC	0x8c
+#define SXGMII_MDIO	0x90
+#define SXGMII_MDC	0x94
+#define DGS_INT		0x98
+#define PHY_RSTN	0x9c
+#define PHY_INT		0xa0
+#endif

+ 29 - 0
target/linux/siflower/files-6.6/include/dt-bindings/reset/siflower,sf21-reset.h

@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _SF21_RESETS_H
+#define _SF21_RESETS_H
+
+#define SF21_RESET_GIC			0
+#define SF21_RESET_AXI			1
+#define SF21_RESET_AHB			2
+#define SF21_RESET_APB			3
+#define SF21_RESET_IRAM			4
+
+#define SF21_RESET_NPU			5
+#define SF21_RESET_DDR_CTL		6
+#define SF21_RESET_DDR_PHY		7
+#define SF21_RESET_DDR_PWR_OK_IN	8
+#define SF21_RESET_DDR_CTL_APB		9
+#define SF21_RESET_DDR_PHY_APB		10
+
+#define SF21_RESET_USB			11
+
+#define SF21_RESET_PVT			12
+#define SF21_RESET_SERDES_CSR		13
+
+#define SF21_RESET_CRYPT_CSR		14
+#define SF21_RESET_CRYPT_APP		15
+#define SF21_RESET_NPU2DDR_ASYNCBRIDGE	16
+#define SF21_RESET_IROM			17
+#define SF21_RESET_MAX			17
+#endif

+ 180 - 0
target/linux/siflower/patches-6.6/001-net-phy-c45-add-genphy_c45_pma_read_ext_abilities-fu.patch

@@ -0,0 +1,180 @@
+From: Oleksij Rempel <[email protected]>
+Date: Tue, 12 Dec 2023 06:41:43 +0100
+Subject: [PATCH 01/20] net: phy: c45: add genphy_c45_pma_read_ext_abilities()
+ function
+
+Move part of the genphy_c45_pma_read_abilities() code to a separate
+function.
+
+Some PHYs do not implement PMA/PMD status 2 register (Register 1.8) but
+do implement PMA/PMD extended ability register (Register 1.11). To make
+use of it, we need to be able to access this part of code separately.
+
+Signed-off-by: Oleksij Rempel <[email protected]>
+Reviewed-by: Andrew Lunn <[email protected]>
+Reviewed-by: Russell King (Oracle) <[email protected]>
+Link: https://lore.kernel.org/r/[email protected]
+Signed-off-by: Jakub Kicinski <[email protected]>
+(cherry picked from commit 0c476157085fe2ad13b9bec70ea672e86647fa1a)
+---
+ drivers/net/phy/phy-c45.c | 129 ++++++++++++++++++++++----------------
+ include/linux/phy.h       |   1 +
+ 2 files changed, 75 insertions(+), 55 deletions(-)
+
+--- a/drivers/net/phy/phy-c45.c
++++ b/drivers/net/phy/phy-c45.c
+@@ -920,6 +920,79 @@ int genphy_c45_pma_baset1_read_abilities
+ EXPORT_SYMBOL_GPL(genphy_c45_pma_baset1_read_abilities);
+ 
+ /**
++ * genphy_c45_pma_read_ext_abilities - read supported link modes from PMA
++ * @phydev: target phy_device struct
++ *
++ * Read the supported link modes from the PMA/PMD extended ability register
++ * (Register 1.11).
++ */
++int genphy_c45_pma_read_ext_abilities(struct phy_device *phydev)
++{
++	int val;
++
++	val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_EXTABLE);
++	if (val < 0)
++		return val;
++
++	linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
++			 phydev->supported,
++			 val & MDIO_PMA_EXTABLE_10GBLRM);
++	linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
++			 phydev->supported,
++			 val & MDIO_PMA_EXTABLE_10GBT);
++	linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
++			 phydev->supported,
++			 val & MDIO_PMA_EXTABLE_10GBKX4);
++	linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
++			 phydev->supported,
++			 val & MDIO_PMA_EXTABLE_10GBKR);
++	linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
++			 phydev->supported,
++			 val & MDIO_PMA_EXTABLE_1000BT);
++	linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
++			 phydev->supported,
++			 val & MDIO_PMA_EXTABLE_1000BKX);
++
++	linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
++			 phydev->supported,
++			 val & MDIO_PMA_EXTABLE_100BTX);
++	linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
++			 phydev->supported,
++			 val & MDIO_PMA_EXTABLE_100BTX);
++
++	linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
++			 phydev->supported,
++			 val & MDIO_PMA_EXTABLE_10BT);
++	linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
++			 phydev->supported,
++			 val & MDIO_PMA_EXTABLE_10BT);
++
++	if (val & MDIO_PMA_EXTABLE_NBT) {
++		val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
++				   MDIO_PMA_NG_EXTABLE);
++		if (val < 0)
++			return val;
++
++		linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
++				 phydev->supported,
++				 val & MDIO_PMA_NG_EXTABLE_2_5GBT);
++
++		linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
++				 phydev->supported,
++				 val & MDIO_PMA_NG_EXTABLE_5GBT);
++	}
++
++	if (val & MDIO_PMA_EXTABLE_BT1) {
++		val = genphy_c45_pma_baset1_read_abilities(phydev);
++		if (val < 0)
++			return val;
++	}
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(genphy_c45_pma_read_ext_abilities);
++
++/**
+  * genphy_c45_pma_read_abilities - read supported link modes from PMA
+  * @phydev: target phy_device struct
+  *
+@@ -962,63 +1035,9 @@ int genphy_c45_pma_read_abilities(struct
+ 			 val & MDIO_PMA_STAT2_10GBER);
+ 
+ 	if (val & MDIO_PMA_STAT2_EXTABLE) {
+-		val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_EXTABLE);
++		val = genphy_c45_pma_read_ext_abilities(phydev);
+ 		if (val < 0)
+ 			return val;
+-
+-		linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
+-				 phydev->supported,
+-				 val & MDIO_PMA_EXTABLE_10GBLRM);
+-		linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+-				 phydev->supported,
+-				 val & MDIO_PMA_EXTABLE_10GBT);
+-		linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+-				 phydev->supported,
+-				 val & MDIO_PMA_EXTABLE_10GBKX4);
+-		linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+-				 phydev->supported,
+-				 val & MDIO_PMA_EXTABLE_10GBKR);
+-		linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+-				 phydev->supported,
+-				 val & MDIO_PMA_EXTABLE_1000BT);
+-		linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+-				 phydev->supported,
+-				 val & MDIO_PMA_EXTABLE_1000BKX);
+-
+-		linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+-				 phydev->supported,
+-				 val & MDIO_PMA_EXTABLE_100BTX);
+-		linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+-				 phydev->supported,
+-				 val & MDIO_PMA_EXTABLE_100BTX);
+-
+-		linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+-				 phydev->supported,
+-				 val & MDIO_PMA_EXTABLE_10BT);
+-		linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+-				 phydev->supported,
+-				 val & MDIO_PMA_EXTABLE_10BT);
+-
+-		if (val & MDIO_PMA_EXTABLE_NBT) {
+-			val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
+-					   MDIO_PMA_NG_EXTABLE);
+-			if (val < 0)
+-				return val;
+-
+-			linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+-					 phydev->supported,
+-					 val & MDIO_PMA_NG_EXTABLE_2_5GBT);
+-
+-			linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+-					 phydev->supported,
+-					 val & MDIO_PMA_NG_EXTABLE_5GBT);
+-		}
+-
+-		if (val & MDIO_PMA_EXTABLE_BT1) {
+-			val = genphy_c45_pma_baset1_read_abilities(phydev);
+-			if (val < 0)
+-				return val;
+-		}
+ 	}
+ 
+ 	/* This is optional functionality. If not supported, we may get an error
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -1931,6 +1931,7 @@ int genphy_c45_an_config_aneg(struct phy
+ int genphy_c45_an_disable_aneg(struct phy_device *phydev);
+ int genphy_c45_read_mdix(struct phy_device *phydev);
+ int genphy_c45_pma_read_abilities(struct phy_device *phydev);
++int genphy_c45_pma_read_ext_abilities(struct phy_device *phydev);
+ int genphy_c45_pma_baset1_read_abilities(struct phy_device *phydev);
+ int genphy_c45_read_eee_abilities(struct phy_device *phydev);
+ int genphy_c45_pma_baset1_read_master_slave(struct phy_device *phydev);

+ 49 - 0
target/linux/siflower/patches-6.6/002-net-phy-Optimize-phy-speed-mask-to-be-compatible-to-.patch

@@ -0,0 +1,49 @@
+From: Frank Sae <[email protected]>
+Date: Sun, 1 Sep 2024 01:35:25 -0700
+Subject: [PATCH 02/20] net: phy: Optimize phy speed mask to be compatible to
+ yt8821
+
+yt8521 and yt8531s as Gigabit transceiver use bit15:14(bit9 reserved
+default 0) as phy speed mask, yt8821 as 2.5G transceiver uses bit9 bit15:14
+as phy speed mask.
+
+Be compatible to yt8821, reform phy speed mask and phy speed macro.
+
+Reviewed-by: Andrew Lunn <[email protected]>
+Signed-off-by: Frank Sae <[email protected]>
+Signed-off-by: Paolo Abeni <[email protected]>
+
+(cherry picked from commit 8d878c87b5c45ae64b0aecd4aac71e210d19173f)
+---
+ drivers/net/phy/motorcomm.c | 13 +++++--------
+ 1 file changed, 5 insertions(+), 8 deletions(-)
+
+--- a/drivers/net/phy/motorcomm.c
++++ b/drivers/net/phy/motorcomm.c
+@@ -47,12 +47,10 @@
+ 
+ /* Specific Status Register */
+ #define YTPHY_SPECIFIC_STATUS_REG		0x11
+-#define YTPHY_SSR_SPEED_MODE_OFFSET		14
+-
+-#define YTPHY_SSR_SPEED_MODE_MASK		(BIT(15) | BIT(14))
+-#define YTPHY_SSR_SPEED_10M			0x0
+-#define YTPHY_SSR_SPEED_100M			0x1
+-#define YTPHY_SSR_SPEED_1000M			0x2
++#define YTPHY_SSR_SPEED_MASK			((0x3 << 14) | BIT(9))
++#define YTPHY_SSR_SPEED_10M			((0x0 << 14))
++#define YTPHY_SSR_SPEED_100M			((0x1 << 14))
++#define YTPHY_SSR_SPEED_1000M			((0x2 << 14))
+ #define YTPHY_SSR_DUPLEX_OFFSET			13
+ #define YTPHY_SSR_DUPLEX			BIT(13)
+ #define YTPHY_SSR_PAGE_RECEIVED			BIT(12)
+@@ -1188,8 +1186,7 @@ static int yt8521_adjust_status(struct p
+ 	else
+ 		duplex = DUPLEX_FULL;	/* for fiber, it always DUPLEX_FULL */
+ 
+-	speed_mode = (status & YTPHY_SSR_SPEED_MODE_MASK) >>
+-		     YTPHY_SSR_SPEED_MODE_OFFSET;
++	speed_mode = status & YTPHY_SSR_SPEED_MASK;
+ 
+ 	switch (speed_mode) {
+ 	case YTPHY_SSR_SPEED_10M:

+ 753 - 0
target/linux/siflower/patches-6.6/003-net-phy-Add-driver-for-Motorcomm-yt8821-2.5G-etherne.patch

@@ -0,0 +1,753 @@
+From: Frank Sae <[email protected]>
+Date: Sun, 1 Sep 2024 01:35:26 -0700
+Subject: [PATCH 03/20] net: phy: Add driver for Motorcomm yt8821 2.5G ethernet
+ phy
+
+Add a driver for the motorcomm yt8821 2.5G ethernet phy. Verified the
+driver on BPI-R3(with MediaTek MT7986(Filogic 830) SoC) development board,
+which is developed by Guangdong Bipai Technology Co., Ltd..
+
+yt8821 2.5G ethernet phy works in AUTO_BX2500_SGMII or FORCE_BX2500
+interface, supports 2.5G/1000M/100M/10M speeds, and wol(magic package).
+
+Signed-off-by: Frank Sae <[email protected]>
+Reviewed-by: Sai Krishna <[email protected]>
+Reviewed-by: Andrew Lunn <[email protected]>
+Signed-off-by: Paolo Abeni <[email protected]>
+
+(cherry picked from commit b671105b88c3bb9acc1fb61a3ee2ca0ece60cb8d)
+---
+ drivers/net/phy/motorcomm.c | 671 +++++++++++++++++++++++++++++++++++-
+ 1 file changed, 667 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/phy/motorcomm.c
++++ b/drivers/net/phy/motorcomm.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0+
+ /*
+- * Motorcomm 8511/8521/8531/8531S PHY driver.
++ * Motorcomm 8511/8521/8531/8531S/8821 PHY driver.
+  *
+  * Author: Peter Geis <[email protected]>
+  * Author: Frank <[email protected]>
+@@ -17,8 +17,8 @@
+ #define PHY_ID_YT8521		0x0000011a
+ #define PHY_ID_YT8531		0x4f51e91b
+ #define PHY_ID_YT8531S		0x4f51e91a
+-
+-/* YT8521/YT8531S Register Overview
++#define PHY_ID_YT8821		0x4f51ea19
++/* YT8521/YT8531S/YT8821 Register Overview
+  *	UTP Register space	|	FIBER Register space
+  *  ------------------------------------------------------------
+  * |	UTP MII			|	FIBER MII		|
+@@ -51,6 +51,8 @@
+ #define YTPHY_SSR_SPEED_10M			((0x0 << 14))
+ #define YTPHY_SSR_SPEED_100M			((0x1 << 14))
+ #define YTPHY_SSR_SPEED_1000M			((0x2 << 14))
++#define YTPHY_SSR_SPEED_10G			((0x3 << 14))
++#define YTPHY_SSR_SPEED_2500M			((0x0 << 14) | BIT(9))
+ #define YTPHY_SSR_DUPLEX_OFFSET			13
+ #define YTPHY_SSR_DUPLEX			BIT(13)
+ #define YTPHY_SSR_PAGE_RECEIVED			BIT(12)
+@@ -269,12 +271,89 @@
+ #define YT8531_SCR_CLK_SRC_REF_25M		4
+ #define YT8531_SCR_CLK_SRC_SSC_25M		5
+ 
++#define YT8821_SDS_EXT_CSR_CTRL_REG			0x23
++#define YT8821_SDS_EXT_CSR_VCO_LDO_EN			BIT(15)
++#define YT8821_SDS_EXT_CSR_VCO_BIAS_LPF_EN		BIT(8)
++
++#define YT8821_UTP_EXT_PI_CTRL_REG			0x56
++#define YT8821_UTP_EXT_PI_RST_N_FIFO			BIT(5)
++#define YT8821_UTP_EXT_PI_TX_CLK_SEL_AFE		BIT(4)
++#define YT8821_UTP_EXT_PI_RX_CLK_3_SEL_AFE		BIT(3)
++#define YT8821_UTP_EXT_PI_RX_CLK_2_SEL_AFE		BIT(2)
++#define YT8821_UTP_EXT_PI_RX_CLK_1_SEL_AFE		BIT(1)
++#define YT8821_UTP_EXT_PI_RX_CLK_0_SEL_AFE		BIT(0)
++
++#define YT8821_UTP_EXT_VCT_CFG6_CTRL_REG		0x97
++#define YT8821_UTP_EXT_FECHO_AMP_TH_HUGE		GENMASK(15, 8)
++
++#define YT8821_UTP_EXT_ECHO_CTRL_REG			0x336
++#define YT8821_UTP_EXT_TRACE_LNG_GAIN_THR_1000		GENMASK(14, 8)
++
++#define YT8821_UTP_EXT_GAIN_CTRL_REG			0x340
++#define YT8821_UTP_EXT_TRACE_MED_GAIN_THR_1000		GENMASK(6, 0)
++
++#define YT8821_UTP_EXT_RPDN_CTRL_REG			0x34E
++#define YT8821_UTP_EXT_RPDN_BP_FFE_LNG_2500		BIT(15)
++#define YT8821_UTP_EXT_RPDN_BP_FFE_SHT_2500		BIT(7)
++#define YT8821_UTP_EXT_RPDN_IPR_SHT_2500		GENMASK(6, 0)
++
++#define YT8821_UTP_EXT_TH_20DB_2500_CTRL_REG		0x36A
++#define YT8821_UTP_EXT_TH_20DB_2500			GENMASK(15, 0)
++
++#define YT8821_UTP_EXT_TRACE_CTRL_REG			0x372
++#define YT8821_UTP_EXT_TRACE_LNG_GAIN_THE_2500		GENMASK(14, 8)
++#define YT8821_UTP_EXT_TRACE_MED_GAIN_THE_2500		GENMASK(6, 0)
++
++#define YT8821_UTP_EXT_ALPHA_IPR_CTRL_REG		0x374
++#define YT8821_UTP_EXT_ALPHA_SHT_2500			GENMASK(14, 8)
++#define YT8821_UTP_EXT_IPR_LNG_2500			GENMASK(6, 0)
++
++#define YT8821_UTP_EXT_PLL_CTRL_REG			0x450
++#define YT8821_UTP_EXT_PLL_SPARE_CFG			GENMASK(7, 0)
++
++#define YT8821_UTP_EXT_DAC_IMID_CH_2_3_CTRL_REG		0x466
++#define YT8821_UTP_EXT_DAC_IMID_CH_3_10_ORG		GENMASK(14, 8)
++#define YT8821_UTP_EXT_DAC_IMID_CH_2_10_ORG		GENMASK(6, 0)
++
++#define YT8821_UTP_EXT_DAC_IMID_CH_0_1_CTRL_REG		0x467
++#define YT8821_UTP_EXT_DAC_IMID_CH_1_10_ORG		GENMASK(14, 8)
++#define YT8821_UTP_EXT_DAC_IMID_CH_0_10_ORG		GENMASK(6, 0)
++
++#define YT8821_UTP_EXT_DAC_IMSB_CH_2_3_CTRL_REG		0x468
++#define YT8821_UTP_EXT_DAC_IMSB_CH_3_10_ORG		GENMASK(14, 8)
++#define YT8821_UTP_EXT_DAC_IMSB_CH_2_10_ORG		GENMASK(6, 0)
++
++#define YT8821_UTP_EXT_DAC_IMSB_CH_0_1_CTRL_REG		0x469
++#define YT8821_UTP_EXT_DAC_IMSB_CH_1_10_ORG		GENMASK(14, 8)
++#define YT8821_UTP_EXT_DAC_IMSB_CH_0_10_ORG		GENMASK(6, 0)
++
++#define YT8821_UTP_EXT_MU_COARSE_FR_CTRL_REG		0x4B3
++#define YT8821_UTP_EXT_MU_COARSE_FR_F_FFE		GENMASK(14, 12)
++#define YT8821_UTP_EXT_MU_COARSE_FR_F_FBE		GENMASK(10, 8)
++
++#define YT8821_UTP_EXT_MU_FINE_FR_CTRL_REG		0x4B5
++#define YT8821_UTP_EXT_MU_FINE_FR_F_FFE			GENMASK(14, 12)
++#define YT8821_UTP_EXT_MU_FINE_FR_F_FBE			GENMASK(10, 8)
++
++#define YT8821_UTP_EXT_VGA_LPF1_CAP_CTRL_REG		0x4D2
++#define YT8821_UTP_EXT_VGA_LPF1_CAP_OTHER		GENMASK(7, 4)
++#define YT8821_UTP_EXT_VGA_LPF1_CAP_2500		GENMASK(3, 0)
++
++#define YT8821_UTP_EXT_VGA_LPF2_CAP_CTRL_REG		0x4D3
++#define YT8821_UTP_EXT_VGA_LPF2_CAP_OTHER		GENMASK(7, 4)
++#define YT8821_UTP_EXT_VGA_LPF2_CAP_2500		GENMASK(3, 0)
++
++#define YT8821_UTP_EXT_TXGE_NFR_FR_THP_CTRL_REG		0x660
++#define YT8821_UTP_EXT_NFR_TX_ABILITY			BIT(3)
+ /* Extended Register  end */
+ 
+ #define YTPHY_DTS_OUTPUT_CLK_DIS		0
+ #define YTPHY_DTS_OUTPUT_CLK_25M		25000000
+ #define YTPHY_DTS_OUTPUT_CLK_125M		125000000
+ 
++#define YT8821_CHIP_MODE_AUTO_BX2500_SGMII	0
++#define YT8821_CHIP_MODE_FORCE_BX2500		1
++
+ struct yt8521_priv {
+ 	/* combo_advertising is used for case of YT8521 in combo mode,
+ 	 * this means that yt8521 may work in utp or fiber mode which depends
+@@ -2250,6 +2329,572 @@ static int yt8521_get_features(struct ph
+ 	return ret;
+ }
+ 
++/**
++ * yt8821_get_features - read mmd register to get 2.5G capability
++ * @phydev: target phy_device struct
++ *
++ * Returns: 0 or negative errno code
++ */
++static int yt8821_get_features(struct phy_device *phydev)
++{
++	int ret;
++
++	ret = genphy_c45_pma_read_ext_abilities(phydev);
++	if (ret < 0)
++		return ret;
++
++	return genphy_read_abilities(phydev);
++}
++
++/**
++ * yt8821_get_rate_matching - read register to get phy chip mode
++ * @phydev: target phy_device struct
++ * @iface: PHY data interface type
++ *
++ * Returns: rate matching type or negative errno code
++ */
++static int yt8821_get_rate_matching(struct phy_device *phydev,
++				    phy_interface_t iface)
++{
++	int val;
++
++	val = ytphy_read_ext_with_lock(phydev, YT8521_CHIP_CONFIG_REG);
++	if (val < 0)
++		return val;
++
++	if (FIELD_GET(YT8521_CCR_MODE_SEL_MASK, val) ==
++	    YT8821_CHIP_MODE_FORCE_BX2500)
++		return RATE_MATCH_PAUSE;
++
++	return RATE_MATCH_NONE;
++}
++
++/**
++ * yt8821_aneg_done() - determines the auto negotiation result
++ * @phydev: a pointer to a &struct phy_device
++ *
++ * Returns: 0 (no link) or 1 (utp link) or negative errno code
++ */
++static int yt8821_aneg_done(struct phy_device *phydev)
++{
++	return yt8521_aneg_done_paged(phydev, YT8521_RSSR_UTP_SPACE);
++}
++
++/**
++ * yt8821_serdes_init() - serdes init
++ * @phydev: a pointer to a &struct phy_device
++ *
++ * Returns: 0 or negative errno code
++ */
++static int yt8821_serdes_init(struct phy_device *phydev)
++{
++	int old_page;
++	int ret = 0;
++	u16 mask;
++	u16 set;
++
++	old_page = phy_select_page(phydev, YT8521_RSSR_FIBER_SPACE);
++	if (old_page < 0) {
++		phydev_err(phydev, "Failed to select page: %d\n",
++			   old_page);
++		goto err_restore_page;
++	}
++
++	ret = __phy_modify(phydev, MII_BMCR, BMCR_ANENABLE, 0);
++	if (ret < 0)
++		goto err_restore_page;
++
++	mask = YT8821_SDS_EXT_CSR_VCO_LDO_EN |
++		YT8821_SDS_EXT_CSR_VCO_BIAS_LPF_EN;
++	set = YT8821_SDS_EXT_CSR_VCO_LDO_EN;
++	ret = ytphy_modify_ext(phydev, YT8821_SDS_EXT_CSR_CTRL_REG, mask,
++			       set);
++
++err_restore_page:
++	return phy_restore_page(phydev, old_page, ret);
++}
++
++/**
++ * yt8821_utp_init() - utp init
++ * @phydev: a pointer to a &struct phy_device
++ *
++ * Returns: 0 or negative errno code
++ */
++static int yt8821_utp_init(struct phy_device *phydev)
++{
++	int old_page;
++	int ret = 0;
++	u16 mask;
++	u16 save;
++	u16 set;
++
++	old_page = phy_select_page(phydev, YT8521_RSSR_UTP_SPACE);
++	if (old_page < 0) {
++		phydev_err(phydev, "Failed to select page: %d\n",
++			   old_page);
++		goto err_restore_page;
++	}
++
++	mask = YT8821_UTP_EXT_RPDN_BP_FFE_LNG_2500 |
++		YT8821_UTP_EXT_RPDN_BP_FFE_SHT_2500 |
++		YT8821_UTP_EXT_RPDN_IPR_SHT_2500;
++	set = YT8821_UTP_EXT_RPDN_BP_FFE_LNG_2500 |
++		YT8821_UTP_EXT_RPDN_BP_FFE_SHT_2500;
++	ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_RPDN_CTRL_REG,
++			       mask, set);
++	if (ret < 0)
++		goto err_restore_page;
++
++	mask = YT8821_UTP_EXT_VGA_LPF1_CAP_OTHER |
++		YT8821_UTP_EXT_VGA_LPF1_CAP_2500;
++	ret = ytphy_modify_ext(phydev,
++			       YT8821_UTP_EXT_VGA_LPF1_CAP_CTRL_REG,
++			       mask, 0);
++	if (ret < 0)
++		goto err_restore_page;
++
++	mask = YT8821_UTP_EXT_VGA_LPF2_CAP_OTHER |
++		YT8821_UTP_EXT_VGA_LPF2_CAP_2500;
++	ret = ytphy_modify_ext(phydev,
++			       YT8821_UTP_EXT_VGA_LPF2_CAP_CTRL_REG,
++			       mask, 0);
++	if (ret < 0)
++		goto err_restore_page;
++
++	mask = YT8821_UTP_EXT_TRACE_LNG_GAIN_THE_2500 |
++		YT8821_UTP_EXT_TRACE_MED_GAIN_THE_2500;
++	set = FIELD_PREP(YT8821_UTP_EXT_TRACE_LNG_GAIN_THE_2500, 0x5a) |
++		FIELD_PREP(YT8821_UTP_EXT_TRACE_MED_GAIN_THE_2500, 0x3c);
++	ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_TRACE_CTRL_REG,
++			       mask, set);
++	if (ret < 0)
++		goto err_restore_page;
++
++	mask = YT8821_UTP_EXT_IPR_LNG_2500;
++	set = FIELD_PREP(YT8821_UTP_EXT_IPR_LNG_2500, 0x6c);
++	ret = ytphy_modify_ext(phydev,
++			       YT8821_UTP_EXT_ALPHA_IPR_CTRL_REG,
++			       mask, set);
++	if (ret < 0)
++		goto err_restore_page;
++
++	mask = YT8821_UTP_EXT_TRACE_LNG_GAIN_THR_1000;
++	set = FIELD_PREP(YT8821_UTP_EXT_TRACE_LNG_GAIN_THR_1000, 0x2a);
++	ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_ECHO_CTRL_REG,
++			       mask, set);
++	if (ret < 0)
++		goto err_restore_page;
++
++	mask = YT8821_UTP_EXT_TRACE_MED_GAIN_THR_1000;
++	set = FIELD_PREP(YT8821_UTP_EXT_TRACE_MED_GAIN_THR_1000, 0x22);
++	ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_GAIN_CTRL_REG,
++			       mask, set);
++	if (ret < 0)
++		goto err_restore_page;
++
++	mask = YT8821_UTP_EXT_TH_20DB_2500;
++	set = FIELD_PREP(YT8821_UTP_EXT_TH_20DB_2500, 0x8000);
++	ret = ytphy_modify_ext(phydev,
++			       YT8821_UTP_EXT_TH_20DB_2500_CTRL_REG,
++			       mask, set);
++	if (ret < 0)
++		goto err_restore_page;
++
++	mask = YT8821_UTP_EXT_MU_COARSE_FR_F_FFE |
++		YT8821_UTP_EXT_MU_COARSE_FR_F_FBE;
++	set = FIELD_PREP(YT8821_UTP_EXT_MU_COARSE_FR_F_FFE, 0x7) |
++		FIELD_PREP(YT8821_UTP_EXT_MU_COARSE_FR_F_FBE, 0x7);
++	ret = ytphy_modify_ext(phydev,
++			       YT8821_UTP_EXT_MU_COARSE_FR_CTRL_REG,
++			       mask, set);
++	if (ret < 0)
++		goto err_restore_page;
++
++	mask = YT8821_UTP_EXT_MU_FINE_FR_F_FFE |
++		YT8821_UTP_EXT_MU_FINE_FR_F_FBE;
++	set = FIELD_PREP(YT8821_UTP_EXT_MU_FINE_FR_F_FFE, 0x2) |
++		FIELD_PREP(YT8821_UTP_EXT_MU_FINE_FR_F_FBE, 0x2);
++	ret = ytphy_modify_ext(phydev,
++			       YT8821_UTP_EXT_MU_FINE_FR_CTRL_REG,
++			       mask, set);
++	if (ret < 0)
++		goto err_restore_page;
++
++	/* save YT8821_UTP_EXT_PI_CTRL_REG's val for use later */
++	ret = ytphy_read_ext(phydev, YT8821_UTP_EXT_PI_CTRL_REG);
++	if (ret < 0)
++		goto err_restore_page;
++
++	save = ret;
++
++	mask = YT8821_UTP_EXT_PI_TX_CLK_SEL_AFE |
++		YT8821_UTP_EXT_PI_RX_CLK_3_SEL_AFE |
++		YT8821_UTP_EXT_PI_RX_CLK_2_SEL_AFE |
++		YT8821_UTP_EXT_PI_RX_CLK_1_SEL_AFE |
++		YT8821_UTP_EXT_PI_RX_CLK_0_SEL_AFE;
++	ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_PI_CTRL_REG,
++			       mask, 0);
++	if (ret < 0)
++		goto err_restore_page;
++
++	/* restore YT8821_UTP_EXT_PI_CTRL_REG's val */
++	ret = ytphy_write_ext(phydev, YT8821_UTP_EXT_PI_CTRL_REG, save);
++	if (ret < 0)
++		goto err_restore_page;
++
++	mask = YT8821_UTP_EXT_FECHO_AMP_TH_HUGE;
++	set = FIELD_PREP(YT8821_UTP_EXT_FECHO_AMP_TH_HUGE, 0x38);
++	ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_VCT_CFG6_CTRL_REG,
++			       mask, set);
++	if (ret < 0)
++		goto err_restore_page;
++
++	mask = YT8821_UTP_EXT_NFR_TX_ABILITY;
++	set = YT8821_UTP_EXT_NFR_TX_ABILITY;
++	ret = ytphy_modify_ext(phydev,
++			       YT8821_UTP_EXT_TXGE_NFR_FR_THP_CTRL_REG,
++			       mask, set);
++	if (ret < 0)
++		goto err_restore_page;
++
++	mask = YT8821_UTP_EXT_PLL_SPARE_CFG;
++	set = FIELD_PREP(YT8821_UTP_EXT_PLL_SPARE_CFG, 0xe9);
++	ret = ytphy_modify_ext(phydev, YT8821_UTP_EXT_PLL_CTRL_REG,
++			       mask, set);
++	if (ret < 0)
++		goto err_restore_page;
++
++	mask = YT8821_UTP_EXT_DAC_IMID_CH_3_10_ORG |
++		YT8821_UTP_EXT_DAC_IMID_CH_2_10_ORG;
++	set = FIELD_PREP(YT8821_UTP_EXT_DAC_IMID_CH_3_10_ORG, 0x64) |
++		FIELD_PREP(YT8821_UTP_EXT_DAC_IMID_CH_2_10_ORG, 0x64);
++	ret = ytphy_modify_ext(phydev,
++			       YT8821_UTP_EXT_DAC_IMID_CH_2_3_CTRL_REG,
++			       mask, set);
++	if (ret < 0)
++		goto err_restore_page;
++
++	mask = YT8821_UTP_EXT_DAC_IMID_CH_1_10_ORG |
++		YT8821_UTP_EXT_DAC_IMID_CH_0_10_ORG;
++	set = FIELD_PREP(YT8821_UTP_EXT_DAC_IMID_CH_1_10_ORG, 0x64) |
++		FIELD_PREP(YT8821_UTP_EXT_DAC_IMID_CH_0_10_ORG, 0x64);
++	ret = ytphy_modify_ext(phydev,
++			       YT8821_UTP_EXT_DAC_IMID_CH_0_1_CTRL_REG,
++			       mask, set);
++	if (ret < 0)
++		goto err_restore_page;
++
++	mask = YT8821_UTP_EXT_DAC_IMSB_CH_3_10_ORG |
++		YT8821_UTP_EXT_DAC_IMSB_CH_2_10_ORG;
++	set = FIELD_PREP(YT8821_UTP_EXT_DAC_IMSB_CH_3_10_ORG, 0x64) |
++		FIELD_PREP(YT8821_UTP_EXT_DAC_IMSB_CH_2_10_ORG, 0x64);
++	ret = ytphy_modify_ext(phydev,
++			       YT8821_UTP_EXT_DAC_IMSB_CH_2_3_CTRL_REG,
++			       mask, set);
++	if (ret < 0)
++		goto err_restore_page;
++
++	mask = YT8821_UTP_EXT_DAC_IMSB_CH_1_10_ORG |
++		YT8821_UTP_EXT_DAC_IMSB_CH_0_10_ORG;
++	set = FIELD_PREP(YT8821_UTP_EXT_DAC_IMSB_CH_1_10_ORG, 0x64) |
++		FIELD_PREP(YT8821_UTP_EXT_DAC_IMSB_CH_0_10_ORG, 0x64);
++	ret = ytphy_modify_ext(phydev,
++			       YT8821_UTP_EXT_DAC_IMSB_CH_0_1_CTRL_REG,
++			       mask, set);
++
++err_restore_page:
++	return phy_restore_page(phydev, old_page, ret);
++}
++
++/**
++ * yt8821_auto_sleep_config() - phy auto sleep config
++ * @phydev: a pointer to a &struct phy_device
++ * @enable: true enable auto sleep, false disable auto sleep
++ *
++ * Returns: 0 or negative errno code
++ */
++static int yt8821_auto_sleep_config(struct phy_device *phydev,
++				    bool enable)
++{
++	int old_page;
++	int ret = 0;
++
++	old_page = phy_select_page(phydev, YT8521_RSSR_UTP_SPACE);
++	if (old_page < 0) {
++		phydev_err(phydev, "Failed to select page: %d\n",
++			   old_page);
++		goto err_restore_page;
++	}
++
++	ret = ytphy_modify_ext(phydev,
++			       YT8521_EXTREG_SLEEP_CONTROL1_REG,
++			       YT8521_ESC1R_SLEEP_SW,
++			       enable ? 1 : 0);
++
++err_restore_page:
++	return phy_restore_page(phydev, old_page, ret);
++}
++
++/**
++ * yt8821_soft_reset() - soft reset utp and serdes
++ * @phydev: a pointer to a &struct phy_device
++ *
++ * Returns: 0 or negative errno code
++ */
++static int yt8821_soft_reset(struct phy_device *phydev)
++{
++	return ytphy_modify_ext_with_lock(phydev, YT8521_CHIP_CONFIG_REG,
++					  YT8521_CCR_SW_RST, 0);
++}
++
++/**
++ * yt8821_config_init() - phy initialization
++ * @phydev: a pointer to a &struct phy_device
++ *
++ * Returns: 0 or negative errno code
++ */
++static int yt8821_config_init(struct phy_device *phydev)
++{
++	u8 mode = YT8821_CHIP_MODE_AUTO_BX2500_SGMII;
++	int ret;
++	u16 set;
++
++	if (phydev->interface == PHY_INTERFACE_MODE_2500BASEX)
++		mode = YT8821_CHIP_MODE_FORCE_BX2500;
++
++	set = FIELD_PREP(YT8521_CCR_MODE_SEL_MASK, mode);
++	ret = ytphy_modify_ext_with_lock(phydev,
++					 YT8521_CHIP_CONFIG_REG,
++					 YT8521_CCR_MODE_SEL_MASK,
++					 set);
++	if (ret < 0)
++		return ret;
++
++	__set_bit(PHY_INTERFACE_MODE_2500BASEX,
++		  phydev->possible_interfaces);
++
++	if (mode == YT8821_CHIP_MODE_AUTO_BX2500_SGMII) {
++		__set_bit(PHY_INTERFACE_MODE_SGMII,
++			  phydev->possible_interfaces);
++
++		phydev->rate_matching = RATE_MATCH_NONE;
++	} else if (mode == YT8821_CHIP_MODE_FORCE_BX2500) {
++		phydev->rate_matching = RATE_MATCH_PAUSE;
++	}
++
++	ret = yt8821_serdes_init(phydev);
++	if (ret < 0)
++		return ret;
++
++	ret = yt8821_utp_init(phydev);
++	if (ret < 0)
++		return ret;
++
++	/* disable auto sleep */
++	ret = yt8821_auto_sleep_config(phydev, false);
++	if (ret < 0)
++		return ret;
++
++	/* soft reset */
++	return yt8821_soft_reset(phydev);
++}
++
++/**
++ * yt8821_adjust_status() - update speed and duplex to phydev
++ * @phydev: a pointer to a &struct phy_device
++ * @val: read from YTPHY_SPECIFIC_STATUS_REG
++ */
++static void yt8821_adjust_status(struct phy_device *phydev, int val)
++{
++	int speed, duplex;
++	int speed_mode;
++
++	duplex = FIELD_GET(YTPHY_SSR_DUPLEX, val);
++	speed_mode = val & YTPHY_SSR_SPEED_MASK;
++	switch (speed_mode) {
++	case YTPHY_SSR_SPEED_10M:
++		speed = SPEED_10;
++		break;
++	case YTPHY_SSR_SPEED_100M:
++		speed = SPEED_100;
++		break;
++	case YTPHY_SSR_SPEED_1000M:
++		speed = SPEED_1000;
++		break;
++	case YTPHY_SSR_SPEED_2500M:
++		speed = SPEED_2500;
++		break;
++	default:
++		speed = SPEED_UNKNOWN;
++		break;
++	}
++
++	phydev->speed = speed;
++	phydev->duplex = duplex;
++}
++
++/**
++ * yt8821_update_interface() - update interface per current speed
++ * @phydev: a pointer to a &struct phy_device
++ */
++static void yt8821_update_interface(struct phy_device *phydev)
++{
++	if (!phydev->link)
++		return;
++
++	switch (phydev->speed) {
++	case SPEED_2500:
++		phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
++		break;
++	case SPEED_1000:
++	case SPEED_100:
++	case SPEED_10:
++		phydev->interface = PHY_INTERFACE_MODE_SGMII;
++		break;
++	default:
++		phydev_warn(phydev, "phy speed err :%d\n", phydev->speed);
++		break;
++	}
++}
++
++/**
++ * yt8821_read_status() -  determines the negotiated speed and duplex
++ * @phydev: a pointer to a &struct phy_device
++ *
++ * Returns: 0 or negative errno code
++ */
++static int yt8821_read_status(struct phy_device *phydev)
++{
++	int link;
++	int ret;
++	int val;
++
++	ret = ytphy_write_ext_with_lock(phydev,
++					YT8521_REG_SPACE_SELECT_REG,
++					YT8521_RSSR_UTP_SPACE);
++	if (ret < 0)
++		return ret;
++
++	ret = genphy_read_status(phydev);
++	if (ret < 0)
++		return ret;
++
++	if (phydev->autoneg_complete) {
++		ret = genphy_c45_read_lpa(phydev);
++		if (ret < 0)
++			return ret;
++	}
++
++	ret = phy_read(phydev, YTPHY_SPECIFIC_STATUS_REG);
++	if (ret < 0)
++		return ret;
++
++	val = ret;
++
++	link = val & YTPHY_SSR_LINK;
++	if (link)
++		yt8821_adjust_status(phydev, val);
++
++	if (link) {
++		if (phydev->link == 0)
++			phydev_dbg(phydev,
++				   "%s, phy addr: %d, link up\n",
++				   __func__, phydev->mdio.addr);
++		phydev->link = 1;
++	} else {
++		if (phydev->link == 1)
++			phydev_dbg(phydev,
++				   "%s, phy addr: %d, link down\n",
++				   __func__, phydev->mdio.addr);
++		phydev->link = 0;
++	}
++
++	val = ytphy_read_ext_with_lock(phydev, YT8521_CHIP_CONFIG_REG);
++	if (val < 0)
++		return val;
++
++	if (FIELD_GET(YT8521_CCR_MODE_SEL_MASK, val) ==
++	    YT8821_CHIP_MODE_AUTO_BX2500_SGMII)
++		yt8821_update_interface(phydev);
++
++	return 0;
++}
++
++/**
++ * yt8821_modify_utp_fiber_bmcr - bits modify a PHY's BMCR register
++ * @phydev: the phy_device struct
++ * @mask: bit mask of bits to clear
++ * @set: bit mask of bits to set
++ *
++ * NOTE: Convenience function which allows a PHY's BMCR register to be
++ * modified as new register value = (old register value & ~mask) | set.
++ *
++ * Returns: 0 or negative errno code
++ */
++static int yt8821_modify_utp_fiber_bmcr(struct phy_device *phydev,
++					u16 mask, u16 set)
++{
++	int ret;
++
++	ret = yt8521_modify_bmcr_paged(phydev, YT8521_RSSR_UTP_SPACE,
++				       mask, set);
++	if (ret < 0)
++		return ret;
++
++	return yt8521_modify_bmcr_paged(phydev, YT8521_RSSR_FIBER_SPACE,
++					mask, set);
++}
++
++/**
++ * yt8821_suspend() - suspend the hardware
++ * @phydev: a pointer to a &struct phy_device
++ *
++ * Returns: 0 or negative errno code
++ */
++static int yt8821_suspend(struct phy_device *phydev)
++{
++	int wol_config;
++
++	wol_config = ytphy_read_ext_with_lock(phydev,
++					      YTPHY_WOL_CONFIG_REG);
++	if (wol_config < 0)
++		return wol_config;
++
++	/* if wol enable, do nothing */
++	if (wol_config & YTPHY_WCR_ENABLE)
++		return 0;
++
++	return yt8821_modify_utp_fiber_bmcr(phydev, 0, BMCR_PDOWN);
++}
++
++/**
++ * yt8821_resume() - resume the hardware
++ * @phydev: a pointer to a &struct phy_device
++ *
++ * Returns: 0 or negative errno code
++ */
++static int yt8821_resume(struct phy_device *phydev)
++{
++	int wol_config;
++	int ret;
++
++	/* disable auto sleep */
++	ret = yt8821_auto_sleep_config(phydev, false);
++	if (ret < 0)
++		return ret;
++
++	wol_config = ytphy_read_ext_with_lock(phydev,
++					      YTPHY_WOL_CONFIG_REG);
++	if (wol_config < 0)
++		return wol_config;
++
++	/* if wol enable, do nothing */
++	if (wol_config & YTPHY_WCR_ENABLE)
++		return 0;
++
++	return yt8821_modify_utp_fiber_bmcr(phydev, BMCR_PDOWN, 0);
++}
++
+ static struct phy_driver motorcomm_phy_drvs[] = {
+ 	{
+ 		PHY_ID_MATCH_EXACT(PHY_ID_YT8511),
+@@ -2305,11 +2950,28 @@ static struct phy_driver motorcomm_phy_d
+ 		.suspend	= yt8521_suspend,
+ 		.resume		= yt8521_resume,
+ 	},
++	{
++		PHY_ID_MATCH_EXACT(PHY_ID_YT8821),
++		.name			= "YT8821 2.5Gbps PHY",
++		.get_features		= yt8821_get_features,
++		.read_page		= yt8521_read_page,
++		.write_page		= yt8521_write_page,
++		.get_wol		= ytphy_get_wol,
++		.set_wol		= ytphy_set_wol,
++		.config_aneg		= genphy_config_aneg,
++		.aneg_done		= yt8821_aneg_done,
++		.config_init		= yt8821_config_init,
++		.get_rate_matching	= yt8821_get_rate_matching,
++		.read_status		= yt8821_read_status,
++		.soft_reset		= yt8821_soft_reset,
++		.suspend		= yt8821_suspend,
++		.resume			= yt8821_resume,
++	},
+ };
+ 
+ module_phy_driver(motorcomm_phy_drvs);
+ 
+-MODULE_DESCRIPTION("Motorcomm 8511/8521/8531/8531S PHY driver");
++MODULE_DESCRIPTION("Motorcomm 8511/8521/8531/8531S/8821 PHY driver");
+ MODULE_AUTHOR("Peter Geis");
+ MODULE_AUTHOR("Frank");
+ MODULE_LICENSE("GPL");
+@@ -2319,6 +2981,7 @@ static const struct mdio_device_id __may
+ 	{ PHY_ID_MATCH_EXACT(PHY_ID_YT8521) },
+ 	{ PHY_ID_MATCH_EXACT(PHY_ID_YT8531) },
+ 	{ PHY_ID_MATCH_EXACT(PHY_ID_YT8531S) },
++	{ PHY_ID_MATCH_EXACT(PHY_ID_YT8821) },
+ 	{ /* sentinel */ }
+ };
+ 

+ 58 - 0
target/linux/siflower/patches-6.6/004-mips-add-support-for-Siflower-SF19A2890.patch

@@ -0,0 +1,58 @@
+From: Chuanhong Guo <[email protected]>
+Date: Tue, 20 Aug 2024 08:32:17 +0800
+Subject: [PATCH 04/20] mips: add support for Siflower SF19A2890
+
+Signed-off-by: Chuanhong Guo <[email protected]>
+---
+ arch/mips/Kconfig          | 29 +++++++++++++++++++++++++++++
+ arch/mips/generic/Platform |  1 +
+ 2 files changed, 30 insertions(+)
+
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -861,6 +861,35 @@ config SIBYTE_BIGSUR
+ 	select ZONE_DMA32 if 64BIT
+ 	select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
+ 
++config MACH_SIFLOWER_MIPS
++	bool "Siflower MIPS SoCs"
++	select MIPS_GENERIC
++	select ARM_AMBA
++	select BOOT_RAW
++	select CEVT_R4K
++	select CLKSRC_MIPS_GIC
++	select COMMON_CLK
++	select CPU_MIPSR2_IRQ_EI
++	select CPU_MIPSR2_IRQ_VI
++	select CSRC_R4K
++	select DMA_NONCOHERENT
++	select IRQ_MIPS_CPU
++	select MIPS_CPU_SCACHE
++	select MIPS_GIC
++	select MIPS_L1_CACHE_SHIFT_5
++	select NO_EXCEPT_FILL
++	select SMP_UP if SMP
++	select SYS_HAS_CPU_MIPS32_R2
++	select SYS_SUPPORTS_32BIT_KERNEL
++	select SYS_SUPPORTS_LITTLE_ENDIAN
++	select SYS_SUPPORTS_MIPS16
++	select SYS_SUPPORTS_MIPS_CPS
++	select SYS_SUPPORTS_MULTITHREADING
++	select USE_OF
++	help
++	  Select this to build a kernel which supports SoCs from Siflower
++	  with MIPS InterAptiv cores, like Siflower SF19A2890.
++
+ config SNI_RM
+ 	bool "SNI RM200/300/400"
+ 	select ARC_MEMORY
+--- a/arch/mips/generic/Platform
++++ b/arch/mips/generic/Platform
+@@ -10,6 +10,7 @@
+ 
+ # Note: order matters, keep the asm/mach-generic include last.
+ cflags-$(CONFIG_MACH_INGENIC_SOC)	+= -I$(srctree)/arch/mips/include/asm/mach-ingenic
++cflags-$(CONFIG_MACH_SIFLOWER_MIPS)	+= -I$(srctree)/arch/mips/include/asm/mach-siflower
+ cflags-$(CONFIG_MIPS_GENERIC)	+= -I$(srctree)/arch/mips/include/asm/mach-generic
+ 
+ load-$(CONFIG_MIPS_GENERIC)	+= 0xffffffff80100000

+ 30 - 0
target/linux/siflower/patches-6.6/005-clk-add-drivers-for-siflower-socs.patch

@@ -0,0 +1,30 @@
+From: Chuanhong Guo <[email protected]>
+Date: Tue, 20 Aug 2024 08:33:01 +0800
+Subject: [PATCH 05/20] clk: add drivers for siflower socs
+
+Signed-off-by: Chuanhong Guo <[email protected]>
+---
+ drivers/clk/Kconfig  | 1 +
+ drivers/clk/Makefile | 1 +
+ 2 files changed, 2 insertions(+)
+
+--- a/drivers/clk/Kconfig
++++ b/drivers/clk/Kconfig
+@@ -489,6 +489,7 @@ source "drivers/clk/renesas/Kconfig"
+ source "drivers/clk/rockchip/Kconfig"
+ source "drivers/clk/samsung/Kconfig"
+ source "drivers/clk/sifive/Kconfig"
++source "drivers/clk/siflower/Kconfig"
+ source "drivers/clk/socfpga/Kconfig"
+ source "drivers/clk/sprd/Kconfig"
+ source "drivers/clk/starfive/Kconfig"
+--- a/drivers/clk/Makefile
++++ b/drivers/clk/Makefile
+@@ -116,6 +116,7 @@ obj-y					+= renesas/
+ obj-$(CONFIG_ARCH_ROCKCHIP)		+= rockchip/
+ obj-$(CONFIG_COMMON_CLK_SAMSUNG)	+= samsung/
+ obj-$(CONFIG_CLK_SIFIVE)		+= sifive/
++obj-$(CONFIG_CLK_SIFLOWER)		+= siflower/
+ obj-y					+= socfpga/
+ obj-$(CONFIG_PLAT_SPEAR)		+= spear/
+ obj-y					+= sprd/

+ 37 - 0
target/linux/siflower/patches-6.6/006-reset-add-support-for-sf19a2890.patch

@@ -0,0 +1,37 @@
+From: Chuanhong Guo <[email protected]>
+Date: Tue, 20 Aug 2024 08:33:43 +0800
+Subject: [PATCH 06/20] reset: add support for sf19a2890
+
+Signed-off-by: Chuanhong Guo <[email protected]>
+---
+ drivers/reset/Kconfig  | 8 ++++++++
+ drivers/reset/Makefile | 1 +
+ 2 files changed, 9 insertions(+)
+
+--- a/drivers/reset/Kconfig
++++ b/drivers/reset/Kconfig
+@@ -211,6 +211,14 @@ config RESET_SCMI
+ 	  This driver uses SCMI Message Protocol to interact with the
+ 	  firmware controlling all the reset signals.
+ 
++config RESET_SF19A2890_PERIPH
++	bool "Siflower SF19A2890 Peripheral Reset Controller Driver"
++	default MACH_SIFLOWER_MIPS
++	depends on HAS_IOMEM
++	help
++	  This enables reset controller driver for peripheral reset blocks
++	  found on Siflower SF19A2890 SoC.
++
+ config RESET_SIMPLE
+ 	bool "Simple Reset Controller Driver" if COMPILE_TEST || EXPERT
+ 	default ARCH_ASPEED || ARCH_BCMBCA || ARCH_BITMAIN || ARCH_REALTEK || ARCH_STM32 || (ARCH_INTEL_SOCFPGA && ARM64) || ARCH_SUNXI || ARC
+--- a/drivers/reset/Makefile
++++ b/drivers/reset/Makefile
+@@ -29,6 +29,7 @@ obj-$(CONFIG_RESET_QCOM_PDC) += reset-qc
+ obj-$(CONFIG_RESET_RASPBERRYPI) += reset-raspberrypi.o
+ obj-$(CONFIG_RESET_RZG2L_USBPHY_CTRL) += reset-rzg2l-usbphy-ctrl.o
+ obj-$(CONFIG_RESET_SCMI) += reset-scmi.o
++obj-$(CONFIG_RESET_SF19A2890_PERIPH) += reset-sf19a2890-periph.o
+ obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o
+ obj-$(CONFIG_RESET_SOCFPGA) += reset-socfpga.o
+ obj-$(CONFIG_RESET_SUNPLUS) += reset-sunplus.o

+ 42 - 0
target/linux/siflower/patches-6.6/007-gpio-add-support-for-siflower-socs.patch

@@ -0,0 +1,42 @@
+From: Chuanhong Guo <[email protected]>
+Date: Tue, 20 Aug 2024 08:33:57 +0800
+Subject: [PATCH 07/20] gpio: add support for siflower socs
+
+Add support for the GPIO controller on Siflower SoCs.
+This controller is found on Siflower SF19A2890 (MIPS) and SF21A6826
+(RISC-V)
+
+Signed-off-by: Qingfang Deng <[email protected]>
+Signed-off-by: Chuanhong Guo <[email protected]>
+---
+ drivers/gpio/Kconfig  | 8 ++++++++
+ drivers/gpio/Makefile | 1 +
+ 2 files changed, 9 insertions(+)
+
+--- a/drivers/gpio/Kconfig
++++ b/drivers/gpio/Kconfig
+@@ -576,6 +576,14 @@ config GPIO_SIFIVE
+ 	help
+ 	  Say yes here to support the GPIO device on SiFive SoCs.
+ 
++config GPIO_SIFLOWER
++	tristate "SiFlower GPIO support"
++	depends on OF_GPIO
++	depends on MACH_SIFLOWER_MIPS || RISCV || COMPILE_TEST
++	select GPIOLIB_IRQCHIP
++	help
++	  GPIO controller driver for SiFlower SoCs.
++
+ config GPIO_SIOX
+ 	tristate "SIOX GPIO support"
+ 	depends on SIOX
+--- a/drivers/gpio/Makefile
++++ b/drivers/gpio/Makefile
+@@ -143,6 +143,7 @@ obj-$(CONFIG_GPIO_SAMA5D2_PIOBU)	+= gpio
+ obj-$(CONFIG_GPIO_SCH311X)		+= gpio-sch311x.o
+ obj-$(CONFIG_GPIO_SCH)			+= gpio-sch.o
+ obj-$(CONFIG_GPIO_SIFIVE)		+= gpio-sifive.o
++obj-$(CONFIG_GPIO_SIFLOWER)		+= gpio-siflower.o
+ obj-$(CONFIG_GPIO_SIM)			+= gpio-sim.o
+ obj-$(CONFIG_GPIO_SIOX)			+= gpio-siox.o
+ obj-$(CONFIG_GPIO_SL28CPLD)		+= gpio-sl28cpld.o

+ 38 - 0
target/linux/siflower/patches-6.6/008-pinctrl-add-driver-for-siflower-sf19a2890.patch

@@ -0,0 +1,38 @@
+From: Chuanhong Guo <[email protected]>
+Date: Tue, 20 Aug 2024 08:34:20 +0800
+Subject: [PATCH 08/20] pinctrl: add driver for siflower sf19a2890
+
+---
+ drivers/pinctrl/Kconfig  | 10 ++++++++++
+ drivers/pinctrl/Makefile |  1 +
+ 2 files changed, 11 insertions(+)
+
+--- a/drivers/pinctrl/Kconfig
++++ b/drivers/pinctrl/Kconfig
+@@ -417,6 +417,16 @@ config PINCTRL_ROCKCHIP
+ 	help
+           This support pinctrl and GPIO driver for Rockchip SoCs.
+ 
++config PINCTRL_SF19A2890
++	tristate "Siflower SF19A2890 pinctrl driver"
++	depends on OF && (MACH_SIFLOWER_MIPS || COMPILE_TEST)
++	select PINMUX
++	select PINCONF
++	select GENERIC_PINCONF
++	default MACH_SIFLOWER_MIPS
++	help
++	   Say Y here to enable the Siflower SF19A2890 pinctrl driver.
++
+ config PINCTRL_SINGLE
+ 	tristate "One-register-per-pin type device tree based pinctrl driver"
+ 	depends on OF
+--- a/drivers/pinctrl/Makefile
++++ b/drivers/pinctrl/Makefile
+@@ -43,6 +43,7 @@ obj-$(CONFIG_PINCTRL_PIC32)	+= pinctrl-p
+ obj-$(CONFIG_PINCTRL_PISTACHIO)	+= pinctrl-pistachio.o
+ obj-$(CONFIG_PINCTRL_RK805)	+= pinctrl-rk805.o
+ obj-$(CONFIG_PINCTRL_ROCKCHIP)	+= pinctrl-rockchip.o
++obj-$(CONFIG_PINCTRL_SF19A2890)	+= pinctrl-sf19a2890.o
+ obj-$(CONFIG_PINCTRL_SINGLE)	+= pinctrl-single.o
+ obj-$(CONFIG_PINCTRL_ST) 	+= pinctrl-st.o
+ obj-$(CONFIG_PINCTRL_STMFX) 	+= pinctrl-stmfx.o

+ 37 - 0
target/linux/siflower/patches-6.6/009-stmmac-add-support-for-sf19a2890.patch

@@ -0,0 +1,37 @@
+From: Chuanhong Guo <[email protected]>
+Date: Tue, 20 Aug 2024 08:34:42 +0800
+Subject: [PATCH 09/20] stmmac: add support for sf19a2890
+
+---
+ drivers/net/ethernet/stmicro/stmmac/Kconfig  | 9 +++++++++
+ drivers/net/ethernet/stmicro/stmmac/Makefile | 1 +
+ 2 files changed, 10 insertions(+)
+
+--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
++++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
+@@ -142,6 +142,15 @@ config DWMAC_ROCKCHIP
+ 	  This selects the Rockchip RK3288 SoC glue layer support for
+ 	  the stmmac device driver.
+ 
++config DWMAC_SF19A2890
++	tristate "Siflower SF19A2890 GMAC support"
++	default MACH_SIFLOWER_MIPS
++	help
++	  Support for GMAC on Siflower SF19A2890 SoC.
++
++	  This selects the Siflower SF19A2890 SoC glue layer support for
++	  the stmmac device driver.
++
+ config DWMAC_SOCFPGA
+ 	tristate "SOCFPGA dwmac support"
+ 	default ARCH_INTEL_SOCFPGA
+--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
++++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
+@@ -21,6 +21,7 @@ obj-$(CONFIG_DWMAC_MEDIATEK)	+= dwmac-me
+ obj-$(CONFIG_DWMAC_MESON)	+= dwmac-meson.o dwmac-meson8b.o
+ obj-$(CONFIG_DWMAC_QCOM_ETHQOS)	+= dwmac-qcom-ethqos.o
+ obj-$(CONFIG_DWMAC_ROCKCHIP)	+= dwmac-rk.o
++obj-$(CONFIG_DWMAC_SF19A2890)	+= dwmac-sf19a2890.o
+ obj-$(CONFIG_DWMAC_SOCFPGA)	+= dwmac-altr-socfpga.o
+ obj-$(CONFIG_DWMAC_STARFIVE)	+= dwmac-starfive.o
+ obj-$(CONFIG_DWMAC_STI)		+= dwmac-sti.o

+ 30 - 0
target/linux/siflower/patches-6.6/010-phy-add-support-for-Siflower-USB-PHYs.patch

@@ -0,0 +1,30 @@
+From: Chuanhong Guo <[email protected]>
+Date: Mon, 9 Sep 2024 10:18:33 +0800
+Subject: [PATCH 10/20] phy: add support for Siflower USB PHYs
+
+Signed-off-by: Chuanhong Guo <[email protected]>
+---
+ drivers/phy/Kconfig  | 1 +
+ drivers/phy/Makefile | 1 +
+ 2 files changed, 2 insertions(+)
+
+--- a/drivers/phy/Kconfig
++++ b/drivers/phy/Kconfig
+@@ -90,6 +90,7 @@ source "drivers/phy/ralink/Kconfig"
+ source "drivers/phy/renesas/Kconfig"
+ source "drivers/phy/rockchip/Kconfig"
+ source "drivers/phy/samsung/Kconfig"
++source "drivers/phy/siflower/Kconfig"
+ source "drivers/phy/socionext/Kconfig"
+ source "drivers/phy/st/Kconfig"
+ source "drivers/phy/starfive/Kconfig"
+--- a/drivers/phy/Makefile
++++ b/drivers/phy/Makefile
+@@ -29,6 +29,7 @@ obj-y					+= allwinner/	\
+ 					   renesas/	\
+ 					   rockchip/	\
+ 					   samsung/	\
++					   siflower/	\
+ 					   socionext/	\
+ 					   st/		\
+ 					   starfive/	\

+ 35 - 0
target/linux/siflower/patches-6.6/011-usb-dwc2-add-support-for-Siflower-SF19A2890.patch

@@ -0,0 +1,35 @@
+From: Chuanhong Guo <[email protected]>
+Date: Mon, 9 Sep 2024 16:46:53 +0800
+Subject: [PATCH 11/20] usb: dwc2: add support for Siflower SF19A2890
+
+Signed-off-by: Chuanhong Guo <[email protected]>
+---
+ drivers/usb/dwc2/params.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/usb/dwc2/params.c
++++ b/drivers/usb/dwc2/params.c
+@@ -200,6 +200,14 @@ static void dwc2_set_amcc_params(struct
+ 	p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 << GAHBCFG_HBSTLEN_SHIFT;
+ }
+ 
++static void dwc2_set_sf19a2890_params(struct dwc2_hsotg *hsotg)
++{
++	struct dwc2_core_params *p = &hsotg->params;
++
++	p->max_transfer_size = 65535;
++	p->ahbcfg = GAHBCFG_HBSTLEN_INCR4 << GAHBCFG_HBSTLEN_SHIFT;
++}
++
+ static void dwc2_set_stm32f4x9_fsotg_params(struct dwc2_hsotg *hsotg)
+ {
+ 	struct dwc2_core_params *p = &hsotg->params;
+@@ -294,6 +302,8 @@ const struct of_device_id dwc2_of_match_
+ 	  .data = dwc2_set_amlogic_a1_params },
+ 	{ .compatible = "amcc,dwc-otg", .data = dwc2_set_amcc_params },
+ 	{ .compatible = "apm,apm82181-dwc-otg", .data = dwc2_set_amcc_params },
++	{ .compatible = "siflower,sf19a2890-usb",
++	  .data = dwc2_set_sf19a2890_params },
+ 	{ .compatible = "st,stm32f4x9-fsotg",
+ 	  .data = dwc2_set_stm32f4x9_fsotg_params },
+ 	{ .compatible = "st,stm32f4x9-hsotg" },

+ 66 - 0
target/linux/siflower/patches-6.6/012-usb-dwc2-handle-OTG-interrupt-regardless-of-GINTSTS.patch

@@ -0,0 +1,66 @@
+From: Chuanhong Guo <[email protected]>
+Date: Tue, 10 Sep 2024 09:10:27 +0800
+Subject: [PATCH 12/20] usb: dwc2: handle OTG interrupt regardless of GINTSTS
+
+The DWC OTG 3.30a found on Siflower SF19A2890 has battery charger
+support enabled. It triggers MultVallpChng interrupt (bit 20 of
+GOTGINT) but doesn't set OTGInt in GINTSTS. As a result, this
+interrupt is never handled, and linux disables USB interrupt
+because "nobody cares".
+
+Handle OTG interrupt in IRQ handler regardless of whether the
+OTGInt bit in GINTSTS is set or not.
+
+Signed-off-by: Chuanhong Guo <[email protected]>
+---
+ drivers/usb/dwc2/core_intr.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+--- a/drivers/usb/dwc2/core_intr.c
++++ b/drivers/usb/dwc2/core_intr.c
+@@ -79,7 +79,7 @@ static void dwc2_handle_mode_mismatch_in
+  *
+  * @hsotg: Programming view of DWC_otg controller
+  */
+-static void dwc2_handle_otg_intr(struct dwc2_hsotg *hsotg)
++static irqreturn_t dwc2_handle_otg_intr(struct dwc2_hsotg *hsotg)
+ {
+ 	u32 gotgint;
+ 	u32 gotgctl;
+@@ -87,6 +87,10 @@ static void dwc2_handle_otg_intr(struct
+ 
+ 	gotgint = dwc2_readl(hsotg, GOTGINT);
+ 	gotgctl = dwc2_readl(hsotg, GOTGCTL);
++
++	if (!gotgint)
++		return IRQ_NONE;
++
+ 	dev_dbg(hsotg->dev, "++OTG Interrupt gotgint=%0x [%s]\n", gotgint,
+ 		dwc2_op_state_str(hsotg));
+ 
+@@ -229,6 +233,7 @@ static void dwc2_handle_otg_intr(struct
+ 
+ 	/* Clear GOTGINT */
+ 	dwc2_writel(hsotg, gotgint, GOTGINT);
++	return IRQ_HANDLED;
+ }
+ 
+ /**
+@@ -842,6 +847,8 @@ irqreturn_t dwc2_handle_common_intr(int
+ 		hsotg->frame_number = (dwc2_readl(hsotg, HFNUM)
+ 				       & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT;
+ 
++	retval = dwc2_handle_otg_intr(hsotg);
++
+ 	gintsts = dwc2_read_common_intr(hsotg);
+ 	if (gintsts & ~GINTSTS_PRTINT)
+ 		retval = IRQ_HANDLED;
+@@ -855,8 +862,6 @@ irqreturn_t dwc2_handle_common_intr(int
+ 
+ 	if (gintsts & GINTSTS_MODEMIS)
+ 		dwc2_handle_mode_mismatch_intr(hsotg);
+-	if (gintsts & GINTSTS_OTGINT)
+-		dwc2_handle_otg_intr(hsotg);
+ 	if (gintsts & GINTSTS_CONIDSTSCHNG)
+ 		dwc2_handle_conn_id_status_change_intr(hsotg);
+ 	if (gintsts & GINTSTS_DISCONNINT)

+ 31 - 0
target/linux/siflower/patches-6.6/013-riscv-add-Siflower-RISC-V-SoC-family-Kconfig-support.patch

@@ -0,0 +1,31 @@
+From: Chuanhong Guo <[email protected]>
+Date: Sat, 14 Sep 2024 11:57:35 +0800
+Subject: [PATCH 13/20] riscv: add Siflower RISC-V SoC family Kconfig support
+
+Siflower RISC-V SoCs, including SF21A6826, SF21H8898 and some other
+upcoming chips, are RISC-V chips with T-Head C908 cores for home
+routers and gateways. Add a Kconfig entry named ARCH_SIFLOWER for
+them.
+Notably, these chips use ARM PL011 for UART. ARM_AMBA is selected
+for its driver.
+
+Signed-off-by: Chuanhong Guo <[email protected]>
+---
+ arch/riscv/Kconfig.socs | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/riscv/Kconfig.socs
++++ b/arch/riscv/Kconfig.socs
+@@ -22,6 +22,12 @@ config SOC_SIFIVE
+ 	help
+ 	  This enables support for SiFive SoC platform hardware.
+ 
++config ARCH_SIFLOWER
++	bool "Siflower RISC-V SoCs"
++	select ARM_AMBA if TTY
++	help
++	  This enables support for Siflower RISC-V SoC platform hardware.
++
+ config ARCH_STARFIVE
+ 	def_bool SOC_STARFIVE
+ 

+ 46 - 0
target/linux/siflower/patches-6.6/014-riscv-add-an-option-for-efficient-unaligned-access.patch

@@ -0,0 +1,46 @@
+From: Qingfang Deng <[email protected]>
+Date: Sat, 14 Sep 2024 12:00:59 +0800
+Subject: [PATCH 14/20] riscv: add an option for efficient unaligned access
+
+Some RISC-V CPUs, like the T-Head C908, allow unaligned memory access,
+and we don't need to force an alignment on compiler level.
+Add an option for that.
+
+Signed-off-by: Chuanhong Guo <[email protected]>
+---
+ arch/riscv/Kconfig  | 11 +++++++++++
+ arch/riscv/Makefile |  2 ++
+ 2 files changed, 13 insertions(+)
+
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -639,6 +639,17 @@ config THREAD_SIZE_ORDER
+ 	  Specify the Pages of thread stack size (from 4KB to 64KB), which also
+ 	  affects irq stack size, which is equal to thread stack size.
+ 
++config RISCV_EFFICIENT_UNALIGNED_ACCESS
++	bool "Assume the system supports fast unaligned memory accesses"
++	depends on NONPORTABLE
++	select HAVE_EFFICIENT_UNALIGNED_ACCESS
++	help
++	  Assume that the system supports fast unaligned memory accesses. When
++	  enabled, this option improves the performance of the kernel on such
++	  systems. However, the kernel and userspace programs will run much more
++	  slowly, or will not be able to run at all, on systems that do not
++	  support efficient unaligned memory accesses.
++
+ endmenu # "Platform type"
+ 
+ menu "Kernel features"
+--- a/arch/riscv/Makefile
++++ b/arch/riscv/Makefile
+@@ -104,7 +104,9 @@ KBUILD_AFLAGS_MODULE += $(call as-option
+ # unaligned accesses.  While unaligned accesses are explicitly allowed in the
+ # RISC-V ISA, they're emulated by machine mode traps on all extant
+ # architectures.  It's faster to have GCC emit only aligned accesses.
++ifneq ($(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS),y)
+ KBUILD_CFLAGS += $(call cc-option,-mstrict-align)
++endif
+ 
+ ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
+ prepare: stack_protector_prepare

+ 33 - 0
target/linux/siflower/patches-6.6/015-reset-add-support-for-sf21a6826-sf21h8898.patch

@@ -0,0 +1,33 @@
+From: Chuanhong Guo <[email protected]>
+Date: Sat, 14 Sep 2024 16:51:36 +0800
+Subject: [PATCH 15/20] reset: add support for sf21a6826/sf21h8898
+
+---
+ drivers/reset/Kconfig  | 5 +++++
+ drivers/reset/Makefile | 1 +
+ 2 files changed, 6 insertions(+)
+
+--- a/drivers/reset/Kconfig
++++ b/drivers/reset/Kconfig
+@@ -219,6 +219,11 @@ config RESET_SF19A2890_PERIPH
+ 	  This enables reset controller driver for peripheral reset blocks
+ 	  found on Siflower SF19A2890 SoC.
+ 
++config RESET_SF21
++	tristate "Siflower SF21A6826/SF21H8898 Reset Controller Driver"
++	help
++	  This enables the reset controller driver for Siflower SF21A6826/SF21H8898.
++
+ config RESET_SIMPLE
+ 	bool "Simple Reset Controller Driver" if COMPILE_TEST || EXPERT
+ 	default ARCH_ASPEED || ARCH_BCMBCA || ARCH_BITMAIN || ARCH_REALTEK || ARCH_STM32 || (ARCH_INTEL_SOCFPGA && ARM64) || ARCH_SUNXI || ARC
+--- a/drivers/reset/Makefile
++++ b/drivers/reset/Makefile
+@@ -30,6 +30,7 @@ obj-$(CONFIG_RESET_RASPBERRYPI) += reset
+ obj-$(CONFIG_RESET_RZG2L_USBPHY_CTRL) += reset-rzg2l-usbphy-ctrl.o
+ obj-$(CONFIG_RESET_SCMI) += reset-scmi.o
+ obj-$(CONFIG_RESET_SF19A2890_PERIPH) += reset-sf19a2890-periph.o
++obj-$(CONFIG_RESET_SF21) += reset-sf21.o
+ obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o
+ obj-$(CONFIG_RESET_SOCFPGA) += reset-socfpga.o
+ obj-$(CONFIG_RESET_SUNPLUS) += reset-sunplus.o

+ 41 - 0
target/linux/siflower/patches-6.6/016-spi-spi-mem-allow-gpio-cs-in-spi_mem_exec_op.patch

@@ -0,0 +1,41 @@
+From: Chuanhong Guo <[email protected]>
+Date: Thu, 19 Sep 2024 09:23:27 +0800
+Subject: [PATCH 16/20] spi: spi-mem: allow gpio cs in spi_mem_exec_op
+
+spi_mem_exec_op can use gpio cs, either by not asserting the native
+cs or switching the native cs pin to GPIO mode with pinctrl.
+
+Allow calling exec_op when GPIO CS present and control GPIO CS
+before and after calling exec_op.
+If exec_op decided to return -EOPNOTSUPP, the code will assert and
+deassert GPIO CS without clock pulsing, which should be fine on most
+SPI slaves.
+
+Signed-off-by: Chuanhong Guo <[email protected]>
+---
+ drivers/spi/spi-mem.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/drivers/spi/spi-mem.c
++++ b/drivers/spi/spi-mem.c
+@@ -325,13 +325,19 @@ int spi_mem_exec_op(struct spi_mem *mem,
+ 	if (!spi_mem_internal_supports_op(mem, op))
+ 		return -ENOTSUPP;
+ 
+-	if (ctlr->mem_ops && ctlr->mem_ops->exec_op && !spi_get_csgpiod(mem->spi, 0)) {
++	if (ctlr->mem_ops && ctlr->mem_ops->exec_op) {
+ 		ret = spi_mem_access_start(mem);
+ 		if (ret)
+ 			return ret;
+ 
++		if (spi_get_csgpiod(mem->spi, 0))
++			gpiod_set_value_cansleep(spi_get_csgpiod(mem->spi, 0), 1);
++
+ 		ret = ctlr->mem_ops->exec_op(mem, op);
+ 
++		if (spi_get_csgpiod(mem->spi, 0))
++			gpiod_set_value_cansleep(spi_get_csgpiod(mem->spi, 0), 0);
++
+ 		spi_mem_access_end(mem);
+ 
+ 		/*

+ 46 - 0
target/linux/siflower/patches-6.6/017-spi-add-support-for-sf21-qspi.patch

@@ -0,0 +1,46 @@
+From: Chuanhong Guo <[email protected]>
+Date: Thu, 19 Sep 2024 10:02:16 +0800
+Subject: [PATCH 17/20] spi: add support for sf21-qspi
+
+Add support for the QSPI controller found on Siflower SF21A6826
+and SF21H8898.
+It is based on ARM PL022, with custom modifications to support
+Dual/Quad SPI modes.
+A new driver is created because this modified controller is
+supported under the SPI-MEM framework. While the setup procedure
+is a bit similar to the spi-pl022.c, there aren't much code
+shared between them.
+
+Signed-off-by: Qingfang Deng <[email protected]>
+Signed-off-by: Chuanhong Guo <[email protected]>
+---
+ drivers/spi/Kconfig  | 7 +++++++
+ drivers/spi/Makefile | 1 +
+ 2 files changed, 8 insertions(+)
+
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -949,6 +949,13 @@ config SPI_SIFIVE
+ 	help
+ 	  This exposes the SPI controller IP from SiFive.
+ 
++config SPI_SF21_QSPI
++	tristate "Siflower SF21A6826/SF21H8898 QSPI controller"
++	depends on ARCH_SIFLOWER || COMPILE_TEST
++	help
++	  This enables support for the SPI controller present on the
++	  Siflower SF21A6826/SF21H8898 SoCs.
++
+ config SPI_SLAVE_MT27XX
+ 	tristate "MediaTek SPI slave device"
+ 	depends on ARCH_MEDIATEK || COMPILE_TEST
+--- a/drivers/spi/Makefile
++++ b/drivers/spi/Makefile
+@@ -126,6 +126,7 @@ obj-$(CONFIG_SPI_SH_HSPI)		+= spi-sh-hsp
+ obj-$(CONFIG_SPI_SH_MSIOF)		+= spi-sh-msiof.o
+ obj-$(CONFIG_SPI_SH_SCI)		+= spi-sh-sci.o
+ obj-$(CONFIG_SPI_SIFIVE)		+= spi-sifive.o
++obj-$(CONFIG_SPI_SF21_QSPI)		+= spi-sf21-qspi.o
+ obj-$(CONFIG_SPI_SLAVE_MT27XX)          += spi-slave-mt27xx.o
+ obj-$(CONFIG_SPI_SN_F_OSPI)		+= spi-sn-f-ospi.o
+ obj-$(CONFIG_SPI_SPRD)			+= spi-sprd.o

+ 41 - 0
target/linux/siflower/patches-6.6/018-pci-dw-pcie-add-support-for-sf21-pcie.patch

@@ -0,0 +1,41 @@
+From: Chuanhong Guo <[email protected]>
+Date: Fri, 29 Nov 2024 16:34:49 +0800
+Subject: [PATCH 18/20] pci: dw-pcie add support for sf21 pcie
+
+Add support for the PCIE controller found on Siflower SF21A6826
+and SF21H8898.
+
+Signed-off-by: Chuanhong Guo <[email protected]>
+---
+ drivers/pci/controller/dwc/Kconfig  | 9 +++++++++
+ drivers/pci/controller/dwc/Makefile | 1 +
+ 2 files changed, 10 insertions(+)
+
+--- a/drivers/pci/controller/dwc/Kconfig
++++ b/drivers/pci/controller/dwc/Kconfig
+@@ -317,6 +317,15 @@ config PCIE_FU740
+ 	  Say Y here if you want PCIe controller support for the SiFive
+ 	  FU740.
+ 
++config PCIE_SF21
++	bool "Siflower SF21A6826/SF21H8898 PCIe controller"
++	depends on ARCH_SIFLOWER || COMPILE_TEST
++	depends on PCI_MSI
++	select PCIE_DW_HOST
++	help
++	  Say Y here to enable support of the Siflower SF21A6826/SF21H8898
++	  PCIe controller.
++
+ config PCIE_UNIPHIER
+ 	bool "Socionext UniPhier PCIe controller (host mode)"
+ 	depends on ARCH_UNIPHIER || COMPILE_TEST
+--- a/drivers/pci/controller/dwc/Makefile
++++ b/drivers/pci/controller/dwc/Makefile
+@@ -22,6 +22,7 @@ obj-$(CONFIG_PCIE_KEEMBAY) += pcie-keemb
+ obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
+ obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
+ obj-$(CONFIG_PCI_MESON) += pci-meson.o
++obj-$(CONFIG_PCIE_SF21) += pcie-sf21.o
+ obj-$(CONFIG_PCIE_TEGRA194) += pcie-tegra194.o
+ obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
+ obj-$(CONFIG_PCIE_UNIPHIER_EP) += pcie-uniphier-ep.o

+ 30 - 0
target/linux/siflower/patches-6.6/019-net-phy-add-support-for-Siflower-SF23P1211-SF23P1240.patch

@@ -0,0 +1,30 @@
+From: "haoming.chen" <[email protected]>
+Date: Thu, 7 Nov 2024 20:18:59 +0800
+Subject: [PATCH 19/20] net: phy: add support for Siflower SF23P1211 &
+ SF23P1240
+
+Signed-off-by: haoming.chen <[email protected]>
+---
+ drivers/net/phy/Kconfig  | 5 +++++
+ drivers/net/phy/Makefile | 1 +
+ 2 files changed, 6 insertions(+)
+
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -481,3 +481,8 @@ endif # PHYLIB
+ config MICREL_KS8995MA
+ 	tristate "Micrel KS8995MA 5-ports 10/100 managed Ethernet switch"
+ 	depends on SPI
++
++config SIFLOWER_PHY
++	tristate "Siflower PHYs"
++	help
++	  Currently supports the SF1211F, SF1240 gigabit PHY.
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -108,3 +108,4 @@ obj-$(CONFIG_STE10XP)		+= ste10Xp.o
+ obj-$(CONFIG_TERANETICS_PHY)	+= teranetics.o
+ obj-$(CONFIG_VITESSE_PHY)	+= vitesse.o
+ obj-$(CONFIG_XILINX_GMII2RGMII) += xilinx_gmii2rgmii.o
++obj-$(CONFIG_SIFLOWER_PHY)	+= siflower.o
+\ No newline at end of file

+ 27 - 0
target/linux/siflower/patches-6.6/020-net-ethernet-add-support-for-Siflower-DPNS.patch

@@ -0,0 +1,27 @@
+From: "haoming.chen" <[email protected]>
+Date: Tue, 26 Nov 2024 16:38:13 +0800
+Subject: [PATCH 20/20] net: ethernet: add support for Siflower DPNS
+
+Change-Id: Ie8fc30e4714eaa666563b1a85e22d0eb8ee778b5
+Signed-off-by: haoming.chen <[email protected]>
+---
+ drivers/net/ethernet/Kconfig  | 1 +
+ drivers/net/ethernet/Makefile | 1 +
+ 2 files changed, 2 insertions(+)
+
+--- a/drivers/net/ethernet/Kconfig
++++ b/drivers/net/ethernet/Kconfig
+@@ -192,5 +192,6 @@ source "drivers/net/ethernet/wangxun/Kco
+ source "drivers/net/ethernet/wiznet/Kconfig"
+ source "drivers/net/ethernet/xilinx/Kconfig"
+ source "drivers/net/ethernet/xircom/Kconfig"
++source "drivers/net/ethernet/siflower/Kconfig"
+ 
+ endif # ETHERNET
+--- a/drivers/net/ethernet/Makefile
++++ b/drivers/net/ethernet/Makefile
+@@ -104,3 +104,4 @@ obj-$(CONFIG_NET_VENDOR_XILINX) += xilin
+ obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
+ obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/
+ obj-$(CONFIG_NET_VENDOR_PENSANDO) += pensando/
++obj-$(CONFIG_NET_VENDOR_SIFLOWER) += siflower/

+ 266 - 0
target/linux/siflower/sf19a2890/config-6.6

@@ -0,0 +1,266 @@
+CONFIG_ARCH_32BIT_OFF_T=y
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_ARCH_KEEP_MEMBLOCK=y
+CONFIG_ARCH_MMAP_RND_BITS_MAX=15
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=15
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_ARM_AMBA=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BOARD_SCACHE=y
+CONFIG_CEVT_R4K=y
+CONFIG_CLKSRC_MIPS_GIC=y
+CONFIG_CLK_SF19A2890=y
+CONFIG_CLK_SF19A2890_PERIPH=y
+CONFIG_CLK_SIFLOWER=y
+CONFIG_CLOCKSOURCE_WATCHDOG=y
+CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US=100
+CONFIG_CLONE_BACKWARDS=y
+CONFIG_COMMON_CLK=y
+CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1
+CONFIG_COMPAT_32BIT_TIME=y
+CONFIG_CONNECTOR=y
+CONFIG_CONSOLE_LOGLEVEL_DEFAULT=15
+CONFIG_CONTEXT_TRACKING=y
+CONFIG_CONTEXT_TRACKING_IDLE=y
+CONFIG_COREDUMP=y
+CONFIG_CPU_GENERIC_DUMP_TLB=y
+CONFIG_CPU_HAS_DIEI=y
+CONFIG_CPU_HAS_PREFETCH=y
+CONFIG_CPU_HAS_RIXI=y
+CONFIG_CPU_HAS_SYNC=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_CPU_MIPSR2=y
+CONFIG_CPU_MIPSR2_IRQ_EI=y
+CONFIG_CPU_MIPSR2_IRQ_VI=y
+CONFIG_CPU_MITIGATIONS=y
+CONFIG_CPU_NEEDS_NO_SMARTMIPS_OR_MICROMIPS=y
+CONFIG_CPU_R4K_CACHE_TLB=y
+CONFIG_CPU_R4K_FPU=y
+CONFIG_CPU_RMAP=y
+CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y
+CONFIG_CPU_SUPPORTS_HIGHMEM=y
+CONFIG_CPU_SUPPORTS_MSA=y
+CONFIG_CRC16=y
+CONFIG_CRC_CCITT=y
+CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y
+CONFIG_CRYPTO_LIB_GF128MUL=y
+CONFIG_CRYPTO_LIB_POLY1305_RSIZE=2
+CONFIG_CRYPTO_LIB_SHA1=y
+CONFIG_CRYPTO_LIB_UTILS=y
+CONFIG_CSRC_R4K=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_REDUCED=y
+CONFIG_DMA_NONCOHERENT=y
+CONFIG_DTC=y
+# CONFIG_DWMAC_GENERIC is not set
+CONFIG_DWMAC_SF19A2890=y
+CONFIG_DW_WATCHDOG=y
+CONFIG_ELF_CORE=y
+CONFIG_EXCLUSIVE_SYSTEM_RAM=y
+CONFIG_FANOTIFY=y
+CONFIG_FHANDLE=y
+CONFIG_FIXED_PHY=y
+CONFIG_FS_IOMAP=y
+CONFIG_FUNCTION_ALIGNMENT=0
+CONFIG_FWNODE_MDIO=y
+CONFIG_FW_LOADER_PAGED_BUF=y
+CONFIG_FW_LOADER_SYSFS=y
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_GENERIC_ATOMIC64=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_GENERIC_CPU_AUTOPROBE=y
+CONFIG_GENERIC_GETTIMEOFDAY=y
+CONFIG_GENERIC_IDLE_POLL_SETUP=y
+CONFIG_GENERIC_IOMAP=y
+CONFIG_GENERIC_IRQ_CHIP=y
+CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
+CONFIG_GENERIC_IRQ_MIGRATION=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_GENERIC_LIB_ASHLDI3=y
+CONFIG_GENERIC_LIB_ASHRDI3=y
+CONFIG_GENERIC_LIB_CMPDI2=y
+CONFIG_GENERIC_LIB_LSHRDI3=y
+CONFIG_GENERIC_LIB_UCMPDI2=y
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_GENERIC_PHY=y
+CONFIG_GENERIC_PINCONF=y
+CONFIG_GENERIC_SCHED_CLOCK=y
+CONFIG_GENERIC_SMP_IDLE_THREAD=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GPIOLIB_IRQCHIP=y
+CONFIG_GPIO_CDEV=y
+CONFIG_GPIO_SIFLOWER=y
+CONFIG_GRO_CELLS=y
+CONFIG_HARDWARE_WATCHPOINTS=y
+CONFIG_HAS_DMA=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_IOPORT_MAP=y
+CONFIG_HOTPLUG_CORE_SYNC=y
+CONFIG_HOTPLUG_CORE_SYNC_DEAD=y
+CONFIG_HOTPLUG_CPU=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_IRQCHIP=y
+CONFIG_IRQ_DOMAIN=y
+CONFIG_IRQ_DOMAIN_HIERARCHY=y
+CONFIG_IRQ_FORCED_THREADING=y
+CONFIG_IRQ_MIPS_CPU=y
+CONFIG_IRQ_WORK=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LIBFDT=y
+CONFIG_LOCK_DEBUGGING_SUPPORT=y
+CONFIG_MACH_SIFLOWER_MIPS=y
+CONFIG_MDIO_BUS=y
+CONFIG_MDIO_DEVICE=y
+CONFIG_MDIO_DEVRES=y
+CONFIG_MFD_SYSCON=y
+CONFIG_MICREL_PHY=y
+CONFIG_MIGRATION=y
+CONFIG_MIPS=y
+CONFIG_MIPS_ASID_BITS=8
+CONFIG_MIPS_ASID_SHIFT=0
+CONFIG_MIPS_CLOCK_VSYSCALL=y
+CONFIG_MIPS_CM=y
+CONFIG_MIPS_CMDLINE_DTB_EXTEND=y
+# CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER is not set
+CONFIG_MIPS_CPC=y
+CONFIG_MIPS_CPS=y
+# CONFIG_MIPS_CPS_NS16550_BOOL is not set
+CONFIG_MIPS_CPS_PM=y
+CONFIG_MIPS_CPU_SCACHE=y
+CONFIG_MIPS_FP_SUPPORT=y
+CONFIG_MIPS_GENERIC=y
+CONFIG_MIPS_GIC=y
+CONFIG_MIPS_L1_CACHE_SHIFT=5
+CONFIG_MIPS_L1_CACHE_SHIFT_5=y
+CONFIG_MIPS_MT=y
+CONFIG_MIPS_MT_FPAFF=y
+CONFIG_MIPS_MT_SMP=y
+# CONFIG_MIPS_NO_APPENDED_DTB is not set
+CONFIG_MIPS_NR_CPU_NR_MAP=4
+CONFIG_MIPS_PERF_SHARED_TC_COUNTERS=y
+CONFIG_MIPS_RAW_APPENDED_DTB=y
+CONFIG_MIPS_SPRAM=y
+CONFIG_MMU_LAZY_TLB_REFCOUNT=y
+CONFIG_MODULES_USE_ELF_REL=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y
+CONFIG_MTD_SPLIT_FIT_FW=y
+CONFIG_MTD_SPLIT_UIMAGE_FW=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_NEED_SRCU_NMI_SAFE=y
+CONFIG_NET_DEVLINK=y
+CONFIG_NET_DSA=y
+CONFIG_NET_DSA_TAG_NONE=y
+CONFIG_NET_EGRESS=y
+CONFIG_NET_FLOW_LIMIT=y
+CONFIG_NET_INGRESS=y
+CONFIG_NET_PTP_CLASSIFY=y
+CONFIG_NET_SELFTESTS=y
+CONFIG_NET_XGRESS=y
+CONFIG_NLS=y
+CONFIG_NO_EXCEPT_FILL=y
+CONFIG_NO_GENERIC_PCI_IOPORT_MAP=y
+CONFIG_NO_HZ_COMMON=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NR_CPUS=4
+CONFIG_NVMEM=y
+CONFIG_NVMEM_LAYOUTS=y
+CONFIG_NVMEM_SYSFS=y
+CONFIG_OF=y
+CONFIG_OF_ADDRESS=y
+CONFIG_OF_EARLY_FLATTREE=y
+CONFIG_OF_FLATTREE=y
+CONFIG_OF_GPIO=y
+CONFIG_OF_IRQ=y
+CONFIG_OF_KOBJ=y
+CONFIG_OF_MDIO=y
+CONFIG_PADATA=y
+CONFIG_PAGE_POOL=y
+CONFIG_PAGE_SIZE_LESS_THAN_256KB=y
+CONFIG_PAGE_SIZE_LESS_THAN_64KB=y
+CONFIG_PCI_DRIVERS_LEGACY=y
+CONFIG_PCS_XPCS=y
+CONFIG_PERF_USE_VMALLOC=y
+CONFIG_PGTABLE_LEVELS=2
+CONFIG_PHYLIB=y
+CONFIG_PHYLIB_LEDS=y
+CONFIG_PHYLINK=y
+# CONFIG_PHY_SF19A2890_USB is not set
+# CONFIG_PHY_SF21_PCIE is not set
+# CONFIG_PHY_SF21_USB is not set
+CONFIG_PINCTRL=y
+CONFIG_PINCTRL_SF19A2890=y
+# CONFIG_PINCTRL_SINGLE is not set
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_PPS=y
+CONFIG_PREEMPT_NONE_BUILD=y
+CONFIG_PRINTK_TIME=y
+CONFIG_PROC_EVENTS=y
+CONFIG_PTP_1588_CLOCK=y
+CONFIG_PTP_1588_CLOCK_OPTIONAL=y
+CONFIG_QUEUED_RWLOCKS=y
+CONFIG_QUEUED_SPINLOCKS=y
+CONFIG_RANDSTRUCT_NONE=y
+CONFIG_RATIONAL=y
+CONFIG_REGMAP=y
+CONFIG_REGMAP_MMIO=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_RESET_CONTROLLER=y
+CONFIG_RESET_SF19A2890_PERIPH=y
+# CONFIG_RESET_SF21 is not set
+CONFIG_RFS_ACCEL=y
+CONFIG_RPS=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_INFO=y
+# CONFIG_SERIAL_8250 is not set
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+# CONFIG_SIFLOWER_PHY is not set
+CONFIG_SMP=y
+CONFIG_SMP_UP=y
+CONFIG_SOCK_RX_QUEUE_MAPPING=y
+CONFIG_SPI=y
+CONFIG_SPI_MASTER=y
+CONFIG_SPI_MEM=y
+CONFIG_SPI_PL022=y
+CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
+CONFIG_SRAM=y
+CONFIG_STACKPROTECTOR=y
+CONFIG_STMMAC_ETH=y
+CONFIG_STMMAC_PLATFORM=y
+CONFIG_SWPHY=y
+CONFIG_SYNC_R4K=y
+CONFIG_SYSCTL_EXCEPTION_TRACE=y
+CONFIG_SYS_HAS_CPU_MIPS32_R2=y
+CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
+CONFIG_SYS_SUPPORTS_ARBIT_HZ=y
+CONFIG_SYS_SUPPORTS_HOTPLUG_CPU=y
+CONFIG_SYS_SUPPORTS_LITTLE_ENDIAN=y
+CONFIG_SYS_SUPPORTS_MIPS16=y
+CONFIG_SYS_SUPPORTS_MIPS_CPS=y
+CONFIG_SYS_SUPPORTS_MULTITHREADING=y
+CONFIG_SYS_SUPPORTS_SCHED_SMT=y
+CONFIG_SYS_SUPPORTS_SMP=y
+CONFIG_TARGET_ISA_REV=2
+CONFIG_TICK_CPU_ACCOUNTING=y
+CONFIG_TIMER_OF=y
+CONFIG_TIMER_PROBE=y
+CONFIG_TREE_RCU=y
+CONFIG_TREE_SRCU=y
+CONFIG_USB_SUPPORT=y
+CONFIG_USE_OF=y
+CONFIG_WATCHDOG_CORE=y
+CONFIG_WEAK_ORDERING=y
+CONFIG_WERROR=y
+CONFIG_XPS=y

+ 286 - 0
target/linux/siflower/sf21/config-6.6

@@ -0,0 +1,286 @@
+CONFIG_64BIT=y
+CONFIG_ARCH_DMA_ADDR_T_64BIT=y
+CONFIG_ARCH_DMA_DEFAULT_COHERENT=y
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_ARCH_MMAP_RND_BITS=18
+CONFIG_ARCH_MMAP_RND_BITS_MAX=24
+CONFIG_ARCH_MMAP_RND_BITS_MIN=18
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=17
+CONFIG_ARCH_OPTIONAL_KERNEL_RWX=y
+CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT=y
+# CONFIG_ARCH_RV32I is not set
+CONFIG_ARCH_RV64I=y
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_ARCH_SIFLOWER=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_STACKWALK=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# CONFIG_ARCH_THEAD is not set
+CONFIG_ARCH_WANTS_THP_SWAP=y
+CONFIG_ARM_AMBA=y
+# CONFIG_AX45MP_L2_CACHE is not set
+CONFIG_BLK_MQ_PCI=y
+CONFIG_CC_HAVE_STACKPROTECTOR_TLS=y
+CONFIG_CLK_SF19A2890_PERIPH=y
+CONFIG_CLK_SF21_TOPCRM=y
+CONFIG_CLK_SIFLOWER=y
+CONFIG_CLONE_BACKWARDS=y
+CONFIG_CMDLINE="root=/dev/fit0"
+CONFIG_CMDLINE_FALLBACK=y
+CONFIG_CMODEL_MEDANY=y
+# CONFIG_CMODEL_MEDLOW is not set
+CONFIG_COMMON_CLK=y
+CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1
+# CONFIG_COMPAT_32BIT_TIME is not set
+CONFIG_CONFIGFS_FS=y
+CONFIG_CONTEXT_TRACKING=y
+CONFIG_CONTEXT_TRACKING_IDLE=y
+CONFIG_CPU_MITIGATIONS=y
+CONFIG_CPU_RMAP=y
+CONFIG_CRC16=y
+CONFIG_CRYPTO_HASH_INFO=y
+CONFIG_CRYPTO_HW=y
+CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y
+CONFIG_CRYPTO_LIB_GF128MUL=y
+CONFIG_CRYPTO_LIB_POLY1305_RSIZE=1
+CONFIG_CRYPTO_LIB_SHA1=y
+CONFIG_CRYPTO_LIB_UTILS=y
+# CONFIG_CRYPTO_PCRYPT is not set
+CONFIG_CRYPTO_ZSTD=y
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEVPORT is not set
+CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC=y
+CONFIG_DMA_DIRECT_REMAP=y
+CONFIG_DTC=y
+CONFIG_DW_WATCHDOG=y
+CONFIG_EDAC_SUPPORT=y
+# CONFIG_ERRATA_ANDES is not set
+# CONFIG_ERRATA_SIFIVE is not set
+# CONFIG_ERRATA_THEAD is not set
+CONFIG_EXCLUSIVE_SYSTEM_RAM=y
+CONFIG_FIXED_PHY=y
+CONFIG_FIX_EARLYCON_MEM=y
+CONFIG_FPU=y
+CONFIG_FS_IOMAP=y
+CONFIG_FUNCTION_ALIGNMENT=0
+CONFIG_FWNODE_MDIO=y
+CONFIG_FW_LOADER_PAGED_BUF=y
+CONFIG_FW_LOADER_SYSFS=y
+CONFIG_GCC_SUPPORTS_DYNAMIC_FTRACE=y
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_GENERIC_ARCH_TOPOLOGY=y
+CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_GENERIC_CSUM=y
+CONFIG_GENERIC_EARLY_IOREMAP=y
+CONFIG_GENERIC_ENTRY=y
+CONFIG_GENERIC_GETTIMEOFDAY=y
+CONFIG_GENERIC_IDLE_POLL_SETUP=y
+CONFIG_GENERIC_IOREMAP=y
+CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
+CONFIG_GENERIC_IRQ_IPI_MUX=y
+CONFIG_GENERIC_IRQ_MULTI_HANDLER=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_GENERIC_IRQ_SHOW_LEVEL=y
+CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y
+CONFIG_GENERIC_MSI_IRQ=y
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_GENERIC_PHY=y
+CONFIG_GENERIC_PINCONF=y
+CONFIG_GENERIC_PINCTRL_GROUPS=y
+CONFIG_GENERIC_PINMUX_FUNCTIONS=y
+CONFIG_GENERIC_SCHED_CLOCK=y
+CONFIG_GENERIC_SMP_IDLE_THREAD=y
+CONFIG_GENERIC_STRNCPY_FROM_USER=y
+CONFIG_GENERIC_STRNLEN_USER=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GPIOLIB_IRQCHIP=y
+CONFIG_GPIO_CDEV=y
+CONFIG_GPIO_SIFLOWER=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_HAS_DMA=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_IOPORT_MAP=y
+CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_IRQCHIP=y
+CONFIG_IRQ_DOMAIN=y
+CONFIG_IRQ_DOMAIN_HIERARCHY=y
+CONFIG_IRQ_FORCED_THREADING=y
+CONFIG_IRQ_STACKS=y
+CONFIG_IRQ_WORK=y
+CONFIG_JUMP_LABEL=y
+CONFIG_KCMP=y
+CONFIG_LED_TRIGGER_PHY=y
+CONFIG_LIBFDT=y
+CONFIG_LOCK_DEBUGGING_SUPPORT=y
+CONFIG_LOCK_SPIN_ON_OWNER=y
+CONFIG_MARVELL_PHY=y
+CONFIG_MDIO_BUS=y
+CONFIG_MDIO_DEVICE=y
+CONFIG_MDIO_DEVRES=y
+CONFIG_MFD_SYSCON=y
+CONFIG_MIGRATION=y
+CONFIG_MMIOWB=y
+CONFIG_MMU_LAZY_TLB_REFCOUNT=y
+CONFIG_MODULES_USE_ELF_RELA=y
+CONFIG_MODULE_SECTIONS=y
+CONFIG_MOTORCOMM_PHY=y
+# CONFIG_MTD_CFI is not set
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_NAND_CORE=y
+CONFIG_MTD_NAND_ECC=y
+CONFIG_MTD_SPI_NAND=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_MTD_SPI_NOR_USE_VARIABLE_ERASE=y
+CONFIG_MTD_SPLIT_FIT_FW=y
+# CONFIG_MTD_SPLIT_SQUASHFS_ROOT is not set
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_BEB_LIMIT=20
+CONFIG_MTD_UBI_BLOCK=y
+CONFIG_MTD_UBI_NVMEM=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_NET_EGRESS=y
+CONFIG_NET_FLOW_LIMIT=y
+CONFIG_NET_INGRESS=y
+CONFIG_NET_SELFTESTS=y
+CONFIG_NET_SIFLOWER_ETH_DMA=y
+CONFIG_NET_SIFLOWER_ETH_DPNS=y
+CONFIG_NET_SIFLOWER_ETH_USE_INTERNAL_SRAM=y
+CONFIG_NET_SIFLOWER_ETH_XGMAC=y
+CONFIG_NET_SIFLOWER_ETH_XPCS=y
+CONFIG_NET_VENDOR_SIFLOWER=y
+CONFIG_NET_XGRESS=y
+CONFIG_NONPORTABLE=y
+CONFIG_NO_HZ=y
+CONFIG_NO_HZ_COMMON=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NR_CPUS=4
+CONFIG_NVMEM=y
+CONFIG_NVMEM_LAYOUTS=y
+CONFIG_OF=y
+CONFIG_OF_ADDRESS=y
+CONFIG_OF_EARLY_FLATTREE=y
+CONFIG_OF_FLATTREE=y
+CONFIG_OF_GPIO=y
+CONFIG_OF_IRQ=y
+CONFIG_OF_KOBJ=y
+CONFIG_OF_MDIO=y
+CONFIG_PAGE_OFFSET=0xff60000000000000
+CONFIG_PAGE_POOL=y
+CONFIG_PAGE_POOL_STATS=y
+CONFIG_PAGE_SIZE_LESS_THAN_256KB=y
+CONFIG_PAGE_SIZE_LESS_THAN_64KB=y
+CONFIG_PCI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCIE_DW=y
+CONFIG_PCIE_DW_HOST=y
+CONFIG_PCIE_SF21=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_PCI_DOMAINS_GENERIC=y
+CONFIG_PCI_MSI=y
+CONFIG_PER_VMA_LOCK=y
+CONFIG_PGTABLE_LEVELS=5
+CONFIG_PHYLIB=y
+CONFIG_PHYLIB_LEDS=y
+CONFIG_PHYLINK=y
+CONFIG_PHYS_ADDR_T_64BIT=y
+# CONFIG_PHYS_RAM_BASE_FIXED is not set
+# CONFIG_PHY_SF19A2890_USB is not set
+CONFIG_PHY_SF21_PCIE=y
+# CONFIG_PHY_SF21_USB is not set
+CONFIG_PINCTRL=y
+CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_PREEMPT_NONE_BUILD=y
+CONFIG_PTP_1588_CLOCK_OPTIONAL=y
+CONFIG_QUEUED_RWLOCKS=y
+CONFIG_RANDSTRUCT_NONE=y
+CONFIG_RAS=y
+CONFIG_RATIONAL=y
+CONFIG_REGMAP=y
+CONFIG_REGMAP_MMIO=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_RESET_CONTROLLER=y
+CONFIG_RESET_SF19A2890_PERIPH=y
+CONFIG_RESET_SF21=y
+CONFIG_RFS_ACCEL=y
+CONFIG_RISCV=y
+CONFIG_RISCV_ALTERNATIVE=y
+# CONFIG_RISCV_BOOT_SPINWAIT is not set
+CONFIG_RISCV_DMA_NONCOHERENT=y
+CONFIG_RISCV_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_RISCV_INTC=y
+CONFIG_RISCV_ISA_C=y
+# CONFIG_RISCV_ISA_FALLBACK is not set
+CONFIG_RISCV_ISA_SVNAPOT=y
+CONFIG_RISCV_ISA_SVPBMT=y
+# CONFIG_RISCV_ISA_V is not set
+CONFIG_RISCV_ISA_ZBB=y
+CONFIG_RISCV_ISA_ZICBOM=y
+CONFIG_RISCV_ISA_ZICBOZ=y
+CONFIG_RISCV_SBI=y
+# CONFIG_RISCV_SBI_V01 is not set
+CONFIG_RISCV_TIMER=y
+CONFIG_RPS=y
+CONFIG_RWSEM_SPIN_ON_OWNER=y
+# CONFIG_SERIAL_8250 is not set
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SGL_ALLOC=y
+CONFIG_SIFIVE_PLIC=y
+CONFIG_SIFLOWER_PHY=y
+CONFIG_SMP=y
+CONFIG_SOCK_RX_QUEUE_MAPPING=y
+# CONFIG_SOC_MICROCHIP_POLARFIRE is not set
+# CONFIG_SOC_SIFIVE is not set
+# CONFIG_SOC_STARFIVE is not set
+# CONFIG_SOC_VIRT is not set
+CONFIG_SOFTIRQ_ON_OWN_STACK=y
+CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_SPI=y
+CONFIG_SPI_MASTER=y
+CONFIG_SPI_MEM=y
+CONFIG_SPI_SF21_QSPI=y
+# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set
+CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y
+CONFIG_SQUASHFS_DECOMP_SINGLE=y
+CONFIG_SRAM=y
+CONFIG_SWIOTLB=y
+CONFIG_SWPHY=y
+CONFIG_SYSCTL_EXCEPTION_TRACE=y
+CONFIG_THREAD_INFO_IN_TASK=y
+CONFIG_THREAD_SIZE_ORDER=2
+CONFIG_TICK_CPU_ACCOUNTING=y
+CONFIG_TIMER_OF=y
+CONFIG_TIMER_PROBE=y
+CONFIG_TOOLCHAIN_HAS_V=y
+CONFIG_TOOLCHAIN_HAS_ZBB=y
+CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE=y
+CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI=y
+CONFIG_TREE_RCU=y
+CONFIG_TREE_SRCU=y
+CONFIG_TUNE_GENERIC=y
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+# CONFIG_UBIFS_FS_LZO is not set
+# CONFIG_UBIFS_FS_ZLIB is not set
+CONFIG_UIMAGE_FIT_BLK=y
+CONFIG_USB_SUPPORT=y
+CONFIG_VMAP_STACK=y
+CONFIG_WATCHDOG_CORE=y
+CONFIG_XPS=y
+CONFIG_XXHASH=y
+CONFIG_ZONE_DMA32=y
+CONFIG_ZSTD_COMMON=y
+CONFIG_ZSTD_COMPRESS=y
+CONFIG_ZSTD_DECOMPRESS=y